BA
/
BaBoRo1
Embed:
(wiki syntax)
Show/hide line numbers
test_exporters.py
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""

from tools.utils import construct_enum, mkdir
from prettytable import PrettyTable
import os

# Enumeration of report formats this module can emit.
ResultExporterType = construct_enum(HTML='Html_Exporter',
                                    JUNIT='JUnit_Exporter',
                                    JUNIT_OPER='JUnit_Exporter_Interoperability',
                                    BUILD='Build_Exporter',
                                    TEXT='Text_Exporter',
                                    PRINT='Print_Exporter')


class ReportExporter():
    """ Class exports extended test result Python data structure to
        different formats like HTML, JUnit XML.

    Parameter 'test_result_ext' format:

    u'uARM': { u'LPC1768': { 'MBED_2': { 0: { 'copy_method': 'shutils.copy()',
                                              'duration': 20,
                                              'elapsed_time': 1.7929999828338623,
                                              'output': 'Host test instrumentation on ...\r\n',
                                              'result': 'OK',
                                              'target_name': u'LPC1768',
                                              'description': 'stdio',
                                              'id': u'MBED_2',
                                              'toolchain_name': u'uARM'}},
    """
    # Inline CSS used by the HTML exporter (tool-tip boxes and result cells).
    CSS_STYLE = """<style>
                   .name{
                    border: 1px solid;
                    border-radius: 25px;
                    width: 100px;
                   }
                   .tooltip{
                       position:absolute;
                       background-color: #F5DA81;
                       display:none;
                   }
                   </style>
                """

    # Inline JS used by the HTML exporter to show/hide tool-tips on hover.
    JAVASCRIPT = """
                 <script type="text/javascript">
                 function show (elem) {
                     elem.style.display = "block";
                 }
                 function hide (elem) {
                     elem.style.display = "";
                 }
                 </script>
                 """

    def __init__(self, result_exporter_type, package="test"):
        # One of ResultExporterType values; selects which exporter report() uses.
        self.result_exporter_type = result_exporter_type
        # Prefix used for JUnit test-case class names.
        self.package = package

    def report(self, test_summary_ext, test_suite_properties=None,
               print_log_for_failures=True):
        """ Invoke the exporter selected by 'result_exporter_type' set in
            the constructor and return its result (or None for unknown types).
        """
        if self.result_exporter_type == ResultExporterType.HTML:
            # HTML exporter
            return self.exporter_html(test_summary_ext, test_suite_properties)
        elif self.result_exporter_type == ResultExporterType.JUNIT:
            # JUNIT exporter for results from test suite
            return self.exporter_junit(test_summary_ext, test_suite_properties)
        elif self.result_exporter_type == ResultExporterType.JUNIT_OPER:
            # JUNIT exporter for interoperability test
            return self.exporter_junit_ioper(test_summary_ext, test_suite_properties)
        elif self.result_exporter_type == ResultExporterType.PRINT:
            # Console (stdout) exporter
            return self.exporter_print(test_summary_ext,
                                       print_log_for_failures=print_log_for_failures)
        elif self.result_exporter_type == ResultExporterType.TEXT:
            # Plain-text (pretty table) exporter
            return self.exporter_text(test_summary_ext)
        return None

    def report_to_file(self, test_summary_ext, file_name, test_suite_properties=None):
        """ Generate a report and store it in the specified file.
        """
        report = self.report(test_summary_ext,
                             test_suite_properties=test_suite_properties)
        self.write_to_file(report, file_name)

    def write_to_file(self, report, file_name):
        """ Write 'report' text to 'file_name', creating parent directories
            as needed. A None report is silently ignored.
        """
        if report is not None:
            dirname = os.path.dirname(file_name)
            if dirname:
                mkdir(dirname)
            with open(file_name, 'w') as f:
                f.write(report)

    def get_tooltip_name(self, toolchain, target, test_id, loop_no):
        """ Generate simple unique tool-tip name which can be used,
            for example, as an HTML <div> section id attribute.
        """
        return "target_test_%s_%s_%s_%s" % (toolchain.lower(),
                                            target.lower(),
                                            test_id.lower(),
                                            loop_no)

    def get_result_div_sections(self, test, test_no):
        """ Generate a <div> section containing one test result plus a
            hover tool-tip with the test's captured output.
        """
        # Cell background colour per result code; anything unknown maps to OTHER.
        RESULT_COLORS = {'OK': 'LimeGreen',
                         'FAIL': 'Orange',
                         'ERROR': 'LightCoral',
                         'OTHER': 'LightGray',
                        }

        tooltip_name = self.get_tooltip_name(test['toolchain_name'],
                                             test['target_name'],
                                             test['id'], test_no)
        background_color = RESULT_COLORS[test['result']
                                         if test['result'] in RESULT_COLORS
                                         else 'OTHER']
        result_div_style = "background-color: %s" % background_color

        result = """<div class="name" style="%s" onmouseover="show(%s)" onmouseout="hide(%s)">
                       <center>%s</center>
                       <div class = "tooltip" id= "%s">
                       <b>%s</b><br />
                       <hr />
                       <b>%s</b> in <b>%.2f sec</b><br />
                       <hr />
                       <small>
                       %s
                       </small>
                    </div>
                 </div>
                 """ % (result_div_style,
                        tooltip_name,
                        tooltip_name,
                        test['result'],
                        tooltip_name,
                        test['target_name_unique'],
                        test['description'],
                        test['elapsed_time'],
                        test['output'].replace('\n', '<br />'))
        return result

    def get_result_tree(self, test_results):
        """ If test was run in a loop (we got few results from the same test)
            we will show it in a column to see all results.
            This function produces an HTML table with corresponding results.
        """
        result = ''
        for i, test_result in enumerate(test_results):
            result += '<table>'
            test_ids = sorted(test_result.keys())
            for test_no in test_ids:
                test = test_result[test_no]
                result += """<tr>
                             <td valign="top">%s</td>
                             </tr>""" % self.get_result_div_sections(test, "%d_%d" % (test_no, i))
            result += '</table>'
        return result

    def get_all_unique_test_ids(self, test_result_ext):
        """ Get all unique test ids from all ran tests.
            We need this to create a complete list of all tests ran.
        """
        result = []
        targets = test_result_ext.keys()
        for target in targets:
            toolchains = test_result_ext[target].keys()
            for toolchain in toolchains:
                tests = test_result_ext[target][toolchain].keys()
                result.extend(tests)
        return sorted(list(set(result)))

    #
    # Exporters functions
    #

    def exporter_html(self, test_result_ext, test_suite_properties=None):
        """ Export test results in proprietary HTML format.
        """
        result = """<html>
                    <head>
                        <title>mbed SDK test suite test result report</title>
                        %s
                        %s
                    </head>
                    <body>
                 """ % (self.CSS_STYLE, self.JAVASCRIPT)

        unique_test_ids = self.get_all_unique_test_ids(test_result_ext)
        targets = sorted(test_result_ext.keys())
        result += '<table>'
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                result += '<tr>'
                result += '<td></td>'
                result += '<td></td>'

                tests = sorted(test_result_ext[target][toolchain].keys())
                # Header row lists every known test id so all rows align.
                for test in unique_test_ids:
                    result += """<td align="center">%s</td>""" % test
                result += """</tr>
                              <tr>
                              <td valign="center">%s</td>
                              <td valign="center"><b>%s</b></td>
                          """ % (toolchain, target)

                # Empty cell when this target/toolchain did not run a test id.
                for test in unique_test_ids:
                    test_result = self.get_result_tree(test_result_ext[target][toolchain][test]) if test in tests else ''
                    result += '<td>%s</td>' % (test_result)

                result += '</tr>'
        result += '</table>'
        result += '</body></html>'
        return result

    def exporter_junit_ioper(self, test_result_ext, test_suite_properties=None):
        """ Export interoperability test results in JUnit XML format.
            'test_result_ext' maps platform -> list of
            (result, name, scope, description) tuples.
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []

        for platform in sorted(test_result_ext.keys()):
            # {platform : ['Platform', 'Result', 'Scope', 'Description'])
            test_cases = []
            for tr_result in test_result_ext[platform]:
                result, name, scope, description = tr_result

                classname = 'test.ioper.%s.%s.%s' % (platform, name, scope)
                elapsed_sec = 0
                _stdout = description
                _stderr = ''
                # Test case
                tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                # Test case extra failure / error info
                if result == 'FAIL':
                    tc.add_failure_info(description, _stdout)
                elif result == 'ERROR':
                    tc.add_error_info(description, _stdout)
                elif result == 'SKIP' or result == 'NOT_SUPPORTED':
                    tc.add_skipped_info(description, _stdout)

                test_cases.append(tc)
            ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
            test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)

    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format.

            test_suite_properties may be None (the default); properties are
            attached to each suite only when the mapping is provided.
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result['description']
                            classname = '%s.%s.%s.%s' % (self.package, target, toolchain, test_result['id'])
                            elapsed_sec = test_result['elapsed_time']
                            _stdout = test_result['output']

                            if 'target_name_unique' in test_result:
                                _stderr = test_result['target_name_unique']
                            else:
                                _stderr = test_result['target_name']

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result['result']
                            if test_result['result'] == 'FAIL':
                                tc.add_failure_info(message, _stdout)
                            elif test_result['result'] == 'SKIP' or test_result["result"] == 'NOT_SUPPORTED':
                                tc.add_skipped_info(message, _stdout)
                            elif test_result['result'] != 'OK':
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                # Bug fix: original indexed test_suite_properties unconditionally,
                # crashing with TypeError when the parameter was left at its
                # default of None.
                if test_suite_properties is not None:
                    properties = test_suite_properties[target][toolchain]
                else:
                    properties = None
                ts = TestSuite("test.suite.%s.%s" % (target, toolchain),
                               test_cases, properties=properties)
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)

    def exporter_print_helper(self, array, print_log=False):
        """ Print one line per test run in 'array'; optionally dump each
            run's captured output indented beneath it.
        """
        for item in array:
            print("  * %s::%s::%s" % (item["target_name"],
                                      item["toolchain_name"],
                                      item["id"]))
            if print_log:
                log_lines = item["output"].split("\n")
                for log_line in log_lines:
                    print("        %s" % log_line)

    def exporter_print(self, test_result_ext, print_log_for_failures=False):
        """ Export test results to stdout, grouped into successes, skips
            and failures. Returns True when there were no failures.
        """
        failures = []
        skips = []
        successes = []

        targets = sorted(test_result_ext.keys())

        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_runs = test_result_ext[target][toolchain][test]
                    for test_runner in test_runs:
                        # Only the first run of each loop iteration is reported.
                        test_run = test_runner[0]

                        if "result" in test_run:
                            if test_run["result"] == "FAIL":
                                failures.append(test_run)
                            elif test_run["result"] == "SKIP" or test_run["result"] == "NOT_SUPPORTED":
                                skips.append(test_run)
                            elif test_run["result"] == "OK":
                                successes.append(test_run)
                            else:
                                raise Exception("Unhandled result type: %s" % (test_run["result"]))
                        else:
                            raise Exception("'test_run' did not have a 'result' value")

        if successes:
            print("\n\nBuild successes:")
            self.exporter_print_helper(successes)

        if skips:
            print("\n\nBuild skips:")
            self.exporter_print_helper(skips)

        if failures:
            print("\n\nBuild failures:")
            self.exporter_print_helper(failures, print_log=print_log_for_failures)
            return False
        else:
            return True

    def exporter_text(self, test_result_ext):
        """ Print well-formed summary with results (SQL table like);
            table shows target x test results matrix across.
        """
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                          "Elapsed Time", "Timeout"])
        pt.align["Result"] = "l"            # Left align
        pt.align["Target"] = "l"            # Left align
        pt.align["Toolchain"] = "l"         # Left align
        pt.align["Test ID"] = "l"           # Left align
        pt.align["Test Description"] = "l"  # Left align
        pt.padding_width = 1  # One space between column edges and contents (default)

        # Per-result-code counters; .get() below tolerates codes not listed
        # here (the original plain indexing raised KeyError on e.g. 'SKIP').
        result_dict = {"OK": 0,
                       "FAIL": 0,
                       "ERROR": 0,
                       "UNDEF": 0,
                       "IOERR_COPY": 0,
                       "IOERR_DISK": 0,
                       "IOERR_SERIAL": 0,
                       "TIMEOUT": 0,
                       "NO_IMAGE": 0,
                       "MBED_ASSERT": 0,
                       "BUILD_FAILED": 0,
                       "NOT_SUPPORTED": 0
                      }
        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            result_name = test_result['result']
                            result_dict[result_name] = result_dict.get(result_name, 0) + 1
                            pt.add_row([test_result['result'],
                                        test_result['target_name'],
                                        test_result['toolchain_name'],
                                        test_result['id'],
                                        test_result['description'],
                                        test_result['elapsed_time'],
                                        test_result['duration']])
        result = pt.get_string()
        result += "\n"

        # Print result count; only non-zero counters are shown.
        result += "Result: " + ' / '.join('%s %s' % (value, key)
                                          for key, value in result_dict.items()
                                          if value != 0)
        return result
Generated on Tue Jul 12 2022 12:22:24 by
