Brian Daniels / mbed-tools

Fork of mbed-tools by Morpheus

test_api.py

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2014 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 
00017 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
00018 """
00019 
00020 import os
00021 import re
00022 import sys
00023 import json
00024 import uuid
00025 import pprint
00026 import random
00027 import optparse
00028 import datetime
00029 import threading
00030 import ctypes
00031 from types import ListType
00032 from colorama import Fore, Back, Style
00033 from prettytable import PrettyTable
00034 
00035 from time import sleep, time
00036 from Queue import Queue, Empty
00037 from os.path import join, exists, basename
00038 from threading import Thread, Lock
00039 from subprocess import Popen, PIPE
00040 
00041 # Imports related to mbed build api
00042 from tools.tests import TESTS
00043 from tools.tests import TEST_MAP
00044 from tools.paths import BUILD_DIR
00045 from tools.paths import HOST_TESTS
00046 from tools.utils import ToolException
00047 from tools.utils import NotSupportedException
00048 from tools.utils import construct_enum
00049 from tools.targets import TARGET_MAP
00050 from tools.test_db import BaseDBAccess
00051 from tools.build_api import build_project, build_mbed_libs, build_lib
00052 from tools.build_api import get_target_supported_toolchains
00053 from tools.build_api import write_build_report
00054 from tools.build_api import prep_report
00055 from tools.build_api import prep_properties
00056 from tools.build_api import create_result
00057 from tools.build_api import add_result_to_report
00058 from tools.libraries import LIBRARIES, LIBRARY_MAP
00059 from tools.toolchains import TOOLCHAIN_BIN_PATH
00060 from tools.test_exporters import ReportExporter, ResultExporterType
00061 
00062 import tools.host_tests.host_tests_plugins as host_tests_plugins
00063 
00064 try:
00065     import mbed_lstools
00066     from tools.compliance.ioper_runner import get_available_oper_test_scopes
00067 except:
00068     pass
00069 
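# The guarded import above makes board auto-detection optional: mbed_lstools
# may simply be absent. Later code checks get_module_avail('mbed_lstools')
# before relying on it. A minimal sketch of that pattern (values illustrative):
#
#     if get_module_avail('mbed_lstools') and opts_auto_detect:
#         muts = get_autodetected_MUTS_list(platform_name_filter=['K64F'])
#     else:
#         muts = {}   # fall back to the MUTs file supplied by the user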
00070 
00071 class ProcessObserver(Thread):
00072     def __init__(self, proc):
00073         Thread.__init__(self)
00074         self.proc = proc
00075         self.queue = Queue()
00076         self.daemon = True
00077         self.active = True
00078         self.start()
00079 
00080     def run(self):
00081         while self.active:
00082             c = self.proc.stdout.read(1)
00083             self.queue.put(c)
00084 
00085     def stop(self):
00086         self.active = False
00087         try:
00088             self.proc.terminate()
00089         except Exception, _:
00090             pass
00091 
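# ProcessObserver forwards the child process' stdout, one character at a
# time, onto a thread-safe queue; run_host_test() below drains that queue.
# A minimal usage sketch (the command is illustrative only):
#
#     proc = Popen(["python", "some_host_test.py"], stdout=PIPE)
#     obs = ProcessObserver(proc)
#     try:
#         c = obs.queue.get(block=True, timeout=0.5)
#     except Empty:
#         c = None
#     obs.stop()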
00092 
00093 class SingleTestExecutor (threading.Thread):
00094     """ Example: running the single test class in a separate thread
00095     """
00096     def __init__(self, single_test):
00097         self.single_test  = single_test
00098         threading.Thread.__init__(self)
00099 
00100     def run(self):
00101         start = time()
00102         # Execute tests depending on options and filter applied
00103         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test .execute()
00104         elapsed_time = time() - start
00105 
00106         # Human readable summary
00107         if not self.single_test .opts_suppress_summary:
00108             # prints well-formed summary with results (SQL table like)
00109             print self.single_test .generate_test_summary(test_summary, shuffle_seed)
00110         if self.single_test .opts_test_x_toolchain_summary:
00111             # prints well-formed summary with results (SQL table like)
00112             # table shows test x toolchain test result matrix
00113             print self.single_test .generate_test_summary_by_target(test_summary, shuffle_seed)
00114         print "Completed in %.2f sec"% (elapsed_time)
00115 
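# A hedged usage sketch for SingleTestExecutor (the runner construction is
# abbreviated; MUTs and test_spec are assumed to be loaded already):
#
#     single_test = SingleTestRunner(_muts=MUTs, _test_spec=test_spec)
#     executor = SingleTestExecutor(single_test)
#     executor.start()
#     executor.join()   # the summary is printed from the worker thread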
00116 
00117 class SingleTestRunner (object):
00118     """ Object wrapper for single test run which may involve multiple MUTs
00119     """
00120     RE_DETECT_TESTCASE_RESULT = None
00121 
00122     # Return codes for test script
00123     TEST_RESULT_OK = "OK"
00124     TEST_RESULT_FAIL = "FAIL"
00125     TEST_RESULT_ERROR = "ERROR"
00126     TEST_RESULT_UNDEF = "UNDEF"
00127     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
00128     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
00129     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
00130     TEST_RESULT_TIMEOUT = "TIMEOUT"
00131     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
00132     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
00133     TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
00134     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
00135 
00136     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
00137     TEST_LOOPS_LIST = []    # We redefine no. of loops per test_id
00138     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
00139 
00140     muts = {} # MUTs descriptor (from external file)
00141     test_spec = {} # Test specification (from external file)
00142 
00143     # mbed test suite -> SingleTestRunner
00144     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
00145                            "failure" : TEST_RESULT_FAIL,
00146                            "error" : TEST_RESULT_ERROR,
00147                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
00148                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
00149                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
00150                            "timeout" : TEST_RESULT_TIMEOUT,
00151                            "no_image" : TEST_RESULT_NO_IMAGE,
00152                            "end" : TEST_RESULT_UNDEF,
00153                            "mbed_assert" : TEST_RESULT_MBED_ASSERT,
00154                            "build_failed" : TEST_RESULT_BUILD_FAILED,
00155                            "not_supproted" : TEST_RESULT_NOT_SUPPORTED
00156     }
00157 
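    # The keys of TEST_RESULT_MAPPING double as the tokens a host test prints
    # back over serial, e.g. a line containing "{success}". __init__ compiles
    # them into RE_DETECT_TESTCASE_RESULT, so classifying a line is a single
    # regex search (sketch; the token value is illustrative):
    #
    #     m = self.RE_DETECT_TESTCASE_RESULT.search("{success}")
    #     if m:
    #         result = self.TEST_RESULT_MAPPING[m.groups(0)[0]]   # -> "OK"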
00158     def __init__ (self,
00159                  _global_loops_count=1,
00160                  _test_loops_list=None,
00161                  _muts={},
00162                  _clean=False,
00163                  _opts_db_url=None,
00164                  _opts_log_file_name=None,
00165                  _opts_report_html_file_name=None,
00166                  _opts_report_junit_file_name=None,
00167                  _opts_report_build_file_name=None,
00168                  _opts_build_report={},
00169                  _opts_build_properties={},
00170                  _test_spec={},
00171                  _opts_goanna_for_mbed_sdk=None,
00172                  _opts_goanna_for_tests=None,
00173                  _opts_shuffle_test_order=False,
00174                  _opts_shuffle_test_seed=None,
00175                  _opts_test_by_names=None,
00176                  _opts_peripheral_by_names=None,
00177                  _opts_test_only_peripheral=False,
00178                  _opts_test_only_common=False,
00179                  _opts_verbose_skipped_tests=False,
00180                  _opts_verbose_test_result_only=False,
00181                  _opts_verbose=False,
00182                  _opts_firmware_global_name=None,
00183                  _opts_only_build_tests=False,
00184                  _opts_parallel_test_exec=False,
00185                  _opts_suppress_summary=False,
00186                  _opts_test_x_toolchain_summary=False,
00187                  _opts_copy_method=None,
00188                  _opts_mut_reset_type=None,
00189                  _opts_jobs=None,
00190                  _opts_waterfall_test=None,
00191                  _opts_consolidate_waterfall_test=None,
00192                  _opts_extend_test_timeout=None,
00193                  _opts_auto_detect=None,
00194                  _opts_include_non_automated=False):
00195         """ Let's try hard to init this object
00196         """
00197         from colorama import init
00198         init()
00199 
00200         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING .keys()) + ")\\}"
00201         self.RE_DETECT_TESTCASE_RESULT  = re.compile(PATTERN)
00202         # Settings related to test loops counters
00203         try:
00204             _global_loops_count = int(_global_loops_count)
00205         except:
00206             _global_loops_count = 1
00207         if _global_loops_count < 1:
00208             _global_loops_count = 1
00209         self.GLOBAL_LOOPS_COUNT  = _global_loops_count
00210         self.TEST_LOOPS_LIST  = _test_loops_list if _test_loops_list else []
00211         self.TEST_LOOPS_DICT  = self.test_loop_list_to_dict (_test_loops_list)
00212 
00213         self.shuffle_random_seed  = 0.0
00214         self.SHUFFLE_SEED_ROUND  = 10
00215 
00216         # MUT list and test specification storage
00217         self.muts  = _muts
00218         self.test_spec  = _test_spec
00219 
00220         # Settings passed e.g. from command line
00221         self.opts_db_url  = _opts_db_url
00222         self.opts_log_file_name  = _opts_log_file_name
00223         self.opts_report_html_file_name  = _opts_report_html_file_name
00224         self.opts_report_junit_file_name  = _opts_report_junit_file_name
00225         self.opts_report_build_file_name  = _opts_report_build_file_name
00226         self.opts_goanna_for_mbed_sdk  = _opts_goanna_for_mbed_sdk
00227         self.opts_goanna_for_tests  = _opts_goanna_for_tests
00228         self.opts_shuffle_test_order  = _opts_shuffle_test_order
00229         self.opts_shuffle_test_seed  = _opts_shuffle_test_seed
00230         self.opts_test_by_names  = _opts_test_by_names
00231         self.opts_peripheral_by_names  = _opts_peripheral_by_names
00232         self.opts_test_only_peripheral  = _opts_test_only_peripheral
00233         self.opts_test_only_common  = _opts_test_only_common
00234         self.opts_verbose_skipped_tests  = _opts_verbose_skipped_tests
00235         self.opts_verbose_test_result_only  = _opts_verbose_test_result_only
00236         self.opts_verbose  = _opts_verbose
00237         self.opts_firmware_global_name  = _opts_firmware_global_name
00238         self.opts_only_build_tests  = _opts_only_build_tests
00239         self.opts_parallel_test_exec  = _opts_parallel_test_exec
00240         self.opts_suppress_summary  = _opts_suppress_summary
00241         self.opts_test_x_toolchain_summary  = _opts_test_x_toolchain_summary
00242         self.opts_copy_method  = _opts_copy_method
00243         self.opts_mut_reset_type  = _opts_mut_reset_type
00244         self.opts_jobs  = _opts_jobs if _opts_jobs is not None else 1
00245         self.opts_waterfall_test  = _opts_waterfall_test
00246         self.opts_consolidate_waterfall_test  = _opts_consolidate_waterfall_test
00247         self.opts_extend_test_timeout  = _opts_extend_test_timeout
00248         self.opts_clean  = _clean
00249         self.opts_auto_detect  = _opts_auto_detect
00250         self.opts_include_non_automated  = _opts_include_non_automated
00251 
00252         self.build_report  = _opts_build_report
00253         self.build_properties  = _opts_build_properties
00254 
00255         # File / screen logger initialization
00256         self.logger  = CLITestLogger(file_name=self.opts_log_file_name )  # Default test logger
00257 
00258         # Database related initializations
00259         self.db_logger  = factory_db_logger(self.opts_db_url )
00260         self.db_logger_build_id  = None # Build ID (database index of build_id table)
00261         # Let's connect to database to set up credentials and confirm database is ready
00262         if self.db_logger :
00263             self.db_logger .connect_url(self.opts_db_url ) # Save db access info inside db_logger object
00264             if self.db_logger .is_connected():
00265                 # Get hostname and uname so we can use it as build description
00266                 # when creating new build_id in external database
00267                 (_hostname, _uname) = self.db_logger .get_hostname()
00268                 _host_location = os.path.dirname(os.path.abspath(__file__))
00269                 build_id_type = None if self.opts_only_build_tests  is None else self.db_logger .BUILD_ID_TYPE_BUILD_ONLY
00270                 self.db_logger_build_id  = self.db_logger .get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
00271                 self.db_logger .disconnect()
00272 
00273     def dump_options (self):
00274         """ Returns a data structure with the common settings passed to SingleTestRunner.
00275             It can be used, for example, to fill the _extra fields in a database storing test suite single-run data
00276             Example:
00277             data = self.dump_options()
00278             or
00279             data_str = json.dumps(self.dump_options())
00280         """
00281         result = {"db_url" : str(self.opts_db_url ),
00282                   "log_file_name" :  str(self.opts_log_file_name ),
00283                   "shuffle_test_order" : str(self.opts_shuffle_test_order ),
00284                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed ),
00285                   "test_by_names" :  str(self.opts_test_by_names ),
00286                   "peripheral_by_names" : str(self.opts_peripheral_by_names ),
00287                   "test_only_peripheral" :  str(self.opts_test_only_peripheral ),
00288                   "test_only_common" :  str(self.opts_test_only_common ),
00289                   "verbose" :  str(self.opts_verbose ),
00290                   "firmware_global_name" :  str(self.opts_firmware_global_name ),
00291                   "only_build_tests" :  str(self.opts_only_build_tests ),
00292                   "copy_method" :  str(self.opts_copy_method ),
00293                   "mut_reset_type" :  str(self.opts_mut_reset_type ),
00294                   "jobs" :  str(self.opts_jobs ),
00295                   "extend_test_timeout" :  str(self.opts_extend_test_timeout ),
00296                   "_dummy" : ''
00297         }
00298         return result
00299 
00300     def shuffle_random_func(self):
00301         return self.shuffle_random_seed 
00302 
00303     def is_shuffle_seed_float (self):
00304         """ Return True if the shuffle seed can be converted to float
00305         """
00306         result = True
00307         try:
00308             float(self.shuffle_random_seed )
00309         except ValueError:
00310             result = False
00311         return result
00312 
00313     # This will store target / toolchain specific properties
00314     test_suite_properties_ext = {}  # target : toolchain
00315     # Here we store test results
00316     test_summary = []
00317     # Here we store test results in extended data structure
00318     test_summary_ext = {}
00319     execute_thread_slice_lock = Lock()
00320 
00321     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
00322         for toolchain in toolchains:
00323             tt_id = "%s::%s" % (toolchain, target)
00324 
00325             T = TARGET_MAP[target]
00326 
00327             # print target, toolchain
00328             # Test suite properties returned to external tools like CI
00329             test_suite_properties = {
00330                 'jobs': self.opts_jobs ,
00331                 'clean': clean,
00332                 'target': target,
00333                 'vendor': T.extra_labels[0],
00334                 'test_ids': ', '.join(test_ids),
00335                 'toolchain': toolchain,
00336                 'shuffle_random_seed': self.shuffle_random_seed 
00337             }
00338 
00339 
00340             # print '=== %s::%s ===' % (target, toolchain)
00341             # Let's build our test
00342             if target not in TARGET_MAP:
00343                 print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
00344                 continue
00345 
00346             build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk  else None
00347             clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk  or clean or self.opts_clean  else None
00348 
00349 
00350             try:
00351                 build_mbed_libs_result = build_mbed_libs(T,
00352                                                          toolchain,
00353                                                          options=build_mbed_libs_options,
00354                                                          clean=clean_mbed_libs_options,
00355                                                          verbose=self.opts_verbose ,
00356                                                          jobs=self.opts_jobs ,
00357                                                          report=build_report,
00358                                                          properties=build_properties)
00359 
00360                 if not build_mbed_libs_result:
00361                     print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
00362                     continue
00363 
00364             except ToolException:
00365                 print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
00366                 continue
00367 
00368             build_dir = join(BUILD_DIR, "test", target, toolchain)
00369 
00370             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
00371             test_suite_properties['build_dir'] = build_dir
00372             test_suite_properties['skipped'] = []
00373 
00374             # Enumerate through all tests and shuffle test order if requested
00375             test_map_keys = sorted(TEST_MAP.keys())
00376 
00377             if self.opts_shuffle_test_order :
00378                 random.shuffle(test_map_keys, self.shuffle_random_func )
00379                 # Update database with shuffle seed if applicable
00380                 if self.db_logger :
00381                     self.db_logger .reconnect();
00382                     if self.db_logger .is_connected():
00383                         self.db_logger .update_build_id_info(self.db_logger_build_id , _shuffle_seed=self.shuffle_random_func ())
00384                         self.db_logger .disconnect();
00385 
00386             if self.db_logger :
00387                 self.db_logger .reconnect();
00388                 if self.db_logger .is_connected():
00389                     # Update MUTs and Test Specification in database
00390                     self.db_logger .update_build_id_info(self.db_logger_build_id , _muts=self.muts , _test_spec=self.test_spec )
00391                     # Update Extra information in database (some options passed to test suite)
00392                     self.db_logger .update_build_id_info(self.db_logger_build_id , _extra=json.dumps(self.dump_options ()))
00393                     self.db_logger .disconnect();
00394 
00395             valid_test_map_keys = self.get_valid_tests (test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated )
00396             skipped_test_map_keys = self.get_skipped_tests (test_map_keys, valid_test_map_keys)
00397 
00398             for skipped_test_id in skipped_test_map_keys:
00399                 test_suite_properties['skipped'].append(skipped_test_id)
00400 
00401 
00402             # First pass through all tests and determine which libraries need to be built
00403             libraries = []
00404             for test_id in valid_test_map_keys:
00405                 test = TEST_MAP[test_id]
00406 
00407                 # Detect which lib should be added to test
00408                 # Some libs have to be compiled, like RTOS or ETH
00409                 for lib in LIBRARIES:
00410                     if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
00411                         libraries.append(lib['id'])
00412 
00413 
00414             build_project_options = ["analyze"] if self.opts_goanna_for_tests  else None
00415             clean_project_options = True if self.opts_goanna_for_tests  or clean or self.opts_clean  else None
00416 
00417             # Build all required libraries
00418             for lib_id in libraries:
00419                 try:
00420                     build_lib(lib_id,
00421                               T,
00422                               toolchain,
00423                               options=build_project_options,
00424                               verbose=self.opts_verbose ,
00425                               clean=clean_mbed_libs_options,
00426                               jobs=self.opts_jobs ,
00427                               report=build_report,
00428                               properties=build_properties)
00429 
00430                 except ToolException:
00431                     print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building library %s'% (lib_id))
00432                     continue
00433 
00434 
00435             for test_id in valid_test_map_keys:
00436                 test = TEST_MAP[test_id]
00437 
00438                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
00439 
00440                 # TODO: move the 2 loops below to a separate function
00441                 INC_DIRS = []
00442                 for lib_id in libraries:
00443                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
00444                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
00445 
00446                 MACROS = []
00447                 for lib_id in libraries:
00448                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
00449                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
00450                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
00451                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
00452                 test_uuid = uuid.uuid4()
00453                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
00454 
00455                 # Prepare extended test results data structure (it can be used to generate detailed test report)
00456                 if target not in self.test_summary_ext :
00457                     self.test_summary_ext [target] = {}  # test_summary_ext : toolchain
00458                 if toolchain not in self.test_summary_ext [target]:
00459                     self.test_summary_ext [target][toolchain] = {}    # test_summary_ext : toolchain : target
00460 
00461                 tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)    # For logging only
00462 
00463                 project_name = self.opts_firmware_global_name  if self.opts_firmware_global_name  else None
00464                 try:
00465                     path = build_project(test.source_dir,
00466                                      join(build_dir, test_id),
00467                                      T,
00468                                      toolchain,
00469                                      test.dependencies,
00470                                      options=build_project_options,
00471                                      clean=clean_project_options,
00472                                      verbose=self.opts_verbose ,
00473                                      name=project_name,
00474                                      macros=MACROS,
00475                                      inc_dirs=INC_DIRS,
00476                                      jobs=self.opts_jobs ,
00477                                      report=build_report,
00478                                      properties=build_properties,
00479                                      project_id=test_id,
00480                                      project_description=test.get_description())
00481 
00482                 except Exception, e:
00483                     project_name_str = project_name if project_name is not None else test_id
00484 
00485 
00486                     test_result = self.TEST_RESULT_FAIL 
00487 
00488                     if isinstance(e, ToolException):
00489                         print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
00490                         test_result = self.TEST_RESULT_BUILD_FAILED 
00491                     elif isinstance(e, NotSupportedException):
00492                         print self.logger .log_line(self.logger .LogType.INFO, 'The project %s is not supported'% (project_name_str))
00493                         test_result = self.TEST_RESULT_NOT_SUPPORTED 
00494 
00495 
00496                     # Append test results to global test summary
00497                     self.test_summary .append(
00498                         (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
00499                     )
00500 
00501                     # Add detailed test result to test summary structure
00502                     if test_id not in self.test_summary_ext [target][toolchain]:
00503                         self.test_summary_ext [target][toolchain][test_id] = []
00504 
00505                     self.test_summary_ext [target][toolchain][test_id].append({ 0: {
00506                         'result' : test_result,
00507                         'output' : '',
00508                         'target_name' : target,
00509                         'target_name_unique': target,
00510                         'toolchain_name' : toolchain,
00511                         'id' : test_id,
00512                         'description' : test.get_description(),
00513                         'elapsed_time' : 0,
00514                         'duration' : 0,
00515                         'copy_method' : None
00516                     }})
00517                     continue
00518 
00519                 if self.opts_only_build_tests :
00520                     # With this option we are skipping testing phase
00521                     continue
00522 
00523                 # Test duration can be increased by global value
00524                 test_duration = test.duration
00525                 if self.opts_extend_test_timeout  is not None:
00526                     test_duration += self.opts_extend_test_timeout 
00527 
00528                 # For an automated test the duration acts as a timeout after
00529                 # which the test gets interrupted
00530                 test_spec = self.shape_test_request (target, path, test_id, test_duration)
00531                 test_loops = self.get_test_loop_count (test_id)
00532 
00533                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
00534                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
00535                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
00536 
00537                 # read MUTs, test specification and perform tests
00538                 handle_results = self.handle (test_spec, target, toolchain, test_loops=test_loops)
00539 
00540                 if handle_results is None:
00541                     continue
00542 
00543                 for handle_result in handle_results:
00544                     if handle_result:
00545                         single_test_result, detailed_test_results = handle_result
00546                     else:
00547                         continue
00548 
00549                     # Append test results to global test summary
00550                     if single_test_result is not None:
00551                         self.test_summary .append(single_test_result)
00552 
00553                     # Add detailed test result to test summary structure
00554                     if target not in self.test_summary_ext [target][toolchain]:
00555                         if test_id not in self.test_summary_ext [target][toolchain]:
00556                             self.test_summary_ext [target][toolchain][test_id] = []
00557 
00558                         append_test_result = detailed_test_results
00559 
00560                         # If waterfall and consolidate-waterfall options are enabled,
00561                         # only include the last test result in the report.
00562                         if self.opts_waterfall_test  and self.opts_consolidate_waterfall_test :
00563                             append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
00564 
00565                         self.test_summary_ext [target][toolchain][test_id].append(append_test_result)
00566 
00567             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
00568             self.test_suite_properties_ext [target][toolchain] = test_suite_properties
00569 
00570         q.put(target + '_'.join(toolchains))
00571         return
00572 
00573     def execute(self):
00574         clean = self.test_spec .get('clean', False)
00575         test_ids = self.test_spec .get('test_ids', [])
00576         q = Queue()
00577 
00578         # Generate a seed for shuffle if one is not provided as an option
00579         self.shuffle_random_seed  = round(random.random(), self.SHUFFLE_SEED_ROUND )
00580         if self.opts_shuffle_test_seed  is not None and self.is_shuffle_seed_float ():
00581             self.shuffle_random_seed  = round(float(self.opts_shuffle_test_seed ), self.SHUFFLE_SEED_ROUND )
00582 
00583 
00584         if self.opts_parallel_test_exec :
00585             ###################################################################
00586             # Experimental, parallel test execution per singletest instance.
00587             ###################################################################
00588             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
00589             # Note: We are building here in parallel for each target separately!
00590             # So we are not building the same thing multiple times and compilers
00591             # in separate threads do not collide.
00592             # Inside the execute_thread_slice() function, handle() will be called to
00593             # get information about available MUTs (per target).
00594             for target, toolchains in self.test_spec ['targets'].iteritems():
00595                 self.test_suite_properties_ext [target] = {}
00596                 t = threading.Thread(target=self.execute_thread_slice , args = (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties ))
00597                 t.daemon = True
00598                 t.start()
00599                 execute_threads.append(t)
00600 
00601             for t in execute_threads:
00602                 q.get() # Use the queue instead of t.join(): join() would force waiting for threads in a fixed order
00603         else:
00604             # Serialized (not parallel) test execution
00605             for target, toolchains in self.test_spec ['targets'].iteritems():
00606                 if target not in self.test_suite_properties_ext :
00607                     self.test_suite_properties_ext [target] = {}
00608 
00609                 self.execute_thread_slice (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties )
00610                 q.get()
00611 
00612         if self.db_logger :
00613             self.db_logger .reconnect();
00614             if self.db_logger .is_connected():
00615                 self.db_logger .update_build_id_info(self.db_logger_build_id , _status_fk=self.db_logger .BUILD_ID_STATUS_COMPLETED)
00616                 self.db_logger .disconnect();
00617 
00618         return self.test_summary , self.shuffle_random_seed , self.test_summary_ext , self.test_suite_properties_ext , self.build_report , self.build_properties 
00619 
00620     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
00621         valid_test_map_keys = []
00622 
00623         for test_id in test_map_keys:
00624             test = TEST_MAP[test_id]
00625             if self.opts_test_by_names  and test_id not in self.opts_test_by_names .split(','):
00626                 continue
00627 
00628             if test_ids and test_id not in test_ids:
00629                 continue
00630 
00631             if self.opts_test_only_peripheral  and not test.peripherals:
00632                 if self.opts_verbose_skipped_tests :
00633                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00634                 continue
00635 
00636             if self.opts_peripheral_by_names  and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names .split(',')]):
00637                 # We will skip tests not forced with -p option
00638                 if self.opts_verbose_skipped_tests :
00639                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00640                 continue
00641 
00642             if self.opts_test_only_common  and test.peripherals:
00643                 if self.opts_verbose_skipped_tests :
00644                     print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral test skipped for target %s'% (target))
00645                 continue
00646 
00647             if not include_non_automated and not test.automated:
00648                 if self.opts_verbose_skipped_tests :
00649                     print self.logger .log_line(self.logger .LogType.INFO, 'Non automated test skipped for target %s'% (target))
00650                 continue
00651 
00652             if test.is_supported(target, toolchain):
00653                 if test.peripherals is None and self.opts_only_build_tests :
00654                     # When the user passes the 'build only' flag and the test does not
00655                     # specify peripherals, we allow the test build by default
00656                     pass
00657                 elif self.opts_peripheral_by_names  and test_id not in self.opts_peripheral_by_names .split(','):
00658                     # If we force a peripheral with option -p we let the test
00659                     # through even if the peripheral is not in the MUTs file.
00660                     pass
00661                 elif not self.is_peripherals_available (target, test.peripherals):
00662                     if self.opts_verbose_skipped_tests :
00663                         if test.peripherals:
00664                             print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
00665                         else:
00666                             print self.logger .log_line(self.logger .LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
00667                     continue
00668 
00669                 # The test has made it through all the filters, so add it to the valid tests list
00670                 valid_test_map_keys.append(test_id)
00671 
00672         return valid_test_map_keys
00673 
00674     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
00675         # NOTE: This will not preserve order
00676         return list(set(all_test_map_keys) - set(valid_test_map_keys))
00677 
00678     def generate_test_summary_by_target (self, test_summary, shuffle_seed=None):
00679         """ Prints well-formed summary with results (SQL table like)
00680             table shows a test x toolchain test result matrix
00681         """
00682         RESULT_INDEX = 0
00683         TARGET_INDEX = 1
00684         TOOLCHAIN_INDEX = 2
00685         TEST_INDEX = 3
00686         DESC_INDEX = 4
00687 
00688         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
00689         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
00690         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
00691         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
00692 
00693         result = "Test summary:\n"
00694         for target in unique_targets:
00695             result_dict = {} # test : { toolchain : result }
00696             unique_target_toolchains = []
00697             for test in test_summary:
00698                 if test[TARGET_INDEX] == target:
00699                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
00700                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
00701                     if test[TEST_INDEX] not in result_dict:
00702                         result_dict[test[TEST_INDEX]] = {}
00703                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
00704 
00705             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
00706             pt = PrettyTable(pt_cols)
00707             for col in pt_cols:
00708                 pt.align[col] = "l"
00709             pt.padding_width = 1 # One space between column edges and contents (default)
00710 
00711             for test in unique_tests:
00712                 if test in result_dict:
00713                     test_results = result_dict[test]
00714                     if test in unique_test_desc:
00715                         row = [target, test, unique_test_desc[test]]
00716                         for toolchain in unique_toolchains:
00717                             if toolchain in test_results:
00718                                 row.append(test_results[toolchain])
00719                         pt.add_row(row)
00720             result += pt.get_string()
00721             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND ,
00722                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00723             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00724         return result
00725 
00726     def generate_test_summary (self, test_summary, shuffle_seed=None):
00727         """ Prints well-formed summary with results (SQL table like)
00728             table shows a target x test result matrix
00729         """
00730         success_code = 0    # Success code that can be later returned to the caller
00731         result = "Test summary:\n"
00732         # Pretty table package is used to print results
00733         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
00734                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
00735         pt.align["Result"] = "l" # Left align
00736         pt.align["Target"] = "l" # Left align
00737         pt.align["Toolchain"] = "l" # Left align
00738         pt.align["Test ID"] = "l" # Left align
00739         pt.align["Test Description"] = "l" # Left align
00740         pt.padding_width = 1 # One space between column edges and contents (default)
00741 
00742         result_dict = {self.TEST_RESULT_OK  : 0,
00743                        self.TEST_RESULT_FAIL  : 0,
00744                        self.TEST_RESULT_ERROR  : 0,
00745                        self.TEST_RESULT_UNDEF  : 0,
00746                        self.TEST_RESULT_IOERR_COPY  : 0,
00747                        self.TEST_RESULT_IOERR_DISK  : 0,
00748                        self.TEST_RESULT_IOERR_SERIAL  : 0,
00749                        self.TEST_RESULT_NO_IMAGE  : 0,
00750                        self.TEST_RESULT_TIMEOUT  : 0,
00751                        self.TEST_RESULT_MBED_ASSERT  : 0,
00752                        self.TEST_RESULT_BUILD_FAILED  : 0,
00753                        self.TEST_RESULT_NOT_SUPPORTED  : 0
00754         }
00755 
00756         for test in test_summary:
00757             if test[0] in result_dict:
00758                 result_dict[test[0]] += 1
00759             pt.add_row(test)
00760         result += pt.get_string()
00761         result += "\n"
00762 
00763         # Print result count
00764         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
00765         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND ,
00766                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00767         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00768         return result
00769 
00770     def test_loop_list_to_dict (self, test_loops_str):
00771         """ Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
00772         """
00773         result = {}
00774         if test_loops_str:
00775             test_loops = test_loops_str.split(',')
00776             for test_loop in test_loops:
00777                 test_loop_count = test_loop.split('=')
00778                 if len(test_loop_count) == 2:
00779                     _test_id, _test_loops = test_loop_count
00780                     try:
00781                         _test_loops = int(_test_loops)
00782                     except:
00783                         continue
00784                     result[_test_id] = _test_loops
00785         return result
00786 
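    # Expected option format for test_loop_list_to_dict(), sketched with
    # illustrative test ids:
    #
    #     self.test_loop_list_to_dict("MBED_A1=2,MBED_A2=5")
    #     # -> {'MBED_A1': 2, 'MBED_A2': 5}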
00787     def get_test_loop_count (self, test_id):
00788         """ This function returns the no. of loops per test (deduced from test_id).
00789             If the test is not in the list of redefined loop counts, the default value is used.
00790         """
00791         result = self.GLOBAL_LOOPS_COUNT 
00792         if test_id in self.TEST_LOOPS_DICT :
00793             result = self.TEST_LOOPS_DICT [test_id]
00794         return result
00795 
00796     def delete_file (self, file_path):
00797         """ Remove file from the system
00798         """
00799         result = True
00800         result_msg = ""
00801         try:
00802             os.remove(file_path)
00803         except Exception, e:
00804             result_msg = e
00805             result = False
00806         return result, result_msg
00807 
00808     def handle_mut (self, mut, data, target_name, toolchain_name, test_loops=1):
00809         """ Test is being invoked for given MUT.
00810         """
00811         # Get test information, image and test timeout
00812         test_id = data['test_id']
00813         test = TEST_MAP[test_id]
00814         test_description = TEST_MAP[test_id].get_description()
00815         image = data["image"]
00816         duration = data.get("duration", 10)
00817 
00818         if mut is None:
00819             print "Error: No Mbed available: MUT[%s]" % data['mcu']
00820             return None
00821 
00822         mcu = mut['mcu']
00823         copy_method = mut.get('copy_method')        # Available board configuration selection e.g. core selection etc.
00824 
00825         if self.db_logger :
00826             self.db_logger .reconnect()
00827 
00828         selected_copy_method = self.opts_copy_method  if copy_method is None else copy_method
00829 
00830         # Tests can be looped so test results must be stored for the same test
00831         test_all_result = []
00832         # Test results for one test ran few times
00833         detailed_test_results = {}  # { Loop_number: { results ... } }
00834 
00835         for test_index in range(test_loops):
00836 
00837             # If mbedls is available and we are auto detecting MUT info,
00838             # update MUT info (mounting may have changed)
00839             if get_module_avail('mbed_lstools') and self.opts_auto_detect :
00840                 platform_name_filter = [mcu]
00841                 muts_list = {}
00842                 found = False
00843 
00844                 for i in range(0, 60):
00845                     print('Looking for %s with MBEDLS' % mcu)
00846                     muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
00847 
00848                     if 1 not in muts_list:
00849                         sleep(3)
00850                     else:
00851                         found = True
00852                         break
00853 
00854                 if not found:
00855                     print "Error: mbed not found with MBEDLS: %s" % data['mcu']
00856                     return None
00857                 else:
00858                     mut = muts_list[1]
00859 
00860             disk = mut.get('disk')
00861             port = mut.get('port')
00862 
00863             if disk is None or port is None:
00864                 return None
00865 
00866             target_by_mcu = TARGET_MAP[mut['mcu']]
00867             target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
00868             # Some extra stuff can be declared in MUTs structure
00869             reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
00870             reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
00871 
00872             # When the build and test system were separate, this was relative to a
00873             # base network folder path: join(NETWORK_BASE_PATH, )
00874             image_path = image
00875 
00876             # Host test execution
00877             start_host_exec_time = time()
00878 
00879             single_test_result = self.TEST_RESULT_UNDEF  # single test run result
00880             _copy_method = selected_copy_method
00881 
00882             if not exists(image_path):
00883                 single_test_result = self.TEST_RESULT_NO_IMAGE 
00884                 elapsed_time = 0
00885                 single_test_output = self.logger .log_line(self.logger .LogType.ERROR, 'Image file does not exist: %s'% image_path)
00886                 print single_test_output
00887             else:
00888                 # Host test execution
00889                 start_host_exec_time = time()
00890 
00891                 host_test_verbose = self.opts_verbose_test_result_only  or self.opts_verbose 
00892                 host_test_reset = self.opts_mut_reset_type  if reset_type is None else reset_type
00893                 host_test_result = self.run_host_test (test.host_test,
00894                                                       image_path, disk, port, duration,
00895                                                       micro=target_name,
00896                                                       verbose=host_test_verbose,
00897                                                       reset=host_test_reset,
00898                                                       reset_tout=reset_tout,
00899                                                       copy_method=selected_copy_method,
00900                                                       program_cycle_s=target_by_mcu.program_cycle_s())
00901                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
00902 
00903             # Store test result
00904             test_all_result.append(single_test_result)
00905             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
00906             elapsed_time = single_testduration  # Time of single test case execution after reset
00907 
00908             detailed_test_results[test_index] = {
00909                 'result' : single_test_result,
00910                 'output' : single_test_output,
00911                 'target_name' : target_name,
00912                 'target_name_unique' : target_name_unique,
00913                 'toolchain_name' : toolchain_name,
00914                 'id' : test_id,
00915                 'description' : test_description,
00916                 'elapsed_time' : round(elapsed_time, 2),
00917                 'duration' : single_timeout,
00918                 'copy_method' : _copy_method,
00919             }
00920 
00921             print self.print_test_result (single_test_result, target_name_unique, toolchain_name,
00922                                          test_id, test_description, elapsed_time, single_timeout)
00923 
00924             # Update database entries for ongoing test
00925             if self.db_logger  and self.db_logger .is_connected():
00926                 test_type = 'SingleTest'
00927                 self.db_logger .insert_test_entry(self.db_logger_build_id ,
00928                                                  target_name,
00929                                                  toolchain_name,
00930                                                  test_type,
00931                                                  test_id,
00932                                                  single_test_result,
00933                                                  single_test_output,
00934                                                  elapsed_time,
00935                                                  single_timeout,
00936                                                  test_index)
00937 
00938             # If we perform a waterfall test we keep testing until we get OK, then stop
00939             if self.opts_waterfall_test  and single_test_result == self.TEST_RESULT_OK :
00940                 break
00941 
00942         if self.db_logger :
00943             self.db_logger .disconnect()
00944 
00945         return (self.shape_global_test_loop_result (test_all_result, self.opts_waterfall_test  and self.opts_consolidate_waterfall_test ),
00946                 target_name_unique,
00947                 toolchain_name,
00948                 test_id,
00949                 test_description,
00950                 round(elapsed_time, 2),
00951                 single_timeout,
00952                 self.shape_test_loop_ok_result_count (test_all_result)), detailed_test_results
00953 
00954     def handle (self, test_spec, target_name, toolchain_name, test_loops=1):
00955         """ Function determines MUT's mbed disk/port and copies binary to
00956             target.
00957         """
00958         handle_results = []
00959         data = json.loads(test_spec)
00960 
00961         # Find a suitable MUT:
00962         mut = None
00963         for id, m in self.muts .iteritems():
00964             if m['mcu'] == data['mcu']:
00965                 mut = m
00966                 handle_result = self.handle_mut (mut, data, target_name, toolchain_name, test_loops=test_loops)
00967                 handle_results.append(handle_result)
00968 
00969         return handle_results
00970 
00971     def print_test_result (self, test_result, target_name, toolchain_name,
00972                           test_id, test_description, elapsed_time, duration):
00973         """ Use specific convention to print test result and related data
00974         """
00975         tokens = []
00976         tokens.append("TargetTest")
00977         tokens.append(target_name)
00978         tokens.append(toolchain_name)
00979         tokens.append(test_id)
00980         tokens.append(test_description)
00981         separator = "::"
00982         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
00983         result = separator.join(tokens) + " [" + test_result +"]" + time_info
00984         return Fore.MAGENTA + result + Fore.RESET
00985 
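    # print_test_result() above yields a single coloured status line, for
    # example (all values illustrative):
    #
    #     TargetTest::K64F::GCC_ARM::MBED_A1::Basic [OK] in 1.25 of 20 sec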
00986     def shape_test_loop_ok_result_count (self, test_all_result):
00987         """ Reformats list of results to simple string
00988         """
00989         test_loop_count = len(test_all_result)
00990         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK )
00991         return "%d/%d"% (test_loop_ok_result, test_loop_count)
00992 
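    # Sketch of the reformatting above (loop results are illustrative):
    #
    #     self.shape_test_loop_ok_result_count(["OK", "FAIL", "OK"])   # -> "2/3"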
00993     def shape_global_test_loop_result (self, test_all_result, waterfall_and_consolidate):
00994         """ Reformats list of results to simple string
00995         """
00996         result = self.TEST_RESULT_FAIL 
00997 
00998         if all(test_all_result[0] == res for res in test_all_result):
00999             result = test_all_result[0]
01000         elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK  for res in test_all_result):
01001             result = self.TEST_RESULT_OK 
01002 
01003         return result
01004 
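    # Consolidation rules above, sketched with illustrative loop results:
    #
    #     self.shape_global_test_loop_result(["OK", "OK"], False)         # -> "OK"   (all runs agree)
    #     self.shape_global_test_loop_result(["FAIL", "OK"], True)        # -> "OK"   (waterfall + consolidate)
    #     self.shape_global_test_loop_result(["FAIL", "TIMEOUT"], False)  # -> "FAIL"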
01005     def run_host_test (self, name, image_path, disk, port, duration,
01006                       micro=None, reset=None, reset_tout=None,
01007                       verbose=False, copy_method=None, program_cycle_s=None):
01008         """ Function creates a new process with the host test configured for a particular test case.
01009             Function also polls the process for serial port activity to catch all data
01010             printed by test runner and host test during test execution
01011         """
01012 
01013         def get_char_from_queue(obs):
01014             """ Get a character from the queue in a safe way
01015             """
01016             try:
01017                 c = obs.queue.get(block=True, timeout=0.5)
01018             except Empty, _:
01019                 c = None
01020             return c
01021 
01022         def filter_queue_char(c):
01023             """ Filters out non-ASCII characters from the serial port
01024             """
01025             if ord(c) not in range(128):
01026                 c = ' '
01027             return c
01028 
01029         def get_test_result(output):
01030             """ Parse test 'output' data
01031             """
01032             result = self.TEST_RESULT_TIMEOUT 
01033             for line in "".join(output).splitlines():
01034                 search_result = self.RE_DETECT_TESTCASE_RESULT .search(line)
01035                 if search_result and len(search_result.groups()):
01036                     result = self.TEST_RESULT_MAPPING [search_result.groups(0)[0]]
01037                     break
01038             return result
01039 
01040         def get_auto_property_value(property_name, line):
01041             """ Scans an auto-detection line from the MUT and returns the value of parameter 'property_name'
01042                 Returns a string
01043             """
01044             result = None
01045             if re.search("HOST: Property '%s'"% property_name, line) is not None:
01046                 property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
01047                 if property is not None and len(property.groups()) == 1:
01048                     result = property.groups()[0]
01049             return result
01050 
01051         # print "{%s} port:%s disk:%s"  % (name, port, disk),
01052         cmd = ["python",
01053                '%s.py'% name,
01054                '-d', disk,
01055                '-f', '"%s"'% image_path,
01056                '-p', port,
01057                '-t', str(duration),
01058                '-C', str(program_cycle_s)]
01059 
01060         if get_module_avail('mbed_lstools') and self.opts_auto_detect :
01061             cmd += ['--auto']
01062 
01063         # Add extra parameters to host_test
01064         if copy_method is not None:
01065             cmd += ["-c", copy_method]
01066         if micro is not None:
01067             cmd += ["-m", micro]
01068         if reset is not None:
01069             cmd += ["-r", reset]
01070         if reset_tout is not None:
01071             cmd += ["-R", str(reset_tout)]
01072 
01073         if verbose:
01074             print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
01075             print "Test::Output::Start"
01076 
01077         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
01078         obs = ProcessObserver(proc)
01079         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
01080         line = ''
01081         output = []
01082         start_time = time()
01083         while (time() - start_time) < (2 * duration):
01084             c = get_char_from_queue(obs)
01085             if c:
01086                 if verbose:
01087                     sys.stdout.write(c)
01088                 c = filter_queue_char(c)
01089                 output.append(c)
01090                 # Give the mbed under test a way to communicate the end of the test
01091                 if c in ['\n', '\r']:
01092 
01093                     # Checking for auto-detection information from the test about MUT reset moment
01094                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
01095                         # We will update this marker only once to prevent resetting the timer multiple times
01096                         update_once_flag['reset_target'] = True
01097                         start_time = time()
01098 
01099                     # Checking for auto-detection information from the test about timeout
01100                     auto_timeout_val = get_auto_property_value('timeout', line)
01101                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
01102                         # We will update this marker only once to prevent multiple timeout updates
01103                         update_once_flag['timeout'] = True
01104                         duration = int(auto_timeout_val)
01105 
01106                     # Detect mbed assert:
01107                     if 'mbed assertation failed: ' in line:
01108                         output.append('{{mbed_assert}}')
01109                         break
01110 
01111                     # Check for test end
01112                     if '{end}' in line:
01113                         break
01114                     line = ''
01115                 else:
01116                     line += c
01117         end_time = time()
01118         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
01119 
01120         c = get_char_from_queue(obs)
01121 
01122         if c:
01123             if verbose:
01124                 sys.stdout.write(c)
01125             c = filter_queue_char(c)
01126             output.append(c)
01127 
01128         if verbose:
01129             print "Test::Output::Finish"
01130         # Stop test process
01131         obs.stop()
01132 
01133         result = get_test_result(output)
01134         return (result, "".join(output), testcase_duration, duration)
01135 
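    # Illustrative usage sketch (hypothetical values), assuming a configured test
    # runner instance 'runner' and a prebuilt test image:
    #     result, output, testcase_duration, duration = runner.run_host_test(
    #         'echo', 'build/image.bin', 'E:', 'COM3', 10,
    #         copy_method='shell', program_cycle_s=4)
    #     # 'result' is one of the TEST_RESULT_* constants
    #     # (e.g. TEST_RESULT_TIMEOUT when no result marker was seen)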
01136     def is_peripherals_available (self, target_mcu_name, peripherals=None):
01137         """ Checks if the specified target provides the peripherals required by a test case, as declared in the MUTs file
01138         """
01139         if peripherals is not None:
01140             peripherals = set(peripherals)
01141         for id, mut in self.muts.iteritems():
01142             # Target MCU name check
01143             if mut["mcu"] != target_mcu_name:
01144                 continue
01145             # Peripherals check
01146             if peripherals is not None:
01147                 if 'peripherals' not in mut:
01148                     continue
01149                 if not peripherals.issubset(set(mut['peripherals'])):
01150                     continue
01151             return True
01152         return False
01153 
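    # Illustrative MUTs entry (hypothetical device) in the structure scanned above:
    #     self.muts = {"1": {"mcu": "K64F", "port": "COM3", "disk": "E:",
    #                        "peripherals": ["SD", "RTC"]}}
    # For that entry is_peripherals_available("K64F", ["SD"]) returns True.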
01154     def shape_test_request (self, mcu, image_path, test_id, duration=10):
01155         """ Function prepares JSON structure describing test specification
01156         """
01157         test_spec = {
01158             "mcu": mcu,
01159             "image": image_path,
01160             "duration": duration,
01161             "test_id": test_id,
01162         }
01163         return json.dumps(test_spec)
01164 
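    # For illustration, shape_test_request('K64F', 'build/image.bin', 'MBED_A1')
    # returns a JSON string equivalent to:
    #     {"mcu": "K64F", "image": "build/image.bin", "duration": 10, "test_id": "MBED_A1"}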
01165 
01166 def get_unique_value_from_summary (test_summary, index):
01167     """ Gets a sorted list of unique values (e.g. target names) from column 'index' of the test summary
01168     """
01169     result = []
01170     for test in test_summary:
01171         target_name = test[index]
01172         if target_name not in result:
01173             result.append(target_name)
01174     return sorted(result)
01175 
01176 
01177 def get_unique_value_from_summary_ext (test_summary, index_key, index_val):
01178     """ Gets unique values from the test summary and returns a dictionary mapping column 'index_key' to column 'index_val'
01179     """
01180     result = {}
01181     for test in test_summary:
01182         key = test[index_key]
01183         val = test[index_val]
01184         if key not in result:
01185             result[key] = val
01186     return result
01187 
01188 
01189 def show_json_file_format_error (json_spec_filename, line, column):
01190     """ Prints the broken JSON content around the reported error position
01191     """
01192     with open(json_spec_filename) as data_file:
01193         line_no = 1
01194         for json_line in data_file:
01195             if line_no + 5 >= line: # Print last few lines before error
01196                 print 'Line %d:\t'%line_no + json_line, # Prints line
01197             if line_no == line:
01198                 print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
01199                 break
01200             line_no += 1
01201 
01202 
01203 def json_format_error_defect_pos (json_error_msg):
01204     """ Gets first error line and column in JSON file format.
01205         Parsed from exception thrown by json.loads() string
01206     """
01207     result = None
01208     line, column = 0, 0
01209     # Line value search
01210     line_search = re.search('line [0-9]+', json_error_msg)
01211     if line_search is not None:
01212         ls = line_search.group().split(' ')
01213         if len(ls) == 2:
01214             line = int(ls[1])
01215             # Column position search
01216             column_search = re.search('column [0-9]+', json_error_msg)
01217             if column_search is not None:
01218                 cs = column_search.group().split(' ')
01219                 if len(cs) == 2:
01220                     column = int(cs[1])
01221                     result = [line, column]
01222     return result
01223 
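# Illustrative example: for a ValueError message such as
#     "Expecting ',' delimiter: line 12 column 4 (char 310)"
# json_format_error_defect_pos() returns [12, 4]; if no line/column information
# can be found in the message it returns None.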
01224 
01225 def get_json_data_from_file (json_spec_filename, verbose=False):
01226     """ Loads a JSON formatted file into a data structure
01227     """
01228     result = None
01229     try:
01230         with open(json_spec_filename) as data_file:
01231             try:
01232                 result = json.load(data_file)
01233             except ValueError as json_error_msg:
01234                 result = None
01235                 print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
01236                 # We can print where error occurred inside JSON file if we can parse exception msg
01237                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
01238                 if json_format_defect_pos is not None:
01239                     line = json_format_defect_pos[0]
01240                     column = json_format_defect_pos[1]
01241                     print
01242                     show_json_file_format_error(json_spec_filename, line, column)
01243 
01244     except IOError as fileopen_error_msg:
01245         print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
01246         print
01247     if verbose and result:
01248         pp = pprint.PrettyPrinter(indent=4)
01249         pp.pprint(result)
01250     return result
01251 
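# Illustrative usage (hypothetical file name):
#     muts = get_json_data_from_file('muts_all.json', verbose=True)
#     # 'muts' is None if the file is missing or malformed; error details
#     # (including the defect position, when available) are printed by the function.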
01252 
01253 def print_muts_configuration_from_json (json_data, join_delim=", ", platform_filter=None):
01254     """ Prints the MUTs configuration passed to the test script (for verbose output)
01255     """
01256     muts_info_cols = []
01257     # We need to check all unique properties for each defined MUT
01258     for k in json_data:
01259         mut_info = json_data[k]
01260         for mut_property in mut_info:
01261             if mut_property not in muts_info_cols:
01262                 muts_info_cols.append(mut_property)
01263 
01264     # Prepare pretty table object to display all MUTs
01265     pt_cols = ["index"] + muts_info_cols
01266     pt = PrettyTable(pt_cols)
01267     for col in pt_cols:
01268         pt.align[col] = "l"
01269 
01270     # Add rows to pretty print object
01271     for k in json_data:
01272         row = [k]
01273         mut_info = json_data[k]
01274 
01275         add_row = True
01276         if platform_filter and 'mcu' in mut_info:
01277             add_row = re.search(platform_filter, mut_info['mcu']) is not None
01278         if add_row:
01279             for col in muts_info_cols:
01280                 cell_val = mut_info[col] if col in mut_info else None
01281                 if type(cell_val) == ListType:
01282                     cell_val = join_delim.join(cell_val)
01283                 row.append(cell_val)
01284             pt.add_row(row)
01285     return pt.get_string()
01286 
01287 
01288 def print_test_configuration_from_json (json_data, join_delim=", "):
01289     """ Prints the test specification configuration passed to the test script (for verbose output)
01290     """
01291     toolchains_info_cols = []
01292     # We need to check all toolchains for each device
01293     for k in json_data:
01294         # k should be 'targets'
01295         targets = json_data[k]
01296         for target in targets:
01297             toolchains = targets[target]
01298             for toolchain in toolchains:
01299                 if toolchain not in toolchains_info_cols:
01300                     toolchains_info_cols.append(toolchain)
01301 
01302     # Prepare pretty table object to display test specification
01303     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
01304     pt = PrettyTable(pt_cols)
01305     for col in pt_cols:
01306         pt.align[col] = "l"
01307 
01308     # { target : [conflicted toolchains] }
01309     toolchain_conflicts = {}
01310     toolchain_path_conflicts = []
01311     for k in json_data:
01312         # k should be 'targets'
01313         targets = json_data[k]
01314         for target in targets:
01315             target_supported_toolchains = get_target_supported_toolchains(target)
01316             if not target_supported_toolchains:
01317                 target_supported_toolchains = []
01318             target_name = target if target in TARGET_MAP else "%s*"% target
01319             row = [target_name]
01320             toolchains = targets[target]
01321 
01322             for toolchain in sorted(toolchains_info_cols):
01323                 # Check for conflicts: target vs toolchain
01324                 conflict = False
01325                 conflict_path = False
01326                 if toolchain in toolchains:
01327                     if toolchain not in target_supported_toolchains:
01328                         conflict = True
01329                         if target not in toolchain_conflicts:
01330                             toolchain_conflicts[target] = []
01331                         toolchain_conflicts[target].append(toolchain)
01332                 # Add marker inside table about target usage / conflict
01333                 cell_val = 'Yes' if toolchain in toolchains else '-'
01334                 if conflict:
01335                     cell_val += '*'
01336                 # Check for conflicts: toolchain vs toolchain path
01337                 if toolchain in TOOLCHAIN_BIN_PATH:
01338                     toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
01339                     if not os.path.isdir(toolchain_path):
01340                         conflict_path = True
01341                         if toolchain not in toolchain_path_conflicts:
01342                             toolchain_path_conflicts.append(toolchain)
01343                 if conflict_path:
01344                     cell_val += '#'
01345                 row.append(cell_val)
01346             pt.add_row(row)
01347 
01348     # generate result string
01349     result = pt.get_string()    # Test specification table
01350     if toolchain_conflicts or toolchain_path_conflicts:
01351         result += "\n"
01352         result += "Toolchain conflicts:\n"
01353         for target in toolchain_conflicts:
01354             if target not in TARGET_MAP:
01355                 result += "\t* Target %s unknown\n"% (target)
01356             conflict_target_list = join_delim.join(toolchain_conflicts[target])
01357             suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
01358             result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
01359 
01360         for toolchain in toolchain_path_conflicts:
01361             # Let's check the toolchain configuration
01362             if toolchain in TOOLCHAIN_BIN_PATH:
01363                 toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
01364                 if not os.path.isdir(toolchain_path):
01365                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
01366     return result
01367 
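# Illustrative output notes (hypothetical targets and toolchains): in the table a
# '*' suffix marks a toolchain requested for a target that does not support it,
# and a '#' suffix marks a toolchain whose TOOLCHAIN_BIN_PATH directory was not
# found; the corresponding conflicts are then listed below the table, e.g.
#     Toolchain conflicts:
#         * Target K64F does not support GCC_ARM toolchain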
01368 
01369 def get_avail_tests_summary_table (cols=None, result_summary=True, join_delim=',',platform_filter=None):
01370     """ Generates a summary table with all test cases and additional test case
01371         information using the pretty print functionality. Allows the test suite user to
01372         review available test cases
01373     """
01374     # get all unique test ID prefixes
01375     unique_test_id = []
01376     for test in TESTS:
01377         split = test['id'].split('_')[:-1]
01378         test_id_prefix = '_'.join(split)
01379         if test_id_prefix not in unique_test_id:
01380             unique_test_id.append(test_id_prefix)
01381     unique_test_id.sort()
01382     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
01383     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
01384 
01385     test_properties = ['id',
01386                        'automated',
01387                        'description',
01388                        'peripherals',
01389                        'host_test',
01390                        'duration'] if cols is None else cols
01391 
01392     # All tests status table print
01393     pt = PrettyTable(test_properties)
01394     for col in test_properties:
01395         pt.align[col] = "l"
01396     pt.align['duration'] = "r"
01397 
01398     counter_all = 0
01399     counter_automated = 0
01400     pt.padding_width = 1 # One space between column edges and contents (default)
01401 
01402     for test_id in sorted(TEST_MAP.keys()):
01403         if platform_filter is not None:
01404             # Filter out platforms using regex
01405             if re.search(platform_filter, test_id) is None:
01406                 continue
01407         row = []
01408         test = TEST_MAP[test_id]
01409         split = test_id.split('_')[:-1]
01410         test_id_prefix = '_'.join(split)
01411 
01412         for col in test_properties:
01413             col_value = test[col]
01414             if type(test[col]) == ListType:
01415                 col_value = join_delim.join(test[col])
01416             elif test[col] is None:
01417                 col_value = "-"
01418 
01419             row.append(col_value)
01420         if test['automated'] == True:
01421             counter_dict_test_id_types[test_id_prefix] += 1
01422             counter_automated += 1
01423         pt.add_row(row)
01424         # Update counters
01425         counter_all += 1
01426         counter_dict_test_id_types_all[test_id_prefix] += 1
01427     result = pt.get_string()
01428     result += "\n\n"
01429 
01430     if result_summary and not platform_filter:
01431         # Automation result summary
01432         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
01433         pt = PrettyTable(test_id_cols)
01434         pt.align['automated'] = "r"
01435         pt.align['all'] = "r"
01436         pt.align['percent [%]'] = "r"
01437 
01438         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
01439         str_progress = progress_bar(percent_progress, 75)
01440         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
01441         result += "Automation coverage:\n"
01442         result += pt.get_string()
01443         result += "\n\n"
01444 
01445         # Test automation coverage table print
01446         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
01447         pt = PrettyTable(test_id_cols)
01448         pt.align['id'] = "l"
01449         pt.align['automated'] = "r"
01450         pt.align['all'] = "r"
01451         pt.align['percent [%]'] = "r"
01452         for unique_id in unique_test_id:
01453             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
01454             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
01455             str_progress = progress_bar(percent_progress, 75)
01456             row = [unique_id,
01457                    counter_dict_test_id_types[unique_id],
01458                    counter_dict_test_id_types_all[unique_id],
01459                    percent_progress,
01460                    "[" + str_progress + "]"]
01461             pt.add_row(row)
01462         result += "Test automation coverage:\n"
01463         result += pt.get_string()
01464         result += "\n\n"
01465     return result
01466 
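# Illustrative usage (hypothetical column selection):
#     print get_avail_tests_summary_table(cols=['id', 'automated', 'description'])
# prints the test case table and, when result_summary=True and no platform
# filter is given, the automation coverage tables as well.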
01467 
01468 def progress_bar (percent_progress, saturation=0):
01469     """ This function creates a progress bar with an optional simple saturation mark
01470     """
01471     step = int(percent_progress / 2)    # Scale percentage to bar width (0 - 50 characters)
01472     str_progress = '#' * step + '.' * int(50 - step)
01473     c = '!' if str_progress[38] == '.' else '|'
01474     if saturation > 0:
01475         saturation = saturation / 2
01476         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
01477     return str_progress
01478 
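# For illustration, progress_bar(50.0, 75) returns a 51-character bar: 25 '#'
# characters followed by dots, with a saturation mark ('!' here, indicating that
# progress has not reached the saturation level) inserted near the 75% position.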
01479 
01480 def singletest_in_cli_mode (single_test):
01481     """ Runs SingleTestRunner object in CLI (Command line interface) mode
01482 
01483         @return returns success code (0 == success) for building and running tests
01484     """
01485     start = time()
01486     # Execute tests depending on options and filter applied
01487     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
01488     elapsed_time = time() - start
01489 
01490     # Human readable summary
01491     if not single_test.opts_suppress_summary:
01492         # prints well-formed summary with results (SQL table like)
01493         print single_test.generate_test_summary(test_summary, shuffle_seed)
01494     if single_test.opts_test_x_toolchain_summary:
01495         # prints well-formed summary with results (SQL table like)
01496         # table shows test x toolchain test result matrix
01497         print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
01498 
01499     print "Completed in %.2f sec"% (elapsed_time)
01500     print
01501     # Write summary of the builds
01502 
01503     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
01504     status = print_report_exporter.report(build_report)
01505 
01506     # Store extra reports in files
01507     if single_test.opts_report_html_file_name:
01508         # Export results in form of HTML report to separate file
01509         report_exporter = ReportExporter(ResultExporterType.HTML)
01510         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
01511     if single_test.opts_report_junit_file_name:
01512         # Export results in form of JUnit XML report to separate file
01513         report_exporter = ReportExporter(ResultExporterType.JUNIT)
01514         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
01515     if single_test.opts_report_build_file_name:
01516         # Export build results as JUnit XML report to separate file
01517         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
01518         report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
01519 
01520     # Returns True if no build failures of the test projects or their dependencies
01521     return status
01522 
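# Illustrative call sketch (hypothetical wiring): after parsing options and
# constructing the test runner, a command line entry point could do:
#     single_test = SingleTestRunner(...)   # hypothetical constructor arguments
#     if not singletest_in_cli_mode(single_test):
#         sys.exit(1)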
01523 class TestLogger ():
01524     """ Super-class for logging and printing ongoing events for test suite pass
01525     """
01526     def __init__ (self, store_log=True):
01527         """ We can control if the logger actually stores the log in memory
01528             or just handles all log entries immediately
01529         """
01530         self.log = []
01531         self.log_to_file = False
01532         self.log_file_name = None
01533         self.store_log = store_log
01534 
01535         self.LogType  = construct_enum(INFO='Info',
01536                                       WARN='Warning',
01537                                       NOTIF='Notification',
01538                                       ERROR='Error',
01539                                       EXCEPT='Exception')
01540 
01541         self.LogToFileAttr  = construct_enum(CREATE=1,    # Create or overwrite existing log file
01542                                             APPEND=2)    # Append to existing log file
01543 
01544     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01545         """ Log one line of text
01546         """
01547         log_timestamp = time()
01548         log_entry = {'log_type' : LogType,
01549                      'log_timestamp' : log_timestamp,
01550                      'log_line' : log_line,
01551                      '_future' : None
01552         }
01553         # Store log in memory
01554         if self.store_log:
01555             self.log.append(log_entry)
01556         return log_entry
01557 
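    # Illustrative log entry dictionary returned (and stored when store_log is True),
    # assuming LogType.INFO maps to the string 'Info' and it was passed by the caller:
    #     {'log_type': 'Info', 'log_timestamp': 1423567890.12,
    #      'log_line': 'MUTs configuration loaded', '_future': None}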
01558 
01559 class CLITestLogger (TestLogger ):
01560     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
01561     """
01562     def __init__(self, store_log=True, file_name=None):
01563         TestLogger.__init__(self)
01564         self.log_file_name = file_name
01565         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
01566         self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
01567 
01568     def log_print (self, log_entry, timestamp=True):
01569         """ Returns a formatted log entry string for on-screen printing
01570         """
01571         ts = log_entry['log_timestamp']
01572         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
01573         log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
01574         return timestamp_str + log_line_str
01575 
01576     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01577         """ Logs a line; if log file output was specified the log line will also be appended
01578             at the end of the log file
01579         """
01580         log_entry = TestLogger.log_line(self, LogType, log_line)
01581         log_line_str = self.log_print(log_entry, timestamp)
01582         if self.log_file_name is not None:
01583             try:
01584                 with open(self.log_file_name, 'a') as f:
01585                     f.write(log_line_str + line_delim)
01586             except IOError:
01587                 pass
01588         return log_line_str
01589 
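# Illustrative usage (hypothetical log file name):
#     logger = CLITestLogger(file_name='test_suite.log')
#     entry = logger.log_line(logger.LogType.INFO, 'MUTs configuration loaded')
# 'entry' would resemble "[14:23:01] Info: MUTs configuration loaded" and the same
# line would also be appended to test_suite.log.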
01590 
01591 def factory_db_logger (db_url):
01592     """ Factory returning a database driver depending on the database type supplied in the connection string db_url
01593     """
01594     if db_url is not None:
01595         from tools.test_mysql import MySQLDBAccess
01596         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
01597         if connection_info is not None:
01598             (db_type, username, password, host, db_name) = connection_info
01599             if db_type == 'mysql':
01600                 return MySQLDBAccess()
01601     return None
01602 
01603 
01604 def detect_database_verbose (db_url):
01605     """ Uses verbose mode (prints) the database detection sequence to check if the database connection string is valid
01606     """
01607     result = BaseDBAccess().parse_db_connection_string(db_url)
01608     if result is not None:
01609         # Parsing passed
01610         (db_type, username, password, host, db_name) = result
01611         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
01612         # Let's try to connect
01613         db_ = factory_db_logger(db_url)
01614         if db_ is not None:
01615             print "Connecting to database '%s'..."% db_url,
01616             db_.connect(host, username, password, db_name)
01617             if db_.is_connected():
01618                 print "ok"
01619                 print "Detecting database..."
01620                 print db_.detect_database(verbose=True)
01621                 print "Disconnecting...",
01622                 db_.disconnect()
01623                 print "done"
01624         else:
01625             print "Database type '%s' unknown"% db_type
01626     else:
01627         print "Parse error: '%s' - DB Url error"% (db_url)
01628 
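# Illustrative connection string accepted by the database helpers above (the same
# format as the --db command line option below):
#     detect_database_verbose('mysql://username:password@127.0.0.1/db_name')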
01629 
01630 def get_module_avail (module_name):
01631     """ This function returns True if module_name is an already imported module
01632     """
01633     return module_name in sys.modules.keys()
01634 
01635 
01636 def get_autodetected_MUTS_list(platform_name_filter=None):
01637     oldError = None
01638     if os.name == 'nt':
01639         # Disable Windows error box temporarily
01640         oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
01641 
01642     mbeds = mbed_lstools.create()
01643     detect_muts_list = mbeds.list_mbeds()
01644 
01645     if os.name == 'nt':
01646         ctypes.windll.kernel32.SetErrorMode(oldError)
01647 
01648     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
01649 
01650 def get_autodetected_MUTS (mbeds_list, platform_name_filter=None):
01651     """ Function detects all mbed-enabled devices connected to the host and generates an artificial MUTs structure.
01652         If the function fails to auto-detect devices it will return an empty dictionary.
01653 
01654         if get_module_avail('mbed_lstools'):
01655             mbeds = mbed_lstools.create()
01656             mbeds_list = mbeds.list_mbeds()
01657 
01658         @param mbeds_list list of mbeds captured from mbed_lstools
01659         @param platform_name_filter list of platform names used to filter the 'platform_name' of detected devices
01660     """
01661     result = {}   # Should be in muts_all.json format
01662     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
01663     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
01664     index = 1
01665     for mut in mbeds_list:
01666         # Filter the MUTS if a filter is specified
01667 
01668         if platform_name_filter and mut['platform_name'] not in platform_name_filter:
01669             continue
01670 
01671         # For mcu_unique we are assigning the 'platform_name_unique' value from mbedls output (if it exists);
01672         # if not, we are creating our own unique value (last few chars of the platform's target_id).
01673         m = {'mcu': mut['platform_name'],
01674              'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
01675              'port': mut['serial_port'],
01676              'disk': mut['mount_point'],
01677              'peripherals': []     # No peripheral detection
01678              }
01679         if index not in result:
01680             result[index] = {}
01681         result[index] = m
01682         index += 1
01683     return result
01684 
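# Illustrative auto-detected MUTs structure built above from the example mbedls
# entry shown in the comment (NUCLEO_F302R8 on E:/COM34):
#     {1: {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]',
#          'port': 'COM34', 'disk': 'E:', 'peripherals': []}}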
01685 
01686 def get_autodetected_TEST_SPEC (mbeds_list,
01687                                use_default_toolchain=True,
01688                                use_supported_toolchains=False,
01689                                toolchain_filter=None,
01690                                platform_name_filter=None):
01691     """ Function detects all mbed-enabled devices connected to the host and generates an artificial test_spec structure.
01692         If the function fails to auto-detect devices it will return an empty 'targets' test_spec description.
01693 
01694         use_default_toolchain - if True add default toolchain to test_spec
01695         use_supported_toolchains - if True add all supported toolchains to test_spec
01696         toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
01697     """
01698     result = {'targets': {} }
01699 
01700     for mut in mbeds_list:
01701         mcu = mut['mcu']
01702         if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
01703             if mcu in TARGET_MAP:
01704                 default_toolchain = TARGET_MAP[mcu].default_toolchain
01705                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
01706 
01707                 # Decide which toolchains should be added to test specification toolchain pool for each target
01708                 toolchains = []
01709                 if use_default_toolchain:
01710                     toolchains.append(default_toolchain)
01711                 if use_supported_toolchains:
01712                     toolchains += supported_toolchains
01713                 if toolchain_filter is not None:
01714                     all_toolchains = supported_toolchains + [default_toolchain]
01715                     for toolchain in toolchain_filter.split(','):
01716                         if toolchain in all_toolchains:
01717                             toolchains.append(toolchain)
01718 
01719                 result['targets'][mcu] = list(set(toolchains))
01720     return result
01721 
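# Illustrative auto-generated test_spec structure (hypothetical target and
# toolchains), as consumed by the rest of the test suite:
#     {'targets': {'K64F': ['ARM', 'GCC_ARM']}}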
01722 
01723 def get_default_test_options_parser ():
01724     """ Get common test script options used by CLI, web services etc.
01725     """
01726     parser = optparse.OptionParser()
01727     parser.add_option('-i', '--tests',
01728                       dest='test_spec_filename',
01729                       metavar="FILE",
01730                       help='Points to file with test specification')
01731 
01732     parser.add_option('-M', '--MUTS',
01733                       dest='muts_spec_filename',
01734                       metavar="FILE",
01735                       help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
01736 
01737     parser.add_option("-j", "--jobs",
01738                       dest='jobs',
01739                       metavar="NUMBER",
01740                       type="int",
01741                       help="Define number of compilation jobs. Default value is 1")
01742 
01743     if get_module_avail('mbed_lstools'):
01744         # Additional features available when mbed_lstools is installed on host and imported
01745         # mbed_lstools allow users to detect connected to host mbed-enabled devices
01746         parser.add_option('', '--auto',
01747                           dest='auto_detect',
01748                           metavar=False,
01749                           action="store_true",
01750                           help='Use mbed-ls module to detect all connected mbed devices')
01751 
01752         parser.add_option('', '--tc',
01753                           dest='toolchains_filter',
01754                       help="Toolchain filter for --auto option. Use toolchain names separated by comma, 'default' or 'all' to select toolchains")
01755 
01756         test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
01757         parser.add_option('', '--oper',
01758                           dest='operability_checks',
01759                           help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
01760 
01761     parser.add_option('', '--clean',
01762                       dest='clean',
01763                       metavar=False,
01764                       action="store_true",
01765                       help='Clean the build directory')
01766 
01767     parser.add_option('-P', '--only-peripherals',
01768                       dest='test_only_peripheral',
01769                       default=False,
01770                       action="store_true",
01771                       help='Test only peripherals declared for MUT and skip common tests')
01772 
01773     parser.add_option('-C', '--only-commons',
01774                       dest='test_only_common',
01775                       default=False,
01776                       action="store_true",
01777                       help='Test only board internals. Skip peripheral tests and perform common tests')
01778 
01779     parser.add_option('-n', '--test-by-names',
01780                       dest='test_by_names',
01781                       help='Runs only tests enumerated in this switch. Use comma to separate test case names')
01782 
01783     parser.add_option('-p', '--peripheral-by-names',
01784                       dest='peripheral_by_names',
01785                       help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
01786 
01787     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
01788     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
01789 
01790     parser.add_option('-c', '--copy-method',
01791                       dest='copy_method',
01792                       help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
01793 
01794     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
01795     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
01796 
01797     parser.add_option('-r', '--reset-type',
01798                       dest='mut_reset_type',
01799                       default=None,
01800                       help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
01801 
01802     parser.add_option('-g', '--goanna-for-tests',
01803                       dest='goanna_for_tests',
01804                       metavar=False,
01805                       action="store_true",
01806                       help='Run Goanna static analysis tool for tests (project will be rebuilt)')
01807 
01808     parser.add_option('-G', '--goanna-for-sdk',
01809                       dest='goanna_for_mbed_sdk',
01810                       metavar=False,
01811                       action="store_true",
01812                       help='Run Goanna static analysis tool for mbed SDK (project will be rebuilt)')
01813 
01814     parser.add_option('-s', '--suppress-summary',
01815                       dest='suppress_summary',
01816                       default=False,
01817                       action="store_true",
01818                       help='Suppresses display of well-formatted table with test results')
01819 
01820     parser.add_option('-t', '--test-summary',
01821                       dest='test_x_toolchain_summary',
01822                       default=False,
01823                       action="store_true",
01824                       help='Displays well-formatted table with test x toolchain test result per target')
01825 
01826     parser.add_option('-A', '--test-automation-report',
01827                       dest='test_automation_report',
01828                       default=False,
01829                       action="store_true",
01830                       help='Prints information about all tests and exits')
01831 
01832     parser.add_option('-R', '--test-case-report',
01833                       dest='test_case_report',
01834                       default=False,
01835                       action="store_true",
01836                       help='Prints information about all test cases and exits')
01837 
01838     parser.add_option("-S", "--supported-toolchains",
01839                       action="store_true",
01840                       dest="supported_toolchains",
01841                       default=False,
01842                       help="Displays supported matrix of MCUs and toolchains")
01843 
01844     parser.add_option("-O", "--only-build",
01845                       action="store_true",
01846                       dest="only_build_tests",
01847                       default=False,
01848                       help="Only build tests, skips actual test procedures (flashing etc.)")
01849 
01850     parser.add_option('', '--parallel',
01851                       dest='parallel_test_exec',
01852                       default=False,
01853                       action="store_true",
01854                       help='Experimental: execute test runners in parallel for MUTs connected to your host (speeds up test result collection)')
01855 
01856     parser.add_option('', '--config',
01857                       dest='verbose_test_configuration_only',
01858                       default=False,
01859                       action="store_true",
01860                       help='Displays full test specification and MUTs configuration and exits')
01861 
01862     parser.add_option('', '--loops',
01863                       dest='test_loops_list',
01864                       help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
01865 
01866     parser.add_option('', '--global-loops',
01867                       dest='test_global_loops_value',
01868                       help='Set global number of test loops per test. Default value is 1')
01869 
01870     parser.add_option('', '--consolidate-waterfall',
01871                       dest='consolidate_waterfall_test',
01872                       default=False,
01873                       action="store_true",
01874                       help='Used with --waterfall option. Adds only one test to report reflecting outcome of waterfall test.')
01875 
01876     parser.add_option('-W', '--waterfall',
01877                       dest='waterfall_test',
01878                       default=False,
01879                       action="store_true",
01880                       help='Used with --loops or --global-loops options. Tests until OK result occurs and assumes test passed')
01881 
01882     parser.add_option('-N', '--firmware-name',
01883                       dest='firmware_global_name',
01884                       help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')
01885 
01886     parser.add_option('-u', '--shuffle',
01887                       dest='shuffle_test_order',
01888                       default=False,
01889                       action="store_true",
01890                       help='Shuffles test execution order')
01891 
01892     parser.add_option('', '--shuffle-seed',
01893                       dest='shuffle_test_seed',
01894                       default=None,
01895                       help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
01896 
01897     parser.add_option('-f', '--filter',
01898                       dest='general_filter_regex',
01899                       default=None,
01900                       help='For some commands you can use filter to filter out results')
01901 
01902     parser.add_option('', '--inc-timeout',
01903                       dest='extend_test_timeout',
01904                       metavar="NUMBER",
01905                       type="int",
01906                       help='You can increase global timeout for each test by specifying additional test timeout in seconds')
01907 
01908     parser.add_option('', '--db',
01909                       dest='db_url',
01910                       help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
01911 
01912     parser.add_option('-l', '--log',
01913                       dest='log_file_name',
01914                       help='Log events to external file (note not all console entries may be visible in log file)')
01915 
01916     parser.add_option('', '--report-html',
01917                       dest='report_html_file_name',
01918                       help='You can log test suite results in form of HTML report')
01919 
01920     parser.add_option('', '--report-junit',
01921                       dest='report_junit_file_name',
01922                       help='You can log test suite results in form of JUnit compliant XML report')
01923 
01924     parser.add_option("", "--report-build",
01925                       dest="report_build_file_name",
01926                       help="Output the build results to a junit xml file")
01927 
01928     parser.add_option('', '--verbose-skipped',
01929                       dest='verbose_skipped_tests',
01930                       default=False,
01931                       action="store_true",
01932                       help='Prints some extra information about skipped tests')
01933 
01934     parser.add_option('-V', '--verbose-test-result',
01935                       dest='verbose_test_result_only',
01936                       default=False,
01937                       action="store_true",
01938                       help='Prints test serial output')
01939 
01940     parser.add_option('-v', '--verbose',
01941                       dest='verbose',
01942                       default=False,
01943                       action="store_true",
01944                       help='Verbose mode (prints some extra information)')
01945 
01946     parser.add_option('', '--version',
01947                       dest='version',
01948                       default=False,
01949                       action="store_true",
01950                       help='Prints script version and exits')
01951     return parser
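# Illustrative usage of the option parser (hypothetical command line):
#     parser = get_default_test_options_parser()
#     (opts, args) = parser.parse_args(['-i', 'test_spec.json', '-M', 'muts_all.json', '-j', '8'])
#     # opts.test_spec_filename == 'test_spec.json', opts.jobs == 8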