RTOS API example

test_api.py Source File
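
This page lists the source of test_api.py, the mbed SDK test runner used by the singletest front end. As a quick orientation before the full listing, the sketch below shows how its two main classes fit together: SingleTestRunner drives building tests and running them on the connected MUTs, while SingleTestExecutor wraps a runner so the whole run can execute in a separate thread. The sketch is illustrative only and is not part of the file: the import path, the MUT and test specification values and the selected constructor options are assumptions, and a real run needs the option parser, parsed options and external MUTs/test-spec files prepared by the command-line front end.

    from tools.test_api import SingleTestRunner, SingleTestExecutor

    # Hypothetical MUTs descriptor and test specification; in practice these are
    # built from the external MUTs and test specification files.
    muts = {"1": {"mcu": "LPC1768", "port": "COM4", "disk": "E:\\", "peripherals": []}}
    test_spec = {"targets": {"LPC1768": ["ARM", "GCC_ARM"]},
                 "clean": False,
                 "test_ids": []}

    single_test = SingleTestRunner(_muts=muts,
                                   _test_spec=test_spec,
                                   _opts_only_build_tests=True,
                                   _opts_jobs=1,
                                   _parser=None,  # real runs pass the option parser
                                   _opts=None)    # and parsed options from singletest.py

    # Run the whole suite in a background thread; execute() returns the test
    # summary, shuffle seed, extended results, suite properties and build report.
    executor = SingleTestExecutor(single_test)
    executor.start()
    executor.join()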

test_api.py

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2014 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 
00017 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
00018 """
00019 
00020 import os
00021 import re
00022 import sys
00023 import json
00024 import uuid
00025 import pprint
00026 import random
00027 import argparse
00028 import datetime
00029 import threading
00030 import ctypes
00031 from types import ListType
00032 from colorama import Fore, Back, Style
00033 from prettytable import PrettyTable
00034 from copy import copy
00035 
00036 from time import sleep, time
00037 from Queue import Queue, Empty
00038 from os.path import join, exists, basename, relpath
00039 from threading import Thread, Lock
00040 from multiprocessing import Pool, cpu_count
00041 from subprocess import Popen, PIPE
00042 
00043 # Imports related to mbed build api
00044 from tools.tests import TESTS
00045 from tools.tests import TEST_MAP
00046 from tools.paths import BUILD_DIR
00047 from tools.paths import HOST_TESTS
00048 from tools.utils import ToolException
00049 from tools.utils import NotSupportedException
00050 from tools.utils import construct_enum
00051 from tools.memap import MemapParser
00052 from tools.targets import TARGET_MAP
00053 import tools.test_configs as TestConfig
00054 from tools.test_db import BaseDBAccess
00055 from tools.build_api import build_project, build_mbed_libs, build_lib
00056 from tools.build_api import get_target_supported_toolchains
00057 from tools.build_api import write_build_report
00058 from tools.build_api import prep_report
00059 from tools.build_api import prep_properties
00060 from tools.build_api import create_result
00061 from tools.build_api import add_result_to_report
00062 from tools.build_api import prepare_toolchain
00063 from tools.build_api import scan_resources
00064 from tools.build_api import get_config
00065 from tools.libraries import LIBRARIES, LIBRARY_MAP
00066 from tools.options import extract_profile
00067 from tools.toolchains import TOOLCHAIN_PATHS
00068 from tools.toolchains import TOOLCHAINS
00069 from tools.test_exporters import ReportExporter, ResultExporterType
00070 from tools.utils import argparse_filestring_type
00071 from tools.utils import argparse_uppercase_type
00072 from tools.utils import argparse_lowercase_type
00073 from tools.utils import argparse_many
00074 from tools.utils import get_path_depth
00075 
00076 import tools.host_tests.host_tests_plugins as host_tests_plugins
00077 
00078 try:
00079     import mbed_lstools
00080     from tools.compliance.ioper_runner import get_available_oper_test_scopes
00081 except:
00082     pass
00083 
00084 
00085 class ProcessObserver(Thread):
00086     def __init__(self, proc):
00087         Thread.__init__(self)
00088         self.proc = proc
00089         self.queue = Queue()
00090         self.daemon = True
00091         self.active = True
00092         self.start()
00093 
00094     def run(self):
00095         while self.active:
00096             c = self.proc.stdout.read(1)
00097             self.queue.put(c)
00098 
00099     def stop(self):
00100         self.active = False
00101         try:
00102             self.proc.terminate()
00103         except Exception, _:
00104             pass
00105 
00106 
00107 class SingleTestExecutor (threading.Thread):
00108     """ Example: usage of the single test class in a separate thread
00109     """
00110     def __init__(self, single_test):
00111         self.single_test  = single_test
00112         threading.Thread.__init__(self)
00113 
00114     def run(self):
00115         start = time()
00116         # Execute tests depending on options and filter applied
00117         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = self.single_test .execute()
00118         elapsed_time = time() - start
00119 
00120         # Human readable summary
00121         if not self.single_test .opts_suppress_summary:
00122             # prints well-formed summary with results (SQL table like)
00123             print self.single_test .generate_test_summary(test_summary, shuffle_seed)
00124         if self.single_test .opts_test_x_toolchain_summary:
00125             # prints well-formed summary with results (SQL table like)
00126             # table shows test x toolchain test result matrix
00127             print self.single_test .generate_test_summary_by_target(test_summary, shuffle_seed)
00128         print "Completed in %.2f sec"% (elapsed_time)
00129 
00130 
00131 class SingleTestRunner (object):
00132     """ Object wrapper for single test run which may involve multiple MUTs
00133     """
00134     RE_DETECT_TESTCASE_RESULT = None
00135 
00136     # Return codes for test script
00137     TEST_RESULT_OK = "OK"
00138     TEST_RESULT_FAIL = "FAIL"
00139     TEST_RESULT_ERROR = "ERROR"
00140     TEST_RESULT_UNDEF = "UNDEF"
00141     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
00142     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
00143     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
00144     TEST_RESULT_TIMEOUT = "TIMEOUT"
00145     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
00146     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
00147     TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
00148     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
00149 
00150     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
00151     TEST_LOOPS_LIST = []    # We redefine no. of loops per test_id
00152     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
00153 
00154     muts = {} # MUTs descriptor (from external file)
00155     test_spec = {} # Test specification (from external file)
00156 
00157     # mbed test suite -> SingleTestRunner
00158     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
00159                            "failure" : TEST_RESULT_FAIL,
00160                            "error" : TEST_RESULT_ERROR,
00161                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
00162                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
00163                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
00164                            "timeout" : TEST_RESULT_TIMEOUT,
00165                            "no_image" : TEST_RESULT_NO_IMAGE,
00166                            "end" : TEST_RESULT_UNDEF,
00167                            "mbed_assert" : TEST_RESULT_MBED_ASSERT,
00168                            "build_failed" : TEST_RESULT_BUILD_FAILED,
00169                            "not_supproted" : TEST_RESULT_NOT_SUPPORTED
00170     }
00171 
00172     def __init__ (self,
00173                  _global_loops_count=1,
00174                  _test_loops_list=None,
00175                  _muts={},
00176                  _clean=False,
00177                  _parser=None,
00178                  _opts=None,
00179                  _opts_db_url=None,
00180                  _opts_log_file_name=None,
00181                  _opts_report_html_file_name=None,
00182                  _opts_report_junit_file_name=None,
00183                  _opts_report_build_file_name=None,
00184                  _opts_report_text_file_name=None,
00185                  _opts_build_report={},
00186                  _opts_build_properties={},
00187                  _test_spec={},
00188                  _opts_goanna_for_mbed_sdk=None,
00189                  _opts_goanna_for_tests=None,
00190                  _opts_shuffle_test_order=False,
00191                  _opts_shuffle_test_seed=None,
00192                  _opts_test_by_names=None,
00193                  _opts_peripheral_by_names=None,
00194                  _opts_test_only_peripheral=False,
00195                  _opts_test_only_common=False,
00196                  _opts_verbose_skipped_tests=False,
00197                  _opts_verbose_test_result_only=False,
00198                  _opts_verbose=False,
00199                  _opts_firmware_global_name=None,
00200                  _opts_only_build_tests=False,
00201                  _opts_parallel_test_exec=False,
00202                  _opts_suppress_summary=False,
00203                  _opts_test_x_toolchain_summary=False,
00204                  _opts_copy_method=None,
00205                  _opts_mut_reset_type=None,
00206                  _opts_jobs=None,
00207                  _opts_waterfall_test=None,
00208                  _opts_consolidate_waterfall_test=None,
00209                  _opts_extend_test_timeout=None,
00210                  _opts_auto_detect=None,
00211                  _opts_include_non_automated=False):
00212         """ Let's try hard to init this object
00213         """
00214         from colorama import init
00215         init()
00216 
00217         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING .keys()) + ")\\}"
00218         self.RE_DETECT_TESTCASE_RESULT  = re.compile(PATTERN)
00219         # Settings related to test loops counters
00220         try:
00221             _global_loops_count = int(_global_loops_count)
00222         except:
00223             _global_loops_count = 1
00224         if _global_loops_count < 1:
00225             _global_loops_count = 1
00226         self.GLOBAL_LOOPS_COUNT  = _global_loops_count
00227         self.TEST_LOOPS_LIST  = _test_loops_list if _test_loops_list else []
00228         self.TEST_LOOPS_DICT  = self.test_loop_list_to_dict (_test_loops_list)
00229 
00230         self.shuffle_random_seed  = 0.0
00231         self.SHUFFLE_SEED_ROUND  = 10
00232 
00233         # MUT list and test specification storage
00234         self.muts  = _muts
00235         self.test_spec  = _test_spec
00236 
00237         # Settings passed e.g. from command line
00238         self.opts_db_url  = _opts_db_url
00239         self.opts_log_file_name  = _opts_log_file_name
00240         self.opts_report_html_file_name  = _opts_report_html_file_name
00241         self.opts_report_junit_file_name  = _opts_report_junit_file_name
00242         self.opts_report_build_file_name  = _opts_report_build_file_name
00243         self.opts_report_text_file_name  = _opts_report_text_file_name
00244         self.opts_goanna_for_mbed_sdk  = _opts_goanna_for_mbed_sdk
00245         self.opts_goanna_for_tests  = _opts_goanna_for_tests
00246         self.opts_shuffle_test_order  = _opts_shuffle_test_order
00247         self.opts_shuffle_test_seed  = _opts_shuffle_test_seed
00248         self.opts_test_by_names  = _opts_test_by_names
00249         self.opts_peripheral_by_names  = _opts_peripheral_by_names
00250         self.opts_test_only_peripheral  = _opts_test_only_peripheral
00251         self.opts_test_only_common  = _opts_test_only_common
00252         self.opts_verbose_skipped_tests  = _opts_verbose_skipped_tests
00253         self.opts_verbose_test_result_only  = _opts_verbose_test_result_only
00254         self.opts_verbose  = _opts_verbose
00255         self.opts_firmware_global_name  = _opts_firmware_global_name
00256         self.opts_only_build_tests  = _opts_only_build_tests
00257         self.opts_parallel_test_exec  = _opts_parallel_test_exec
00258         self.opts_suppress_summary  = _opts_suppress_summary
00259         self.opts_test_x_toolchain_summary  = _opts_test_x_toolchain_summary
00260         self.opts_copy_method  = _opts_copy_method
00261         self.opts_mut_reset_type  = _opts_mut_reset_type
00262         self.opts_jobs  = _opts_jobs if _opts_jobs is not None else 1
00263         self.opts_waterfall_test  = _opts_waterfall_test
00264         self.opts_consolidate_waterfall_test  = _opts_consolidate_waterfall_test
00265         self.opts_extend_test_timeout  = _opts_extend_test_timeout
00266         self.opts_clean  = _clean
00267         self.opts_parser  = _parser
00268         self.opts  = _opts
00269         self.opts_auto_detect  = _opts_auto_detect
00270         self.opts_include_non_automated  = _opts_include_non_automated
00271 
00272         self.build_report  = _opts_build_report
00273         self.build_properties  = _opts_build_properties
00274 
00275         # File / screen logger initialization
00276         self.logger  = CLITestLogger(file_name=self.opts_log_file_name )  # Default test logger
00277 
00278         # Database related initializations
00279         self.db_logger  = factory_db_logger(self.opts_db_url )
00280         self.db_logger_build_id  = None # Build ID (database index of build_id table)
00281         # Let's connect to database to set up credentials and confirm database is ready
00282         if self.db_logger :
00283             self.db_logger .connect_url(self.opts_db_url ) # Save db access info inside db_logger object
00284             if self.db_logger .is_connected():
00285                 # Get hostname and uname so we can use it as build description
00286                 # when creating new build_id in external database
00287                 (_hostname, _uname) = self.db_logger .get_hostname()
00288                 _host_location = os.path.dirname(os.path.abspath(__file__))
00289                 build_id_type = None if self.opts_only_build_tests  is None else self.db_logger .BUILD_ID_TYPE_BUILD_ONLY
00290                 self.db_logger_build_id  = self.db_logger .get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
00291                 self.db_logger .disconnect()
00292 
00293     def dump_options (self):
00294         """ Function returns data structure with common settings passed to SingleTestRunner
00295             It can be used for example to fill _extra fields in database storing test suite single run data
00296             Example:
00297             data = self.dump_options()
00298             or
00299             data_str = json.dumps(self.dump_options())
00300         """
00301         result = {"db_url" : str(self.opts_db_url ),
00302                   "log_file_name" :  str(self.opts_log_file_name ),
00303                   "shuffle_test_order" : str(self.opts_shuffle_test_order ),
00304                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed ),
00305                   "test_by_names" :  str(self.opts_test_by_names ),
00306                   "peripheral_by_names" : str(self.opts_peripheral_by_names ),
00307                   "test_only_peripheral" :  str(self.opts_test_only_peripheral ),
00308                   "test_only_common" :  str(self.opts_test_only_common ),
00309                   "verbose" :  str(self.opts_verbose ),
00310                   "firmware_global_name" :  str(self.opts_firmware_global_name ),
00311                   "only_build_tests" :  str(self.opts_only_build_tests ),
00312                   "copy_method" :  str(self.opts_copy_method ),
00313                   "mut_reset_type" :  str(self.opts_mut_reset_type ),
00314                   "jobs" :  str(self.opts_jobs ),
00315                   "extend_test_timeout" :  str(self.opts_extend_test_timeout ),
00316                   "_dummy" : ''
00317         }
00318         return result
00319 
00320     def shuffle_random_func(self):
00321         return self.shuffle_random_seed 
00322 
00323     def is_shuffle_seed_float (self):
00324         """ Return True if the shuffle random seed can be converted to float
00325         """
00326         result = True
00327         try:
00328             float(self.shuffle_random_seed )
00329         except ValueError:
00330             result = False
00331         return result
00332 
00333     # This will store target / toolchain specific properties
00334     test_suite_properties_ext = {}  # target : toolchain
00335     # Here we store test results
00336     test_summary = []
00337     # Here we store test results in extended data structure
00338     test_summary_ext = {}
00339     execute_thread_slice_lock = Lock()
00340 
00341     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
00342         for toolchain in toolchains:
00343             tt_id = "%s::%s" % (toolchain, target)
00344 
00345             # Let's build our test
00346             # Skip this target early if its platform is not in TARGET_MAP
00347             if target not in TARGET_MAP:
00348                 print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
00349                 continue
00350 
00351             T = TARGET_MAP[target]
00352 
00353             # Test suite properties returned to external tools like CI
00354             test_suite_properties = {
00355                 'jobs': self.opts_jobs ,
00356                 'clean': clean,
00357                 'target': target,
00358                 'vendor': T.extra_labels[0],
00359                 'test_ids': ', '.join(test_ids),
00360                 'toolchain': toolchain,
00361                 'shuffle_random_seed': self.shuffle_random_seed 
00362             }
00363 
00364 
00365 
00366             clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk  or clean or self.opts_clean  else None
00367 
00368             profile = extract_profile(self.opts_parser , self.opts , toolchain)
00369             stats_depth = self.opts .stats_depth or 2
00370 
00371 
00372             try:
00373                 build_mbed_libs_result = build_mbed_libs(T,
00374                                                          toolchain,
00375                                                          clean=clean_mbed_libs_options,
00376                                                          verbose=self.opts_verbose ,
00377                                                          jobs=self.opts_jobs ,
00378                                                          report=build_report,
00379                                                          properties=build_properties,
00380                                                          build_profile=profile)
00381 
00382                 if not build_mbed_libs_result:
00383                     print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
00384                     continue
00385 
00386             except ToolException:
00387                 print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
00388                 continue
00389 
00390             build_dir = join(BUILD_DIR, "test", target, toolchain)
00391 
00392             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
00393             test_suite_properties['build_dir'] = build_dir
00394             test_suite_properties['skipped'] = []
00395 
00396             # Enumerate through all tests and shuffle test order if requested
00397             test_map_keys = sorted(TEST_MAP.keys())
00398 
00399             if self.opts_shuffle_test_order :
00400                 random.shuffle(test_map_keys, self.shuffle_random_func )
00401                 # Update database with shuffle seed if applicable
00402                 if self.db_logger :
00403                     self.db_logger .reconnect();
00404                     if self.db_logger .is_connected():
00405                         self.db_logger .update_build_id_info(self.db_logger_build_id , _shuffle_seed=self.shuffle_random_func ())
00406                         self.db_logger .disconnect();
00407 
00408             if self.db_logger :
00409                 self.db_logger .reconnect();
00410                 if self.db_logger .is_connected():
00411                     # Update MUTs and Test Specification in database
00412                     self.db_logger .update_build_id_info(self.db_logger_build_id , _muts=self.muts , _test_spec=self.test_spec )
00413                     # Update Extra information in database (some options passed to test suite)
00414                     self.db_logger .update_build_id_info(self.db_logger_build_id , _extra=json.dumps(self.dump_options ()))
00415                     self.db_logger .disconnect();
00416 
00417             valid_test_map_keys = self.get_valid_tests (test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated )
00418             skipped_test_map_keys = self.get_skipped_tests (test_map_keys, valid_test_map_keys)
00419 
00420             for skipped_test_id in skipped_test_map_keys:
00421                 test_suite_properties['skipped'].append(skipped_test_id)
00422 
00423 
00424             # First pass through all tests and determine which libraries need to be built
00425             libraries = []
00426             for test_id in valid_test_map_keys:
00427                 test = TEST_MAP[test_id]
00428 
00429                 # Detect which lib should be added to test
00430                 # Some libs have to be compiled, like RTOS or ETH
00431                 for lib in LIBRARIES:
00432                     if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
00433                         libraries.append(lib['id'])
00434 
00435 
00436             clean_project_options = True if self.opts_goanna_for_tests  or clean or self.opts_clean  else None
00437 
00438             # Build all required libraries
00439             for lib_id in libraries:
00440                 try:
00441                     build_lib(lib_id,
00442                               T,
00443                               toolchain,
00444                               verbose=self.opts_verbose ,
00445                               clean=clean_mbed_libs_options,
00446                               jobs=self.opts_jobs ,
00447                               report=build_report,
00448                               properties=build_properties,
00449                               build_profile=profile)
00450 
00451                 except ToolException:
00452                     print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building library %s'% (lib_id))
00453                     continue
00454 
00455 
00456             for test_id in valid_test_map_keys:
00457                 test = TEST_MAP[test_id]
00458 
00459                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
00460 
00461                 # TODO: move these two loops below to a separate function
00462                 INC_DIRS = []
00463                 for lib_id in libraries:
00464                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
00465                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
00466 
00467                 MACROS = []
00468                 for lib_id in libraries:
00469                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
00470                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
00471                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
00472                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
00473                 test_uuid = uuid.uuid4()
00474                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
00475 
00476                 # Prepare extended test results data structure (it can be used to generate detailed test report)
00477                 if target not in self.test_summary_ext :
00478                     self.test_summary_ext [target] = {}  # test_summary_ext : target
00479                 if toolchain not in self.test_summary_ext [target]:
00480                     self.test_summary_ext [target][toolchain] = {}    # test_summary_ext : target : toolchain
00481 
00482                 tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)    # For logging only
00483 
00484                 project_name = self.opts_firmware_global_name  if self.opts_firmware_global_name  else None
00485                 try:
00486                     path = build_project(test.source_dir, join(build_dir, test_id), T,
00487                         toolchain, test.dependencies, clean=clean_project_options,
00488                         verbose=self.opts_verbose , name=project_name, macros=MACROS,
00489                         inc_dirs=INC_DIRS, jobs=self.opts_jobs , report=build_report,
00490                         properties=build_properties, project_id=test_id,
00491                         project_description=test.get_description(),
00492                         build_profile=profile, stats_depth=stats_depth)
00493 
00494                 except Exception, e:
00495                     project_name_str = project_name if project_name is not None else test_id
00496 
00497 
00498                     test_result = self.TEST_RESULT_FAIL 
00499 
00500                     if isinstance(e, ToolException):
00501                         print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
00502                         test_result = self.TEST_RESULT_BUILD_FAILED 
00503                     elif isinstance(e, NotSupportedException):
00504                         print self.logger .log_line(self.logger .LogType.INFO, 'The project %s is not supported'% (project_name_str))
00505                         test_result = self.TEST_RESULT_NOT_SUPPORTED 
00506 
00507 
00508                     # Append test results to global test summary
00509                     self.test_summary .append(
00510                         (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
00511                     )
00512 
00513                     # Add detailed test result to test summary structure
00514                     if test_id not in self.test_summary_ext [target][toolchain]:
00515                         self.test_summary_ext [target][toolchain][test_id] = []
00516 
00517                     self.test_summary_ext [target][toolchain][test_id].append({ 0: {
00518                         'result' : test_result,
00519                         'output' : '',
00520                         'target_name' : target,
00521                         'target_name_unique': target,
00522                         'toolchain_name' : toolchain,
00523                         'id' : test_id,
00524                         'description' : test.get_description(),
00525                         'elapsed_time' : 0,
00526                         'duration' : 0,
00527                         'copy_method' : None
00528                     }})
00529                     continue
00530 
00531                 if self.opts_only_build_tests :
00532                     # With this option we are skipping testing phase
00533                     continue
00534 
00535                 # Test duration can be increased by global value
00536                 test_duration = test.duration
00537                 if self.opts_extend_test_timeout  is not None:
00538                     test_duration += self.opts_extend_test_timeout 
00539 
00540                 # For an automated test the duration acts as a timeout after
00541                 # which the test gets interrupted
00542                 test_spec = self.shape_test_request (target, path, test_id, test_duration)
00543                 test_loops = self.get_test_loop_count (test_id)
00544 
00545                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
00546                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
00547                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
00548 
00549                 # read MUTs, test specification and perform tests
00550                 handle_results = self.handle (test_spec, target, toolchain, test_loops=test_loops)
00551 
00552                 if handle_results is None:
00553                     continue
00554 
00555                 for handle_result in handle_results:
00556                     if handle_result:
00557                         single_test_result, detailed_test_results = handle_result
00558                     else:
00559                         continue
00560 
00561                     # Append test results to global test summary
00562                     if single_test_result is not None:
00563                         self.test_summary .append(single_test_result)
00564 
00565                     # Add detailed test result to test summary structure
00566                     if test_id not in self.test_summary_ext [target][toolchain]:
00567                         self.test_summary_ext [target][toolchain][test_id] = []
00568 
00569                     append_test_result = detailed_test_results
00570 
00571                     # If waterfall and consolidate-waterfall options are enabled,
00572                     # only include the last test result in the report.
00573                     if self.opts_waterfall_test  and self.opts_consolidate_waterfall_test :
00574                         append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
00575 
00576                     self.test_summary_ext [target][toolchain][test_id].append(append_test_result)
00577 
00578 
00579             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
00580             self.test_suite_properties_ext [target][toolchain] = test_suite_properties
00581 
00582         q.put(target + '_'.join(toolchains))
00583         return
00584 
00585     def execute(self):
00586         clean = self.test_spec .get('clean', False)
00587         test_ids = self.test_spec .get('test_ids', [])
00588         q = Queue()
00589 
00590         # Generate seed for shuffle if seed is not provided on the command line
00591         self.shuffle_random_seed  = round(random.random(), self.SHUFFLE_SEED_ROUND )
00592         if self.opts_shuffle_test_seed  is not None and self.is_shuffle_seed_float ():
00593             self.shuffle_random_seed  = round(float(self.opts_shuffle_test_seed ), self.SHUFFLE_SEED_ROUND )
00594 
00595 
00596         if self.opts_parallel_test_exec :
00597             ###################################################################
00598             # Experimental, parallel test execution per singletest instance.
00599             ###################################################################
00600             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
00601             # Note: We are building here in parallel for each target separately!
00602             # So we are not building the same thing multiple times and compilers
00603             # in separate threads do not collide.
00604             # Inside the execute_thread_slice() function, handle() will be called to
00605             # get information about available MUTs (per target).
00606             for target, toolchains in self.test_spec ['targets'].iteritems():
00607                 self.test_suite_properties_ext [target] = {}
00608                 t = threading.Thread(target=self.execute_thread_slice , args = (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties ))
00609                 t.daemon = True
00610                 t.start()
00611                 execute_threads.append(t)
00612 
00613             for t in execute_threads:
00614                 q.get() # Wait for a completion token from each thread; t.join() would force waiting for threads in a fixed order
00615         else:
00616             # Serialized (not parallel) test execution
00617             for target, toolchains in self.test_spec ['targets'].iteritems():
00618                 if target not in self.test_suite_properties_ext :
00619                     self.test_suite_properties_ext [target] = {}
00620 
00621                 self.execute_thread_slice (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties )
00622                 q.get()
00623 
00624         if self.db_logger :
00625             self.db_logger .reconnect();
00626             if self.db_logger .is_connected():
00627                 self.db_logger .update_build_id_info(self.db_logger_build_id , _status_fk=self.db_logger .BUILD_ID_STATUS_COMPLETED)
00628                 self.db_logger .disconnect();
00629 
00630         return self.test_summary , self.shuffle_random_seed , self.test_summary_ext , self.test_suite_properties_ext , self.build_report , self.build_properties 
00631 
00632     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
00633         valid_test_map_keys = []
00634 
00635         for test_id in test_map_keys:
00636             test = TEST_MAP[test_id]
00637             if self.opts_test_by_names  and test_id not in self.opts_test_by_names :
00638                 continue
00639 
00640             if test_ids and test_id not in test_ids:
00641                 continue
00642 
00643             if self.opts_test_only_peripheral  and not test.peripherals:
00644                 if self.opts_verbose_skipped_tests :
00645                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00646                 continue
00647 
00648             if self.opts_peripheral_by_names  and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names ]):
00649                 # We will skip tests not forced with -p option
00650                 if self.opts_verbose_skipped_tests :
00651                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00652                 continue
00653 
00654             if self.opts_test_only_common  and test.peripherals:
00655                 if self.opts_verbose_skipped_tests :
00656                     print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral test skipped for target %s'% (target))
00657                 continue
00658 
00659             if not include_non_automated and not test.automated:
00660                 if self.opts_verbose_skipped_tests :
00661                     print self.logger .log_line(self.logger .LogType.INFO, 'Non automated test skipped for target %s'% (target))
00662                 continue
00663 
00664             if test.is_supported(target, toolchain):
00665                 if test.peripherals is None and self.opts_only_build_tests :
00666                     # When the user passes the 'build only' flag and the test does not
00667                     # specify peripherals, we allow building the test by default
00668                     pass
00669                 elif self.opts_peripheral_by_names  and test_id not in self.opts_peripheral_by_names :
00670                     # If we force peripheral with option -p we expect test
00671                     # to pass even if peripheral is not in MUTs file.
00672                     pass
00673                 elif not self.is_peripherals_available (target, test.peripherals):
00674                     if self.opts_verbose_skipped_tests :
00675                         if test.peripherals:
00676                             print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
00677                         else:
00678                             print self.logger .log_line(self.logger .LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
00679                     continue
00680 
00681                 # The test has made it through all the filters, so add it to the valid tests list
00682                 valid_test_map_keys.append(test_id)
00683 
00684         return valid_test_map_keys
00685 
00686     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
00687         # NOTE: This will not preserve order
00688         return list(set(all_test_map_keys) - set(valid_test_map_keys))
00689 
00690     def generate_test_summary_by_target (self, test_summary, shuffle_seed=None):
00691         """ Prints well-formed summary with results (SQL table like)
00692             table shows test x toolchain test result matrix
00693         """
00694         RESULT_INDEX = 0
00695         TARGET_INDEX = 1
00696         TOOLCHAIN_INDEX = 2
00697         TEST_INDEX = 3
00698         DESC_INDEX = 4
00699 
00700         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
00701         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
00702         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
00703         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
00704 
00705         result = "Test summary:\n"
00706         for target in unique_targets:
00707             result_dict = {} # test : { toolchain : result }
00708             unique_target_toolchains = []
00709             for test in test_summary:
00710                 if test[TARGET_INDEX] == target:
00711                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
00712                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
00713                     if test[TEST_INDEX] not in result_dict:
00714                         result_dict[test[TEST_INDEX]] = {}
00715                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
00716 
00717             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
00718             pt = PrettyTable(pt_cols)
00719             for col in pt_cols:
00720                 pt.align[col] = "l"
00721             pt.padding_width = 1 # One space between column edges and contents (default)
00722 
00723             for test in unique_tests:
00724                 if test in result_dict:
00725                     test_results = result_dict[test]
00726                     if test in unique_test_desc:
00727                         row = [target, test, unique_test_desc[test]]
00728                         for toolchain in unique_toolchains:
00729                             if toolchain in test_results:
00730                                 row.append(test_results[toolchain])
00731                         pt.add_row(row)
00732             result += pt.get_string()
00733             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND ,
00734                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00735             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00736         return result
00737 
00738     def generate_test_summary (self, test_summary, shuffle_seed=None):
00739         """ Prints well-formed summary with results (SQL table like)
00740             table shows target x test result matrix
00741         """
00742         success_code = 0    # Success code that can later be returned to the caller
00743         result = "Test summary:\n"
00744         # Pretty table package is used to print results
00745         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
00746                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
00747         pt.align["Result"] = "l" # Left align
00748         pt.align["Target"] = "l" # Left align
00749         pt.align["Toolchain"] = "l" # Left align
00750         pt.align["Test ID"] = "l" # Left align
00751         pt.align["Test Description"] = "l" # Left align
00752         pt.padding_width = 1 # One space between column edges and contents (default)
00753 
00754         result_dict = {self.TEST_RESULT_OK  : 0,
00755                        self.TEST_RESULT_FAIL  : 0,
00756                        self.TEST_RESULT_ERROR  : 0,
00757                        self.TEST_RESULT_UNDEF  : 0,
00758                        self.TEST_RESULT_IOERR_COPY  : 0,
00759                        self.TEST_RESULT_IOERR_DISK  : 0,
00760                        self.TEST_RESULT_IOERR_SERIAL  : 0,
00761                        self.TEST_RESULT_NO_IMAGE  : 0,
00762                        self.TEST_RESULT_TIMEOUT  : 0,
00763                        self.TEST_RESULT_MBED_ASSERT  : 0,
00764                        self.TEST_RESULT_BUILD_FAILED  : 0,
00765                        self.TEST_RESULT_NOT_SUPPORTED  : 0
00766         }
00767 
00768         for test in test_summary:
00769             if test[0] in result_dict:
00770                 result_dict[test[0]] += 1
00771             pt.add_row(test)
00772         result += pt.get_string()
00773         result += "\n"
00774 
00775         # Print result count
00776         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
00777         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND ,
00778                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00779         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00780         return result
00781 
00782     def test_loop_list_to_dict (self, test_loops_str):
00783         """ Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
00784         """
00785         result = {}
00786         if test_loops_str:
00787             test_loops = test_loops_str
00788             for test_loop in test_loops:
00789                 test_loop_count = test_loop.split('=')
00790                 if len(test_loop_count) == 2:
00791                     _test_id, _test_loops = test_loop_count
00792                     try:
00793                         _test_loops = int(_test_loops)
00794                     except:
00795                         continue
00796                     result[_test_id] = _test_loops
00797         return result
00798 
00799     def get_test_loop_count (self, test_id):
00800         """ This function returns no. of loops per test (deduced from test_id).
00801             If test is not in list of redefined loop counts it will use default value.
00802         """
00803         result = self.GLOBAL_LOOPS_COUNT 
00804         if test_id in self.TEST_LOOPS_DICT :
00805             result = self.TEST_LOOPS_DICT [test_id]
00806         return result
00807 
00808     def delete_file (self, file_path):
00809         """ Remove file from the system
00810         """
00811         result = True
00812         result_msg = ""
00813         try:
00814             os.remove(file_path)
00815         except Exception, e:
00816             result_msg = e
00817             result = False
00818         return result, result_msg
00819 
00820     def handle_mut (self, mut, data, target_name, toolchain_name, test_loops=1):
00821         """ Test is being invoked for given MUT.
00822         """
00823         # Get test information, image and test timeout
00824         test_id = data['test_id']
00825         test = TEST_MAP[test_id]
00826         test_description = TEST_MAP[test_id].get_description()
00827         image = data["image"]
00828         duration = data.get("duration", 10)
00829 
00830         if mut is None:
00831             print "Error: No Mbed available: MUT[%s]" % data['mcu']
00832             return None
00833 
00834         mcu = mut['mcu']
00835         copy_method = mut.get('copy_method')        # Available board configuration selection e.g. core selection etc.
00836 
00837         if self.db_logger :
00838             self.db_logger .reconnect()
00839 
00840         selected_copy_method = self.opts_copy_method  if copy_method is None else copy_method
00841 
00842         # Tests can be looped so test results must be stored for the same test
00843         test_all_result = []
00844         # Test results for one test run a few times
00845         detailed_test_results = {}  # { Loop_number: { results ... } }
00846 
00847         for test_index in range(test_loops):
00848 
00849             # If mbedls is available and we are auto detecting MUT info,
00850             # update MUT info (mount point may have changed)
00851             if get_module_avail('mbed_lstools') and self.opts_auto_detect :
00852                 platform_name_filter = [mcu]
00853                 muts_list = {}
00854                 found = False
00855 
00856                 for i in range(0, 60):
00857                     print('Looking for %s with MBEDLS' % mcu)
00858                     muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
00859 
00860                     if 1 not in muts_list:
00861                         sleep(3)
00862                     else:
00863                         found = True
00864                         break
00865 
00866                 if not found:
00867                     print "Error: mbed not found with MBEDLS: %s" % data['mcu']
00868                     return None
00869                 else:
00870                     mut = muts_list[1]
00871 
00872             disk = mut.get('disk')
00873             port = mut.get('port')
00874 
00875             if disk is None or port is None:
00876                 return None
00877 
00878             target_by_mcu = TARGET_MAP[mut['mcu']]
00879             target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
00880             # Some extra stuff can be declared in MUTs structure
00881             reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
00882             reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
00883 
00884             # When the build and test system were separate, this was relative to a
00885             # base network folder base path: join(NETWORK_BASE_PATH, )
00886             image_path = image
00887 
00888             # Host test execution
00889             start_host_exec_time = time()
00890 
00891             single_test_result = self.TEST_RESULT_UNDEF  # single test run result
00892             _copy_method = selected_copy_method
00893 
00894             if not exists(image_path):
00895                 single_test_result = self.TEST_RESULT_NO_IMAGE 
00896                 elapsed_time = 0
00897                 single_test_output = self.logger .log_line(self.logger .LogType.ERROR, 'Image file does not exist: %s'% image_path)
00898                 print single_test_output
00899             else:
00900                 # Host test execution
00901                 start_host_exec_time = time()
00902 
00903                 host_test_verbose = self.opts_verbose_test_result_only  or self.opts_verbose 
00904                 host_test_reset = self.opts_mut_reset_type  if reset_type is None else reset_type
00905                 host_test_result = self.run_host_test (test.host_test,
00906                                                       image_path, disk, port, duration,
00907                                                       micro=target_name,
00908                                                       verbose=host_test_verbose,
00909                                                       reset=host_test_reset,
00910                                                       reset_tout=reset_tout,
00911                                                       copy_method=selected_copy_method,
00912                                                       program_cycle_s=target_by_mcu.program_cycle_s)
00913                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
00914 
00915             # Store test result
00916             test_all_result.append(single_test_result)
00917             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
00918             elapsed_time = single_testduration  # Time of single test case execution after reset
00919 
00920             detailed_test_results[test_index] = {
00921                 'result' : single_test_result,
00922                 'output' : single_test_output,
00923                 'target_name' : target_name,
00924                 'target_name_unique' : target_name_unique,
00925                 'toolchain_name' : toolchain_name,
00926                 'id' : test_id,
00927                 'description' : test_description,
00928                 'elapsed_time' : round(elapsed_time, 2),
00929                 'duration' : single_timeout,
00930                 'copy_method' : _copy_method,
00931             }
00932 
00933             print self.print_test_result (single_test_result, target_name_unique, toolchain_name,
00934                                          test_id, test_description, elapsed_time, single_timeout)
00935 
00936             # Update database entries for ongoing test
00937             if self.db_logger  and self.db_logger .is_connected():
00938                 test_type = 'SingleTest'
00939                 self.db_logger .insert_test_entry(self.db_logger_build_id ,
00940                                                  target_name,
00941                                                  toolchain_name,
00942                                                  test_type,
00943                                                  test_id,
00944                                                  single_test_result,
00945                                                  single_test_output,
00946                                                  elapsed_time,
00947                                                  single_timeout,
00948                                                  test_index)
00949 
00950             # In waterfall mode we repeat the test until we get OK, then stop testing
00951             if self.opts_waterfall_test  and single_test_result == self.TEST_RESULT_OK :
00952                 break
00953 
00954         if self.db_logger :
00955             self.db_logger .disconnect()
00956 
00957         return (self.shape_global_test_loop_result (test_all_result, self.opts_waterfall_test  and self.opts_consolidate_waterfall_test ),
00958                 target_name_unique,
00959                 toolchain_name,
00960                 test_id,
00961                 test_description,
00962                 round(elapsed_time, 2),
00963                 single_timeout,
00964                 self.shape_test_loop_ok_result_count (test_all_result)), detailed_test_results
00965 
00966     def handle (self, test_spec, target_name, toolchain_name, test_loops=1):
00967         """ Function determines MUT's mbed disk/port and copies binary to
00968             target.
00969         """
00970         handle_results = []
00971         data = json.loads(test_spec)
00972 
00973         # Find a suitable MUT:
00974         mut = None
00975         for id, m in self.muts .iteritems():
00976             if m['mcu'] == data['mcu']:
00977                 mut = m
00978                 handle_result = self.handle_mut (mut, data, target_name, toolchain_name, test_loops=test_loops)
00979                 handle_results.append(handle_result)
00980 
00981         return handle_results
00982 
00983     def print_test_result (self, test_result, target_name, toolchain_name,
00984                           test_id, test_description, elapsed_time, duration):
00985         """ Use specific convention to print test result and related data
00986         """
00987         tokens = []
00988         tokens.append("TargetTest")
00989         tokens.append(target_name)
00990         tokens.append(toolchain_name)
00991         tokens.append(test_id)
00992         tokens.append(test_description)
00993         separator = "::"
00994         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
00995         result = separator.join(tokens) + " [" + test_result +"]" + time_info
00996         return Fore.MAGENTA + result + Fore.RESET
00997 
00998     def shape_test_loop_ok_result_count (self, test_all_result):
00999         """ Reformats list of results to simple string
01000         """
01001         test_loop_count = len(test_all_result)
01002         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK )
01003         return "%d/%d"% (test_loop_ok_result, test_loop_count)
01004 
01005     def shape_global_test_loop_result (self, test_all_result, waterfall_and_consolidate):
01006         """ Reformats list of results to simple string
01007         """
01008         result = self.TEST_RESULT_FAIL 
01009 
01010         if all(test_all_result[0] == res for res in test_all_result):
01011             result = test_all_result[0]
01012         elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK  for res in test_all_result):
01013             result = self.TEST_RESULT_OK 
01014 
01015         return result
01016 
01017     def run_host_test (self, name, image_path, disk, port, duration,
01018                       micro=None, reset=None, reset_tout=None,
01019                       verbose=False, copy_method=None, program_cycle_s=None):
01020         """ Function creates new process with host test configured with particular test case.
01021             Function also polls for serial port activity from the process to catch all data
01022             printed by test runner and host test during test execution
01023         """
01024 
01025         def get_char_from_queue(obs):
01026             """ Get a character from the queue in a safe way
01027             """
01028             try:
01029                 c = obs.queue.get(block=True, timeout=0.5)
01030             except Empty, _:
01031                 c = None
01032             return c
01033 
01034         def filter_queue_char(c):
01035             """ Filters out non ASCII characters from serial port
01036             """
01037             if ord(c) not in range(128):
01038                 c = ' '
01039             return c
01040 
01041         def get_test_result(output):
01042             """ Parse test 'output' data
01043             """
01044             result = self.TEST_RESULT_TIMEOUT 
01045             for line in "".join(output).splitlines():
01046                 search_result = self.RE_DETECT_TESTCASE_RESULT .search(line)
01047                 if search_result and len(search_result.groups()):
01048                     result = self.TEST_RESULT_MAPPING [search_result.groups(0)[0]]
01049                     break
01050             return result
01051 
01052         def get_auto_property_value(property_name, line):
01053             """ Scans auto detection line from MUT and returns scanned parameter 'property_name'
01054                 Returns string
01055             """
01056             result = None
01057             if re.search("HOST: Property '%s'"% property_name, line) is not None:
01058                 property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
01059                 if property is not None and len(property.groups()) == 1:
01060                     result = property.groups()[0]
01061             return result
01062 
01063         # print "{%s} port:%s disk:%s"  % (name, port, disk),
01064         cmd = ["python",
01065                '%s.py'% name,
01066                '-d', disk,
01067                '-f', '"%s"'% image_path,
01068                '-p', port,
01069                '-t', str(duration),
01070                '-C', str(program_cycle_s)]
01071 
01072         if get_module_avail('mbed_lstools') and self.opts_auto_detect :
01073             cmd += ['--auto']
01074 
01075         # Add extra parameters to host_test
01076         if copy_method is not None:
01077             cmd += ["-c", copy_method]
01078         if micro is not None:
01079             cmd += ["-m", micro]
01080         if reset is not None:
01081             cmd += ["-r", reset]
01082         if reset_tout is not None:
01083             cmd += ["-R", str(reset_tout)]
01084 
01085         if verbose:
01086             print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
01087             print "Test::Output::Start"
01088 
01089         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
01090         obs = ProcessObserver(proc)
01091         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
01092         line = ''
01093         output = []
01094         start_time = time()
01095         while (time() - start_time) < (2 * duration):
01096             c = get_char_from_queue(obs)
01097             if c:
01098                 if verbose:
01099                     sys.stdout.write(c)
01100                 c = filter_queue_char(c)
01101                 output.append(c)
01102                 # Give the mbed under test a way to communicate the end of the test
01103                 if c in ['\n', '\r']:
01104 
01105                     # Checking for auto-detection information from the test about MUT reset moment
01106                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
01107                         # We will update this marker only once to prevent multiple timer resets
01108                         update_once_flag['reset_target'] = True
01109                         start_time = time()
01110 
01111                     # Checking for auto-detection information from the test about timeout
01112                     auto_timeout_val = get_auto_property_value('timeout', line)
01113                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
01114                         # We will update this marker only once to prevent multiple timeout updates
01115                         update_once_flag['timeout'] = True
01116                         duration = int(auto_timeout_val)
01117 
01118                     # Detect mbed assert:
01119                     if 'mbed assertation failed: ' in line:
01120                         output.append('{{mbed_assert}}')
01121                         break
01122 
01123                     # Check for test end
01124                     if '{end}' in line:
01125                         break
01126                     line = ''
01127                 else:
01128                     line += c
01129         end_time = time()
01130         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
01131 
01132         c = get_char_from_queue(obs)
01133 
01134         if c:
01135             if verbose:
01136                 sys.stdout.write(c)
01137             c = filter_queue_char(c)
01138             output.append(c)
01139 
01140         if verbose:
01141             print "Test::Output::Finish"
01142         # Stop test process
01143         obs.stop()
01144 
01145         result = get_test_result(output)
01146         return (result, "".join(output), testcase_duration, duration)
01147 
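    # Illustrative sketch (hypothetical values, not taken from a real run): for
    # name='host_test', disk='E:', port='COM3', duration=10, program_cycle_s=4 and
    # image_path='./build/basic.bin', the command assembled above is roughly
    #   python host_test.py -d E: -f "./build/basic.bin" -p COM3 -t 10 -C 4
    # executed from the HOST_TESTS directory and extended with -c/-m/-r/-R/--auto
    # when the corresponding options are set.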
01148     def is_peripherals_available (self, target_mcu_name, peripherals=None):
01149         """ Checks whether a MUT with the specified target MCU (and the required peripherals, if given) is defined in the MUTs file
01150         """
01151         if peripherals is not None:
01152             peripherals = set(peripherals)
01153         for id, mut in self.muts .iteritems():
01154             # Target MCU name check
01155             if mut["mcu"] != target_mcu_name:
01156                 continue
01157             # Peripherals check
01158             if peripherals is not None:
01159                 if 'peripherals' not in mut:
01160                     continue
01161                 if not peripherals.issubset(set(mut['peripherals'])):
01162                     continue
01163             return True
01164         return False
01165 
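    # Illustrative sketch (hypothetical MUTs entry): with self.muts set to
    #   {"1": {"mcu": "K64F", "port": "COM3", "disk": "E:", "peripherals": ["SD", "ethernet"]}}
    # is_peripherals_available("K64F", ["SD"]) returns True, while
    # is_peripherals_available("K64F", ["CAN"]) and is_peripherals_available("LPC1768")
    # both return False.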
01166     def shape_test_request (self, mcu, image_path, test_id, duration=10):
01167         """ Function prepares JSON structure describing test specification
01168         """
01169         test_spec = {
01170             "mcu": mcu,
01171             "image": image_path,
01172             "duration": duration,
01173             "test_id": test_id,
01174         }
01175         return json.dumps(test_spec)
01176 
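# Illustrative sketch (assumes 'single_test' is a configured SingleTestRunner
# instance; all literal values below are hypothetical): shape_test_request()
# returns a JSON string holding exactly the four fields built above.
def _example_shape_test_request(single_test):
    """ Hypothetical helper showing the structure produced by shape_test_request()
    """
    spec = single_test.shape_test_request("K64F", "./build/basic.bin", "MBED_A1", duration=10)
    # json.loads(spec) == {"mcu": "K64F", "image": "./build/basic.bin",
    #                      "duration": 10, "test_id": "MBED_A1"}
    return json.loads(spec)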
01177 
01178 def get_unique_value_from_summary (test_summary, index):
01179     """ Gets list of unique target names
01180     """
01181     result = []
01182     for test in test_summary:
01183         target_name = test[index]
01184         if target_name not in result:
01185             result.append(target_name)
01186     return sorted(result)
01187 
01188 
01189 def get_unique_value_from_summary_ext (test_summary, index_key, index_val):
01190     """ Gets unique values from the test summary and returns a dictionary mapping each unique 'index_key' value to its first 'index_val' value
01191     """
01192     result = {}
01193     for test in test_summary:
01194         key = test[index_key]
01195         val = test[index_val]
01196         if key not in result:
01197             result[key] = val
01198     return result
01199 
01200 
01201 def show_json_file_format_error (json_spec_filename, line, column):
01202     """ Prints the broken JSON file content around the error position
01203     """
01204     with open(json_spec_filename) as data_file:
01205         line_no = 1
01206         for json_line in data_file:
01207             if line_no + 5 >= line: # Print last few lines before error
01208                 print 'Line %d:\t'%line_no + json_line, # Prints line
01209             if line_no == line:
01210                 print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
01211                 break
01212             line_no += 1
01213 
01214 
01215 def json_format_error_defect_pos (json_error_msg):
01216     """ Gets the line and column of the first error in the JSON file format,
01217         parsed from the message of the exception thrown by json.loads()
01218     """
01219     result = None
01220     line, column = 0, 0
01221     # Line value search
01222     line_search = re.search('line [0-9]+', json_error_msg)
01223     if line_search is not None:
01224         ls = line_search.group().split(' ')
01225         if len(ls) == 2:
01226             line = int(ls[1])
01227             # Column position search
01228             column_search = re.search('column [0-9]+', json_error_msg)
01229             if column_search is not None:
01230                 cs = column_search.group().split(' ')
01231                 if len(cs) == 2:
01232                     column = int(cs[1])
01233                     result = [line, column]
01234     return result
01235 
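# Illustrative sketch (the error message below is hypothetical): only the
# "line N" and "column M" fragments of the json.loads() ValueError text are used.
def _example_json_defect_pos():
    """ Hypothetical example of recovering [line, column] from a parser message
    """
    msg = "Expecting ',' delimiter: line 12 column 5 (char 300)"
    return json_format_error_defect_pos(msg)    # -> [12, 5]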
01236 
01237 def get_json_data_from_file (json_spec_filename, verbose=False):
01238     """ Loads a JSON formatted file into a data structure
01239     """
01240     result = None
01241     try:
01242         with open(json_spec_filename) as data_file:
01243             try:
01244                 result = json.load(data_file)
01245             except ValueError as json_error_msg:
01246                 result = None
01247                 print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
01248                 # We can print where error occurred inside JSON file if we can parse exception msg
01249                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
01250                 if json_format_defect_pos is not None:
01251                     line = json_format_defect_pos[0]
01252                     column = json_format_defect_pos[1]
01253                     print
01254                     show_json_file_format_error(json_spec_filename, line, column)
01255 
01256     except IOError as fileopen_error_msg:
01257         print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
01258         print
01259     if verbose and result:
01260         pp = pprint.PrettyPrinter(indent=4)
01261         pp.pprint(result)
01262     return result
01263 
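# Illustrative usage sketch (the file name is an assumption): load a MUTs
# specification and fall back gracefully when the file is missing or malformed.
def _example_load_muts_file():
    muts = get_json_data_from_file('muts_all.json', verbose=False)
    if muts is None:
        print 'No usable MUTs specification found'
    return muts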
01264 
01265 def print_muts_configuration_from_json (json_data, join_delim=", ", platform_filter=None):
01266     """ Prints the MUTs configuration passed to the test script (verbose output)
01267     """
01268     muts_info_cols = []
01269     # We need to check all unique properties for each defined MUT
01270     for k in json_data:
01271         mut_info = json_data[k]
01272         for mut_property in mut_info:
01273             if mut_property not in muts_info_cols:
01274                 muts_info_cols.append(mut_property)
01275 
01276     # Prepare pretty table object to display all MUTs
01277     pt_cols = ["index"] + muts_info_cols
01278     pt = PrettyTable(pt_cols)
01279     for col in pt_cols:
01280         pt.align[col] = "l"
01281 
01282     # Add rows to pretty print object
01283     for k in json_data:
01284         row = [k]
01285         mut_info = json_data[k]
01286 
01287         add_row = True
01288         if platform_filter and 'mcu' in mut_info:
01289             add_row = re.search(platform_filter, mut_info['mcu']) is not None
01290         if add_row:
01291             for col in muts_info_cols:
01292                 cell_val = mut_info[col] if col in mut_info else None
01293                 if type(cell_val) == ListType:
01294                     cell_val = join_delim.join(cell_val)
01295                 row.append(cell_val)
01296             pt.add_row(row)
01297     return pt.get_string()
01298 
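# Illustrative sketch (hypothetical MUT entries, shaped like the dictionaries
# iterated above): keys are MUT indexes, values are property dictionaries.
def _example_print_muts():
    muts = {
        "1": {"mcu": "K64F", "port": "COM3", "disk": "E:", "peripherals": ["SD"]},
        "2": {"mcu": "NUCLEO_F302R8", "port": "COM34", "disk": "F:", "peripherals": []},
    }
    return print_muts_configuration_from_json(muts, platform_filter="K64F")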
01299 
01300 def print_test_configuration_from_json (json_data, join_delim=", "):
01301     """ Prints the test specification configuration passed to the test script (verbose output)
01302     """
01303     toolchains_info_cols = []
01304     # We need to check all toolchains for each device
01305     for k in json_data:
01306         # k should be 'targets'
01307         targets = json_data[k]
01308         for target in targets:
01309             toolchains = targets[target]
01310             for toolchain in toolchains:
01311                 if toolchain not in toolchains_info_cols:
01312                     toolchains_info_cols.append(toolchain)
01313 
01314     # Prepare pretty table object to display test specification
01315     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
01316     pt = PrettyTable(pt_cols)
01317     for col in pt_cols:
01318         pt.align[col] = "l"
01319 
01320     # { target : [conflicted toolchains] }
01321     toolchain_conflicts = {}
01322     toolchain_path_conflicts = []
01323     for k in json_data:
01324         # k should be 'targets'
01325         targets = json_data[k]
01326         for target in targets:
01327             target_supported_toolchains = get_target_supported_toolchains(target)
01328             if not target_supported_toolchains:
01329                 target_supported_toolchains = []
01330             target_name = target if target in TARGET_MAP else "%s*"% target
01331             row = [target_name]
01332             toolchains = targets[target]
01333 
01334             for toolchain in sorted(toolchains_info_cols):
01335                 # Check for conflicts: target vs toolchain
01336                 conflict = False
01337                 conflict_path = False
01338                 if toolchain in toolchains:
01339                     if toolchain not in target_supported_toolchains:
01340                         conflict = True
01341                         if target not in toolchain_conflicts:
01342                             toolchain_conflicts[target] = []
01343                         toolchain_conflicts[target].append(toolchain)
01344                 # Add marker inside table about target usage / conflict
01345                 cell_val = 'Yes' if toolchain in toolchains else '-'
01346                 if conflict:
01347                     cell_val += '*'
01348                 # Check for conflicts: toolchain vs toolchain path
01349                 if toolchain in TOOLCHAIN_PATHS:
01350                     toolchain_path = TOOLCHAIN_PATHS[toolchain]
01351                     if not os.path.isdir(toolchain_path):
01352                         conflict_path = True
01353                         if toolchain not in toolchain_path_conflicts:
01354                             toolchain_path_conflicts.append(toolchain)
01355                 if conflict_path:
01356                     cell_val += '#'
01357                 row.append(cell_val)
01358             pt.add_row(row)
01359 
01360     # generate result string
01361     result = pt.get_string()    # Test specification table
01362     if toolchain_conflicts or toolchain_path_conflicts:
01363         result += "\n"
01364         result += "Toolchain conflicts:\n"
01365         for target in toolchain_conflicts:
01366             if target not in TARGET_MAP:
01367                 result += "\t* Target %s unknown\n"% (target)
01368             conflict_target_list = join_delim.join(toolchain_conflicts[target])
01369             suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
01370             result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
01371 
01372         for toolchain in toolchain_path_conflicts:
01373             # Let's check toolchain configuration
01374             if toolchain in TOOLCHAIN_PATHS:
01375                 toolchain_path = TOOLCHAIN_PATHS[toolchain]
01376                 if not os.path.isdir(toolchain_path):
01377                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
01378     return result
01379 
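# Illustrative sketch (hypothetical targets and toolchains): the test
# specification walked above is keyed by 'targets', mapping each MCU to the
# toolchains it should be built with.
def _example_print_test_configuration():
    test_spec = {
        "targets": {
            "K64F": ["ARM", "GCC_ARM"],
            "LPC1768": ["ARM"],
        }
    }
    return print_test_configuration_from_json(test_spec)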
01380 
01381 def get_avail_tests_summary_table (cols=None, result_summary=True, join_delim=',',platform_filter=None):
01382     """ Generates a summary table of all test cases and additional test case
01383         information using pretty print functionality. Allows the test suite user
01384         to see the available test cases
01385     """
01386     # get all unique test ID prefixes
01387     unique_test_id = []
01388     for test in TESTS:
01389         split = test['id'].split('_')[:-1]
01390         test_id_prefix = '_'.join(split)
01391         if test_id_prefix not in unique_test_id:
01392             unique_test_id.append(test_id_prefix)
01393     unique_test_id.sort()
01394     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
01395     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
01396 
01397     test_properties = ['id',
01398                        'automated',
01399                        'description',
01400                        'peripherals',
01401                        'host_test',
01402                        'duration'] if cols is None else cols
01403 
01404     # All tests status table print
01405     pt = PrettyTable(test_properties)
01406     for col in test_properties:
01407         pt.align[col] = "l"
01408     pt.align['duration'] = "r"
01409 
01410     counter_all = 0
01411     counter_automated = 0
01412     pt.padding_width = 1 # One space between column edges and contents (default)
01413 
01414     for test_id in sorted(TEST_MAP.keys()):
01415         if platform_filter is not None:
01416             # Filter out platforms using regex
01417             if re.search(platform_filter, test_id) is None:
01418                 continue
01419         row = []
01420         test = TEST_MAP[test_id]
01421         split = test_id.split('_')[:-1]
01422         test_id_prefix = '_'.join(split)
01423 
01424         for col in test_properties:
01425             col_value = test[col]
01426             if type(test[col]) == ListType:
01427                 col_value = join_delim.join(test[col])
01428             elif test[col] == None:
01429                 col_value = "-"
01430 
01431             row.append(col_value)
01432         if test['automated'] == True:
01433             counter_dict_test_id_types[test_id_prefix] += 1
01434             counter_automated += 1
01435         pt.add_row(row)
01436         # Update counters
01437         counter_all += 1
01438         counter_dict_test_id_types_all[test_id_prefix] += 1
01439     result = pt.get_string()
01440     result += "\n\n"
01441 
01442     if result_summary and not platform_filter:
01443         # Automation result summary
01444         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
01445         pt = PrettyTable(test_id_cols)
01446         pt.align['automated'] = "r"
01447         pt.align['all'] = "r"
01448         pt.align['percent [%]'] = "r"
01449 
01450         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
01451         str_progress = progress_bar(percent_progress, 75)
01452         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
01453         result += "Automation coverage:\n"
01454         result += pt.get_string()
01455         result += "\n\n"
01456 
01457         # Test automation coverage table print
01458         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
01459         pt = PrettyTable(test_id_cols)
01460         pt.align['id'] = "l"
01461         pt.align['automated'] = "r"
01462         pt.align['all'] = "r"
01463         pt.align['percent [%]'] = "r"
01464         for unique_id in unique_test_id:
01465             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
01466             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
01467             str_progress = progress_bar(percent_progress, 75)
01468             row = [unique_id,
01469                    counter_dict_test_id_types[unique_id],
01470                    counter_dict_test_id_types_all[unique_id],
01471                    percent_progress,
01472                    "[" + str_progress + "]"]
01473             pt.add_row(row)
01474         result += "Test automation coverage:\n"
01475         result += pt.get_string()
01476         result += "\n\n"
01477     return result
01478 
01479 
01480 def progress_bar (percent_progress, saturation=0):
01481     """ Creates a progress bar string with an optional simple saturation mark
01482     """
01483     step = int(percent_progress / 2)    # Scale percentage (0-100) down to bar width (0-50)
01484     str_progress = '#' * step + '.' * int(50 - step)
01485     c = '!' if str_progress[38] == '.' else '|'
01486     if saturation > 0:
01487         saturation = saturation / 2
01488         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
01489     return str_progress
01490 
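# Illustrative sketch: 40% renders as 20 '#' characters followed by 30 '.'
# characters; a non-zero 'saturation' percentage splices a marker character in
# at half its value.
def _example_progress_bar():
    bar = progress_bar(40.0, saturation=75)
    return "[" + bar + "]"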
01491 
01492 def singletest_in_cli_mode (single_test):
01493     """ Runs SingleTestRunner object in CLI (Command line interface) mode
01494 
01495         @return returns success code (0 == success) for building and running tests
01496     """
01497     start = time()
01498     # Execute tests depending on options and filter applied
01499     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
01500     elapsed_time = time() - start
01501 
01502     # Human readable summary
01503     if not single_test.opts_suppress_summary:
01504         # prints well-formed summary with results (SQL table like)
01505         print single_test.generate_test_summary(test_summary, shuffle_seed)
01506     if single_test.opts_test_x_toolchain_summary:
01507         # prints well-formed summary with results (SQL table like)
01508         # table shows text x toolchain test result matrix
01509         print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
01510 
01511     print "Completed in %.2f sec"% (elapsed_time)
01512     print
01513     # Write summary of the builds
01514 
01515     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
01516     status = print_report_exporter.report(build_report)
01517 
01518     # Store extra reports in files
01519     if single_test.opts_report_html_file_name:
01520         # Export results in form of HTML report to separate file
01521         report_exporter = ReportExporter(ResultExporterType.HTML)
01522         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
01523     if single_test.opts_report_junit_file_name:
01524         # Export results in form of JUnit XML report to separate file
01525         report_exporter = ReportExporter(ResultExporterType.JUNIT)
01526         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
01527     if single_test.opts_report_text_file_name:
01528         # Export results in form of a text file
01529         report_exporter = ReportExporter(ResultExporterType.TEXT)
01530         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
01531     if single_test.opts_report_build_file_name:
01532         # Export build results as JUnit XML report to separate file
01533         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
01534         report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
01535 
01536     # Returns True if no build failures of the test projects or their dependencies
01537     return status
01538 
01539 class TestLogger ():
01540     """ Base class for logging and printing ongoing events during a test suite run
01541     """
01542     def __init__ (self, store_log=True):
01543         """ Controls whether the logger actually stores the log in memory
01544             or just handles all log entries immediately
01545         """
01546         self.log  = []
01547         self.log_to_file  = False
01548         self.log_file_name  = None
01549         self.store_log  = store_log
01550 
01551         self.LogType  = construct_enum(INFO='Info',
01552                                       WARN='Warning',
01553                                       NOTIF='Notification',
01554                                       ERROR='Error',
01555                                       EXCEPT='Exception')
01556 
01557         self.LogToFileAttr  = construct_enum(CREATE=1,    # Create or overwrite existing log file
01558                                             APPEND=2)    # Append to existing log file
01559 
01560     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01561         """ Log one line of text
01562         """
01563         log_timestamp = time()
01564         log_entry = {'log_type' : LogType,
01565                      'log_timestamp' : log_timestamp,
01566                      'log_line' : log_line,
01567                      '_future' : None
01568         }
01569         # Store log in memory
01570         if self.store_log :
01571             self.log .append(log_entry)
01572         return log_entry
01573 
01574 
01575 class CLITestLogger (TestLogger ):
01576     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
01577     """
01578     def __init__(self, store_log=True, file_name=None):
01579         TestLogger.__init__(self)
01580         self.log_file_name  = file_name
01581         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
01582         self.TIMESTAMP_FORMAT  = '%H:%M:%S' # Time only
01583 
01584     def log_print (self, log_entry, timestamp=True):
01585         """ Returns a formatted log entry for on-screen printing
01586         """
01587         ts = log_entry['log_timestamp']
01588         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT ) if timestamp else ''
01589         log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
01590         return timestamp_str + log_line_str
01591 
01592     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01593         """ Logs a line; if log file output was specified, the log line is also
01594             appended at the end of the log file
01595         """
01596         log_entry = TestLogger.log_line(self, LogType, log_line)
01597         log_line_str = self.log_print (log_entry, timestamp)
01598         if self.log_file_name  is not None:
01599             try:
01600                 with open(self.log_file_name , 'a') as f:
01601                     f.write(log_line_str + line_delim)
01602             except IOError:
01603                 pass
01604         return log_line_str
01605 
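# Illustrative sketch (the log file name is hypothetical, and it assumes
# construct_enum() exposes the log types as attributes): log_line() returns the
# formatted entry and, because a file name is given, also appends it to that file.
def _example_cli_logger():
    logger = CLITestLogger(store_log=True, file_name='single_test.log')
    return logger.log_line(logger.LogType.INFO, 'Test suite started')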
01606 
01607 def factory_db_logger (db_url):
01608     """ Creates a database access driver depending on the database type supplied in the connection string db_url
01609     """
01610     if db_url is not None:
01611         from tools.test_mysql import MySQLDBAccess
01612         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
01613         if connection_info is not None:
01614             (db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
01615             if db_type == 'mysql':
01616                 return MySQLDBAccess()
01617     return None
01618 
01619 
01620 def detect_database_verbose (db_url):
01621     """ Runs the database detection sequence in verbose mode (prints progress) to check if the database connection string is valid
01622     """
01623     result = BaseDBAccess().parse_db_connection_string(db_url)
01624     if result is not None:
01625         # Parsing passed
01626         (db_type, username, password, host, db_name) = result
01627         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
01628         # Let's try to connect
01629         db_ = factory_db_logger(db_url)
01630         if db_ is not None:
01631             print "Connecting to database '%s'..."% db_url,
01632             db_.connect(host, username, password, db_name)
01633             if db_.is_connected():
01634                 print "ok"
01635                 print "Detecting database..."
01636                 print db_.detect_database(verbose=True)
01637                 print "Disconnecting...",
01638                 db_.disconnect()
01639                 print "done"
01640         else:
01641             print "Database type '%s' unknown"% db_type
01642     else:
01643         print "Parse error: '%s' - DB Url error"% (db_url)
01644 
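# Illustrative sketch (credentials are placeholders): the connection string
# format parsed by the database helpers above, matching the --db option help below.
def _example_detect_database():
    detect_database_verbose('mysql://username:password@127.0.0.1/db_name')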
01645 
01646 def get_module_avail (module_name):
01647     """ Returns True if 'module_name' is an already imported module
01648     """
01649     return module_name in sys.modules.keys()
01650 
01651 def get_autodetected_MUTS_list(platform_name_filter=None):
01652     oldError = None
01653     if os.name == 'nt':
01654         # Disable Windows error box temporarily
01655         oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
01656 
01657     mbeds = mbed_lstools.create()
01658     detect_muts_list = mbeds.list_mbeds()
01659 
01660     if os.name == 'nt':
01661         ctypes.windll.kernel32.SetErrorMode(oldError)
01662 
01663     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
01664 
01665 def get_autodetected_MUTS (mbeds_list, platform_name_filter=None):
01666     """ Detects all mbed-enabled devices connected to the host and generates an artificial MUTs file.
01667         If the function fails to auto-detect devices it returns an empty dictionary.
01668 
01669         if get_module_avail('mbed_lstools'):
01670             mbeds = mbed_lstools.create()
01671             mbeds_list = mbeds.list_mbeds()
01672 
01673         @param mbeds_list list of mbeds captured from mbed_lstools
01674         @param platform_name_filter optional list of platform names used to filter the detected devices by 'platform_name'
01675     """
01676     result = {}   # Should be in muts_all.json format
01677     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
01678     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
01679     index = 1
01680     for mut in mbeds_list:
01681         # Filter the MUTS if a filter is specified
01682 
01683         if platform_name_filter and not mut['platform_name'] in platform_name_filter:
01684             continue
01685 
01686         # For mcu_unique we assign the 'platform_name_unique' value from the mbedls output (if it exists);
01687         # if not, we create our own unique value (last few chars of the platform's target_id).
01688         m = {'mcu': mut['platform_name'],
01689              'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
01690              'port': mut['serial_port'],
01691              'disk': mut['mount_point'],
01692              'peripherals': []     # No peripheral detection
01693              }
01694         if index not in result:
01695             result[index] = {}
01696         result[index] = m
01697         index += 1
01698     return result
01699 
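# Illustrative sketch (board data copied from the comment above): one detected
# board yields a single-entry dictionary in muts_all.json format, indexed from 1.
def _example_autodetected_muts():
    mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:',
                   'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
    muts = get_autodetected_MUTS(mbeds_list)
    # muts[1] == {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]',
    #             'port': u'COM34', 'disk': 'E:', 'peripherals': []}
    return muts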
01700 
01701 def get_autodetected_TEST_SPEC (mbeds_list,
01702                                use_default_toolchain=True,
01703                                use_supported_toolchains=False,
01704                                toolchain_filter=None,
01705                                platform_name_filter=None):
01706     """ Detects all mbed-enabled devices connected to the host and generates an artificial test_spec file.
01707         If the function fails to auto-detect devices it returns an empty 'targets' test_spec description.
01708 
01709         use_default_toolchain - if True add default toolchain to test_spec
01710         use_supported_toolchains - if True add all supported toolchains to test_spec
01711         toolchain_filter - if set to a list of toolchains, add only those toolchains to test_spec
01712     """
01713     result = {'targets': {} }
01714 
01715     for mut in mbeds_list:
01716         mcu = mut['mcu']
01717         if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
01718             if mcu in TARGET_MAP:
01719                 default_toolchain = TARGET_MAP[mcu].default_toolchain
01720                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
01721 
01722                 # Decide which toolchains should be added to test specification toolchain pool for each target
01723                 toolchains = []
01724                 if use_default_toolchain:
01725                     toolchains.append(default_toolchain)
01726                 if use_supported_toolchains:
01727                     toolchains += supported_toolchains
01728                 if toolchain_filter is not None:
01729                     all_toolchains = supported_toolchains + [default_toolchain]
01730                     for toolchain in toolchain_filter:
01731                         if toolchain in all_toolchains:
01732                             toolchains.append(toolchain)
01733 
01734                 result['targets'][mcu] = list(set(toolchains))
01735     return result
01736 
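# Illustrative sketch (reuses the hypothetical board above; assumes the MCU is
# present in TARGET_MAP): the generated test specification maps each recognised
# MCU to a de-duplicated toolchain list, e.g. {'targets': {'NUCLEO_F302R8': [...]}}.
def _example_autodetected_test_spec():
    muts = _example_autodetected_muts().values()
    return get_autodetected_TEST_SPEC(muts, use_default_toolchain=True,
                                      use_supported_toolchains=False)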
01737 
01738 def get_default_test_options_parser ():
01739     """ Get common test script options used by CLI, web services etc.
01740     """
01741     parser = argparse.ArgumentParser()
01742     parser.add_argument('-i', '--tests',
01743                         dest='test_spec_filename',
01744                         metavar="FILE",
01745                         type=argparse_filestring_type,
01746                         help='Points to file with test specification')
01747 
01748     parser.add_argument('-M', '--MUTS',
01749                         dest='muts_spec_filename',
01750                         metavar="FILE",
01751                         type=argparse_filestring_type,
01752                         help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
01753 
01754     parser.add_argument("-j", "--jobs",
01755                         dest='jobs',
01756                         metavar="NUMBER",
01757                         type=int,
01758                         help="Define number of compilation jobs. Default value is 1")
01759 
01760     if get_module_avail('mbed_lstools'):
01761         # Additional features available when mbed_lstools is installed on host and imported
01762         # mbed_lstools allows users to detect mbed-enabled devices connected to the host
01763         parser.add_argument('--auto',
01764                             dest='auto_detect',
01765                             action="store_true",
01766                             help='Use mbed-ls module to detect all connected mbed devices')
01767 
01768         toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
01769         parser.add_argument('--tc',
01770                             dest='toolchains_filter',
01771                             type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
01772                             help="Toolchain filter for --auto argument. Use toolchain names separated by commas, 'default' or 'all' to select toolchains")
01773 
01774         test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
01775         parser.add_argument('--oper',
01776                             dest='operability_checks',
01777                             type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
01778                             help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
01779 
01780     parser.add_argument('--clean',
01781                         dest='clean',
01782                         action="store_true",
01783                         help='Clean the build directory')
01784 
01785     parser.add_argument('-P', '--only-peripherals',
01786                         dest='test_only_peripheral',
01787                         default=False,
01788                         action="store_true",
01789                         help='Test only peripheral declared for MUT and skip common tests')
01790 
01791     parser.add_argument("--profile", dest="profile", action="append",
01792                         type=argparse_filestring_type,
01793                         default=[])
01794 
01795     parser.add_argument('-C', '--only-commons',
01796                         dest='test_only_common',
01797                         default=False,
01798                         action="store_true",
01799                         help='Test only board internals. Skip peripherals tests and perform common tests')
01800 
01801     parser.add_argument('-n', '--test-by-names',
01802                         dest='test_by_names',
01803                         type=argparse_many(str),
01804                         help='Runs only tests enumerated in this switch. Use commas to separate test case names')
01805 
01806     parser.add_argument('-p', '--peripheral-by-names',
01807                         dest='peripheral_by_names',
01808                         type=argparse_many(str),
01809                         help='Forces discovery of particular peripherals. Use commas to separate peripheral names')
01810 
01811     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
01812     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
01813 
01814     parser.add_argument('-c', '--copy-method',
01815                         dest='copy_method',
01816                         type=argparse_uppercase_type(copy_methods, "flash method"),
01817                         help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
01818 
01819     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
01820     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
01821 
01822     parser.add_argument('-r', '--reset-type',
01823                         dest='mut_reset_type',
01824                         default=None,
01825                         type=argparse_uppercase_type(reset_methods, "reset method"),
01826                         help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
01827 
01828     parser.add_argument('-g', '--goanna-for-tests',
01829                         dest='goanna_for_tests',
01830                         action="store_true",
01831                         help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')
01832 
01833     parser.add_argument('-G', '--goanna-for-sdk',
01834                         dest='goanna_for_mbed_sdk',
01835                         action="store_true",
01836                         help='Run Goanna static analysis tool for mbed SDK (Project will be rebuilt)')
01837 
01838     parser.add_argument('-s', '--suppress-summary',
01839                         dest='suppress_summary',
01840                         default=False,
01841                         action="store_true",
01842                         help='Suppresses display of well-formatted table with test results')
01843 
01844     parser.add_argument('-t', '--test-summary',
01845                         dest='test_x_toolchain_summary',
01846                         default=False,
01847                         action="store_true",
01848                         help='Displays well-formatted table with test x toolchain test results per target')
01849 
01850     parser.add_argument('-A', '--test-automation-report',
01851                         dest='test_automation_report',
01852                         default=False,
01853                         action="store_true",
01854                         help='Prints information about all tests and exits')
01855 
01856     parser.add_argument('-R', '--test-case-report',
01857                         dest='test_case_report',
01858                         default=False,
01859                         action="store_true",
01860                         help='Prints information about all test cases and exits')
01861 
01862     parser.add_argument("-S", "--supported-toolchains",
01863                         action="store_true",
01864                         dest="supported_toolchains",
01865                         default=False,
01866                         help="Displays supported matrix of MCUs and toolchains")
01867 
01868     parser.add_argument("-O", "--only-build",
01869                         action="store_true",
01870                         dest="only_build_tests",
01871                         default=False,
01872                         help="Only build tests, skips actual test procedures (flashing etc.)")
01873 
01874     parser.add_argument('--parallel',
01875                         dest='parallel_test_exec',
01876                         default=False,
01877                         action="store_true",
01878                         help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')
01879 
01880     parser.add_argument('--config',
01881                         dest='verbose_test_configuration_only',
01882                         default=False,
01883                         action="store_true",
01884                         help='Displays full test specification and MUTs configuration and exits')
01885 
01886     parser.add_argument('--loops',
01887                         dest='test_loops_list',
01888                         type=argparse_many(str),
01889                         help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
01890 
01891     parser.add_argument('--global-loops',
01892                         dest='test_global_loops_value',
01893                         type=int,
01894                         help='Set global number of test loops per test. Default value is 1')
01895 
01896     parser.add_argument('--consolidate-waterfall',
01897                         dest='consolidate_waterfall_test',
01898                         default=False,
01899                         action="store_true",
01900                         help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
01901 
01902     parser.add_argument('-W', '--waterfall',
01903                         dest='waterfall_test',
01904                         default=False,
01905                         action="store_true",
01906                         help='Used with --loops or --global-loops arguments. Tests until an OK result occurs and assumes the test passed')
01907 
01908     parser.add_argument('-N', '--firmware-name',
01909                         dest='firmware_global_name',
01910                         help='Set global name for all produced projects. Note: the proper file extension will be added by build scripts')
01911 
01912     parser.add_argument('-u', '--shuffle',
01913                         dest='shuffle_test_order',
01914                         default=False,
01915                         action="store_true",
01916                         help='Shuffles test execution order')
01917 
01918     parser.add_argument('--shuffle-seed',
01919                         dest='shuffle_test_seed',
01920                         default=None,
01921                         help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
01922 
01923     parser.add_argument('-f', '--filter',
01924                         dest='general_filter_regex',
01925                         type=argparse_many(str),
01926                         default=None,
01927                         help='For some commands you can use filter to filter out results')
01928 
01929     parser.add_argument('--inc-timeout',
01930                         dest='extend_test_timeout',
01931                         metavar="NUMBER",
01932                         type=int,
01933                         help='You can increase global timeout for each test by specifying additional test timeout in seconds')
01934 
01935     parser.add_argument('--db',
01936                         dest='db_url',
01937                         help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
01938 
01939     parser.add_argument('-l', '--log',
01940                         dest='log_file_name',
01941                         help='Log events to external file (note not all console entries may be visible in log file)')
01942 
01943     parser.add_argument('--report-html',
01944                         dest='report_html_file_name',
01945                         help='You can log test suite results in form of HTML report')
01946 
01947     parser.add_argument('--report-junit',
01948                         dest='report_junit_file_name',
01949                         help='You can log test suite results in form of JUnit compliant XML report')
01950 
01951     parser.add_argument("--report-build",
01952                         dest="report_build_file_name",
01953                         help="Output the build results to a junit xml file")
01954 
01955     parser.add_argument("--report-text",
01956                         dest="report_text_file_name",
01957                         help="Output the build results to a text file")
01958 
01959     parser.add_argument('--verbose-skipped',
01960                         dest='verbose_skipped_tests',
01961                         default=False,
01962                         action="store_true",
01963                         help='Prints some extra information about skipped tests')
01964 
01965     parser.add_argument('-V', '--verbose-test-result',
01966                         dest='verbose_test_result_only',
01967                         default=False,
01968                         action="store_true",
01969                         help='Prints test serial output')
01970 
01971     parser.add_argument('-v', '--verbose',
01972                         dest='verbose',
01973                         default=False,
01974                         action="store_true",
01975                         help='Verbose mode (prints some extra information)')
01976 
01977     parser.add_argument('--version',
01978                         dest='version',
01979                         default=False,
01980                         action="store_true",
01981                         help='Prints script version and exits')
01982 
01983     parser.add_argument('--stats-depth',
01984                         dest='stats_depth',
01985                         default=2,
01986                         type=int,
01987                         help="Depth level for static memory report")
01988     return parser
01989 
01990 def test_path_to_name (path, base):
01991     """Change all slashes in a path into hyphens
01992     This creates a unique cross-platform test name based on the path
01993     This can eventually be overridden by a to-be-determined meta-data mechanism"""
01994     name_parts = []
01995     head, tail = os.path.split(relpath(path,base))
01996     while (tail and tail != "."):
01997         name_parts.insert(0, tail)
01998         head, tail = os.path.split(head)
01999 
02000     return "-".join(name_parts).lower()
02001 
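# Illustrative sketch (hypothetical paths): nested test directories collapse into
# a hyphen-separated, lower-case name relative to the given base directory.
def _example_test_path_to_name():
    return test_path_to_name("TESTS/network/tcp_echo", ".")   # -> "tests-network-tcp_echo"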
02002 def get_test_config (config_name, target_name):
02003     """Finds the path to a test configuration file
02004     config_name: path to a custom configuration file OR mbed OS interface "ethernet, wifi_odin, etc"
02005     target_name: name of target used to determine if the given mbed OS interface is valid
02006     returns path to config, will return None if no valid config is found
02007     """
02008     # If they passed in a full path
02009     if exists(config_name):
02010         # This is a module config
02011         return config_name
02012     # Otherwise find the path to configuration file based on mbed OS interface
02013     return TestConfig.get_config_path(config_name, target_name)
02014 
02015 def find_tests (base_dir, target_name, toolchain_name, app_config=None):
02016     """ Finds all tests in a directory recursively
02017     base_dir: path to the directory to scan for tests (ex. 'path/to/project')
02018     target_name: name of the target to use for scanning (ex. 'K64F')
02019     toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
02021     app_config: location of a chosen mbed_app.json file
02022     """
02023 
02024     tests = {}
02025 
02026     # Prepare the toolchain
02027     toolchain = prepare_toolchain([base_dir], None, target_name, toolchain_name,
02028                                   silent=True, app_config=app_config)
02029 
02030     # Scan the directory for paths to probe for 'TESTS' folders
02031     base_resources = scan_resources([base_dir], toolchain)
02032 
02033     dirs = base_resources.inc_dirs
02034     for directory in dirs:
02035         subdirs = os.listdir(directory)
02036 
02037         # If the directory contains a subdirectory called 'TESTS', scan it for test cases
02038         if 'TESTS' in subdirs:
02039             walk_base_dir = join(directory, 'TESTS')
02040             test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir)
02041 
02042             # Loop through all subdirectories
02043             for d in test_resources.inc_dirs:
02044 
02045                 # If the test case folder is not called 'host_tests' and it is
02046                 # located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
02047                 # then add it to the tests
02048                 path_depth = get_path_depth(relpath(d, walk_base_dir))
02049                 if path_depth == 2:
02050                     test_group_directory_path, test_case_directory = os.path.split(d)
02051                     test_group_directory = os.path.basename(test_group_directory_path)
02052 
02053                     # Check to make sure discovered folder is not in a host test directory
02054                     if test_case_directory != 'host_tests' and test_group_directory != 'host_tests':
02055                         test_name = test_path_to_name(d, base_dir)
02056                         tests[test_name] = d
02057 
02058     return tests
02059 
02060 def print_tests (tests, format="list", sort=True):
02061     """Given a dictionary of tests (as returned from "find_tests"), print them
02062     in the specified format"""
02063     if format == "list":
02064         for test_name in sorted(tests.keys()):
02065             test_path = tests[test_name]
02066             print "Test Case:"
02067             print "    Name: %s" % test_name
02068             print "    Path: %s" % test_path
02069     elif format == "json":
02070         print json.dumps(tests, indent=2)
02071     else:
02072         print "Unknown format '%s'" % format
02073         sys.exit(1)
02074 
02075 def norm_relative_path (path, start):
02076     """This function will create a normalized, relative path. It mimics the
02077     python os.path.relpath function, but also normalizes a Windows-style path
02078     that uses backslashes to a Unix-style path that uses forward slashes."""
02079     path = os.path.normpath(path)
02080     path = os.path.relpath(path, start)
02081     path = path.replace("\\", "/")
02082     return path
02083 
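# Illustrative sketch (hypothetical paths): the result is relative to 'start' and
# always uses forward slashes, also when the input was a Windows backslash path.
def _example_norm_relative_path():
    return norm_relative_path("BUILD/tests/K64F/GCC_ARM", "BUILD")   # -> "tests/K64F/GCC_ARM"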
02084 
02085 def build_test_worker (*args, **kwargs):
02086     """This is a worker function for the parallel building of tests. The `args`
02087     and `kwargs` are passed directly to `build_project`. It returns a dictionary
02088     with the following structure:
02089 
02090     {
02091         'result': `True` if no exceptions were thrown, `False` otherwise
02092         'reason': Instance of exception that was thrown on failure
02093         'bin_file': Path to the created binary if `build_project` was
02094                     successful. Not present otherwise
02095         'kwargs': The keyword arguments that were passed to `build_project`.
02096                   This includes arguments that were modified (ex. report)
02097     }
02098     """
02099     bin_file = None
02100     ret = {
02101         'result': False,
02102         'args': args,
02103         'kwargs': kwargs
02104     }
02105 
02106     # Use parent TOOLCHAIN_PATHS variable
02107     for key, value in kwargs['toolchain_paths'].iteritems():
02108         TOOLCHAIN_PATHS[key] = value
02109 
02110     del kwargs['toolchain_paths']
02111 
02112     try:
02113         bin_file = build_project(*args, **kwargs)
02114         ret['result'] = True
02115         ret['bin_file'] = bin_file
02116         ret['kwargs'] = kwargs
02117 
02118     except NotSupportedException, e:
02119         ret['reason'] = e
02120     except ToolException, e:
02121         ret['reason'] = e
02122     except KeyboardInterrupt, e:
02123         ret['reason'] = e
02124     except:
02125         # Print unhandled exceptions here
02126         import traceback
02127         traceback.print_exc(file=sys.stdout)
02128 
02129     return ret
02130 
02131 
02132 def build_tests (tests, base_source_paths, build_path, target, toolchain_name,
02133                 clean=False, notify=None, verbose=False, jobs=1, macros=None,
02134                 silent=False, report=None, properties=None,
02135                 continue_on_build_fail=False, app_config=None,
02136                 build_profile=None, stats_depth=None):
02137     """Given the data structure from 'find_tests' and the typical build parameters,
02138     build all the tests
02139 
02140     Returns a tuple of the build result (True or False) followed by the test
02141     build data structure"""
02142 
02143     execution_directory = "."
02144     base_path = norm_relative_path(build_path, execution_directory)
02145 
02146     target_name = target if isinstance(target, str) else target.name
02147     cfg, _, _ = get_config(base_source_paths, target_name, toolchain_name)
02148 
02149     baud_rate = 9600
02150     if 'platform.stdio-baud-rate' in cfg:
02151         baud_rate = cfg['platform.stdio-baud-rate'].value
02152 
02153     test_build = {
02154         "platform": target_name,
02155         "toolchain": toolchain_name,
02156         "base_path": base_path,
02157         "baud_rate": baud_rate,
02158         "binary_type": "bootable",
02159         "tests": {}
02160     }
02161 
02162     result = True
02163 
02164     jobs_count = int(jobs if jobs else cpu_count())
02165     p = Pool(processes=jobs_count)
02166     results = []
02167     for test_name, test_path in tests.iteritems():
02168         test_build_path = os.path.join(build_path, test_path)
02169         src_path = base_source_paths + [test_path]
02170         bin_file = None
02171         test_case_folder_name = os.path.basename(test_path)
02172 
02173         args = (src_path, test_build_path, target, toolchain_name)
02174         kwargs = {
02175             'jobs': 1,
02176             'clean': clean,
02177             'macros': macros,
02178             'name': test_case_folder_name,
02179             'project_id': test_name,
02180             'report': report,
02181             'properties': properties,
02182             'verbose': verbose,
02183             'app_config': app_config,
02184             'build_profile': build_profile,
02185             'silent': True,
02186             'toolchain_paths': TOOLCHAIN_PATHS,
02187             'stats_depth': stats_depth
02188         }
02189 
02190         results.append(p.apply_async(build_test_worker, args, kwargs))
02191 
02192     p.close()
02193     result = True
02194     itr = 0
02195     while len(results):
02196         itr += 1
02197         if itr > 360000:
02198             p.terminate()
02199             p.join()
02200             raise ToolException("Compile did not finish in 10 minutes")
02201         else:
02202             sleep(0.01)
02203             pending = 0
02204             for r in results:
02205                 if r.ready() is True:
02206                     try:
02207                         worker_result = r.get()
02208                         results.remove(r)
02209 
02210                         # Take report from the kwargs and merge it into existing report
02211                         if report:
02212                             report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
02213                             for test_key in report_entry.keys():
02214                                 report[target_name][toolchain_name][test_key] = report_entry[test_key]
02215 
02216                         # Set the overall result to a failure if a build failure occurred
02217                         if ('reason' in worker_result and
02218                             worker_result['reason'] and
02219                             not isinstance(worker_result['reason'], NotSupportedException)):
02220                             result = False
02221                             break
02222 
02223                         # Adding binary path to test build result
02224                         if ('result' in worker_result and
02225                             worker_result['result'] and
02226                             'bin_file' in worker_result):
02227                             bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
02228 
02229                             test_build['tests'][worker_result['kwargs']['project_id']] = {
02230                                 "binaries": [
02231                                     {
02232                                         "path": bin_file
02233                                     }
02234                                 ]
02235                             }
02236 
02237                             test_key = worker_result['kwargs']['project_id'].upper()
02238                             if report:
02239                                 print report[target_name][toolchain_name][test_key][0][0]['output'].rstrip()
02240                             print 'Image: %s\n' % bin_file
02241 
02242                     except:
02243                         if p._taskqueue.queue:
02244                             p._taskqueue.queue.clear()
02245                             sleep(0.5)
02246                         p.terminate()
02247                         p.join()
02248                         raise
02249                 else:
02250                     pending += 1
02251                     if pending >= jobs_count:
02252                         break
02253 
02254             # Break as soon as possible if there is a failure and we are not
02255             # continuing on build failures
02256             if not result and not continue_on_build_fail:
02257                 if p._taskqueue.queue:
02258                     p._taskqueue.queue.clear()
02259                     sleep(0.5)
02260                 p.terminate()
02261                 break
02262 
02263     p.join()
02264 
02265     test_builds = {}
02266     test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
02267 
02268     return result, test_builds
02269 
02270 
02271 def test_spec_from_test_builds(test_builds):
02272     return {
02273         "builds": test_builds
02274     }
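# Illustrative sketch (assumes 'test_builds' came from a successful build_tests()
# call): the wrapper above is all that is needed to produce the final test
# specification dictionary, shaped like {"builds": {"K64F-GCC_ARM": {...}}}.
def _example_test_spec(test_builds):
    return json.dumps(test_spec_from_test_builds(test_builds), indent=4)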