Rizky Ardi Maulana / mbed-os

test_api.py

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2014 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 
00017 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
00018 """
00019 
00020 import os
00021 import re
00022 import sys
00023 import json
00024 import uuid
00025 import pprint
00026 import random
00027 import argparse
00028 import datetime
00029 import threading
00030 import ctypes
00031 from types import ListType
00032 from colorama import Fore, Back, Style
00033 from prettytable import PrettyTable
00034 from copy import copy
00035 
00036 from time import sleep, time
00037 from Queue import Queue, Empty
00038 from os.path import join, exists, basename, relpath
00039 from threading import Thread, Lock
00040 from multiprocessing import Pool, cpu_count
00041 from subprocess import Popen, PIPE
00042 
00043 # Imports related to mbed build api
00044 from tools.tests import TESTS
00045 from tools.tests import TEST_MAP
00046 from tools.paths import BUILD_DIR
00047 from tools.paths import HOST_TESTS
00048 from tools.utils import ToolException
00049 from tools.utils import NotSupportedException
00050 from tools.utils import construct_enum
00051 from tools.memap import MemapParser
00052 from tools.targets import TARGET_MAP
00053 from tools.test_db import BaseDBAccess
00054 from tools.build_api import build_project, build_mbed_libs, build_lib
00055 from tools.build_api import get_target_supported_toolchains
00056 from tools.build_api import write_build_report
00057 from tools.build_api import prep_report
00058 from tools.build_api import prep_properties
00059 from tools.build_api import create_result
00060 from tools.build_api import add_result_to_report
00061 from tools.build_api import prepare_toolchain
00062 from tools.build_api import scan_resources
00063 from tools.libraries import LIBRARIES, LIBRARY_MAP
00064 from tools.options import extract_profile
00065 from tools.toolchains import TOOLCHAIN_PATHS
00066 from tools.toolchains import TOOLCHAINS
00067 from tools.test_exporters import ReportExporter, ResultExporterType
00068 from tools.utils import argparse_filestring_type
00069 from tools.utils import argparse_uppercase_type
00070 from tools.utils import argparse_lowercase_type
00071 from tools.utils import argparse_many
00072 from tools.utils import get_path_depth
00073 
00074 import tools.host_tests.host_tests_plugins as host_tests_plugins
00075 
00076 try:
00077     import mbed_lstools
00078     from tools.compliance.ioper_runner import get_available_oper_test_scopes
00079 except:
00080     pass
00081 
00082 
00083 class ProcessObserver(Thread):
00084     def __init__(self, proc):
00085         Thread.__init__(self)
00086         self.proc = proc
00087         self.queue = Queue()
00088         self.daemon = True
00089         self.active = True
00090         self.start()
00091 
00092     def run(self):
00093         while self.active:
00094             c = self.proc.stdout.read(1)
00095             self.queue.put(c)
00096 
00097     def stop(self):
00098         self.active = False
00099         try:
00100             self.proc.terminate()
00101         except Exception, _:
00102             pass
00103 
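# Illustrative usage sketch (not part of the original file): ProcessObserver wraps a
# subprocess and forwards its stdout, character by character, onto a thread-safe queue.
# `host_test_cmd` below is a hypothetical command list.
#
#     proc = Popen(host_test_cmd, stdout=PIPE)
#     obs = ProcessObserver(proc)
#     try:
#         c = obs.queue.get(block=True, timeout=0.5)   # read one character, if any
#     except Empty:
#         c = None
#     obs.stop()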
00104 
00105 class SingleTestExecutor (threading.Thread):
00106     """ Example: single test class usage in a separate thread
00107     """
00108     def __init__(self, single_test):
00109         self.single_test = single_test
00110         threading.Thread.__init__(self)
00111 
00112     def run(self):
00113         start = time()
00114         # Execute tests depending on options and filter applied
00115         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = self.single_test.execute()
00116         elapsed_time = time() - start
00117 
00118         # Human readable summary
00119         if not self.single_test.opts_suppress_summary:
00120             # prints well-formed summary with results (SQL table like)
00121             print self.single_test.generate_test_summary(test_summary, shuffle_seed)
00122         if self.single_test.opts_test_x_toolchain_summary:
00123             # prints well-formed summary with results (SQL table like)
00124             # table shows test x toolchain result matrix
00125             print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
00126         print "Completed in %.2f sec" % (elapsed_time)
00127 
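# Illustrative usage sketch (not part of the original file): executing a configured
# SingleTestRunner instance in a background thread. `single_test` is assumed to be a
# fully initialized SingleTestRunner (defined below).
#
#     executor = SingleTestExecutor(single_test)
#     executor.start()
#     executor.join()   # blocks until the test run completes and the summary is printed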
00128 
00129 class SingleTestRunner (object):
00130     """ Object wrapper for single test run which may involve multiple MUTs
00131     """
00132     RE_DETECT_TESTCASE_RESULT = None
00133 
00134     # Return codes for test script
00135     TEST_RESULT_OK = "OK"
00136     TEST_RESULT_FAIL = "FAIL"
00137     TEST_RESULT_ERROR = "ERROR"
00138     TEST_RESULT_UNDEF = "UNDEF"
00139     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
00140     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
00141     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
00142     TEST_RESULT_TIMEOUT = "TIMEOUT"
00143     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
00144     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
00145     TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
00146     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
00147 
00148     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
00149     TEST_LOOPS_LIST = []    # We redefine no.of loops per test_id
00150     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
00151 
00152     muts = {} # MUTs descriptor (from external file)
00153     test_spec = {} # Test specification (from external file)
00154 
00155     # mbed test suite -> SingleTestRunner
00156     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
00157                            "failure" : TEST_RESULT_FAIL,
00158                            "error" : TEST_RESULT_ERROR,
00159                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
00160                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
00161                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
00162                            "timeout" : TEST_RESULT_TIMEOUT,
00163                            "no_image" : TEST_RESULT_NO_IMAGE,
00164                            "end" : TEST_RESULT_UNDEF,
00165                            "mbed_assert" : TEST_RESULT_MBED_ASSERT,
00166                            "build_failed" : TEST_RESULT_BUILD_FAILED,
00167                            "not_supproted" : TEST_RESULT_NOT_SUPPORTED
00168     }
00169 
00170     def __init__ (self,
00171                  _global_loops_count=1,
00172                  _test_loops_list=None,
00173                  _muts={},
00174                  _clean=False,
00175                  _parser=None,
00176                  _opts=None,
00177                  _opts_db_url=None,
00178                  _opts_log_file_name=None,
00179                  _opts_report_html_file_name=None,
00180                  _opts_report_junit_file_name=None,
00181                  _opts_report_build_file_name=None,
00182                  _opts_report_text_file_name=None,
00183                  _opts_build_report={},
00184                  _opts_build_properties={},
00185                  _test_spec={},
00186                  _opts_goanna_for_mbed_sdk=None,
00187                  _opts_goanna_for_tests=None,
00188                  _opts_shuffle_test_order=False,
00189                  _opts_shuffle_test_seed=None,
00190                  _opts_test_by_names=None,
00191                  _opts_peripheral_by_names=None,
00192                  _opts_test_only_peripheral=False,
00193                  _opts_test_only_common=False,
00194                  _opts_verbose_skipped_tests=False,
00195                  _opts_verbose_test_result_only=False,
00196                  _opts_verbose=False,
00197                  _opts_firmware_global_name=None,
00198                  _opts_only_build_tests=False,
00199                  _opts_parallel_test_exec=False,
00200                  _opts_suppress_summary=False,
00201                  _opts_test_x_toolchain_summary=False,
00202                  _opts_copy_method=None,
00203                  _opts_mut_reset_type=None,
00204                  _opts_jobs=None,
00205                  _opts_waterfall_test=None,
00206                  _opts_consolidate_waterfall_test=None,
00207                  _opts_extend_test_timeout=None,
00208                  _opts_auto_detect=None,
00209                  _opts_include_non_automated=False):
00210         """ Let's try hard to init this object
00211         """
00212         from colorama import init
00213         init()
00214 
00215         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
00216         self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
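        # Illustrative note (not part of the original file): with the mapping keys above,
        # PATTERN resembles "\{(success|failure|error|...)\}" (the alternation order follows
        # dict ordering), so a host-test output line containing a token such as "{success}"
        # is matched and later translated to TEST_RESULT_OK by get_test_result().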
00217         # Settings related to test loops counters
00218         try:
00219             _global_loops_count = int(_global_loops_count)
00220         except:
00221             _global_loops_count = 1
00222         if _global_loops_count < 1:
00223             _global_loops_count = 1
00224         self.GLOBAL_LOOPS_COUNT  = _global_loops_count
00225         self.TEST_LOOPS_LIST  = _test_loops_list if _test_loops_list else []
00226         self.TEST_LOOPS_DICT  = self.test_loop_list_to_dict (_test_loops_list)
00227 
00228         self.shuffle_random_seed  = 0.0
00229         self.SHUFFLE_SEED_ROUND  = 10
00230 
00231         # MUT list and test specification storage
00232         self.muts  = _muts
00233         self.test_spec  = _test_spec
00234 
00235         # Settings passed e.g. from command line
00236         self.opts_db_url  = _opts_db_url
00237         self.opts_log_file_name  = _opts_log_file_name
00238         self.opts_report_html_file_name  = _opts_report_html_file_name
00239         self.opts_report_junit_file_name  = _opts_report_junit_file_name
00240         self.opts_report_build_file_name  = _opts_report_build_file_name
00241         self.opts_report_text_file_name  = _opts_report_text_file_name
00242         self.opts_goanna_for_mbed_sdk  = _opts_goanna_for_mbed_sdk
00243         self.opts_goanna_for_tests  = _opts_goanna_for_tests
00244         self.opts_shuffle_test_order  = _opts_shuffle_test_order
00245         self.opts_shuffle_test_seed  = _opts_shuffle_test_seed
00246         self.opts_test_by_names  = _opts_test_by_names
00247         self.opts_peripheral_by_names  = _opts_peripheral_by_names
00248         self.opts_test_only_peripheral  = _opts_test_only_peripheral
00249         self.opts_test_only_common  = _opts_test_only_common
00250         self.opts_verbose_skipped_tests  = _opts_verbose_skipped_tests
00251         self.opts_verbose_test_result_only  = _opts_verbose_test_result_only
00252         self.opts_verbose  = _opts_verbose
00253         self.opts_firmware_global_name  = _opts_firmware_global_name
00254         self.opts_only_build_tests  = _opts_only_build_tests
00255         self.opts_parallel_test_exec  = _opts_parallel_test_exec
00256         self.opts_suppress_summary  = _opts_suppress_summary
00257         self.opts_test_x_toolchain_summary  = _opts_test_x_toolchain_summary
00258         self.opts_copy_method  = _opts_copy_method
00259         self.opts_mut_reset_type  = _opts_mut_reset_type
00260         self.opts_jobs  = _opts_jobs if _opts_jobs is not None else 1
00261         self.opts_waterfall_test  = _opts_waterfall_test
00262         self.opts_consolidate_waterfall_test  = _opts_consolidate_waterfall_test
00263         self.opts_extend_test_timeout  = _opts_extend_test_timeout
00264         self.opts_clean  = _clean
00265         self.opts_parser  = _parser
00266         self.opts  = _opts
00267         self.opts_auto_detect  = _opts_auto_detect
00268         self.opts_include_non_automated  = _opts_include_non_automated
00269 
00270         self.build_report  = _opts_build_report
00271         self.build_properties  = _opts_build_properties
00272 
00273         # File / screen logger initialization
00274         self.logger  = CLITestLogger(file_name=self.opts_log_file_name )  # Default test logger
00275 
00276         # Database related initializations
00277         self.db_logger  = factory_db_logger(self.opts_db_url )
00278         self.db_logger_build_id  = None # Build ID (database index of build_id table)
00279         # Let's connect to database to set up credentials and confirm database is ready
00280         if self.db_logger :
00281             self.db_logger .connect_url(self.opts_db_url ) # Save db access info inside db_logger object
00282             if self.db_logger .is_connected():
00283                 # Get hostname and uname so we can use it as build description
00284                 # when creating new build_id in external database
00285                 (_hostname, _uname) = self.db_logger .get_hostname()
00286                 _host_location = os.path.dirname(os.path.abspath(__file__))
00287                 build_id_type = None if self.opts_only_build_tests  is None else self.db_logger .BUILD_ID_TYPE_BUILD_ONLY
00288                 self.db_logger_build_id  = self.db_logger .get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
00289                 self.db_logger .disconnect()
00290 
00291     def dump_options (self):
00292         """ Function returns a data structure with common settings passed to SingleTestRunner.
00293             It can be used, for example, to fill the _extra fields in a database storing test suite single run data.
00294             Example:
00295             data = self.dump_options()
00296             or
00297             data_str = json.dumps(self.dump_options())
00298         """
00299         result = {"db_url" : str(self.opts_db_url ),
00300                   "log_file_name" :  str(self.opts_log_file_name ),
00301                   "shuffle_test_order" : str(self.opts_shuffle_test_order ),
00302                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed ),
00303                   "test_by_names" :  str(self.opts_test_by_names ),
00304                   "peripheral_by_names" : str(self.opts_peripheral_by_names ),
00305                   "test_only_peripheral" :  str(self.opts_test_only_peripheral ),
00306                   "test_only_common" :  str(self.opts_test_only_common ),
00307                   "verbose" :  str(self.opts_verbose ),
00308                   "firmware_global_name" :  str(self.opts_firmware_global_name ),
00309                   "only_build_tests" :  str(self.opts_only_build_tests ),
00310                   "copy_method" :  str(self.opts_copy_method ),
00311                   "mut_reset_type" :  str(self.opts_mut_reset_type ),
00312                   "jobs" :  str(self.opts_jobs ),
00313                   "extend_test_timeout" :  str(self.opts_extend_test_timeout ),
00314                   "_dummy" : ''
00315         }
00316         return result
00317 
00318     def shuffle_random_func(self):
00319         return self.shuffle_random_seed 
00320 
00321     def is_shuffle_seed_float (self):
00322         """ Returns True if the shuffle seed can be converted to float
00323         """
00324         result = True
00325         try:
00326             float(self.shuffle_random_seed )
00327         except ValueError:
00328             result = False
00329         return result
00330 
00331     # This will store target / toolchain specific properties
00332     test_suite_properties_ext = {}  # target : toolchain
00333     # Here we store test results
00334     test_summary = []
00335     # Here we store test results in extended data structure
00336     test_summary_ext = {}
00337     execute_thread_slice_lock = Lock()
00338 
00339     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
00340         for toolchain in toolchains:
00341             tt_id = "%s::%s" % (toolchain, target)
00342 
00343             # Let's build our test
00344             if target not in TARGET_MAP:
00345                 print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
00346                 continue
00347 
00348             T = TARGET_MAP[target]
00349 
00350             # print target, toolchain
00351             # Test suite properties returned to external tools like CI
00352             test_suite_properties = {
00353                 'jobs': self.opts_jobs,
00354                 'clean': clean,
00355                 'target': target,
00356                 'vendor': T.extra_labels[0],
00357                 'test_ids': ', '.join(test_ids),
00358                 'toolchain': toolchain,
00359                 'shuffle_random_seed': self.shuffle_random_seed
00360             }
00361 
00362             # print '=== %s::%s ===' % (target, toolchain)
00363 
00364             clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk  or clean or self.opts_clean  else None
00365 
00366             profile = extract_profile(self.opts_parser , self.opts , toolchain)
00367 
00368 
00369             try:
00370                 build_mbed_libs_result = build_mbed_libs(T,
00371                                                          toolchain,
00372                                                          clean=clean_mbed_libs_options,
00373                                                          verbose=self.opts_verbose ,
00374                                                          jobs=self.opts_jobs ,
00375                                                          report=build_report,
00376                                                          properties=build_properties,
00377                                                          build_profile=profile)
00378 
00379                 if not build_mbed_libs_result:
00380                     print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
00381                     continue
00382 
00383             except ToolException:
00384                 print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
00385                 continue
00386 
00387             build_dir = join(BUILD_DIR, "test", target, toolchain)
00388 
00389             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
00390             test_suite_properties['build_dir'] = build_dir
00391             test_suite_properties['skipped'] = []
00392 
00393             # Enumerate through all tests and shuffle test order if requested
00394             test_map_keys = sorted(TEST_MAP.keys())
00395 
00396             if self.opts_shuffle_test_order :
00397                 random.shuffle(test_map_keys, self.shuffle_random_func)
00398                 # Update database with shuffle seed if applicable
00399                 if self.db_logger :
00400                     self.db_logger .reconnect();
00401                     if self.db_logger .is_connected():
00402                         self.db_logger .update_build_id_info(self.db_logger_build_id , _shuffle_seed=self.shuffle_random_func ())
00403                         self.db_logger .disconnect();
00404 
00405             if self.db_logger :
00406                 self.db_logger .reconnect();
00407                 if self.db_logger .is_connected():
00408                     # Update MUTs and Test Specification in database
00409                     self.db_logger .update_build_id_info(self.db_logger_build_id , _muts=self.muts , _test_spec=self.test_spec )
00410                     # Update Extra information in database (some options passed to test suite)
00411                     self.db_logger .update_build_id_info(self.db_logger_build_id , _extra=json.dumps(self.dump_options ()))
00412                     self.db_logger .disconnect();
00413 
00414             valid_test_map_keys = self.get_valid_tests (test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated )
00415             skipped_test_map_keys = self.get_skipped_tests (test_map_keys, valid_test_map_keys)
00416 
00417             for skipped_test_id in skipped_test_map_keys:
00418                 test_suite_properties['skipped'].append(skipped_test_id)
00419 
00420 
00421             # First pass through all tests and determine which libraries need to be built
00422             libraries = []
00423             for test_id in valid_test_map_keys:
00424                 test = TEST_MAP[test_id]
00425 
00426                 # Detect which libs should be added to the test
00427                 # Some libs have to be compiled, like RTOS or ETH
00428                 for lib in LIBRARIES:
00429                     if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
00430                         libraries.append(lib['id'])
00431 
00432 
00433             clean_project_options = True if self.opts_goanna_for_tests  or clean or self.opts_clean  else None
00434 
00435             # Build all required libraries
00436             for lib_id in libraries:
00437                 try:
00438                     build_lib(lib_id,
00439                               T,
00440                               toolchain,
00441                               verbose=self.opts_verbose ,
00442                               clean=clean_mbed_libs_options,
00443                               jobs=self.opts_jobs ,
00444                               report=build_report,
00445                               properties=build_properties,
00446                               build_profile=profile)
00447 
00448                 except ToolException:
00449                     print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building library %s'% (lib_id))
00450                     continue
00451 
00452 
00453             for test_id in valid_test_map_keys:
00454                 test = TEST_MAP[test_id]
00455 
00456                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
00457 
00458                 # TODO: move the two loops below to a separate function
00459                 INC_DIRS = []
00460                 for lib_id in libraries:
00461                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
00462                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
00463 
00464                 MACROS = []
00465                 for lib_id in libraries:
00466                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
00467                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
00468                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
00469                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
00470                 test_uuid = uuid.uuid4()
00471                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
00472 
00473                 # Prepare extended test results data structure (it can be used to generate detailed test report)
00474                 if target not in self.test_summary_ext :
00475                     self.test_summary_ext [target] = {}  # test_summary_ext : toolchain
00476                 if toolchain not in self.test_summary_ext [target]:
00477                     self.test_summary_ext [target][toolchain] = {}    # test_summary_ext : toolchain : target
00478 
00479                 tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)    # For logging only
00480 
00481                 project_name = self.opts_firmware_global_name  if self.opts_firmware_global_name  else None
00482                 try:
00483                     path = build_project(test.source_dir,
00484                                      join(build_dir, test_id),
00485                                      T,
00486                                      toolchain,
00487                                      test.dependencies,
00488                                      clean=clean_project_options,
00489                                      verbose=self.opts_verbose ,
00490                                      name=project_name,
00491                                      macros=MACROS,
00492                                      inc_dirs=INC_DIRS,
00493                                      jobs=self.opts_jobs ,
00494                                      report=build_report,
00495                                      properties=build_properties,
00496                                      project_id=test_id,
00497                                      project_description=test.get_description(),
00498                                      build_profile=profile)
00499 
00500                 except Exception, e:
00501                     project_name_str = project_name if project_name is not None else test_id
00502 
00503 
00504                     test_result = self.TEST_RESULT_FAIL 
00505 
00506                     if isinstance(e, ToolException):
00507                         print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
00508                         test_result = self.TEST_RESULT_BUILD_FAILED 
00509                     elif isinstance(e, NotSupportedException):
00510                         print self.logger .log_line(self.logger .LogType.INFO, 'The project %s is not supported'% (project_name_str))
00511                         test_result = self.TEST_RESULT_NOT_SUPPORTED 
00512 
00513 
00514                     # Append test results to global test summary
00515                     self.test_summary .append(
00516                         (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
00517                     )
00518 
00519                     # Add detailed test result to test summary structure
00520                     if test_id not in self.test_summary_ext [target][toolchain]:
00521                         self.test_summary_ext [target][toolchain][test_id] = []
00522 
00523                     self.test_summary_ext [target][toolchain][test_id].append({ 0: {
00524                         'result' : test_result,
00525                         'output' : '',
00526                         'target_name' : target,
00527                         'target_name_unique': target,
00528                         'toolchain_name' : toolchain,
00529                         'id' : test_id,
00530                         'description' : test.get_description(),
00531                         'elapsed_time' : 0,
00532                         'duration' : 0,
00533                         'copy_method' : None
00534                     }})
00535                     continue
00536 
00537                 if self.opts_only_build_tests :
00538                     # With this option we are skipping testing phase
00539                     continue
00540 
00541                 # Test duration can be increased by global value
00542                 test_duration = test.duration
00543                 if self.opts_extend_test_timeout  is not None:
00544                     test_duration += self.opts_extend_test_timeout 
00545 
00546                 # For an automated test the duration acts as a timeout after
00547                 # which the test gets interrupted
00548                 test_spec = self.shape_test_request (target, path, test_id, test_duration)
00549                 test_loops = self.get_test_loop_count (test_id)
00550 
00551                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
00552                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
00553                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
00554 
00555                 # read MUTs, test specification and perform tests
00556                 handle_results = self.handle (test_spec, target, toolchain, test_loops=test_loops)
00557 
00558                 if handle_results is None:
00559                     continue
00560 
00561                 for handle_result in handle_results:
00562                     if handle_result:
00563                         single_test_result, detailed_test_results = handle_result
00564                     else:
00565                         continue
00566 
00567                     # Append test results to global test summary
00568                     if single_test_result is not None:
00569                         self.test_summary .append(single_test_result)
00570 
00571                     # Add detailed test result to test summary structure
00572                     if target not in self.test_summary_ext [target][toolchain]:
00573                         if test_id not in self.test_summary_ext [target][toolchain]:
00574                             self.test_summary_ext [target][toolchain][test_id] = []
00575 
00576                         append_test_result = detailed_test_results
00577 
00578                         # If waterfall and consolidate-waterfall options are enabled,
00579                         # only include the last test result in the report.
00580                         if self.opts_waterfall_test  and self.opts_consolidate_waterfall_test :
00581                             append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
00582 
00583                         self.test_summary_ext [target][toolchain][test_id].append(append_test_result)
00584 
00585             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
00586             self.test_suite_properties_ext [target][toolchain] = test_suite_properties
00587 
00588         q.put(target + '_'.join(toolchains))
00589         return
00590 
00591     def execute(self):
00592         clean = self.test_spec .get('clean', False)
00593         test_ids = self.test_spec .get('test_ids', [])
00594         q = Queue()
00595 
00596         # Generate a seed for shuffle if one is not provided on the command line
00597         self.shuffle_random_seed  = round(random.random(), self.SHUFFLE_SEED_ROUND )
00598         if self.opts_shuffle_test_seed  is not None and self.is_shuffle_seed_float ():
00599             self.shuffle_random_seed  = round(float(self.opts_shuffle_test_seed ), self.SHUFFLE_SEED_ROUND )
00600 
00601 
00602         if self.opts_parallel_test_exec :
00603             ###################################################################
00604             # Experimental, parallel test execution per singletest instance.
00605             ###################################################################
00606             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
00607             # Note: We are building here in parallel for each target separately!
00608             # So we are not building the same thing multiple times and compilers
00609             # in separate threads do not collide.
00610             # Inside execute_thread_slice(), handle() will be called to
00611             # get information about available MUTs (per target).
00612             for target, toolchains in self.test_spec ['targets'].iteritems():
00613                 self.test_suite_properties_ext [target] = {}
00614                 t = threading.Thread(target=self.execute_thread_slice , args = (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties ))
00615                 t.daemon = True
00616                 t.start()
00617                 execute_threads.append(t)
00618 
00619             for t in execute_threads:
00620                 q.get() # used instead of t.join(): we should not wait for threads to finish in any particular order
00621         else:
00622             # Serialized (not parallel) test execution
00623             for target, toolchains in self.test_spec ['targets'].iteritems():
00624                 if target not in self.test_suite_properties_ext :
00625                     self.test_suite_properties_ext [target] = {}
00626 
00627                 self.execute_thread_slice (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties )
00628                 q.get()
00629 
00630         if self.db_logger :
00631             self.db_logger .reconnect();
00632             if self.db_logger .is_connected():
00633                 self.db_logger .update_build_id_info(self.db_logger_build_id , _status_fk=self.db_logger .BUILD_ID_STATUS_COMPLETED)
00634                 self.db_logger .disconnect();
00635 
00636         return self.test_summary , self.shuffle_random_seed , self.test_summary_ext , self.test_suite_properties_ext , self.build_report , self.build_properties 
00637 
00638     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
00639         valid_test_map_keys = []
00640 
00641         for test_id in test_map_keys:
00642             test = TEST_MAP[test_id]
00643             if self.opts_test_by_names  and test_id not in self.opts_test_by_names :
00644                 continue
00645 
00646             if test_ids and test_id not in test_ids:
00647                 continue
00648 
00649             if self.opts_test_only_peripheral  and not test.peripherals:
00650                 if self.opts_verbose_skipped_tests :
00651                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00652                 continue
00653 
00654             if self.opts_peripheral_by_names  and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names ]):
00655                 # We will skip tests not forced with -p option
00656                 if self.opts_verbose_skipped_tests :
00657                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00658                 continue
00659 
00660             if self.opts_test_only_common  and test.peripherals:
00661                 if self.opts_verbose_skipped_tests :
00662                     print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral test skipped for target %s'% (target))
00663                 continue
00664 
00665             if not include_non_automated and not test.automated:
00666                 if self.opts_verbose_skipped_tests :
00667                     print self.logger .log_line(self.logger .LogType.INFO, 'Non automated test skipped for target %s'% (target))
00668                 continue
00669 
00670             if test.is_supported(target, toolchain):
00671                 if test.peripherals is None and self.opts_only_build_tests :
00672                     # When users are using the 'build only' flag and the test does not have
00673                     # specified peripherals, we can allow building the test by default
00674                     pass
00675                 elif self.opts_peripheral_by_names  and test_id not in self.opts_peripheral_by_names :
00676                     # If we force peripheral with option -p we expect test
00677                     # to pass even if peripheral is not in MUTs file.
00678                     pass
00679                 elif not self.is_peripherals_available (target, test.peripherals):
00680                     if self.opts_verbose_skipped_tests :
00681                         if test.peripherals:
00682                             print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
00683                         else:
00684                             print self.logger .log_line(self.logger .LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
00685                     continue
00686 
00687                 # The test has made it through all the filters, so add it to the valid tests list
00688                 valid_test_map_keys.append(test_id)
00689 
00690         return valid_test_map_keys
00691 
00692     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
00693         # NOTE: This will not preserve order
00694         return list(set(all_test_map_keys) - set(valid_test_map_keys))
00695 
00696     def generate_test_summary_by_target (self, test_summary, shuffle_seed=None):
00697         """ Returns a well-formed summary with results (SQL table like);
00698             the table shows a test x toolchain result matrix
00699         """
00700         RESULT_INDEX = 0
00701         TARGET_INDEX = 1
00702         TOOLCHAIN_INDEX = 2
00703         TEST_INDEX = 3
00704         DESC_INDEX = 4
00705 
00706         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
00707         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
00708         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
00709         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
00710 
00711         result = "Test summary:\n"
00712         for target in unique_targets:
00713             result_dict = {} # test : { toolchain : result }
00714             unique_target_toolchains = []
00715             for test in test_summary:
00716                 if test[TARGET_INDEX] == target:
00717                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
00718                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
00719                     if test[TEST_INDEX] not in result_dict:
00720                         result_dict[test[TEST_INDEX]] = {}
00721                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
00722 
00723             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
00724             pt = PrettyTable(pt_cols)
00725             for col in pt_cols:
00726                 pt.align[col] = "l"
00727             pt.padding_width = 1 # One space between column edges and contents (default)
00728 
00729             for test in unique_tests:
00730                 if test in result_dict:
00731                     test_results = result_dict[test]
00732                     if test in unique_test_desc:
00733                         row = [target, test, unique_test_desc[test]]
00734                         for toolchain in unique_toolchains:
00735                             if toolchain in test_results:
00736                                 row.append(test_results[toolchain])
00737                         pt.add_row(row)
00738             result += pt.get_string()
00739             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND ,
00740                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00741             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00742         return result
00743 
00744     def generate_test_summary (self, test_summary, shuffle_seed=None):
00745         """ Returns a well-formed summary with results (SQL table like);
00746             the table shows a target x test result matrix
00747         """
00748         success_code = 0    # Success code that can later be returned to the caller
00749         result = "Test summary:\n"
00750         # Pretty table package is used to print results
00751         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
00752                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
00753         pt.align["Result"] = "l" # Left align
00754         pt.align["Target"] = "l" # Left align
00755         pt.align["Toolchain"] = "l" # Left align
00756         pt.align["Test ID"] = "l" # Left align
00757         pt.align["Test Description"] = "l" # Left align
00758         pt.padding_width = 1 # One space between column edges and contents (default)
00759 
00760         result_dict = {self.TEST_RESULT_OK  : 0,
00761                        self.TEST_RESULT_FAIL  : 0,
00762                        self.TEST_RESULT_ERROR  : 0,
00763                        self.TEST_RESULT_UNDEF  : 0,
00764                        self.TEST_RESULT_IOERR_COPY  : 0,
00765                        self.TEST_RESULT_IOERR_DISK  : 0,
00766                        self.TEST_RESULT_IOERR_SERIAL  : 0,
00767                        self.TEST_RESULT_NO_IMAGE  : 0,
00768                        self.TEST_RESULT_TIMEOUT  : 0,
00769                        self.TEST_RESULT_MBED_ASSERT  : 0,
00770                        self.TEST_RESULT_BUILD_FAILED  : 0,
00771                        self.TEST_RESULT_NOT_SUPPORTED  : 0
00772         }
00773 
00774         for test in test_summary:
00775             if test[0] in result_dict:
00776                 result_dict[test[0]] += 1
00777             pt.add_row(test)
00778         result += pt.get_string()
00779         result += "\n"
00780 
00781         # Print result count
00782         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
00783         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND ,
00784                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00785         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00786         return result
00787 
00788     def test_loop_list_to_dict (self, test_loops_str):
00789         """ Transforms a list of 'test_id=X' entries into a dictionary { test_id : test_loop_count }
00790         """
00791         result = {}
00792         if test_loops_str:
00793             test_loops = test_loops_str
00794             for test_loop in test_loops:
00795                 test_loop_count = test_loop.split('=')
00796                 if len(test_loop_count) == 2:
00797                     _test_id, _test_loops = test_loop_count
00798                     try:
00799                         _test_loops = int(_test_loops)
00800                     except:
00801                         continue
00802                     result[_test_id] = _test_loops
00803         return result
00804 
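    # Illustrative example (not part of the original file):
    #
    #     self.test_loop_list_to_dict(['MBED_A1=5', 'MBED_A2=3'])
    #     # -> {'MBED_A1': 5, 'MBED_A2': 3}
    #
    # Entries that are not of the form 'test_id=<int>' are silently skipped.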
00805     def get_test_loop_count (self, test_id):
00806         """ This function returns the number of loops per test (deduced from test_id).
00807             If the test is not in the list of redefined loop counts, the default value is used.
00808         """
00809         result = self.GLOBAL_LOOPS_COUNT 
00810         if test_id in self.TEST_LOOPS_DICT :
00811             result = self.TEST_LOOPS_DICT [test_id]
00812         return result
00813 
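    # Illustrative example (not part of the original file): with GLOBAL_LOOPS_COUNT == 1
    # and TEST_LOOPS_DICT == {'MBED_A1': 5}, get_test_loop_count('MBED_A1') returns 5,
    # while get_test_loop_count('MBED_A2') falls back to the global value 1.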
00814     def delete_file (self, file_path):
00815         """ Remove file from the system
00816         """
00817         result = True
00818         result_msg = ""
00819         try:
00820             os.remove(file_path)
00821         except Exception, e:
00822             result_msg = e
00823             result = False
00824         return result, result_msg
00825 
00826     def handle_mut (self, mut, data, target_name, toolchain_name, test_loops=1):
00827         """ Test is being invoked for given MUT.
00828         """
00829         # Get test information, image and test timeout
00830         test_id = data['test_id']
00831         test = TEST_MAP[test_id]
00832         test_description = TEST_MAP[test_id].get_description()
00833         image = data["image"]
00834         duration = data.get("duration", 10)
00835 
00836         if mut is None:
00837             print "Error: No Mbed available: MUT[%s]" % data['mcu']
00838             return None
00839 
00840         mcu = mut['mcu']
00841         copy_method = mut.get('copy_method')        # Available board configuration selection e.g. core selection etc.
00842 
00843         if self.db_logger :
00844             self.db_logger .reconnect()
00845 
00846         selected_copy_method = self.opts_copy_method  if copy_method is None else copy_method
00847 
00848         # Tests can be looped so test results must be stored for the same test
00849         test_all_result = []
00850         # Test results for one test ran few times
00851         detailed_test_results = {}  # { Loop_number: { results ... } }
00852 
00853         for test_index in range(test_loops):
00854 
00855             # If mbedls is available and we are auto detecting MUT info,
00856             # update MUT info (mounting may have changed)
00857             if get_module_avail('mbed_lstools') and self.opts_auto_detect :
00858                 platform_name_filter = [mcu]
00859                 muts_list = {}
00860                 found = False
00861 
00862                 for i in range(0, 60):
00863                     print('Looking for %s with MBEDLS' % mcu)
00864                     muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
00865 
00866                     if 1 not in muts_list:
00867                         sleep(3)
00868                     else:
00869                         found = True
00870                         break
00871 
00872                 if not found:
00873                     print "Error: mbed not found with MBEDLS: %s" % data['mcu']
00874                     return None
00875                 else:
00876                     mut = muts_list[1]
00877 
00878             disk = mut.get('disk')
00879             port = mut.get('port')
00880 
00881             if disk is None or port is None:
00882                 return None
00883 
00884             target_by_mcu = TARGET_MAP[mut['mcu']]
00885             target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
00886             # Some extra stuff can be declared in MUTs structure
00887             reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
00888             reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
00889 
00890             # When the build and test systems were separate, this was relative to a
00891             # network base folder path: join(NETWORK_BASE_PATH, )
00892             image_path = image
00893 
00894             # Host test execution
00895             start_host_exec_time = time()
00896 
00897             single_test_result = self.TEST_RESULT_UNDEF  # single test run result
00898             _copy_method = selected_copy_method
00899 
00900             if not exists(image_path):
00901                 single_test_result = self.TEST_RESULT_NO_IMAGE 
00902                 elapsed_time = 0
00903                 single_test_output = self.logger .log_line(self.logger .LogType.ERROR, 'Image file does not exist: %s'% image_path)
00904                 print single_test_output
00905             else:
00906                 # Host test execution
00907                 start_host_exec_time = time()
00908 
00909                 host_test_verbose = self.opts_verbose_test_result_only  or self.opts_verbose 
00910                 host_test_reset = self.opts_mut_reset_type  if reset_type is None else reset_type
00911                 host_test_result = self.run_host_test (test.host_test,
00912                                                       image_path, disk, port, duration,
00913                                                       micro=target_name,
00914                                                       verbose=host_test_verbose,
00915                                                       reset=host_test_reset,
00916                                                       reset_tout=reset_tout,
00917                                                       copy_method=selected_copy_method,
00918                                                       program_cycle_s=target_by_mcu.program_cycle_s)
00919                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
00920 
00921             # Store test result
00922             test_all_result.append(single_test_result)
00923             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
00924             elapsed_time = single_testduration  # Time of single test case execution after reset
00925 
00926             detailed_test_results[test_index] = {
00927                 'result' : single_test_result,
00928                 'output' : single_test_output,
00929                 'target_name' : target_name,
00930                 'target_name_unique' : target_name_unique,
00931                 'toolchain_name' : toolchain_name,
00932                 'id' : test_id,
00933                 'description' : test_description,
00934                 'elapsed_time' : round(elapsed_time, 2),
00935                 'duration' : single_timeout,
00936                 'copy_method' : _copy_method,
00937             }
00938 
00939             print self.print_test_result (single_test_result, target_name_unique, toolchain_name,
00940                                          test_id, test_description, elapsed_time, single_timeout)
00941 
00942             # Update database entries for ongoing test
00943             if self.db_logger  and self.db_logger .is_connected():
00944                 test_type = 'SingleTest'
00945                 self.db_logger .insert_test_entry(self.db_logger_build_id ,
00946                                                  target_name,
00947                                                  toolchain_name,
00948                                                  test_type,
00949                                                  test_id,
00950                                                  single_test_result,
00951                                                  single_test_output,
00952                                                  elapsed_time,
00953                                                  single_timeout,
00954                                                  test_index)
00955 
00956             # If we perform a waterfall test we test until we get an OK result, then stop testing
00957             if self.opts_waterfall_test  and single_test_result == self.TEST_RESULT_OK :
00958                 break
00959 
00960         if self.db_logger :
00961             self.db_logger .disconnect()
00962 
00963         return (self.shape_global_test_loop_result (test_all_result, self.opts_waterfall_test  and self.opts_consolidate_waterfall_test ),
00964                 target_name_unique,
00965                 toolchain_name,
00966                 test_id,
00967                 test_description,
00968                 round(elapsed_time, 2),
00969                 single_timeout,
00970                 self.shape_test_loop_ok_result_count (test_all_result)), detailed_test_results
00971 
00972     def handle (self, test_spec, target_name, toolchain_name, test_loops=1):
00973         """ Function determines MUT's mbed disk/port and copies binary to
00974             target.
00975         """
00976         handle_results = []
00977         data = json.loads(test_spec)
00978 
00979         # Find a suitable MUT:
00980         mut = None
00981         for id, m in self.muts .iteritems():
00982             if m['mcu'] == data['mcu']:
00983                 mut = m
00984                 handle_result = self.handle_mut (mut, data, target_name, toolchain_name, test_loops=test_loops)
00985                 handle_results.append(handle_result)
00986 
00987         return handle_results
00988 
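    # Illustrative sketch (not part of the original file): the `test_spec` argument of
    # handle() is a JSON string produced by shape_test_request(); the fields consumed by
    # handle_mut() look roughly like this (values are hypothetical):
    #
    #     {
    #         "mcu": "K64F",
    #         "image": "build/test/K64F/GCC_ARM/MBED_A1/basic.bin",
    #         "test_id": "MBED_A1",
    #         "duration": 20
    #     }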
00989     def print_test_result (self, test_result, target_name, toolchain_name,
00990                           test_id, test_description, elapsed_time, duration):
00991         """ Use specific convention to print test result and related data
00992         """
00993         tokens = []
00994         tokens.append("TargetTest")
00995         tokens.append(target_name)
00996         tokens.append(toolchain_name)
00997         tokens.append(test_id)
00998         tokens.append(test_description)
00999         separator = "::"
01000         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
01001         result = separator.join(tokens) + " [" + test_result +"]" + time_info
01002         return Fore.MAGENTA + result + Fore.RESET
01003 
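    # Illustrative output of print_test_result (not part of the original file; values are
    # hypothetical), printed in magenta via colorama:
    #
    #     TargetTest::K64F::GCC_ARM::MBED_A1::Basic [OK] in 1.25 of 20 sec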
01004     def shape_test_loop_ok_result_count (self, test_all_result):
01005         """ Reformats list of results to simple string
01006         """
01007         test_loop_count = len(test_all_result)
01008         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK )
01009         return "%d/%d"% (test_loop_ok_result, test_loop_count)
01010 
01011     def shape_global_test_loop_result (self, test_all_result, waterfall_and_consolidate):
01012         """ Reduces a list of per-loop results to a single consolidated result
01013         """
01014         result = self.TEST_RESULT_FAIL 
01015 
01016         if all(test_all_result[0] == res for res in test_all_result):
01017             result = test_all_result[0]
01018         elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK  for res in test_all_result):
01019             result = self.TEST_RESULT_OK 
01020 
01021         return result
01022 
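    # Illustrative examples (not part of the original file):
    #
    #     self.shape_global_test_loop_result(['OK', 'OK', 'OK'], False)     # -> 'OK'
    #     self.shape_global_test_loop_result(['FAIL', 'OK'], True)          # -> 'OK' (waterfall + consolidate)
    #     self.shape_global_test_loop_result(['FAIL', 'TIMEOUT'], False)    # -> 'FAIL'
    #     self.shape_test_loop_ok_result_count(['OK', 'FAIL', 'OK'])        # -> '2/3'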
01023     def run_host_test (self, name, image_path, disk, port, duration,
01024                       micro=None, reset=None, reset_tout=None,
01025                       verbose=False, copy_method=None, program_cycle_s=None):
01026         """ Function creates new process with host test configured with particular test case.
01027             Function also polls for serial port activity from the process to catch all data
01028             printed by the test runner and the host test during test execution
01029         """
01030 
01031         def get_char_from_queue(obs):
01032             """ Get a character from the queue in a safe way
01033             """
01034             try:
01035                 c = obs.queue.get(block=True, timeout=0.5)
01036             except Empty, _:
01037                 c = None
01038             return c
01039 
01040         def filter_queue_char(c):
01041             """ Filters out non-ASCII characters from serial port output
01042             """
01043             if ord(c) not in range(128):
01044                 c = ' '
01045             return c
01046 
01047         def get_test_result(output):
01048             """ Parse test 'output' data
01049             """
01050             result = self.TEST_RESULT_TIMEOUT 
01051             for line in "".join(output).splitlines():
01052                 search_result = self.RE_DETECT_TESTCASE_RESULT .search(line)
01053                 if search_result and len(search_result.groups()):
01054                     result = self.TEST_RESULT_MAPPING [search_result.groups(0)[0]]
01055                     break
01056             return result
01057 
01058         def get_auto_property_value(property_name, line):
01059             """ Scans an auto-detection line from the MUT and returns the value of the
01060                 scanned parameter 'property_name' as a string
01061             """
01062             result = None
01063             if re.search("HOST: Property '%s'"% property_name, line) is not None:
01064                 property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
01065                 if property is not None and len(property.groups()) == 1:
01066                     result = property.groups()[0]
01067             return result
01068 
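        # Illustrative example (not part of the original file): for a serial line such as
        #     "HOST: Property 'timeout' = '15'"
        # get_auto_property_value('timeout', line) returns the string '15'.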
01069         # print "{%s} port:%s disk:%s"  % (name, port, disk),
01070         cmd = ["python",
01071                '%s.py'% name,
01072                '-d', disk,
01073                '-f', '"%s"'% image_path,
01074                '-p', port,
01075                '-t', str(duration),
01076                '-C', str(program_cycle_s)]
01077 
01078         if get_module_avail('mbed_lstools') and self.opts_auto_detect :
01079             cmd += ['--auto']
01080 
01081         # Add extra parameters to host_test
01082         if copy_method is not None:
01083             cmd += ["-c", copy_method]
01084         if micro is not None:
01085             cmd += ["-m", micro]
01086         if reset is not None:
01087             cmd += ["-r", reset]
01088         if reset_tout is not None:
01089             cmd += ["-R", str(reset_tout)]
01090 
01091         if verbose:
01092             print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
01093             print "Test::Output::Start"
01094 
01095         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
01096         obs = ProcessObserver(proc)
01097         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
01098         line = ''
01099         output = []
01100         start_time = time()
01101         while (time() - start_time) < (2 * duration):
01102             c = get_char_from_queue(obs)
01103             if c:
01104                 if verbose:
01105                     sys.stdout.write(c)
01106                 c = filter_queue_char(c)
01107                 output.append(c)
01108                 # Give the mbed under test a way to communicate the end of the test
01109                 if c in ['\n', '\r']:
01110 
01111                     # Checking for auto-detection information from the test about MUT reset moment
01112                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
01113                         # We will update this marker only once to prevent resetting the start time more than once
01114                         update_once_flag['reset_target'] = True
01115                         start_time = time()
01116 
01117                     # Checking for auto-detection information from the test about timeout
01118                     auto_timeout_val = get_auto_property_value('timeout', line)
01119                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
01120                         # We will update this marker only once to prevent multiple timeout updates
01121                         update_once_flag['timeout'] = True
01122                         duration = int(auto_timeout_val)
01123 
01124                     # Detect mbed assert:
01125                     if 'mbed assertation failed: ' in line:
01126                         output.append('{{mbed_assert}}')
01127                         break
01128 
01129                     # Check for test end
01130                     if '{end}' in line:
01131                         break
01132                     line = ''
01133                 else:
01134                     line += c
01135         end_time = time()
01136         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
01137 
01138         c = get_char_from_queue(obs)
01139 
01140         if c:
01141             if verbose:
01142                 sys.stdout.write(c)
01143             c = filter_queue_char(c)
01144             output.append(c)
01145 
01146         if verbose:
01147             print "Test::Output::Finish"
01148         # Stop test process
01149         obs.stop()
01150 
01151         result = get_test_result(output)
01152         return (result, "".join(output), testcase_duration, duration)
01153 
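    # Editor's note (not part of the original source): illustrative sketch of the command
    # line that run_host_test() above assembles. The host test name, mount point, serial
    # port, image path and options below are hypothetical values, not taken from the code:
    #
    #   python echo.py -d E: -f "BUILD/K64F/GCC_ARM/echo.bin" -p COM34 -t 10 -C 4 -c shutil -m K64F
    #
    # Actual values come from the MUTs configuration and the options passed to the method;
    # '--auto', '-r' and '-R' are appended only when the corresponding options are set.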
01154     def is_peripherals_available (self, target_mcu_name, peripherals=None):
01155         """ Checks if specified target should run specific peripheral test case defined in MUTs file
01156         """
01157         if peripherals is not None:
01158             peripherals = set(peripherals)
01159         for id, mut in self.muts.iteritems():
01160             # Target MCU name check
01161             if mut["mcu"] != target_mcu_name:
01162                 continue
01163             # Peripherals check
01164             if peripherals is not None:
01165                 if 'peripherals' not in mut:
01166                     continue
01167                 if not peripherals.issubset(set(mut['peripherals'])):
01168                     continue
01169             return True
01170         return False
01171 
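    # Editor's note (not part of the original source): illustrative sketch of
    # is_peripherals_available() above with a hypothetical MUTs dictionary:
    #
    #   self.muts = {1: {'mcu': 'K64F', 'peripherals': ['SD', 'RTC']}}
    #
    #   is_peripherals_available('K64F', ['SD'])   -> True   (subset of declared peripherals)
    #   is_peripherals_available('K64F', ['CAN'])  -> False  (peripheral not declared)
    #   is_peripherals_available('K64F')           -> True   (no peripheral requirement)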
01172     def shape_test_request (self, mcu, image_path, test_id, duration=10):
01173         """ Function prepares JSON structure describing test specification
01174         """
01175         test_spec = {
01176             "mcu": mcu,
01177             "image": image_path,
01178             "duration": duration,
01179             "test_id": test_id,
01180         }
01181         return json.dumps(test_spec)
01182 
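    # Editor's note (not part of the original source): illustrative sketch of the JSON test
    # specification produced by shape_test_request() above, using hypothetical values:
    #
    #   >>> print self.shape_test_request('K64F', 'BUILD/image.bin', 'MBED_A1', duration=20)
    #   {"test_id": "MBED_A1", "image": "BUILD/image.bin", "duration": 20, "mcu": "K64F"}
    #
    # (Key order may differ; json.dumps() does not sort keys by default.)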
01183 
01184 def get_unique_value_from_summary (test_summary, index):
01185     """ Gets list of unique target names
01186     """
01187     result = []
01188     for test in test_summary:
01189         target_name = test[index]
01190         if target_name not in result:
01191             result.append(target_name)
01192     return sorted(result)
01193 
01194 
01195 def get_unique_value_from_summary_ext (test_summary, index_key, index_val):
01196     """ Builds a dictionary mapping each unique key-column value to its first value-column value
01197     """
01198     result = {}
01199     for test in test_summary:
01200         key = test[index_key]
01201         val = test[index_val]
01202         if key not in result:
01203             result[key] = val
01204     return result
01205 
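# Editor's note (not part of the original source): illustrative sketch of the two summary
# helpers above on a hypothetical test summary, where each row is a list and columns are
# addressed by index:
#
#   >>> summary = [['K64F', 'GCC_ARM', 'OK'], ['K64F', 'ARM', 'FAIL'], ['LPC1768', 'GCC_ARM', 'OK']]
#   >>> get_unique_value_from_summary(summary, 0)
#   ['K64F', 'LPC1768']
#   >>> get_unique_value_from_summary_ext(summary, 0, 2)
#   {'K64F': 'OK', 'LPC1768': 'OK'}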
01206 
01207 def show_json_file_format_error (json_spec_filename, line, column):
01208     """ Prints the broken JSON content around the reported error position
01209     """
01210     with open(json_spec_filename) as data_file:
01211         line_no = 1
01212         for json_line in data_file:
01213             if line_no + 5 >= line: # Print last few lines before error
01214                 print 'Line %d:\t'%line_no + json_line, # Prints line
01215             if line_no == line:
01216                 print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
01217                 break
01218             line_no += 1
01219 
01220 
01221 def json_format_error_defect_pos (json_error_msg):
01222     """ Gets the first error line and column of a malformed JSON file,
01223         parsed from the message of the exception thrown by json.loads()
01224     """
01225     result = None
01226     line, column = 0, 0
01227     # Line value search
01228     line_search = re.search('line [0-9]+', json_error_msg)
01229     if line_search is not None:
01230         ls = line_search.group().split(' ')
01231         if len(ls) == 2:
01232             line = int(ls[1])
01233             # Column position search
01234             column_search = re.search('column [0-9]+', json_error_msg)
01235             if column_search is not None:
01236                 cs = column_search.group().split(' ')
01237                 if len(cs) == 2:
01238                     column = int(cs[1])
01239                     result = [line, column]
01240     return result
01241 
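# Editor's note (not part of the original source): illustrative sketch of
# json_format_error_defect_pos() above. The error message text is an assumption based on
# the typical wording of ValueError messages raised by json.loads():
#
#   >>> json_format_error_defect_pos("Expecting ',' delimiter: line 9 column 17 (char 143)")
#   [9, 17]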
01242 
01243 def get_json_data_from_file (json_spec_filename, verbose=False):
01244     """ Loads a JSON-formatted file into a data structure
01245     """
01246     result = None
01247     try:
01248         with open(json_spec_filename) as data_file:
01249             try:
01250                 result = json.load(data_file)
01251             except ValueError as json_error_msg:
01252                 result = None
01253                 print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
01254                 # We can print where error occurred inside JSON file if we can parse exception msg
01255                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
01256                 if json_format_defect_pos is not None:
01257                     line = json_format_defect_pos[0]
01258                     column = json_format_defect_pos[1]
01259                     print
01260                     show_json_file_format_error(json_spec_filename, line, column)
01261 
01262     except IOError as fileopen_error_msg:
01263         print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
01264         print
01265     if verbose and result:
01266         pp = pprint.PrettyPrinter(indent=4)
01267         pp.pprint(result)
01268     return result
01269 
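# Editor's note (not part of the original source): typical usage of get_json_data_from_file()
# above with a hypothetical MUTs file name; the function returns None when the file is
# missing or malformed, so callers should check the result:
#
#   >>> muts = get_json_data_from_file('muts_all.json', verbose=False)
#   >>> muts is None or isinstance(muts, dict)
#   True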
01270 
01271 def print_muts_configuration_from_json (json_data, join_delim=", ", platform_filter=None):
01272     """ Formats the MUTs configuration passed to the test script for verbose output
01273     """
01274     muts_info_cols = []
01275     # We need to check all unique properties for each defined MUT
01276     for k in json_data:
01277         mut_info = json_data[k]
01278         for mut_property in mut_info:
01279             if mut_property not in muts_info_cols:
01280                 muts_info_cols.append(mut_property)
01281 
01282     # Prepare pretty table object to display all MUTs
01283     pt_cols = ["index"] + muts_info_cols
01284     pt = PrettyTable(pt_cols)
01285     for col in pt_cols:
01286         pt.align[col] = "l"
01287 
01288     # Add rows to pretty print object
01289     for k in json_data:
01290         row = [k]
01291         mut_info = json_data[k]
01292 
01293         add_row = True
01294         if platform_filter and 'mcu' in mut_info:
01295             add_row = re.search(platform_filter, mut_info['mcu']) is not None
01296         if add_row:
01297             for col in muts_info_cols:
01298                 cell_val = mut_info[col] if col in mut_info else None
01299                 if type(cell_val) == ListType:
01300                     cell_val = join_delim.join(cell_val)
01301                 row.append(cell_val)
01302             pt.add_row(row)
01303     return pt.get_string()
01304 
01305 
01306 def print_test_configuration_from_json (json_data, join_delim=", "):
01307     """ Formats the test specification configuration passed to the test script for verbose output
01308     """
01309     toolchains_info_cols = []
01310     # We need to check all toolchains for each device
01311     for k in json_data:
01312         # k should be 'targets'
01313         targets = json_data[k]
01314         for target in targets:
01315             toolchains = targets[target]
01316             for toolchain in toolchains:
01317                 if toolchain not in toolchains_info_cols:
01318                     toolchains_info_cols.append(toolchain)
01319 
01320     # Prepare pretty table object to display test specification
01321     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
01322     pt = PrettyTable(pt_cols)
01323     for col in pt_cols:
01324         pt.align[col] = "l"
01325 
01326     # { target : [conflicted toolchains] }
01327     toolchain_conflicts = {}
01328     toolchain_path_conflicts = []
01329     for k in json_data:
01330         # k should be 'targets'
01331         targets = json_data[k]
01332         for target in targets:
01333             target_supported_toolchains = get_target_supported_toolchains(target)
01334             if not target_supported_toolchains:
01335                 target_supported_toolchains = []
01336             target_name = target if target in TARGET_MAP else "%s*"% target
01337             row = [target_name]
01338             toolchains = targets[target]
01339 
01340             for toolchain in sorted(toolchains_info_cols):
01341                 # Check for conflicts: target vs toolchain
01342                 conflict = False
01343                 conflict_path = False
01344                 if toolchain in toolchains:
01345                     if toolchain not in target_supported_toolchains:
01346                         conflict = True
01347                         if target not in toolchain_conflicts:
01348                             toolchain_conflicts[target] = []
01349                         toolchain_conflicts[target].append(toolchain)
01350                 # Add marker inside table about target usage / conflict
01351                 cell_val = 'Yes' if toolchain in toolchains else '-'
01352                 if conflict:
01353                     cell_val += '*'
01354                 # Check for conflicts: toolchain vs toolchain path
01355                 if toolchain in TOOLCHAIN_PATHS:
01356                     toolchain_path = TOOLCHAIN_PATHS[toolchain]
01357                     if not os.path.isdir(toolchain_path):
01358                         conflict_path = True
01359                         if toolchain not in toolchain_path_conflicts:
01360                             toolchain_path_conflicts.append(toolchain)
01361                 if conflict_path:
01362                     cell_val += '#'
01363                 row.append(cell_val)
01364             pt.add_row(row)
01365 
01366     # generate result string
01367     result = pt.get_string()    # Test specification table
01368     if toolchain_conflicts or toolchain_path_conflicts:
01369         result += "\n"
01370         result += "Toolchain conflicts:\n"
01371         for target in toolchain_conflicts:
01372             if target not in TARGET_MAP:
01373                 result += "\t* Target %s unknown\n"% (target)
01374             conflict_target_list = join_delim.join(toolchain_conflicts[target])
01375             suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
01376             result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
01377 
01378         for toolchain in toolchain_path_conflicts:
01379             # Let's check the toolchain configuration
01380             if toolchain in TOOLCHAIN_PATHS:
01381                 toolchain_path = TOOLCHAIN_PATHS[toolchain]
01382                 if not os.path.isdir(toolchain_path):
01383                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
01384     return result
01385 
01386 
01387 def get_avail_tests_summary_table (cols=None, result_summary=True, join_delim=',', platform_filter=None):
01388     """ Generates a summary table with all test cases and additional test case
01389         information using pretty-print functionality. Allows the test suite user to
01390         review the available test cases
01391     """
01392     # get all unique test ID prefixes
01393     unique_test_id = []
01394     for test in TESTS:
01395         split = test['id'].split('_')[:-1]
01396         test_id_prefix = '_'.join(split)
01397         if test_id_prefix not in unique_test_id:
01398             unique_test_id.append(test_id_prefix)
01399     unique_test_id.sort()
01400     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
01401     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
01402 
01403     test_properties = ['id',
01404                        'automated',
01405                        'description',
01406                        'peripherals',
01407                        'host_test',
01408                        'duration'] if cols is None else cols
01409 
01410     # All tests status table print
01411     pt = PrettyTable(test_properties)
01412     for col in test_properties:
01413         pt.align[col] = "l"
01414     pt.align['duration'] = "r"
01415 
01416     counter_all = 0
01417     counter_automated = 0
01418     pt.padding_width = 1 # One space between column edges and contents (default)
01419 
01420     for test_id in sorted(TEST_MAP.keys()):
01421         if platform_filter is not None:
01422             # Filter out platforms using regex
01423             if re.search(platform_filter, test_id) is None:
01424                 continue
01425         row = []
01426         test = TEST_MAP[test_id]
01427         split = test_id.split('_')[:-1]
01428         test_id_prefix = '_'.join(split)
01429 
01430         for col in test_properties:
01431             col_value = test[col]
01432             if type(test[col]) == ListType:
01433                 col_value = join_delim.join(test[col])
01434             elif test[col] is None:
01435                 col_value = "-"
01436 
01437             row.append(col_value)
01438         if test['automated']:
01439             counter_dict_test_id_types[test_id_prefix] += 1
01440             counter_automated += 1
01441         pt.add_row(row)
01442         # Update counters
01443         counter_all += 1
01444         counter_dict_test_id_types_all[test_id_prefix] += 1
01445     result = pt.get_string()
01446     result += "\n\n"
01447 
01448     if result_summary and not platform_filter:
01449         # Automation result summary
01450         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
01451         pt = PrettyTable(test_id_cols)
01452         pt.align['automated'] = "r"
01453         pt.align['all'] = "r"
01454         pt.align['percent [%]'] = "r"
01455 
01456         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
01457         str_progress = progress_bar(percent_progress, 75)
01458         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
01459         result += "Automation coverage:\n"
01460         result += pt.get_string()
01461         result += "\n\n"
01462 
01463         # Test automation coverage table print
01464         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
01465         pt = PrettyTable(test_id_cols)
01466         pt.align['id'] = "l"
01467         pt.align['automated'] = "r"
01468         pt.align['all'] = "r"
01469         pt.align['percent [%]'] = "r"
01470         for unique_id in unique_test_id:
01471             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
01472             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
01473             str_progress = progress_bar(percent_progress, 75)
01474             row = [unique_id,
01475                    counter_dict_test_id_types[unique_id],
01476                    counter_dict_test_id_types_all[unique_id],
01477                    percent_progress,
01478                    "[" + str_progress + "]"]
01479             pt.add_row(row)
01480         result += "Test automation coverage:\n"
01481         result += pt.get_string()
01482         result += "\n\n"
01483     return result
01484 
01485 
01486 def progress_bar (percent_progress, saturation=0):
01487     """ This function creates a text progress bar with an optional saturation marker
01488     """
01489     step = int(percent_progress / 2)    # Scale percentage to bar width (0 - 50 characters)
01490     str_progress = '#' * step + '.' * int(50 - step)
01491     c = '!' if str_progress[38] == '.' else '|'
01492     if saturation > 0:
01493         saturation = saturation / 2
01494         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
01495     return str_progress
01496 
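# Editor's note (not part of the original source): illustrative sketch of progress_bar()
# above. The bar is 50 characters wide; passing saturation > 0 inserts one extra marker
# character at the given percentage position:
#
#   >>> bar = progress_bar(50.0)
#   >>> bar.count('#'), bar.count('.'), len(bar)
#   (25, 25, 50)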
01497 
01498 def singletest_in_cli_mode (single_test):
01499     """ Runs SingleTestRunner object in CLI (Command line interface) mode
01500 
01501         @return returns success code (0 == success) for building and running tests
01502     """
01503     start = time()
01504     # Execute tests depending on options and filter applied
01505     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
01506     elapsed_time = time() - start
01507 
01508     # Human readable summary
01509     if not single_test.opts_suppress_summary:
01510         # prints well-formed summary with results (SQL table like)
01511         print single_test.generate_test_summary(test_summary, shuffle_seed)
01512     if single_test.opts_test_x_toolchain_summary:
01513         # prints well-formed summary with results (SQL table like)
01514         # table shows test x toolchain test result matrix
01515         print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
01516 
01517     print "Completed in %.2f sec"% (elapsed_time)
01518     print
01519     # Write summary of the builds
01520 
01521     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
01522     status = print_report_exporter.report(build_report)
01523 
01524     # Store extra reports in files
01525     if single_test.opts_report_html_file_name:
01526         # Export results in form of HTML report to separate file
01527         report_exporter = ReportExporter(ResultExporterType.HTML)
01528         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
01529     if single_test.opts_report_junit_file_name:
01530         # Export results in form of JUnit XML report to separate file
01531         report_exporter = ReportExporter(ResultExporterType.JUNIT)
01532         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
01533     if single_test.opts_report_text_file_name:
01534         # Export results in form of a text file
01535         report_exporter = ReportExporter(ResultExporterType.TEXT)
01536         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
01537     if single_test.opts_report_build_file_name:
01538         # Export build results as JUnit XML report to separate file
01539         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
01540         report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
01541 
01542     # Returns True if no build failures of the test projects or their dependencies
01543     return status
01544 
01545 class TestLogger():
01546     """ Super-class for logging and printing ongoing events for a test suite pass
01547     """
01548     def __init__ (self, store_log=True):
01549         """ We can control whether the logger actually stores the log in memory
01550             or just handles all log entries immediately
01551         """
01552         self.log = []
01553         self.log_to_file = False
01554         self.log_file_name = None
01555         self.store_log = store_log
01556 
01557         self.LogType  = construct_enum(INFO='Info',
01558                                       WARN='Warning',
01559                                       NOTIF='Notification',
01560                                       ERROR='Error',
01561                                       EXCEPT='Exception')
01562 
01563         self.LogToFileAttr  = construct_enum(CREATE=1,    # Create or overwrite existing log file
01564                                             APPEND=2)    # Append to existing log file
01565 
01566     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01567         """ Log one line of text
01568         """
01569         log_timestamp = time()
01570         log_entry = {'log_type' : LogType,
01571                      'log_timestamp' : log_timestamp,
01572                      'log_line' : log_line,
01573                      '_future' : None
01574         }
01575         # Store log in memory
01576         if self.store_log:
01577             self.log.append(log_entry)
01578         return log_entry
01579 
01580 
01581 class CLITestLogger(TestLogger):
01582     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
01583     """
01584     def __init__(self, store_log=True, file_name=None):
01585         TestLogger.__init__(self)
01586         self.log_file_name = file_name
01587         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
01588         self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
01589 
01590     def log_print (self, log_entry, timestamp=True):
01591         """ Formats a log entry for on-screen display and returns it as a string
01592         """
01593         ts = log_entry['log_timestamp']
01594         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
01595         log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
01596         return timestamp_str + log_line_str
01597 
01598     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01599         """ Logs a line; if a log file was specified the log line will also be appended
01600             at the end of that file
01601         """
01602         log_entry = TestLogger.log_line(self, LogType, log_line)
01603         log_line_str = self.log_print(log_entry, timestamp)
01604         if self.log_file_name is not None:
01605             try:
01606                 with open(self.log_file_name, 'a') as f:
01607                     f.write(log_line_str + line_delim)
01608             except IOError:
01609                 pass
01610         return log_line_str
01611 
01612 
01613 def factory_db_logger (db_url):
01614     """ Factory for a database driver, selected by the database type supplied in the connection string db_url
01615     """
01616     if db_url is not None:
01617         from tools.test_mysql import MySQLDBAccess
01618         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
01619         if connection_info is not None:
01620             (db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
01621             if db_type == 'mysql':
01622                 return MySQLDBAccess()
01623     return None
01624 
01625 
01626 def detect_database_verbose (db_url):
01627     """ Runs the database detection sequence in verbose mode (prints) to check if the database connection string is valid
01628     """
01629     result = BaseDBAccess().parse_db_connection_string(db_url)
01630     if result is not None:
01631         # Parsing passed
01632         (db_type, username, password, host, db_name) = result
01633         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
01634         # Let's try to connect
01635         db_ = factory_db_logger(db_url)
01636         if db_ is not None:
01637             print "Connecting to database '%s'..."% db_url,
01638             db_.connect(host, username, password, db_name)
01639             if db_.is_connected():
01640                 print "ok"
01641                 print "Detecting database..."
01642                 print db_.detect_database(verbose=True)
01643                 print "Disconnecting...",
01644                 db_.disconnect()
01645                 print "done"
01646         else:
01647             print "Database type '%s' unknown"% db_type
01648     else:
01649         print "Parse error: '%s' - DB Url error"% (db_url)
01650 
01651 
01652 def get_module_avail (module_name):
01653     """ This function returns True if module_name is an already imported module
01654     """
01655     return module_name in sys.modules.keys()
01656 
01657 
01658 def get_autodetected_MUTS_list(platform_name_filter=None):
01659     oldError = None
01660     if os.name == 'nt':
01661         # Disable Windows error box temporarily
01662         oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
01663 
01664     mbeds = mbed_lstools.create()
01665     detect_muts_list = mbeds.list_mbeds()
01666 
01667     if os.name == 'nt':
01668         ctypes.windll.kernel32.SetErrorMode(oldError)
01669 
01670     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
01671 
01672 def get_autodetected_MUTS (mbeds_list, platform_name_filter=None):
01673     """ Function builds an artificial MUTs structure from the mbed-enabled devices detected on the host.
01674         If no devices were auto-detected it will return an empty dictionary.
01675 
01676         if get_module_avail('mbed_lstools'):
01677             mbeds = mbed_lstools.create()
01678             mbeds_list = mbeds.list_mbeds()
01679 
01680         @param mbeds_list list of mbeds captured from mbed_lstools
01681         @param platform_name_filter optional list of platform names used to filter the detected devices
01682     """
01683     result = {}   # Should be in muts_all.json format
01684     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
01685     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
01686     index = 1
01687     for mut in mbeds_list:
01688         # Filter the MUTS if a filter is specified
01689 
01690         if platform_name_filter and not mut['platform_name'] in platform_name_filter:
01691             continue
01692 
01693         # For mcu_unique we assign the 'platform_name_unique' value from the mbedls output (if it exists);
01694         # if not, we create our own unique value (last few chars of the platform's target_id).
01695         m = {'mcu': mut['platform_name'],
01696              'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
01697              'port': mut['serial_port'],
01698              'disk': mut['mount_point'],
01699              'peripherals': []     # No peripheral detection
01700              }
01701         if index not in result:
01702             result[index] = {}
01703         result[index] = m
01704         index += 1
01705     return result
01706 
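# Editor's note (not part of the original source): illustrative sketch of the MUTs
# structure built by get_autodetected_MUTS() above from one mbedls-style entry (the
# example device is the hypothetical one used in the comment inside the function):
#
#   >>> mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:',
#   ...                'target_id': '07050200623B61125D5EF72A', 'serial_port': 'COM34'}]
#   >>> get_autodetected_MUTS(mbeds_list)
#   {1: {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]',
#        'port': 'COM34', 'disk': 'E:', 'peripherals': []}}
#
# (Dictionary key order may vary.)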
01707 
01708 def get_autodetected_TEST_SPEC (mbeds_list,
01709                                use_default_toolchain=True,
01710                                use_supported_toolchains=False,
01711                                toolchain_filter=None,
01712                                platform_name_filter=None):
01713     """ Function generates an artificial test_spec structure for the mbed-enabled devices detected on the host.
01714         If no devices were auto-detected it will return an empty 'targets' test_spec description.
01715 
01716         use_default_toolchain - if True add default toolchain to test_spec
01717         use_supported_toolchains - if True add all supported toolchains to test_spec
01718         toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
01719     """
01720     result = {'targets': {} }
01721 
01722     for mut in mbeds_list:
01723         mcu = mut['mcu']
01724         if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
01725             if mcu in TARGET_MAP:
01726                 default_toolchain = TARGET_MAP[mcu].default_toolchain
01727                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
01728 
01729                 # Decide which toolchains should be added to test specification toolchain pool for each target
01730                 toolchains = []
01731                 if use_default_toolchain:
01732                     toolchains.append(default_toolchain)
01733                 if use_supported_toolchains:
01734                     toolchains += supported_toolchains
01735                 if toolchain_filter is not None:
01736                     all_toolchains = supported_toolchains + [default_toolchain]
01737                     for toolchain in toolchain_filter:
01738                         if toolchain in all_toolchains:
01739                             toolchains.append(toolchain)
01740 
01741                 result['targets'][mcu] = list(set(toolchains))
01742     return result
01743 
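# Editor's note (not part of the original source): illustrative shape of the structure
# returned by get_autodetected_TEST_SPEC() above, with hypothetical target and toolchain
# names:
#
#   {'targets': {'K64F': ['ARM', 'GCC_ARM'],
#                'NUCLEO_F401RE': ['ARM']}}
#
# Which toolchains appear per target depends on use_default_toolchain,
# use_supported_toolchains and toolchain_filter, as described in the docstring above.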
01744 
01745 def get_default_test_options_parser ():
01746     """ Get common test script options used by CLI, web services etc.
01747     """
01748     parser = argparse.ArgumentParser()
01749     parser.add_argument('-i', '--tests',
01750                         dest='test_spec_filename',
01751                         metavar="FILE",
01752                         type=argparse_filestring_type,
01753                         help='Points to file with test specification')
01754 
01755     parser.add_argument('-M', '--MUTS',
01756                         dest='muts_spec_filename',
01757                         metavar="FILE",
01758                         type=argparse_filestring_type,
01759                         help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
01760 
01761     parser.add_argument("-j", "--jobs",
01762                         dest='jobs',
01763                         metavar="NUMBER",
01764                         type=int,
01765                         help="Define number of compilation jobs. Default value is 1")
01766 
01767     if get_module_avail('mbed_lstools'):
01768         # Additional features available when mbed_lstools is installed on host and imported
01769         # mbed_lstools allow users to detect connected to host mbed-enabled devices
01770         parser.add_argument('--auto',
01771                             dest='auto_detect',
01772                             action="store_true",
01773                             help='Use mbed-ls module to detect all connected mbed devices')
01774 
01775         toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
01776         parser.add_argument('--tc',
01777                             dest='toolchains_filter',
01778                             type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
01779                             help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
01780 
01781         test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
01782         parser.add_argument('--oper',
01783                             dest='operability_checks',
01784                             type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
01785                             help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
01786 
01787     parser.add_argument('--clean',
01788                         dest='clean',
01789                         action="store_true",
01790                         help='Clean the build directory')
01791 
01792     parser.add_argument('-P', '--only-peripherals',
01793                         dest='test_only_peripheral',
01794                         default=False,
01795                         action="store_true",
01796                         help='Test only peripherals declared for the MUT and skip common tests')
01797 
01798     parser.add_argument("--profile", dest="profile", action="append",
01799                         type=argparse_filestring_type,
01800                         default=[])
01801 
01802     parser.add_argument('-C', '--only-commons',
01803                         dest='test_only_common',
01804                         default=False,
01805                         action="store_true",
01806                         help='Test only board internals. Skip peripheral tests and perform common tests')
01807 
01808     parser.add_argument('-n', '--test-by-names',
01809                         dest='test_by_names',
01810                         type=argparse_many(str),
01811                         help='Runs only tests enumerated in this switch. Use comma to separate test case names')
01812 
01813     parser.add_argument('-p', '--peripheral-by-names',
01814                       dest='peripheral_by_names',
01815                       type=argparse_many(str),
01816                       help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
01817 
01818     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
01819     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
01820 
01821     parser.add_argument('-c', '--copy-method',
01822                         dest='copy_method',
01823                         type=argparse_uppercase_type(copy_methods, "flash method"),
01824                         help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
01825 
01826     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
01827     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
01828 
01829     parser.add_argument('-r', '--reset-type',
01830                         dest='mut_reset_type',
01831                         default=None,
01832                         type=argparse_uppercase_type(reset_methods, "reset method"),
01833                         help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
01834 
01835     parser.add_argument('-g', '--goanna-for-tests',
01836                         dest='goanna_for_tests',
01837                         action="store_true",
01838                         help='Run Goanna static analysis tool for tests (project will be rebuilt)')
01839 
01840     parser.add_argument('-G', '--goanna-for-sdk',
01841                         dest='goanna_for_mbed_sdk',
01842                         action="store_true",
01843                         help='Run Goanna static analysis tool for mbed SDK (project will be rebuilt)')
01844 
01845     parser.add_argument('-s', '--suppress-summary',
01846                         dest='suppress_summary',
01847                         default=False,
01848                         action="store_true",
01849                         help='Suppresses display of well-formatted table with test results')
01850 
01851     parser.add_argument('-t', '--test-summary',
01852                         dest='test_x_toolchain_summary',
01853                         default=False,
01854                         action="store_true",
01855                         help='Displays well-formatted table with test x toolchain test result per target')
01856 
01857     parser.add_argument('-A', '--test-automation-report',
01858                         dest='test_automation_report',
01859                         default=False,
01860                         action="store_true",
01861                         help='Prints information about all tests and exits')
01862 
01863     parser.add_argument('-R', '--test-case-report',
01864                         dest='test_case_report',
01865                         default=False,
01866                         action="store_true",
01867                         help='Prints information about all test cases and exits')
01868 
01869     parser.add_argument("-S", "--supported-toolchains",
01870                         action="store_true",
01871                         dest="supported_toolchains",
01872                         default=False,
01873                         help="Displays supported matrix of MCUs and toolchains")
01874 
01875     parser.add_argument("-O", "--only-build",
01876                         action="store_true",
01877                         dest="only_build_tests",
01878                         default=False,
01879                         help="Only build tests, skips actual test procedures (flashing etc.)")
01880 
01881     parser.add_argument('--parallel',
01882                         dest='parallel_test_exec',
01883                         default=False,
01884                         action="store_true",
01885                         help='Experimental: execute test runners for MUTs connected to your host in parallel (speeds up test result collection)')
01886 
01887     parser.add_argument('--config',
01888                         dest='verbose_test_configuration_only',
01889                         default=False,
01890                         action="store_true",
01891                         help='Displays full test specification and MUTs configuration and exits')
01892 
01893     parser.add_argument('--loops',
01894                         dest='test_loops_list',
01895                         type=argparse_many(str),
01896                         help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
01897 
01898     parser.add_argument('--global-loops',
01899                         dest='test_global_loops_value',
01900                         type=int,
01901                         help='Set global number of test loops per test. Default value is 1')
01902 
01903     parser.add_argument('--consolidate-waterfall',
01904                         dest='consolidate_waterfall_test',
01905                         default=False,
01906                         action="store_true",
01907                         help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
01908 
01909     parser.add_argument('-W', '--waterfall',
01910                         dest='waterfall_test',
01911                         default=False,
01912                         action="store_true",
01913                         help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
01914 
01915     parser.add_argument('-N', '--firmware-name',
01916                         dest='firmware_global_name',
01917                         help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')
01918 
01919     parser.add_argument('-u', '--shuffle',
01920                         dest='shuffle_test_order',
01921                         default=False,
01922                         action="store_true",
01923                         help='Shuffles test execution order')
01924 
01925     parser.add_argument('--shuffle-seed',
01926                         dest='shuffle_test_seed',
01927                         default=None,
01928                         help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
01929 
01930     parser.add_argument('-f', '--filter',
01931                         dest='general_filter_regex',
01932                         type=argparse_many(str),
01933                         default=None,
01934                         help='For some commands you can use filter to filter out results')
01935 
01936     parser.add_argument('--inc-timeout',
01937                         dest='extend_test_timeout',
01938                         metavar="NUMBER",
01939                         type=int,
01940                         help='You can increase global timeout for each test by specifying additional test timeout in seconds')
01941 
01942     parser.add_argument('--db',
01943                         dest='db_url',
01944                         help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
01945 
01946     parser.add_argument('-l', '--log',
01947                         dest='log_file_name',
01948                         help='Log events to external file (note not all console entries may be visible in log file)')
01949 
01950     parser.add_argument('--report-html',
01951                         dest='report_html_file_name',
01952                         help='You can log test suite results in form of HTML report')
01953 
01954     parser.add_argument('--report-junit',
01955                         dest='report_junit_file_name',
01956                         help='You can log test suite results in form of JUnit compliant XML report')
01957 
01958     parser.add_argument("--report-build",
01959                         dest="report_build_file_name",
01960                         help="Output the build results to a JUnit XML file")
01961 
01962     parser.add_argument("--report-text",
01963                         dest="report_text_file_name",
01964                         help="Output the build results to a text file")
01965 
01966     parser.add_argument('--verbose-skipped',
01967                         dest='verbose_skipped_tests',
01968                         default=False,
01969                         action="store_true",
01970                         help='Prints some extra information about skipped tests')
01971 
01972     parser.add_argument('-V', '--verbose-test-result',
01973                         dest='verbose_test_result_only',
01974                         default=False,
01975                         action="store_true",
01976                         help='Prints test serial output')
01977 
01978     parser.add_argument('-v', '--verbose',
01979                         dest='verbose',
01980                         default=False,
01981                         action="store_true",
01982                         help='Verbose mode (prints some extra information)')
01983 
01984     parser.add_argument('--version',
01985                         dest='version',
01986                         default=False,
01987                         action="store_true",
01988                         help='Prints script version and exits')
01989     return parser
01990 
01991 def test_path_to_name (path, base):
01992     """Change all slashes in a path into hyphens
01993     This creates a unique cross-platform test name based on the path
01994     This can eventually be overridden by a to-be-determined meta-data mechanism"""
01995     name_parts = []
01996     head, tail = os.path.split(relpath(path,base))
01997     while (tail and tail != "."):
01998         name_parts.insert(0, tail)
01999         head, tail = os.path.split(head)
02000 
02001     return "-".join(name_parts).lower()
02002 
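# Editor's note (not part of the original source): illustrative sketch of test_path_to_name()
# above; it turns a test directory path into a hyphen-separated, lower-case name. The paths
# below are hypothetical:
#
#   >>> test_path_to_name('proj/TESTS/network/tcp_echo', 'proj')
#   'tests-network-tcp_echo'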
02003 def find_tests (base_dir, target_name, toolchain_name, app_config=None):
02004     """ Finds all tests in a directory recursively
02005     base_dir: path to the directory to scan for tests (ex. 'path/to/project')
02006     target_name: name of the target to use for scanning (ex. 'K64F')
02007     toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
02008     app_config: location of a chosen mbed_app.json file
02010     """
02011 
02012     tests = {}
02013 
02014     # Prepare the toolchain
02015     toolchain = prepare_toolchain([base_dir], target_name, toolchain_name,
02016                                   silent=True, app_config=app_config)
02017 
02018     # Scan the directory for paths to probe for 'TESTS' folders
02019     base_resources = scan_resources([base_dir], toolchain)
02020 
02021     dirs = base_resources.inc_dirs
02022     for directory in dirs:
02023         subdirs = os.listdir(directory)
02024 
02025         # If the directory contains a subdirectory called 'TESTS', scan it for test cases
02026         if 'TESTS' in subdirs:
02027             walk_base_dir = join(directory, 'TESTS')
02028             test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir)
02029 
02030             # Loop through all subdirectories
02031             for d in test_resources.inc_dirs:
02032 
02033                 # If the test case folder is not called 'host_tests' and it is
02034                 # located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
02035                 # then add it to the tests
02036                 path_depth = get_path_depth(relpath(d, walk_base_dir))
02037                 if path_depth == 2:
02038                     test_group_directory_path, test_case_directory = os.path.split(d)
02039                     test_group_directory = os.path.basename(test_group_directory_path)
02040                     
02041                     # Check to make sure the discovered folder is not a host test directory
02042                     if test_case_directory != 'host_tests' and test_group_directory != 'host_tests':
02043                         test_name = test_path_to_name(d, base_dir)
02044                         tests[test_name] = d
02045 
02046     return tests
02047 
02048 def print_tests (tests, format="list", sort=True):
02049     """Given a dictionary of tests (as returned from "find_tests"), print them
02050     in the specified format"""
02051     if format == "list":
02052         for test_name in sorted(tests.keys()):
02053             test_path = tests[test_name]
02054             print "Test Case:"
02055             print "    Name: %s" % test_name
02056             print "    Path: %s" % test_path
02057     elif format == "json":
02058         print json.dumps(tests, indent=2)
02059     else:
02060         print "Unknown format '%s'" % format
02061         sys.exit(1)
02062 
02063 def norm_relative_path (path, start):
02064     """This function will create a normalized, relative path. It mimics the
02065     python os.path.relpath function, but also normalizes a Windows-style path
02066     that uses backslashes to a Unix-style path that uses forward slashes."""
02067     path = os.path.normpath(path)
02068     path = os.path.relpath(path, start)
02069     path = path.replace("\\", "/")
02070     return path
02071 
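# Editor's note (not part of the original source): illustrative sketch of norm_relative_path()
# above; it behaves like os.path.relpath() but always emits forward slashes. The Windows-style
# input below is hypothetical:
#
#   >>> norm_relative_path('C:\\work\\proj\\BUILD\\tests', 'C:\\work\\proj')
#   'BUILD/tests'    # when run on a Windows host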
02072 
02073 def build_test_worker (*args, **kwargs):
02074     """This is a worker function for the parallel building of tests. The `args`
02075     and `kwargs` are passed directly to `build_project`. It returns a dictionary
02076     with the following structure:
02077 
02078     {
02079         'result': `True` if no exceptions were thrown, `False` otherwise
02080         'reason': Instance of exception that was thrown on failure
02081         'bin_file': Path to the created binary if `build_project` was
02082                     successful. Not present otherwise
02083         'kwargs': The keyword arguments that were passed to `build_project`.
02084                   This includes arguments that were modified (ex. report)
02085     }
02086     """
02087     bin_file = None
02088     ret = {
02089         'result': False,
02090         'args': args,
02091         'kwargs': kwargs
02092     }
02093 
02094     try:
02095         bin_file = build_project(*args, **kwargs)
02096         ret['result'] = True
02097         ret['bin_file'] = bin_file
02098         ret['kwargs'] = kwargs
02099 
02100     except NotSupportedException, e:
02101         ret['reason'] = e
02102     except ToolException, e:
02103         ret['reason'] = e
02104     except KeyboardInterrupt, e:
02105         ret['reason'] = e
02106     except:
02107         # Print unhandled exceptions here
02108         import traceback
02109         traceback.print_exc(file=sys.stdout)
02110 
02111     return ret
02112 
02113 
02114 def build_tests (tests, base_source_paths, build_path, target, toolchain_name,
02115                 clean=False, notify=None, verbose=False, jobs=1, macros=None,
02116                 silent=False, report=None, properties=None,
02117                 continue_on_build_fail=False, app_config=None,
02118                 build_profile=None):
02119     """Given the data structure from 'find_tests' and the typical build parameters,
02120     build all the tests
02121 
02122     Returns a tuple of the build result (True or False) followed by the test
02123     build data structure"""
02124 
02125     execution_directory = "."
02126     base_path = norm_relative_path(build_path, execution_directory)
02127 
02128     target_name = target if isinstance(target, str) else target.name
02129     
02130     test_build = {
02131         "platform": target_name,
02132         "toolchain": toolchain_name,
02133         "base_path": base_path,
02134         "baud_rate": 9600,
02135         "binary_type": "bootable",
02136         "tests": {}
02137     }
02138 
02139     result = True
02140 
02141     jobs_count = int(jobs if jobs else cpu_count())
02142     p = Pool(processes=jobs_count)
02143     results = []
02144     for test_name, test_path in tests.iteritems():
02145         test_build_path = os.path.join(build_path, test_path)
02146         src_path = base_source_paths + [test_path]
02147         bin_file = None
02148         test_case_folder_name = os.path.basename(test_path)
02149         
02150         args = (src_path, test_build_path, target, toolchain_name)
02151         kwargs = {
02152             'jobs': jobs,
02153             'clean': clean,
02154             'macros': macros,
02155             'name': test_case_folder_name,
02156             'project_id': test_name,
02157             'report': report,
02158             'properties': properties,
02159             'verbose': verbose,
02160             'app_config': app_config,
02161             'build_profile': build_profile,
02162             'silent': True
02163         }
02164         
02165         results.append(p.apply_async(build_test_worker, args, kwargs))
02166 
02167     p.close()
02168     result = True
02169     itr = 0
02170     while len(results):
02171         itr += 1
02172         if itr > 360000:
02173             p.terminate()
02174             p.join()
02175             raise ToolException("Compile did not finish in 10 minutes")
02176         else:
02177             sleep(0.01)
02178             pending = 0
02179             for r in results:
02180                 if r.ready() is True:
02181                     try:
02182                         worker_result = r.get()
02183                         results.remove(r)
02184 
02185                         # Take report from the kwargs and merge it into existing report
02186                         report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
02187                         for test_key in report_entry.keys():
02188                             report[target_name][toolchain_name][test_key] = report_entry[test_key]
02189                         
02190                         # Set the overall result to a failure if a build failure occurred
02191                         if not worker_result['result'] and not isinstance(worker_result['reason'], NotSupportedException):
02192                             result = False
02193                             break
02194 
02195                         # Adding binary path to test build result
02196                         if worker_result['result'] and 'bin_file' in worker_result:
02197                             bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
02198 
02199                             test_build['tests'][worker_result['kwargs']['project_id']] = {
02200                                 "binaries": [
02201                                     {
02202                                         "path": bin_file
02203                                     }
02204                                 ]
02205                             }
02206 
02207                             test_key = worker_result['kwargs']['project_id'].upper()
02208                             print report[target_name][toolchain_name][test_key][0][0]['output'].rstrip()
02209                             print 'Image: %s\n' % bin_file
02210 
02211                     except:
02212                         if p._taskqueue.queue:
02213                             p._taskqueue.queue.clear()
02214                             sleep(0.5)
02215                         p.terminate()
02216                         p.join()
02217                         raise
02218                 else:
02219                     pending += 1
02220                     if pending >= jobs_count:
02221                         break
02222 
02223             # Break as soon as possible if there is a failure and we are not
02224             # continuing on build failures
02225             if not result and not continue_on_build_fail:
02226                 if p._taskqueue.queue:
02227                     p._taskqueue.queue.clear()
02228                     sleep(0.5)
02229                 p.terminate()
02230                 break
02231 
02232     p.join()
02233 
02234     test_builds = {}
02235     test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
02236 
02237     return result, test_builds
02238 
02239 
02240 def test_spec_from_test_builds(test_builds):
02241     return {
02242         "builds": test_builds
02243     }
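# Editor's note (not part of the original source): illustrative sketch of the test spec
# assembled by build_tests() and test_spec_from_test_builds() above, with hypothetical
# target, toolchain, test names and paths. This structure is presumably what gets
# serialised to a test_spec JSON file for a test runner to consume:
#
#   {'builds': {'K64F-GCC_ARM': {'platform': 'K64F',
#                                'toolchain': 'GCC_ARM',
#                                'base_path': 'BUILD/tests/K64F/GCC_ARM',
#                                'baud_rate': 9600,
#                                'binary_type': 'bootable',
#                                'tests': {'tests-network-tcp_echo':
#                                              {'binaries': [{'path': 'BUILD/tests/K64F/GCC_ARM/tcp_echo.bin'}]}}}}}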