joey shelton / LED_Demo

Dependencies:   MAX44000 PWM_Tone_Library nexpaq_mdk

Fork of LED_Demo by Maxim nexpaq


test_api.py

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2014 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 
00017 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
00018 """
00019 
00020 import os
00021 import re
00022 import sys
00023 import json
00024 import uuid
00025 import pprint
00026 import random
00027 import argparse
00028 import datetime
00029 import threading
00030 import ctypes
00031 from types import ListType
00032 from colorama import Fore, Back, Style
00033 from prettytable import PrettyTable
00034 from copy import copy
00035 
00036 from time import sleep, time
00037 from Queue import Queue, Empty
00038 from os.path import join, exists, basename, relpath
00039 from threading import Thread, Lock
00040 from subprocess import Popen, PIPE
00041 
00042 # Imports related to mbed build api
00043 from tools.tests import TESTS
00044 from tools.tests import TEST_MAP
00045 from tools.paths import BUILD_DIR
00046 from tools.paths import HOST_TESTS
00047 from tools.utils import ToolException
00048 from tools.utils import NotSupportedException
00049 from tools.utils import construct_enum
00050 from tools.memap import MemapParser
00051 from tools.targets import TARGET_MAP
00052 from tools.test_db import BaseDBAccess
00053 from tools.build_api import build_project, build_mbed_libs, build_lib
00054 from tools.build_api import get_target_supported_toolchains
00055 from tools.build_api import write_build_report
00056 from tools.build_api import prep_report
00057 from tools.build_api import prep_properties
00058 from tools.build_api import create_result
00059 from tools.build_api import add_result_to_report
00060 from tools.build_api import prepare_toolchain
00061 from tools.build_api import scan_resources
00062 from tools.libraries import LIBRARIES, LIBRARY_MAP
00063 from tools.toolchains import TOOLCHAIN_PATHS
00064 from tools.toolchains import TOOLCHAINS
00065 from tools.test_exporters import ReportExporter, ResultExporterType
00066 from tools.utils import argparse_filestring_type
00067 from tools.utils import argparse_uppercase_type
00068 from tools.utils import argparse_lowercase_type
00069 from tools.utils import argparse_many
00070 from tools.utils import get_path_depth
00071 
00072 import tools.host_tests.host_tests_plugins as host_tests_plugins
00073 
00074 try:
00075     import mbed_lstools
00076     from tools.compliance.ioper_runner import get_available_oper_test_scopes
00077 except:
00078     pass
00079 
00080 
00081 class ProcessObserver(Thread):
00082     def __init__(self, proc):
00083         Thread.__init__(self)
00084         self.proc = proc
00085         self.queue = Queue()
00086         self.daemon = True
00087         self.active = True
00088         self.start()
00089 
00090     def run(self):
00091         while self.active:
00092             c = self.proc.stdout.read(1)
00093             self.queue.put(c)
00094 
00095     def stop(self):
00096         self.active = False
00097         try:
00098             self.proc.terminate()
00099         except Exception, _:
00100             pass
00101 
00102 
00103 class SingleTestExecutor (threading.Thread):
00104     """ Example: running a single test suite in a separate thread
00105     """
00106     def __init__(self, single_test):
00107         self.single_test  = single_test
00108         threading.Thread.__init__(self)
00109 
00110     def run(self):
00111         start = time()
00112         # Execute tests depending on options and filter applied
00113         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = self.single_test .execute()
00114         elapsed_time = time() - start
00115 
00116         # Human readable summary
00117         if not self.single_test .opts_suppress_summary:
00118             # prints well-formed summary with results (SQL table like)
00119             print self.single_test .generate_test_summary(test_summary, shuffle_seed)
00120         if self.single_test .opts_test_x_toolchain_summary:
00121             # prints well-formed summary with results (SQL table like)
00122             # table shows test x toolchain test result matrix
00123             print self.single_test .generate_test_summary_by_target(test_summary, shuffle_seed)
00124         print "Completed in %.2f sec"% (elapsed_time)
00125 
00126 
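# A minimal usage sketch (illustrative, not part of the original file), assuming
# a SingleTestRunner already configured with MUTs and a test specification:
#
#   single_test = SingleTestRunner(_muts=muts, _test_spec=test_spec)
#   executor = SingleTestExecutor(single_test)
#   executor.start()
#   executor.join()   # run() prints the summary and total elapsed time
#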
00127 class SingleTestRunner (object):
00128     """ Object wrapper for single test run which may involve multiple MUTs
00129     """
00130     RE_DETECT_TESTCASE_RESULT = None
00131 
00132     # Return codes for test script
00133     TEST_RESULT_OK = "OK"
00134     TEST_RESULT_FAIL = "FAIL"
00135     TEST_RESULT_ERROR = "ERROR"
00136     TEST_RESULT_UNDEF = "UNDEF"
00137     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
00138     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
00139     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
00140     TEST_RESULT_TIMEOUT = "TIMEOUT"
00141     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
00142     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
00143     TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
00144     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
00145 
00146     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
00147     TEST_LOOPS_LIST = []    # We redefine no. of loops per test_id
00148     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
00149 
00150     muts = {} # MUTs descriptor (from external file)
00151     test_spec = {} # Test specification (from external file)
00152 
00153     # mbed test suite -> SingleTestRunner
00154     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
00155                            "failure" : TEST_RESULT_FAIL,
00156                            "error" : TEST_RESULT_ERROR,
00157                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
00158                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
00159                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
00160                            "timeout" : TEST_RESULT_TIMEOUT,
00161                            "no_image" : TEST_RESULT_NO_IMAGE,
00162                            "end" : TEST_RESULT_UNDEF,
00163                            "mbed_assert" : TEST_RESULT_MBED_ASSERT,
00164                            "build_failed" : TEST_RESULT_BUILD_FAILED,
00165                            "not_supproted" : TEST_RESULT_NOT_SUPPORTED
00166     }
00167 
00168     def __init__ (self,
00169                  _global_loops_count=1,
00170                  _test_loops_list=None,
00171                  _muts={},
00172                  _clean=False,
00173                  _opts_db_url=None,
00174                  _opts_log_file_name=None,
00175                  _opts_report_html_file_name=None,
00176                  _opts_report_junit_file_name=None,
00177                  _opts_report_build_file_name=None,
00178                  _opts_report_text_file_name=None,
00179                  _opts_build_report={},
00180                  _opts_build_properties={},
00181                  _test_spec={},
00182                  _opts_goanna_for_mbed_sdk=None,
00183                  _opts_goanna_for_tests=None,
00184                  _opts_shuffle_test_order=False,
00185                  _opts_shuffle_test_seed=None,
00186                  _opts_test_by_names=None,
00187                  _opts_peripheral_by_names=None,
00188                  _opts_test_only_peripheral=False,
00189                  _opts_test_only_common=False,
00190                  _opts_verbose_skipped_tests=False,
00191                  _opts_verbose_test_result_only=False,
00192                  _opts_verbose=False,
00193                  _opts_firmware_global_name=None,
00194                  _opts_only_build_tests=False,
00195                  _opts_parallel_test_exec=False,
00196                  _opts_suppress_summary=False,
00197                  _opts_test_x_toolchain_summary=False,
00198                  _opts_copy_method=None,
00199                  _opts_mut_reset_type=None,
00200                  _opts_jobs=None,
00201                  _opts_waterfall_test=None,
00202                  _opts_consolidate_waterfall_test=None,
00203                  _opts_extend_test_timeout=None,
00204                  _opts_auto_detect=None,
00205                  _opts_include_non_automated=False):
00206         """ Let's try hard to init this object
00207         """
00208         from colorama import init
00209         init()
00210 
00211         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING .keys()) + ")\\}"
00212         self.RE_DETECT_TESTCASE_RESULT  = re.compile(PATTERN)
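        # For reference, the PATTERN built above expands to something like
        #   \{(success|failure|error|...|timeout|end)\}
        # so a token such as "{success}" printed by the host test is matched by
        # RE_DETECT_TESTCASE_RESULT and later mapped to TEST_RESULT_OK.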
00213         # Settings related to test loops counters
00214         try:
00215             _global_loops_count = int(_global_loops_count)
00216         except:
00217             _global_loops_count = 1
00218         if _global_loops_count < 1:
00219             _global_loops_count = 1
00220         self.GLOBAL_LOOPS_COUNT  = _global_loops_count
00221         self.TEST_LOOPS_LIST  = _test_loops_list if _test_loops_list else []
00222         self.TEST_LOOPS_DICT  = self.test_loop_list_to_dict (_test_loops_list)
00223 
00224         self.shuffle_random_seed  = 0.0
00225         self.SHUFFLE_SEED_ROUND  = 10
00226 
00227         # MUT list and test specification storage
00228         self.muts  = _muts
00229         self.test_spec  = _test_spec
00230 
00231         # Settings passed e.g. from command line
00232         self.opts_db_url  = _opts_db_url
00233         self.opts_log_file_name  = _opts_log_file_name
00234         self.opts_report_html_file_name  = _opts_report_html_file_name
00235         self.opts_report_junit_file_name  = _opts_report_junit_file_name
00236         self.opts_report_build_file_name  = _opts_report_build_file_name
00237         self.opts_report_text_file_name  = _opts_report_text_file_name
00238         self.opts_goanna_for_mbed_sdk  = _opts_goanna_for_mbed_sdk
00239         self.opts_goanna_for_tests  = _opts_goanna_for_tests
00240         self.opts_shuffle_test_order  = _opts_shuffle_test_order
00241         self.opts_shuffle_test_seed  = _opts_shuffle_test_seed
00242         self.opts_test_by_names  = _opts_test_by_names
00243         self.opts_peripheral_by_names  = _opts_peripheral_by_names
00244         self.opts_test_only_peripheral  = _opts_test_only_peripheral
00245         self.opts_test_only_common  = _opts_test_only_common
00246         self.opts_verbose_skipped_tests  = _opts_verbose_skipped_tests
00247         self.opts_verbose_test_result_only  = _opts_verbose_test_result_only
00248         self.opts_verbose  = _opts_verbose
00249         self.opts_firmware_global_name  = _opts_firmware_global_name
00250         self.opts_only_build_tests  = _opts_only_build_tests
00251         self.opts_parallel_test_exec  = _opts_parallel_test_exec
00252         self.opts_suppress_summary  = _opts_suppress_summary
00253         self.opts_test_x_toolchain_summary  = _opts_test_x_toolchain_summary
00254         self.opts_copy_method  = _opts_copy_method
00255         self.opts_mut_reset_type  = _opts_mut_reset_type
00256         self.opts_jobs  = _opts_jobs if _opts_jobs is not None else 1
00257         self.opts_waterfall_test  = _opts_waterfall_test
00258         self.opts_consolidate_waterfall_test  = _opts_consolidate_waterfall_test
00259         self.opts_extend_test_timeout  = _opts_extend_test_timeout
00260         self.opts_clean  = _clean
00261         self.opts_auto_detect  = _opts_auto_detect
00262         self.opts_include_non_automated  = _opts_include_non_automated
00263 
00264         self.build_report  = _opts_build_report
00265         self.build_properties  = _opts_build_properties
00266 
00267         # File / screen logger initialization
00268         self.logger  = CLITestLogger(file_name=self.opts_log_file_name )  # Default test logger
00269 
00270         # Database related initializations
00271         self.db_logger  = factory_db_logger(self.opts_db_url )
00272         self.db_logger_build_id  = None # Build ID (database index of build_id table)
00273         # Let's connect to database to set up credentials and confirm database is ready
00274         if self.db_logger :
00275             self.db_logger .connect_url(self.opts_db_url ) # Save db access info inside db_logger object
00276             if self.db_logger .is_connected():
00277                 # Get hostname and uname so we can use it as build description
00278                 # when creating new build_id in external database
00279                 (_hostname, _uname) = self.db_logger .get_hostname()
00280                 _host_location = os.path.dirname(os.path.abspath(__file__))
00281                 build_id_type = None if self.opts_only_build_tests  is None else self.db_logger .BUILD_ID_TYPE_BUILD_ONLY
00282                 self.db_logger_build_id  = self.db_logger .get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
00283                 self.db_logger .disconnect()
00284 
00285     def dump_options (self):
00286         """ Function returns a data structure with common settings passed to SingleTestRunner.
00287             It can be used, for example, to fill the _extra fields in a database storing test suite single run data.
00288             Example:
00289             data = self.dump_options()
00290             or
00291             data_str = json.dumps(self.dump_options())
00292         """
00293         result = {"db_url" : str(self.opts_db_url ),
00294                   "log_file_name" :  str(self.opts_log_file_name ),
00295                   "shuffle_test_order" : str(self.opts_shuffle_test_order ),
00296                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed ),
00297                   "test_by_names" :  str(self.opts_test_by_names ),
00298                   "peripheral_by_names" : str(self.opts_peripheral_by_names ),
00299                   "test_only_peripheral" :  str(self.opts_test_only_peripheral ),
00300                   "test_only_common" :  str(self.opts_test_only_common ),
00301                   "verbose" :  str(self.opts_verbose ),
00302                   "firmware_global_name" :  str(self.opts_firmware_global_name ),
00303                   "only_build_tests" :  str(self.opts_only_build_tests ),
00304                   "copy_method" :  str(self.opts_copy_method ),
00305                   "mut_reset_type" :  str(self.opts_mut_reset_type ),
00306                   "jobs" :  str(self.opts_jobs ),
00307                   "extend_test_timeout" :  str(self.opts_extend_test_timeout ),
00308                   "_dummy" : ''
00309         }
00310         return result
00311 
00312     def shuffle_random_func(self):
00313         return self.shuffle_random_seed 
00314 
00315     def is_shuffle_seed_float (self):
00316         """ Return True if the shuffle seed can be converted to float
00317         """
00318         result = True
00319         try:
00320             float(self.shuffle_random_seed )
00321         except ValueError:
00322             result = False
00323         return result
00324 
00325     # This will store target / toolchain specific properties
00326     test_suite_properties_ext = {}  # target : toolchain
00327     # Here we store test results
00328     test_summary = []
00329     # Here we store test results in extended data structure
00330     test_summary_ext = {}
00331     execute_thread_slice_lock = Lock()
00332 
00333     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
00334         for toolchain in toolchains:
00335             tt_id = "%s::%s" % (toolchain, target)
00336 
00337             T = TARGET_MAP[target]
00338 
00339             # print target, toolchain
00340             # Test suite properties returned to external tools like CI
00341             test_suite_properties = {
00342                 'jobs': self.opts_jobs ,
00343                 'clean': clean,
00344                 'target': target,
00345                 'vendor': T.extra_labels[0],
00346                 'test_ids': ', '.join(test_ids),
00347                 'toolchain': toolchain,
00348                 'shuffle_random_seed': self.shuffle_random_seed 
00349             }
00350 
00351 
00352             # print '=== %s::%s ===' % (target, toolchain)
00353             # Let's build our test
00354             if target not in TARGET_MAP:
00355                 print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
00356                 continue
00357 
00358             build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk  else None
00359             clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk  or clean or self.opts_clean  else None
00360 
00361 
00362             try:
00363                 build_mbed_libs_result = build_mbed_libs(T,
00364                                                          toolchain,
00365                                                          options=build_mbed_libs_options,
00366                                                          clean=clean_mbed_libs_options,
00367                                                          verbose=self.opts_verbose ,
00368                                                          jobs=self.opts_jobs ,
00369                                                          report=build_report,
00370                                                          properties=build_properties)
00371 
00372                 if not build_mbed_libs_result:
00373                     print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
00374                     continue
00375 
00376             except ToolException:
00377                 print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
00378                 continue
00379 
00380             build_dir = join(BUILD_DIR, "test", target, toolchain)
00381 
00382             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
00383             test_suite_properties['build_dir'] = build_dir
00384             test_suite_properties['skipped'] = []
00385 
00386             # Enumerate through all tests and shuffle test order if requested
00387             test_map_keys = sorted(TEST_MAP.keys())
00388 
00389             if self.opts_shuffle_test_order :
00390                 random.shuffle(test_map_keys, self.shuffle_random_func )
00391                 # Update database with shuffle seed if applicable
00392                 if self.db_logger :
00393                     self.db_logger .reconnect();
00394                     if self.db_logger .is_connected():
00395                         self.db_logger .update_build_id_info(self.db_logger_build_id , _shuffle_seed=self.shuffle_random_func ())
00396                         self.db_logger .disconnect();
00397 
00398             if self.db_logger :
00399                 self.db_logger .reconnect();
00400                 if self.db_logger .is_connected():
00401                     # Update MUTs and Test Specification in database
00402                     self.db_logger .update_build_id_info(self.db_logger_build_id , _muts=self.muts , _test_spec=self.test_spec )
00403                     # Update Extra information in database (some options passed to test suite)
00404                     self.db_logger .update_build_id_info(self.db_logger_build_id , _extra=json.dumps(self.dump_options ()))
00405                     self.db_logger .disconnect();
00406 
00407             valid_test_map_keys = self.get_valid_tests (test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated )
00408             skipped_test_map_keys = self.get_skipped_tests (test_map_keys, valid_test_map_keys)
00409 
00410             for skipped_test_id in skipped_test_map_keys:
00411                 test_suite_properties['skipped'].append(skipped_test_id)
00412 
00413 
00414             # First pass through all tests and determine which libraries need to be built
00415             libraries = []
00416             for test_id in valid_test_map_keys:
00417                 test = TEST_MAP[test_id]
00418 
00419                 # Detect which lib should be added to test
00420                 # Some libs have to be compiled, like RTOS or ETH
00421                 for lib in LIBRARIES:
00422                     if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
00423                         libraries.append(lib['id'])
00424 
00425 
00426             build_project_options = ["analyze"] if self.opts_goanna_for_tests  else None
00427             clean_project_options = True if self.opts_goanna_for_tests  or clean or self.opts_clean  else None
00428 
00429             # Build all required libraries
00430             for lib_id in libraries:
00431                 try:
00432                     build_lib(lib_id,
00433                               T,
00434                               toolchain,
00435                               options=build_project_options,
00436                               verbose=self.opts_verbose ,
00437                               clean=clean_mbed_libs_options,
00438                               jobs=self.opts_jobs ,
00439                               report=build_report,
00440                               properties=build_properties)
00441 
00442                 except ToolException:
00443                     print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building library %s'% (lib_id))
00444                     continue
00445 
00446 
00447             for test_id in valid_test_map_keys:
00448                 test = TEST_MAP[test_id]
00449 
00450                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
00451 
00452                 # TODO: move these two loops below to a separate function
00453                 INC_DIRS = []
00454                 for lib_id in libraries:
00455                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
00456                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
00457 
00458                 MACROS = []
00459                 for lib_id in libraries:
00460                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
00461                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
00462                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
00463                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
00464                 test_uuid = uuid.uuid4()
00465                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
00466 
00467                 # Prepare extended test results data structure (it can be used to generate detailed test report)
00468                 if target not in self.test_summary_ext :
00469                     self.test_summary_ext [target] = {}  # test_summary_ext[target] -> dict keyed by toolchain
00470                 if toolchain not in self.test_summary_ext [target]:
00471                     self.test_summary_ext [target][toolchain] = {}    # test_summary_ext[target][toolchain] -> dict keyed by test_id
00472 
00473                 tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)    # For logging only
00474 
00475                 project_name = self.opts_firmware_global_name  if self.opts_firmware_global_name  else None
00476                 try:
00477                     path = build_project(test.source_dir,
00478                                      join(build_dir, test_id),
00479                                      T,
00480                                      toolchain,
00481                                      test.dependencies,
00482                                      options=build_project_options,
00483                                      clean=clean_project_options,
00484                                      verbose=self.opts_verbose ,
00485                                      name=project_name,
00486                                      macros=MACROS,
00487                                      inc_dirs=INC_DIRS,
00488                                      jobs=self.opts_jobs ,
00489                                      report=build_report,
00490                                      properties=build_properties,
00491                                      project_id=test_id,
00492                                      project_description=test.get_description())
00493 
00494                 except Exception, e:
00495                     project_name_str = project_name if project_name is not None else test_id
00496 
00497 
00498                     test_result = self.TEST_RESULT_FAIL 
00499 
00500                     if isinstance(e, ToolException):
00501                         print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
00502                         test_result = self.TEST_RESULT_BUILD_FAILED 
00503                     elif isinstance(e, NotSupportedException):
00504                         print self.logger .log_line(self.logger .LogType.INFO, 'The project %s is not supported'% (project_name_str))
00505                         test_result = self.TEST_RESULT_NOT_SUPPORTED 
00506 
00507 
00508                     # Append test results to global test summary
00509                     self.test_summary .append(
00510                         (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
00511                     )
00512 
00513                     # Add detailed test result to test summary structure
00514                     if test_id not in self.test_summary_ext [target][toolchain]:
00515                         self.test_summary_ext [target][toolchain][test_id] = []
00516 
00517                     self.test_summary_ext [target][toolchain][test_id].append({ 0: {
00518                         'result' : test_result,
00519                         'output' : '',
00520                         'target_name' : target,
00521                         'target_name_unique': target,
00522                         'toolchain_name' : toolchain,
00523                         'id' : test_id,
00524                         'description' : test.get_description(),
00525                         'elapsed_time' : 0,
00526                         'duration' : 0,
00527                         'copy_method' : None
00528                     }})
00529                     continue
00530 
00531                 if self.opts_only_build_tests :
00532                     # With this option we are skipping testing phase
00533                     continue
00534 
00535                 # Test duration can be increased by global value
00536                 test_duration = test.duration
00537                 if self.opts_extend_test_timeout  is not None:
00538                     test_duration += self.opts_extend_test_timeout 
00539 
00540                 # For an automated test the duration acts as a timeout after
00541                 # which the test gets interrupted
00542                 test_spec = self.shape_test_request (target, path, test_id, test_duration)
00543                 test_loops = self.get_test_loop_count (test_id)
00544 
00545                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
00546                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
00547                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
00548 
00549                 # read MUTs, test specification and perform tests
00550                 handle_results = self.handle (test_spec, target, toolchain, test_loops=test_loops)
00551 
00552                 if handle_results is None:
00553                     continue
00554 
00555                 for handle_result in handle_results:
00556                     if handle_result:
00557                         single_test_result, detailed_test_results = handle_result
00558                     else:
00559                         continue
00560 
00561                     # Append test results to global test summary
00562                     if single_test_result is not None:
00563                         self.test_summary .append(single_test_result)
00564 
00565                     # Add detailed test result to test summary structure
00566                     if target not in self.test_summary_ext [target][toolchain]:
00567                         if test_id not in self.test_summary_ext [target][toolchain]:
00568                             self.test_summary_ext [target][toolchain][test_id] = []
00569 
00570                         append_test_result = detailed_test_results
00571 
00572                         # If waterfall and consolidate-waterfall options are enabled,
00573                         # only include the last test result in the report.
00574                         if self.opts_waterfall_test  and self.opts_consolidate_waterfall_test :
00575                             append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
00576 
00577                         self.test_summary_ext [target][toolchain][test_id].append(append_test_result)
00578 
00579             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
00580             self.test_suite_properties_ext [target][toolchain] = test_suite_properties
00581 
00582         q.put(target + '_'.join(toolchains))
00583         return
00584 
00585     def execute(self):
00586         clean = self.test_spec .get('clean', False)
00587         test_ids = self.test_spec .get('test_ids', [])
00588         q = Queue()
00589 
00590         # Generate seed for shuffle if seed is not provided on the command line
00591         self.shuffle_random_seed  = round(random.random(), self.SHUFFLE_SEED_ROUND )
00592         if self.opts_shuffle_test_seed  is not None and self.is_shuffle_seed_float ():
00593             self.shuffle_random_seed  = round(float(self.opts_shuffle_test_seed ), self.SHUFFLE_SEED_ROUND )
00594 
00595 
00596         if self.opts_parallel_test_exec :
00597             ###################################################################
00598             # Experimental, parallel test execution per singletest instance.
00599             ###################################################################
00600             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
00601             # Note: We are building here in parallel for each target separately!
00602             # So we are not building the same thing multiple times and compilers
00603             # in separate threads do not collide.
00604             # Inside the execute_thread_slice() function, handle() will be called to
00605             # get information about available MUTs (per target).
00606             for target, toolchains in self.test_spec ['targets'].iteritems():
00607                 self.test_suite_properties_ext [target] = {}
00608                 t = threading.Thread(target=self.execute_thread_slice , args = (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties ))
00609                 t.daemon = True
00610                 t.start()
00611                 execute_threads.append(t)
00612 
00613             for t in execute_threads:
00614                 q.get() # t.join() would block because threads may finish in any order; wait on the queue instead
00615         else:
00616             # Serialized (not parallel) test execution
00617             for target, toolchains in self.test_spec ['targets'].iteritems():
00618                 if target not in self.test_suite_properties_ext :
00619                     self.test_suite_properties_ext [target] = {}
00620 
00621                 self.execute_thread_slice (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties )
00622                 q.get()
00623 
00624         if self.db_logger :
00625             self.db_logger .reconnect();
00626             if self.db_logger .is_connected():
00627                 self.db_logger .update_build_id_info(self.db_logger_build_id , _status_fk=self.db_logger .BUILD_ID_STATUS_COMPLETED)
00628                 self.db_logger .disconnect();
00629 
00630         return self.test_summary , self.shuffle_random_seed , self.test_summary_ext , self.test_suite_properties_ext , self.build_report , self.build_properties 
00631 
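    # Note: execute() returns a six-element tuple; a caller would typically
    # unpack it along these lines (illustrative sketch, 'runner' is hypothetical):
    #
    #   (test_summary, shuffle_seed, test_summary_ext,
    #    test_suite_properties_ext, build_report, build_properties) = runner.execute()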
00632     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
00633         valid_test_map_keys = []
00634 
00635         for test_id in test_map_keys:
00636             test = TEST_MAP[test_id]
00637             if self.opts_test_by_names  and test_id not in self.opts_test_by_names :
00638                 continue
00639 
00640             if test_ids and test_id not in test_ids:
00641                 continue
00642 
00643             if self.opts_test_only_peripheral  and not test.peripherals:
00644                 if self.opts_verbose_skipped_tests :
00645                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00646                 continue
00647 
00648             if self.opts_peripheral_by_names  and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names ]):
00649                 # We will skip tests not forced with -p option
00650                 if self.opts_verbose_skipped_tests :
00651                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00652                 continue
00653 
00654             if self.opts_test_only_common  and test.peripherals:
00655                 if self.opts_verbose_skipped_tests :
00656                     print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral test skipped for target %s'% (target))
00657                 continue
00658 
00659             if not include_non_automated and not test.automated:
00660                 if self.opts_verbose_skipped_tests :
00661                     print self.logger .log_line(self.logger .LogType.INFO, 'Non automated test skipped for target %s'% (target))
00662                 continue
00663 
00664             if test.is_supported(target, toolchain):
00665                 if test.peripherals is None and self.opts_only_build_tests :
00666                     # When the user passes the 'build only' flag and the test does not have
00667                     # specified peripherals, we allow building the test by default
00668                     pass
00669                 elif self.opts_peripheral_by_names  and test_id not in self.opts_peripheral_by_names :
00670                     # If we force peripheral with option -p we expect test
00671                     # to pass even if peripheral is not in MUTs file.
00672                     pass
00673                 elif not self.is_peripherals_available (target, test.peripherals):
00674                     if self.opts_verbose_skipped_tests :
00675                         if test.peripherals:
00676                             print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
00677                         else:
00678                             print self.logger .log_line(self.logger .LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
00679                     continue
00680 
00681                 # The test has made it through all the filters, so add it to the valid tests list
00682                 valid_test_map_keys.append(test_id)
00683 
00684         return valid_test_map_keys
00685 
00686     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
00687         # NOTE: This will not preserve order
00688         return list(set(all_test_map_keys) - set(valid_test_map_keys))
00689 
00690     def generate_test_summary_by_target (self, test_summary, shuffle_seed=None):
00691         """ Prints well-formed summary with results (SQL table like)
00692             table shows test x toolchain test result matrix
00693         """
00694         RESULT_INDEX = 0
00695         TARGET_INDEX = 1
00696         TOOLCHAIN_INDEX = 2
00697         TEST_INDEX = 3
00698         DESC_INDEX = 4
00699 
00700         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
00701         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
00702         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
00703         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
00704 
00705         result = "Test summary:\n"
00706         for target in unique_targets:
00707             result_dict = {} # test : { toolchain : result }
00708             unique_target_toolchains = []
00709             for test in test_summary:
00710                 if test[TARGET_INDEX] == target:
00711                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
00712                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
00713                     if test[TEST_INDEX] not in result_dict:
00714                         result_dict[test[TEST_INDEX]] = {}
00715                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
00716 
00717             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
00718             pt = PrettyTable(pt_cols)
00719             for col in pt_cols:
00720                 pt.align[col] = "l"
00721             pt.padding_width = 1 # One space between column edges and contents (default)
00722 
00723             for test in unique_tests:
00724                 if test in result_dict:
00725                     test_results = result_dict[test]
00726                     if test in unique_test_desc:
00727                         row = [target, test, unique_test_desc[test]]
00728                         for toolchain in unique_toolchains:
00729                             if toolchain in test_results:
00730                                 row.append(test_results[toolchain])
00731                         pt.add_row(row)
00732             result += pt.get_string()
00733             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND ,
00734                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00735             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00736         return result
00737 
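    # Layout of the per-target table produced above (toolchain names are
    # illustrative): the fixed columns plus one column per toolchain, e.g.
    #
    #   | Target | Test ID | Test Description | ARM | GCC_ARM |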
00738     def generate_test_summary (self, test_summary, shuffle_seed=None):
00739         """ Prints well-formed summary with results (SQL table like)
00740             table lists one row per test run (result, target, toolchain, test ID, description, timing)
00741         """
00742         success_code = 0    # Success code that can be later returned to the caller
00743         result = "Test summary:\n"
00744         # Pretty table package is used to print results
00745         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
00746                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
00747         pt.align["Result"] = "l" # Left align
00748         pt.align["Target"] = "l" # Left align
00749         pt.align["Toolchain"] = "l" # Left align
00750         pt.align["Test ID"] = "l" # Left align
00751         pt.align["Test Description"] = "l" # Left align
00752         pt.padding_width = 1 # One space between column edges and contents (default)
00753 
00754         result_dict = {self.TEST_RESULT_OK  : 0,
00755                        self.TEST_RESULT_FAIL  : 0,
00756                        self.TEST_RESULT_ERROR  : 0,
00757                        self.TEST_RESULT_UNDEF  : 0,
00758                        self.TEST_RESULT_IOERR_COPY  : 0,
00759                        self.TEST_RESULT_IOERR_DISK  : 0,
00760                        self.TEST_RESULT_IOERR_SERIAL  : 0,
00761                        self.TEST_RESULT_NO_IMAGE  : 0,
00762                        self.TEST_RESULT_TIMEOUT  : 0,
00763                        self.TEST_RESULT_MBED_ASSERT  : 0,
00764                        self.TEST_RESULT_BUILD_FAILED  : 0,
00765                        self.TEST_RESULT_NOT_SUPPORTED  : 0
00766         }
00767 
00768         for test in test_summary:
00769             if test[0] in result_dict:
00770                 result_dict[test[0]] += 1
00771             pt.add_row(test)
00772         result += pt.get_string()
00773         result += "\n"
00774 
00775         # Print result count
00776         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
00777         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND ,
00778                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00779         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00780         return result
00781 
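    # The non-zero counters are joined into a single line, for example
    # (illustrative counts): "Result: 44 OK / 3 FAIL / 1 TIMEOUT"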
00782     def test_loop_list_to_dict (self, test_loops_str):
00783         """ Transforms a list of 'test_id=loop_count' entries into a dictionary {test_id : loop_count}
00784         """
00785         result = {}
00786         if test_loops_str:
00787             test_loops = test_loops_str
00788             for test_loop in test_loops:
00789                 test_loop_count = test_loop.split('=')
00790                 if len(test_loop_count) == 2:
00791                     _test_id, _test_loops = test_loop_count
00792                     try:
00793                         _test_loops = int(_test_loops)
00794                     except:
00795                         continue
00796                     result[_test_id] = _test_loops
00797         return result
00798 
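    # Illustrative example of the transformation above (values are made up);
    # entries that do not parse as '<test_id>=<int>' are silently skipped:
    #
    #   >>> runner.test_loop_list_to_dict(["MBED_1=5", "MBED_2=3", "broken"])
    #   {'MBED_1': 5, 'MBED_2': 3}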
00799     def get_test_loop_count (self, test_id):
00800         """ This function returns the number of loops per test (deduced by test_id).
00801             If the test is not in the list of redefined loop counts, the default value is used.
00802         """
00803         result = self.GLOBAL_LOOPS_COUNT 
00804         if test_id in self.TEST_LOOPS_DICT :
00805             result = self.TEST_LOOPS_DICT [test_id]
00806         return result
00807 
00808     def delete_file (self, file_path):
00809         """ Remove file from the system
00810         """
00811         result = True
00812         result_msg = ""
00813         try:
00814             os.remove(file_path)
00815         except Exception, e:
00816             result_msg = e
00817             result = False
00818         return result, result_msg
00819 
00820     def handle_mut (self, mut, data, target_name, toolchain_name, test_loops=1):
00821         """ Test is being invoked for given MUT.
00822         """
00823         # Get test information, image and test timeout
00824         test_id = data['test_id']
00825         test = TEST_MAP[test_id]
00826         test_description = TEST_MAP[test_id].get_description()
00827         image = data["image"]
00828         duration = data.get("duration", 10)
00829 
00830         if mut is None:
00831             print "Error: No Mbed available: MUT[%s]" % data['mcu']
00832             return None
00833 
00834         mcu = mut['mcu']
00835         copy_method = mut.get('copy_method')        # Available board configuration selection e.g. core selection etc.
00836 
00837         if self.db_logger :
00838             self.db_logger .reconnect()
00839 
00840         selected_copy_method = self.opts_copy_method  if copy_method is None else copy_method
00841 
00842         # Tests can be looped so test results must be stored for the same test
00843         test_all_result = []
00844         # Test results for one test ran few times
00845         detailed_test_results = {}  # { Loop_number: { results ... } }
00846 
00847         for test_index in range(test_loops):
00848 
00849             # If mbedls is available and we are auto detecting MUT info,
00850             # update MUT info (mounting may have changed)
00851             if get_module_avail('mbed_lstools') and self.opts_auto_detect :
00852                 platform_name_filter = [mcu]
00853                 muts_list = {}
00854                 found = False
00855 
00856                 for i in range(0, 60):
00857                     print('Looking for %s with MBEDLS' % mcu)
00858                     muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
00859 
00860                     if 1 not in muts_list:
00861                         sleep(3)
00862                     else:
00863                         found = True
00864                         break
00865 
00866                 if not found:
00867                     print "Error: mbed not found with MBEDLS: %s" % data['mcu']
00868                     return None
00869                 else:
00870                     mut = muts_list[1]
00871 
00872             disk = mut.get('disk')
00873             port = mut.get('port')
00874 
00875             if disk is None or port is None:
00876                 return None
00877 
00878             target_by_mcu = TARGET_MAP[mut['mcu']]
00879             target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
00880             # Some extra stuff can be declared in MUTs structure
00881             reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
00882             reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
00883 
00884             # When the build and test system were separate, this was relative to a
00885             # base network folder base path: join(NETWORK_BASE_PATH, )
00886             image_path = image
00887 
00888             # Host test execution
00889             start_host_exec_time = time()
00890 
00891             single_test_result = self.TEST_RESULT_UNDEF  # single test run result
00892             _copy_method = selected_copy_method
00893 
00894             if not exists(image_path):
00895                 single_test_result = self.TEST_RESULT_NO_IMAGE 
00896                 elapsed_time = 0
00897                 single_test_output = self.logger .log_line(self.logger .LogType.ERROR, 'Image file does not exist: %s'% image_path)
00898                 print single_test_output
00899             else:
00900                 # Host test execution
00901                 start_host_exec_time = time()
00902 
00903                 host_test_verbose = self.opts_verbose_test_result_only  or self.opts_verbose 
00904                 host_test_reset = self.opts_mut_reset_type  if reset_type is None else reset_type
00905                 host_test_result = self.run_host_test (test.host_test,
00906                                                       image_path, disk, port, duration,
00907                                                       micro=target_name,
00908                                                       verbose=host_test_verbose,
00909                                                       reset=host_test_reset,
00910                                                       reset_tout=reset_tout,
00911                                                       copy_method=selected_copy_method,
00912                                                       program_cycle_s=target_by_mcu.program_cycle_s)
00913                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
00914 
00915             # Store test result
00916             test_all_result.append(single_test_result)
00917             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
00918             elapsed_time = single_testduration  # Time of single test case execution after reset
00919 
00920             detailed_test_results[test_index] = {
00921                 'result' : single_test_result,
00922                 'output' : single_test_output,
00923                 'target_name' : target_name,
00924                 'target_name_unique' : target_name_unique,
00925                 'toolchain_name' : toolchain_name,
00926                 'id' : test_id,
00927                 'description' : test_description,
00928                 'elapsed_time' : round(elapsed_time, 2),
00929                 'duration' : single_timeout,
00930                 'copy_method' : _copy_method,
00931             }
00932 
00933             print self.print_test_result (single_test_result, target_name_unique, toolchain_name,
00934                                          test_id, test_description, elapsed_time, single_timeout)
00935 
00936             # Update database entries for ongoing test
00937             if self.db_logger  and self.db_logger .is_connected():
00938                 test_type = 'SingleTest'
00939                 self.db_logger .insert_test_entry(self.db_logger_build_id ,
00940                                                  target_name,
00941                                                  toolchain_name,
00942                                                  test_type,
00943                                                  test_id,
00944                                                  single_test_result,
00945                                                  single_test_output,
00946                                                  elapsed_time,
00947                                                  single_timeout,
00948                                                  test_index)
00949 
00950             # In waterfall mode we keep testing until we get an OK result, then stop testing
00951             if self.opts_waterfall_test  and single_test_result == self.TEST_RESULT_OK :
00952                 break
00953 
00954         if self.db_logger :
00955             self.db_logger .disconnect()
00956 
00957         return (self.shape_global_test_loop_result (test_all_result, self.opts_waterfall_test  and self.opts_consolidate_waterfall_test ),
00958                 target_name_unique,
00959                 toolchain_name,
00960                 test_id,
00961                 test_description,
00962                 round(elapsed_time, 2),
00963                 single_timeout,
00964                 self.shape_test_loop_ok_result_count (test_all_result)), detailed_test_results
00965 
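    # Shape of the value returned by handle_mut() (for reference): a tuple that
    # matches one row of the test summary table,
    #   (result, target_name_unique, toolchain_name, test_id, description,
    #    elapsed_time, timeout, "ok_count/loop_count")
    # paired with the per-loop detailed_test_results dictionary.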
00966     def handle (self, test_spec, target_name, toolchain_name, test_loops=1):
00967         """ Function determines MUT's mbed disk/port and copies binary to
00968             target.
00969         """
00970         handle_results = []
00971         data = json.loads(test_spec)
00972 
00973         # Find a suitable MUT:
00974         mut = None
00975         for id, m in self.muts .iteritems():
00976             if m['mcu'] == data['mcu']:
00977                 mut = m
00978                 handle_result = self.handle_mut (mut, data, target_name, toolchain_name, test_loops=test_loops)
00979                 handle_results.append(handle_result)
00980 
00981         return handle_results
00982 
00983     def print_test_result (self, test_result, target_name, toolchain_name,
00984                           test_id, test_description, elapsed_time, duration):
00985         """ Use specific convention to print test result and related data
00986         """
00987         tokens = []
00988         tokens.append("TargetTest")
00989         tokens.append(target_name)
00990         tokens.append(toolchain_name)
00991         tokens.append(test_id)
00992         tokens.append(test_description)
00993         separator = "::"
00994         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
00995         result = separator.join(tokens) + " [" + test_result +"]" + time_info
00996         return Fore.MAGENTA + result + Fore.RESET
00997 
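    # Example of a line produced by print_test_result() (names and timings are
    # illustrative):
    #   TargetTest::K64F::ARM::MBED_A1::Basic [OK] in 2.15 of 20 sec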
00998     def shape_test_loop_ok_result_count (self, test_all_result):
00999         """ Reformats list of results to simple string
01000         """
01001         test_loop_count = len(test_all_result)
01002         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK )
01003         return "%d/%d"% (test_loop_ok_result, test_loop_count)
01004 
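    # Illustrative example: ['OK', 'FAIL', 'OK'] -> "2/3"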
01005     def shape_global_test_loop_result (self, test_all_result, waterfall_and_consolidate):
01006         """ Reformats list of results to simple string
01007         """
01008         result = self.TEST_RESULT_FAIL 
01009 
01010         if all(test_all_result[0] == res for res in test_all_result):
01011             result = test_all_result[0]
01012         elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK  for res in test_all_result):
01013             result = self.TEST_RESULT_OK 
01014 
01015         return result
01016 
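    # Illustrative examples of the consolidation above:
    #   ['OK', 'OK', 'OK']                        -> 'OK'    (all results identical)
    #   ['FAIL', 'OK'], waterfall + consolidate   -> 'OK'    (any OK is enough)
    #   ['FAIL', 'TIMEOUT']                       -> 'FAIL'  (mixed results, no OK)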
01017     def run_host_test (self, name, image_path, disk, port, duration,
01018                       micro=None, reset=None, reset_tout=None,
01019                       verbose=False, copy_method=None, program_cycle_s=None):
01020         """ Function creates a new process with the host test configured for a particular test case.
01021             It also polls the process for serial port activity to catch all data
01022             printed by the test runner and the host test during test execution.
01023         """
01024 
01025         def get_char_from_queue(obs):
01026             """ Get a character from the queue in a safe way
01027             """
01028             try:
01029                 c = obs.queue.get(block=True, timeout=0.5)
01030             except Empty, _:
01031                 c = None
01032             return c
01033 
01034         def filter_queue_char(c):
01035             """ Filters out non ASCII characters from serial port
01036             """
01037             if ord(c) not in range(128):
01038                 c = ' '
01039             return c
01040 
01041         def get_test_result(output):
01042             """ Parse test 'output' data
01043             """
01044             result = self.TEST_RESULT_TIMEOUT 
01045             for line in "".join(output).splitlines():
01046                 search_result = self.RE_DETECT_TESTCASE_RESULT .search(line)
01047                 if search_result and len(search_result.groups()):
01048                     result = self.TEST_RESULT_MAPPING [search_result.groups(0)[0]]
01049                     break
01050             return result
01051 
01052         def get_auto_property_value(property_name, line):
01053             """ Scans an auto-detection line from the MUT and returns the value of parameter 'property_name'
01054                 Returns a string
01055             """
01056             result = None
01057             if re.search("HOST: Property '%s'"% property_name, line) is not None:
01058                 property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
01059                 if property is not None and len(property.groups()) == 1:
01060                     result = property.groups()[0]
01061             return result
01062 
01063         # print "{%s} port:%s disk:%s"  % (name, port, disk),
01064         cmd = ["python",
01065                '%s.py'% name,
01066                '-d', disk,
01067                '-f', '"%s"'% image_path,
01068                '-p', port,
01069                '-t', str(duration),
01070                '-C', str(program_cycle_s)]
01071 
01072         if get_module_avail('mbed_lstools') and self.opts_auto_detect :
01073             cmd += ['--auto']
01074 
01075         # Add extra parameters to host_test
01076         if copy_method is not None:
01077             cmd += ["-c", copy_method]
01078         if micro is not None:
01079             cmd += ["-m", micro]
01080         if reset is not None:
01081             cmd += ["-r", reset]
01082         if reset_tout is not None:
01083             cmd += ["-R", str(reset_tout)]
01084 
01085         if verbose:
01086             print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
01087             print "Test::Output::Start"
01088 
01089         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
01090         obs = ProcessObserver(proc)
01091         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
01092         line = ''
01093         output = []
01094         start_time = time()
01095         while (time() - start_time) < (2 * duration):
01096             c = get_char_from_queue(obs)
01097             if c:
01098                 if verbose:
01099                     sys.stdout.write(c)
01100                 c = filter_queue_char(c)
01101                 output.append(c)
01102                 # Give the mbed under test a way to communicate the end of the test
01103                 if c in ['\n', '\r']:
01104 
01105                     # Checking for auto-detection information from the test about MUT reset moment
01106                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
01107                         # We will update this marker only once to prevent multiple target resets
01108                         update_once_flag['reset_target'] = True
01109                         start_time = time()
01110 
01111                     # Checking for auto-detection information from the test about timeout
01112                     auto_timeout_val = get_auto_property_value('timeout', line)
01113                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
01114                         # We will update this marker only once to prevent multiple timeout updates
01115                         update_once_flag['timeout'] = True
01116                         duration = int(auto_timeout_val)
01117 
01118                     # Detect mbed assert:
01119                     if 'mbed assertation failed: ' in line:
01120                         output.append('{{mbed_assert}}')
01121                         break
01122 
01123                     # Check for test end
01124                     if '{end}' in line:
01125                         break
01126                     line = ''
01127                 else:
01128                     line += c
01129         end_time = time()
01130         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
01131 
01132         c = get_char_from_queue(obs)
01133 
01134         if c:
01135             if verbose:
01136                 sys.stdout.write(c)
01137             c = filter_queue_char(c)
01138             output.append(c)
01139 
01140         if verbose:
01141             print "Test::Output::Finish"
01142         # Stop test process
01143         obs.stop()
01144 
01145         result = get_test_result(output)
01146         return (result, "".join(output), testcase_duration, duration)
01147 
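    # Example (illustrative, with made-up values): for name='dev_null', disk='E:',
    # port='COM3', duration=10 and program_cycle_s=4, the spawned command is roughly:
    #
    #   python dev_null.py -d E: -f "./build/test.bin" -p COM3 -t 10 -C 4
    #
    # and it is executed with HOST_TESTS as the working directory; '--auto' and the
    # -c/-m/-r/-R switches are appended only when the corresponding options are set.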
01148     def is_peripherals_available (self, target_mcu_name, peripherals=None):
01149         """ Checks if the specified target should run a specific peripheral test case defined in the MUTs file
01150         """
01151         if peripherals is not None:
01152             peripherals = set(peripherals)
01153         for id, mut in self.muts .iteritems():
01154             # Target MCU name check
01155             if mut["mcu"] != target_mcu_name:
01156                 continue
01157             # Peripherals check
01158             if peripherals is not None:
01159                 if 'peripherals' not in mut:
01160                     continue
01161                 if not peripherals.issubset(set(mut['peripherals'])):
01162                     continue
01163             return True
01164         return False
01165 
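    # Example (illustrative): with a MUTs entry such as
    #
    #   {'1': {'mcu': 'K64F', 'port': 'COM3', 'disk': 'E:', 'peripherals': ['SD', 'RTC']}}
    #
    # is_peripherals_available('K64F', ['SD']) returns True, while
    # is_peripherals_available('K64F', ['CAN']) returns False.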
01166     def shape_test_request (self, mcu, image_path, test_id, duration=10):
01167         """ Function prepares JSON structure describing test specification
01168         """
01169         test_spec = {
01170             "mcu": mcu,
01171             "image": image_path,
01172             "duration": duration,
01173             "test_id": test_id,
01174         }
01175         return json.dumps(test_spec)
01176 
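    # Example (illustrative): the returned JSON string looks like the following
    # (key order may vary):
    #
    #   shape_test_request('K64F', './build/basic.bin', 'MBED_A1')
    #   -> '{"mcu": "K64F", "image": "./build/basic.bin", "duration": 10, "test_id": "MBED_A1"}'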
01177 
01178 def get_unique_value_from_summary (test_summary, index):
01179     """ Gets list of unique target names
01180     """
01181     result = []
01182     for test in test_summary:
01183         target_name = test[index]
01184         if target_name not in result:
01185             result.append(target_name)
01186     return sorted(result)
01187 
01188 
01189 def get_unique_value_from_summary_ext (test_summary, index_key, index_val):
01190     """ Gets unique keys from the summary and returns a dictionary mapping each key to its first value
01191     """
01192     result = {}
01193     for test in test_summary:
01194         key = test[index_key]
01195         val = test[index_val]
01196         if key not in result:
01197             result[key] = val
01198     return result
01199 
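# Example (illustrative): given summary rows shaped like [target, toolchain, result],
# mapping each target to the first result seen for it:
#
#   rows = [['K64F', 'GCC_ARM', 'OK'], ['K64F', 'ARM', 'FAIL'], ['LPC1768', 'ARM', 'OK']]
#   get_unique_value_from_summary_ext(rows, 0, 2)   -> {'K64F': 'OK', 'LPC1768': 'OK'}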
01200 
01201 def show_json_file_format_error (json_spec_filename, line, column):
01202     """ Prints the broken JSON content around the error position
01203     """
01204     with open(json_spec_filename) as data_file:
01205         line_no = 1
01206         for json_line in data_file:
01207             if line_no + 5 >= line: # Print last few lines before error
01208                 print 'Line %d:\t'%line_no + json_line, # Prints line
01209             if line_no == line:
01210                 print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
01211                 break
01212             line_no += 1
01213 
01214 
01215 def json_format_error_defect_pos (json_error_msg):
01216     """ Gets the first error line and column in a JSON file.
01217         Parsed from the message of the exception thrown by json.loads()
01218     """
01219     result = None
01220     line, column = 0, 0
01221     # Line value search
01222     line_search = re.search('line [0-9]+', json_error_msg)
01223     if line_search is not None:
01224         ls = line_search.group().split(' ')
01225         if len(ls) == 2:
01226             line = int(ls[1])
01227             # Column position search
01228             column_search = re.search('column [0-9]+', json_error_msg)
01229             if column_search is not None:
01230                 cs = column_search.group().split(' ')
01231                 if len(cs) == 2:
01232                     column = int(cs[1])
01233                     result = [line, column]
01234     return result
01235 
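# Example (illustrative): json.loads() raises ValueError messages such as
# "Expecting ',' delimiter: line 6 column 9 (char 126)", which this helper
# reduces to a [line, column] pair:
#
#   json_format_error_defect_pos("Expecting ',' delimiter: line 6 column 9 (char 126)")   -> [6, 9]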
01236 
01237 def get_json_data_from_file (json_spec_filename, verbose=False):
01238     """ Loads a JSON formatted file into a data structure
01239     """
01240     result = None
01241     try:
01242         with open(json_spec_filename) as data_file:
01243             try:
01244                 result = json.load(data_file)
01245             except ValueError as json_error_msg:
01246                 result = None
01247                 print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
01248                 # We can print where error occurred inside JSON file if we can parse exception msg
01249                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
01250                 if json_format_defect_pos is not None:
01251                     line = json_format_defect_pos[0]
01252                     column = json_format_defect_pos[1]
01253                     print
01254                     show_json_file_format_error(json_spec_filename, line, column)
01255 
01256     except IOError as fileopen_error_msg:
01257         print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
01258         print
01259     if verbose and result:
01260         pp = pprint.PrettyPrinter(indent=4)
01261         pp.pprint(result)
01262     return result
01263 
01264 
01265 def print_muts_configuration_from_json (json_data, join_delim=", ", platform_filter=None):
01266     """ Prints MUTs configuration passed to the test script (verbose output)
01267     """
01268     muts_info_cols = []
01269     # We need to check all unique properties for each defined MUT
01270     for k in json_data:
01271         mut_info = json_data[k]
01272         for mut_property in mut_info:
01273             if mut_property not in muts_info_cols:
01274                 muts_info_cols.append(mut_property)
01275 
01276     # Prepare pretty table object to display all MUTs
01277     pt_cols = ["index"] + muts_info_cols
01278     pt = PrettyTable(pt_cols)
01279     for col in pt_cols:
01280         pt.align[col] = "l"
01281 
01282     # Add rows to pretty print object
01283     for k in json_data:
01284         row = [k]
01285         mut_info = json_data[k]
01286 
01287         add_row = True
01288         if platform_filter and 'mcu' in mut_info:
01289             add_row = re.search(platform_filter, mut_info['mcu']) is not None
01290         if add_row:
01291             for col in muts_info_cols:
01292                 cell_val = mut_info[col] if col in mut_info else None
01293                 if type(cell_val) == ListType:
01294                     cell_val = join_delim.join(cell_val)
01295                 row.append(cell_val)
01296             pt.add_row(row)
01297     return pt.get_string()
01298 
01299 
01300 def print_test_configuration_from_json (json_data, join_delim=", "):
01301     """ Prints test specification configuration passed to the test script (verbose output)
01302     """
01303     toolchains_info_cols = []
01304     # We need to check all toolchains for each device
01305     for k in json_data:
01306         # k should be 'targets'
01307         targets = json_data[k]
01308         for target in targets:
01309             toolchains = targets[target]
01310             for toolchain in toolchains:
01311                 if toolchain not in toolchains_info_cols:
01312                     toolchains_info_cols.append(toolchain)
01313 
01314     # Prepare pretty table object to display test specification
01315     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
01316     pt = PrettyTable(pt_cols)
01317     for col in pt_cols:
01318         pt.align[col] = "l"
01319 
01320     # { target : [conflicted toolchains] }
01321     toolchain_conflicts = {}
01322     toolchain_path_conflicts = []
01323     for k in json_data:
01324         # k should be 'targets'
01325         targets = json_data[k]
01326         for target in targets:
01327             target_supported_toolchains = get_target_supported_toolchains(target)
01328             if not target_supported_toolchains:
01329                 target_supported_toolchains = []
01330             target_name = target if target in TARGET_MAP else "%s*"% target
01331             row = [target_name]
01332             toolchains = targets[target]
01333 
01334             for toolchain in sorted(toolchains_info_cols):
01335                 # Check for conflicts: target vs toolchain
01336                 conflict = False
01337                 conflict_path = False
01338                 if toolchain in toolchains:
01339                     if toolchain not in target_supported_toolchains:
01340                         conflict = True
01341                         if target not in toolchain_conflicts:
01342                             toolchain_conflicts[target] = []
01343                         toolchain_conflicts[target].append(toolchain)
01344                 # Add marker inside table about target usage / conflict
01345                 cell_val = 'Yes' if toolchain in toolchains else '-'
01346                 if conflict:
01347                     cell_val += '*'
01348                 # Check for conflicts: toolchain vs toolchain path
01349                 if toolchain in TOOLCHAIN_PATHS:
01350                     toolchain_path = TOOLCHAIN_PATHS[toolchain]
01351                     if not os.path.isdir(toolchain_path):
01352                         conflict_path = True
01353                         if toolchain not in toolchain_path_conflicts:
01354                             toolchain_path_conflicts.append(toolchain)
01355                 if conflict_path:
01356                     cell_val += '#'
01357                 row.append(cell_val)
01358             pt.add_row(row)
01359 
01360     # generate result string
01361     result = pt.get_string()    # Test specification table
01362     if toolchain_conflicts or toolchain_path_conflicts:
01363         result += "\n"
01364         result += "Toolchain conflicts:\n"
01365         for target in toolchain_conflicts:
01366             if target not in TARGET_MAP:
01367                 result += "\t* Target %s unknown\n"% (target)
01368             conflict_target_list = join_delim.join(toolchain_conflicts[target])
01369             sufix = 's' if len(toolchain_conflicts[target]) > 1 else ''
01370             result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, sufix)
01371 
01372         for toolchain in toolchain_path_conflicts:
01373         # Let's check toolchain configuration
01374             if toolchain in TOOLCHAIN_PATHS:
01375                 toolchain_path = TOOLCHAIN_PATHS[toolchain]
01376                 if not os.path.isdir(toolchain_path):
01377                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
01378     return result
01379 
01380 
01381 def get_avail_tests_summary_table (cols=None, result_summary=True, join_delim=',',platform_filter=None):
01382     """ Generates a summary table with all test cases and additional test case
01383         information using pretty print functionality. Allows the test suite user to
01384         review available test cases
01385     """
01386     # get all unique test ID prefixes
01387     unique_test_id = []
01388     for test in TESTS:
01389         split = test['id'].split('_')[:-1]
01390         test_id_prefix = '_'.join(split)
01391         if test_id_prefix not in unique_test_id:
01392             unique_test_id.append(test_id_prefix)
01393     unique_test_id.sort()
01394     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
01395     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
01396 
01397     test_properties = ['id',
01398                        'automated',
01399                        'description',
01400                        'peripherals',
01401                        'host_test',
01402                        'duration'] if cols is None else cols
01403 
01404     # All tests status table print
01405     pt = PrettyTable(test_properties)
01406     for col in test_properties:
01407         pt.align[col] = "l"
01408     pt.align['duration'] = "r"
01409 
01410     counter_all = 0
01411     counter_automated = 0
01412     pt.padding_width = 1 # One space between column edges and contents (default)
01413 
01414     for test_id in sorted(TEST_MAP.keys()):
01415         if platform_filter is not None:
01416             # Filter out platforms using regex
01417             if re.search(platform_filter, test_id) is None:
01418                 continue
01419         row = []
01420         test = TEST_MAP[test_id]
01421         split = test_id.split('_')[:-1]
01422         test_id_prefix = '_'.join(split)
01423 
01424         for col in test_properties:
01425             col_value = test[col]
01426             if type(test[col]) == ListType:
01427                 col_value = join_delim.join(test[col])
01428             elif test[col] is None:
01429                 col_value = "-"
01430 
01431             row.append(col_value)
01432         if test['automated'] == True:
01433             counter_dict_test_id_types[test_id_prefix] += 1
01434             counter_automated += 1
01435         pt.add_row(row)
01436         # Update counters
01437         counter_all += 1
01438         counter_dict_test_id_types_all[test_id_prefix] += 1
01439     result = pt.get_string()
01440     result += "\n\n"
01441 
01442     if result_summary and not platform_filter:
01443         # Automation result summary
01444         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
01445         pt = PrettyTable(test_id_cols)
01446         pt.align['automated'] = "r"
01447         pt.align['all'] = "r"
01448         pt.align['percent [%]'] = "r"
01449 
01450         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
01451         str_progress = progress_bar(percent_progress, 75)
01452         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
01453         result += "Automation coverage:\n"
01454         result += pt.get_string()
01455         result += "\n\n"
01456 
01457         # Test automation coverage table print
01458         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
01459         pt = PrettyTable(test_id_cols)
01460         pt.align['id'] = "l"
01461         pt.align['automated'] = "r"
01462         pt.align['all'] = "r"
01463         pt.align['percent [%]'] = "r"
01464         for unique_id in unique_test_id:
01465             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
01466             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
01467             str_progress = progress_bar(percent_progress, 75)
01468             row = [unique_id,
01469                    counter_dict_test_id_types[unique_id],
01470                    counter_dict_test_id_types_all[unique_id],
01471                    percent_progress,
01472                    "[" + str_progress + "]"]
01473             pt.add_row(row)
01474         result += "Test automation coverage:\n"
01475         result += pt.get_string()
01476         result += "\n\n"
01477     return result
01478 
01479 
01480 def progress_bar (percent_progress, saturation=0):
01481     """ This function creates a progress bar with an optional simple saturation mark
01482     """
01483     step = int(percent_progress / 2)    # Scale percentage to bar width (scale: 1 - 50)
01484     str_progress = '#' * step + '.' * int(50 - step)
01485     c = '!' if str_progress[38] == '.' else '|'
01486     if saturation > 0:
01487         saturation = saturation / 2
01488         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
01489     return str_progress
01490 
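# Example (illustrative): the bar is always 50 characters wide (2% per character);
# a non-zero 'saturation' inserts an extra marker character at position saturation/2:
#
#   progress_bar(100)   -> '##################################################'
#   progress_bar(0)     -> '..................................................'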
01491 
01492 def singletest_in_cli_mode (single_test):
01493     """ Runs SingleTestRunner object in CLI (Command line interface) mode
01494 
01495         @return returns success code (0 == success) for building and running tests
01496     """
01497     start = time()
01498     # Execute tests depending on options and filter applied
01499     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
01500     elapsed_time = time() - start
01501 
01502     # Human readable summary
01503     if not single_test.opts_suppress_summary:
01504         # prints well-formed summary with results (SQL table like)
01505         print single_test.generate_test_summary(test_summary, shuffle_seed)
01506     if single_test.opts_test_x_toolchain_summary:
01507         # prints well-formed summary with results (SQL table like)
01508         # table shows test x toolchain test result matrix
01509         print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
01510 
01511     print "Completed in %.2f sec"% (elapsed_time)
01512     print
01513     # Write summary of the builds
01514 
01515     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
01516     status = print_report_exporter.report(build_report)
01517 
01518     # Store extra reports in files
01519     if single_test.opts_report_html_file_name:
01520         # Export results in form of HTML report to separate file
01521         report_exporter = ReportExporter(ResultExporterType.HTML)
01522         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
01523     if single_test.opts_report_junit_file_name:
01524         # Export results in form of JUnit XML report to separate file
01525         report_exporter = ReportExporter(ResultExporterType.JUNIT)
01526         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
01527     if single_test.opts_report_text_file_name:
01528         # Export results in form of a text file
01529         report_exporter = ReportExporter(ResultExporterType.TEXT)
01530         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
01531     if single_test.opts_report_build_file_name:
01532         # Export build results as JUnit XML report to separate file
01533         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
01534         report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
01535 
01536     # Returns True if no build failures of the test projects or their dependencies
01537     return status
01538 
01539 class TestLogger ():
01540     """ Base class for logging and printing ongoing events during a test suite pass
01541     """
01542     def __init__ (self, store_log=True):
01543         """ We can control whether the logger actually stores the log in memory
01544             or just handles all log entries immediately
01545         """
01546         self.log  = []
01547         self.log_to_file  = False
01548         self.log_file_name  = None
01549         self.store_log  = store_log
01550 
01551         self.LogType  = construct_enum(INFO='Info',
01552                                       WARN='Warning',
01553                                       NOTIF='Notification',
01554                                       ERROR='Error',
01555                                       EXCEPT='Exception')
01556 
01557         self.LogToFileAttr  = construct_enum(CREATE=1,    # Create or overwrite existing log file
01558                                             APPEND=2)    # Append to existing log file
01559 
01560     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01561         """ Log one line of text
01562         """
01563         log_timestamp = time()
01564         log_entry = {'log_type' : LogType,
01565                      'log_timestamp' : log_timestamp,
01566                      'log_line' : log_line,
01567                      '_future' : None
01568         }
01569         # Store log in memory
01570         if self.store_log :
01571             self.log .append(log_entry)
01572         return log_entry
01573 
01574 
01575 class CLITestLogger (TestLogger ):
01576     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
01577     """
01578     def __init__(self, store_log=True, file_name=None):
01579         TestLogger.__init__(self)
01580         self.log_file_name  = file_name
01581         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
01582         self.TIMESTAMP_FORMAT  = '%H:%M:%S' # Time only
01583 
01584     def log_print (self, log_entry, timestamp=True):
01585         """ Returns a formatted log entry string for on-screen printing
01586         """
01587         ts = log_entry['log_timestamp']
01588         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT ) if timestamp else ''
01589         log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
01590         return timestamp_str + log_line_str
01591 
01592     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01593         """ Logs a line; if log file output was specified the log line will be appended
01594             to the end of the log file
01595         """
01596         log_entry = TestLogger.log_line(self, LogType, log_line)
01597         log_line_str = self.log_print (log_entry, timestamp)
01598         if self.log_file_name  is not None:
01599             try:
01600                 with open(self.log_file_name , 'a') as f:
01601                     f.write(log_line_str + line_delim)
01602             except IOError:
01603                 pass
01604         return log_line_str
01605 
01606 
01607 def factory_db_logger (db_url):
01608     """ Creates a database driver depending on the database type supplied in the connection string db_url
01609     """
01610     if db_url is not None:
01611         from tools.test_mysql import MySQLDBAccess
01612         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
01613         if connection_info is not None:
01614             (db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
01615             if db_type == 'mysql':
01616                 return MySQLDBAccess()
01617     return None
01618 
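# Example (illustrative): a MySQL connection string in the format accepted by
# parse_db_connection_string() (see also the --db command line option):
#
#   db = factory_db_logger('mysql://username:password@127.0.0.1/db_name')
#   # -> MySQLDBAccess instance; None is returned for unknown or malformed URLs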
01619 
01620 def detect_database_verbose (db_url):
01621     """ Runs the database detection sequence in verbose mode (prints) to check if the database connection string is valid
01622     """
01623     result = BaseDBAccess().parse_db_connection_string(db_url)
01624     if result is not None:
01625         # Parsing passed
01626         (db_type, username, password, host, db_name) = result
01627         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
01628         # Let's try to connect
01629         db_ = factory_db_logger(db_url)
01630         if db_ is not None:
01631             print "Connecting to database '%s'..."% db_url,
01632             db_.connect(host, username, password, db_name)
01633             if db_.is_connected():
01634                 print "ok"
01635                 print "Detecting database..."
01636                 print db_.detect_database(verbose=True)
01637                 print "Disconnecting...",
01638                 db_.disconnect()
01639                 print "done"
01640         else:
01641             print "Database type '%s' unknown"% db_type
01642     else:
01643         print "Parse error: '%s' - DB Url error"% (db_url)
01644 
01645 
01646 def get_module_avail (module_name):
01647     """ This function returns True if module_name is an already imported module
01648     """
01649     return module_name in sys.modules.keys()
01650 
01651 
01652 def get_autodetected_MUTS_list(platform_name_filter=None):
01653     oldError = None
01654     if os.name == 'nt':
01655         # Disable Windows error box temporarily
01656         oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
01657 
01658     mbeds = mbed_lstools.create()
01659     detect_muts_list = mbeds.list_mbeds()
01660 
01661     if os.name == 'nt':
01662         ctypes.windll.kernel32.SetErrorMode(oldError)
01663 
01664     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
01665 
01666 def get_autodetected_MUTS (mbeds_list, platform_name_filter=None):
01667     """ Function detects all mbed-enabled devices connected to the host and generates an artificial MUTs file.
01668         If the function fails to auto-detect devices it will return an empty dictionary.
01669 
01670         if get_module_avail('mbed_lstools'):
01671             mbeds = mbed_lstools.create()
01672             mbeds_list = mbeds.list_mbeds()
01673 
01674         @param mbeds_list list of mbeds captured from mbed_lstools
01675         @param platform_name_filter Optional list of platform names used to filter the detected devices
01676     """
01677     result = {}   # Should be in muts_all.json format
01678     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
01679     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
01680     index = 1
01681     for mut in mbeds_list:
01682         # Filter the MUTS if a filter is specified
01683 
01684         if platform_name_filter and not mut['platform_name'] in platform_name_filter:
01685             continue
01686 
01687         # For mcu_unique - we assign the 'platform_name_unique' value from mbed-ls output (if it exists);
01688         # if not, we create our own unique value (last few chars of the platform's target_id).
01689         m = {'mcu': mut['platform_name'],
01690              'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
01691              'port': mut['serial_port'],
01692              'disk': mut['mount_point'],
01693              'peripherals': []     # No peripheral detection
01694              }
01695         if index not in result:
01696             result[index] = {}
01697         result[index] = m
01698         index += 1
01699     return result
01700 
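# Example (illustrative): for the mbed-ls entry shown in the comment above, the
# generated MUTs dictionary (muts_all.json format) is roughly:
#
#   {1: {'mcu': 'NUCLEO_F302R8',
#        'mcu_unique': 'NUCLEO_F302R8[F72A]',
#        'port': u'COM34',
#        'disk': 'E:',
#        'peripherals': []}}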
01701 
01702 def get_autodetected_TEST_SPEC (mbeds_list,
01703                                use_default_toolchain=True,
01704                                use_supported_toolchains=False,
01705                                toolchain_filter=None,
01706                                platform_name_filter=None):
01707     """ Function detects all mbed-enabled devices connected to the host and generates an artificial test_spec file.
01708         If the function fails to auto-detect devices it will return an empty 'targets' test_spec description.
01709 
01710         use_default_toolchain - if True add default toolchain to test_spec
01711         use_supported_toolchains - if True add all supported toolchains to test_spec
01712         toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
01713     """
01714     result = {'targets': {} }
01715 
01716     for mut in mbeds_list:
01717         mcu = mut['mcu']
01718         if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
01719             if mcu in TARGET_MAP:
01720                 default_toolchain = TARGET_MAP[mcu].default_toolchain
01721                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
01722 
01723                 # Decide which toolchains should be added to test specification toolchain pool for each target
01724                 toolchains = []
01725                 if use_default_toolchain:
01726                     toolchains.append(default_toolchain)
01727                 if use_supported_toolchains:
01728                     toolchains += supported_toolchains
01729                 if toolchain_filter is not None:
01730                     all_toolchains = supported_toolchains + [default_toolchain]
01731                     for toolchain in toolchain_filter:
01732                         if toolchain in all_toolchains:
01733                             toolchains.append(toolchain)
01734 
01735                 result['targets'][mcu] = list(set(toolchains))
01736     return result
01737 
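# Example (illustrative, target/toolchain names assumed): with a single detected
# K64F whose default toolchain is ARM, use_default_toolchain=True and
# toolchain_filter=['GCC_ARM'], the result is shaped like:
#
#   {'targets': {'K64F': ['ARM', 'GCC_ARM']}}
#
# (the toolchain list is de-duplicated via set(), so its order is not guaranteed)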
01738 
01739 def get_default_test_options_parser ():
01740     """ Get common test script options used by CLI, web services etc.
01741     """
01742     parser = argparse.ArgumentParser()
01743     parser.add_argument('-i', '--tests',
01744                         dest='test_spec_filename',
01745                         metavar="FILE",
01746                         type=argparse_filestring_type,
01747                         help='Points to file with test specification')
01748 
01749     parser.add_argument('-M', '--MUTS',
01750                         dest='muts_spec_filename',
01751                         metavar="FILE",
01752                         type=argparse_filestring_type,
01753                         help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
01754 
01755     parser.add_argument("-j", "--jobs",
01756                         dest='jobs',
01757                         metavar="NUMBER",
01758                         type=int,
01759                         help="Define number of compilation jobs. Default value is 1")
01760 
01761     if get_module_avail('mbed_lstools'):
01762         # Additional features available when mbed_lstools is installed on host and imported
01763         # mbed_lstools allows users to detect mbed-enabled devices connected to the host
01764         parser.add_argument('--auto',
01765                             dest='auto_detect',
01766                             action="store_true",
01767                             help='Use mbed-ls module to detect all connected mbed devices')
01768 
01769         toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
01770         parser.add_argument('--tc',
01771                             dest='toolchains_filter',
01772                             type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
01773                             help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
01774 
01775         test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
01776         parser.add_argument('--oper',
01777                             dest='operability_checks',
01778                             type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
01779                             help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
01780 
01781     parser.add_argument('--clean',
01782                         dest='clean',
01783                         action="store_true",
01784                         help='Clean the build directory')
01785 
01786     parser.add_argument('-P', '--only-peripherals',
01787                         dest='test_only_peripheral',
01788                         default=False,
01789                         action="store_true",
01790                         help='Test only peripheral declared for MUT and skip common tests')
01791 
01792     parser.add_argument('-C', '--only-commons',
01793                         dest='test_only_common',
01794                         default=False,
01795                         action="store_true",
01796                         help='Test only board internals. Skip peripherals tests and perform common tests')
01797 
01798     parser.add_argument('-n', '--test-by-names',
01799                         dest='test_by_names',
01800                         type=argparse_many(str),
01801                         help='Runs only tests enumerated in this switch. Use comma to separate test case names')
01802 
01803     parser.add_argument('-p', '--peripheral-by-names',
01804                         dest='peripheral_by_names',
01805                         type=argparse_many(str),
01806                         help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
01807 
01808     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
01809     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
01810 
01811     parser.add_argument('-c', '--copy-method',
01812                         dest='copy_method',
01813                         type=argparse_uppercase_type(copy_methods, "flash method"),
01814                         help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
01815 
01816     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
01817     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
01818 
01819     parser.add_argument('-r', '--reset-type',
01820                         dest='mut_reset_type',
01821                         default=None,
01822                         type=argparse_uppercase_type(reset_methods, "reset method"),
01823                         help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
01824 
01825     parser.add_argument('-g', '--goanna-for-tests',
01826                         dest='goanna_for_tests',
01827                         action="store_true",
01828                         help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')
01829 
01830     parser.add_argument('-G', '--goanna-for-sdk',
01831                         dest='goanna_for_mbed_sdk',
01832                         action="store_true",
01833                         help='Run Goanna static analysis tool for mbed SDK (Project will be rebuilt)')
01834 
01835     parser.add_argument('-s', '--suppress-summary',
01836                         dest='suppress_summary',
01837                         default=False,
01838                         action="store_true",
01839                         help='Suppresses display of well-formatted table with test results')
01840 
01841     parser.add_argument('-t', '--test-summary',
01842                         dest='test_x_toolchain_summary',
01843                         default=False,
01844                         action="store_true",
01845                         help='Displays well-formatted table with test x toolchain test result per target')
01846 
01847     parser.add_argument('-A', '--test-automation-report',
01848                         dest='test_automation_report',
01849                         default=False,
01850                         action="store_true",
01851                         help='Prints information about all tests and exits')
01852 
01853     parser.add_argument('-R', '--test-case-report',
01854                         dest='test_case_report',
01855                         default=False,
01856                         action="store_true",
01857                         help='Prints information about all test cases and exits')
01858 
01859     parser.add_argument("-S", "--supported-toolchains",
01860                         action="store_true",
01861                         dest="supported_toolchains",
01862                         default=False,
01863                         help="Displays supported matrix of MCUs and toolchains")
01864 
01865     parser.add_argument("-O", "--only-build",
01866                         action="store_true",
01867                         dest="only_build_tests",
01868                         default=False,
01869                         help="Only build tests, skips actual test procedures (flashing etc.)")
01870 
01871     parser.add_argument('--parallel',
01872                         dest='parallel_test_exec',
01873                         default=False,
01874                         action="store_true",
01875                         help='Experimental: execute test runners in parallel for MUTs connected to your host (speeds up test result collection)')
01876 
01877     parser.add_argument('--config',
01878                         dest='verbose_test_configuration_only',
01879                         default=False,
01880                         action="store_true",
01881                         help='Displays full test specification and MUTs configuration and exits')
01882 
01883     parser.add_argument('--loops',
01884                         dest='test_loops_list',
01885                         type=argparse_many(str),
01886                         help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
01887 
01888     parser.add_argument('--global-loops',
01889                         dest='test_global_loops_value',
01890                         type=int,
01891                         help='Set global number of test loops per test. Default value is 1')
01892 
01893     parser.add_argument('--consolidate-waterfall',
01894                         dest='consolidate_waterfall_test',
01895                         default=False,
01896                         action="store_true",
01897                         help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
01898 
01899     parser.add_argument('-W', '--waterfall',
01900                         dest='waterfall_test',
01901                         default=False,
01902                         action="store_true",
01903                         help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
01904 
01905     parser.add_argument('-N', '--firmware-name',
01906                         dest='firmware_global_name',
01907                         help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')
01908 
01909     parser.add_argument('-u', '--shuffle',
01910                         dest='shuffle_test_order',
01911                         default=False,
01912                         action="store_true",
01913                         help='Shuffles test execution order')
01914 
01915     parser.add_argument('--shuffle-seed',
01916                         dest='shuffle_test_seed',
01917                         default=None,
01918                         help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
01919 
01920     parser.add_argument('-f', '--filter',
01921                         dest='general_filter_regex',
01922                         type=argparse_many(str),
01923                         default=None,
01924                         help='For some commands you can use filter to filter out results')
01925 
01926     parser.add_argument('--inc-timeout',
01927                         dest='extend_test_timeout',
01928                         metavar="NUMBER",
01929                         type=int,
01930                         help='You can increase global timeout for each test by specifying additional test timeout in seconds')
01931 
01932     parser.add_argument('--db',
01933                         dest='db_url',
01934                         help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
01935 
01936     parser.add_argument('-l', '--log',
01937                         dest='log_file_name',
01938                         help='Log events to external file (note not all console entries may be visible in log file)')
01939 
01940     parser.add_argument('--report-html',
01941                         dest='report_html_file_name',
01942                         help='You can log test suite results in form of HTML report')
01943 
01944     parser.add_argument('--report-junit',
01945                         dest='report_junit_file_name',
01946                         help='You can log test suite results in form of JUnit compliant XML report')
01947 
01948     parser.add_argument("--report-build",
01949                         dest="report_build_file_name",
01950                         help="Output the build results to a junit xml file")
01951 
01952     parser.add_argument("--report-text",
01953                         dest="report_text_file_name",
01954                         help="Output the build results to a text file")
01955 
01956     parser.add_argument('--verbose-skipped',
01957                         dest='verbose_skipped_tests',
01958                         default=False,
01959                         action="store_true",
01960                         help='Prints some extra information about skipped tests')
01961 
01962     parser.add_argument('-V', '--verbose-test-result',
01963                         dest='verbose_test_result_only',
01964                         default=False,
01965                         action="store_true",
01966                         help='Prints test serial output')
01967 
01968     parser.add_argument('-v', '--verbose',
01969                         dest='verbose',
01970                         default=False,
01971                         action="store_true",
01972                         help='Verbose mode (prints some extra information)')
01973 
01974     parser.add_argument('--version',
01975                         dest='version',
01976                         default=False,
01977                         action="store_true",
01978                         help='Prints script version and exits')
01979     return parser
01980 
01981 def test_path_to_name (path, base):
01982     """Change all slashes in a path into hyphens
01983     This creates a unique cross-platform test name based on the path
01984     This can eventually be overridden by a to-be-determined meta-data mechanism"""
01985     name_parts = []
01986     head, tail = os.path.split(relpath(path,base))
01987     while (tail and tail != "."):
01988         name_parts.insert(0, tail)
01989         head, tail = os.path.split(head)
01990 
01991     return "-".join(name_parts).lower()
01992 
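# Example (illustrative): a test case located at <base>/TESTS/network/basic gets
# the cross-platform name 'tests-network-basic':
#
#   test_path_to_name('/proj/TESTS/network/basic', '/proj')   -> 'tests-network-basic'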
01993 def find_tests (base_dir, target_name, toolchain_name, options=None, app_config=None):
01994     """ Finds all tests in a directory recursively
01995     base_dir: path to the directory to scan for tests (ex. 'path/to/project')
01996     target_name: name of the target to use for scanning (ex. 'K64F')
01997     toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
01998     options: Compile options to pass to the toolchain (ex. ['debug-info'])
01999     app_config: location of a chosen mbed_app.json file
02000     """
02001 
02002     tests = {}
02003 
02004     # Prepare the toolchain
02005     toolchain = prepare_toolchain([base_dir], target_name, toolchain_name, options=options,
02006                                   silent=True, app_config=app_config)
02007 
02008     # Scan the directory for paths to probe for 'TESTS' folders
02009     base_resources = scan_resources([base_dir], toolchain)
02010 
02011     dirs = base_resources.inc_dirs
02012     for directory in dirs:
02013         subdirs = os.listdir(directory)
02014 
02015         # If the directory contains a subdirectory called 'TESTS', scan it for test cases
02016         if 'TESTS' in subdirs:
02017             walk_base_dir = join(directory, 'TESTS')
02018             test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir)
02019 
02020             # Loop through all subdirectories
02021             for d in test_resources.inc_dirs:
02022 
02023                 # If the test case folder is not called 'host_tests' and it is
02024                 # located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
02025                 # then add it to the tests
02026                 path_depth = get_path_depth(relpath(d, walk_base_dir))
02027                 if path_depth == 2:
02028                     test_group_directory_path, test_case_directory = os.path.split(d)
02029                     test_group_directory = os.path.basename(test_group_directory_path)
02030                     
02031                     # Check to make sure the discovered folder is not in a host test directory
02032                     if test_case_directory != 'host_tests' and test_group_directory != 'host_tests':
02033                         test_name = test_path_to_name(d, base_dir)
02034                         tests[test_name] = d
02035 
02036     return tests
02037 
02038 def print_tests (tests, format="list", sort=True):
02039     """Given a dictionary of tests (as returned from "find_tests"), print them
02040     in the specified format"""
02041     if format == "list":
02042         for test_name in sorted(tests.keys()):
02043             test_path = tests[test_name]
02044             print "Test Case:"
02045             print "    Name: %s" % test_name
02046             print "    Path: %s" % test_path
02047     elif format == "json":
02048         print json.dumps(tests, indent=2)
02049     else:
02050         print "Unknown format '%s'" % format
02051         sys.exit(1)
02052 
02053 def norm_relative_path (path, start):
02054     """This function will create a normalized, relative path. It mimics the
02055     python os.path.relpath function, but also normalizes a Windows-style path
02056     that uses backslashes to a Unix-style path that uses forward slashes."""
02057     path = os.path.normpath(path)
02058     path = os.path.relpath(path, start)
02059     path = path.replace("\\", "/")
02060     return path
02061 
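# Example (illustrative, on a Windows host):
#
#   norm_relative_path('C:\\work\\proj\\BUILD\\tests\\K64F', 'C:\\work\\proj')   -> 'BUILD/tests/K64F'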
02062 def build_tests (tests, base_source_paths, build_path, target, toolchain_name,
02063         options=None, clean=False, notify=None, verbose=False, jobs=1,
02064         macros=None, silent=False, report=None, properties=None,
02065         continue_on_build_fail=False, app_config=None):
02066     """Given the data structure from 'find_tests' and the typical build parameters,
02067     build all the tests
02068 
02069     Returns a tuple of the build result (True or False) followed by the test
02070     build data structure"""
02071 
02072     execution_directory = "."
02073     base_path = norm_relative_path(build_path, execution_directory)
02074 
02075     target_name = target if isinstance(target, str) else target.name
02076     
02077     test_build = {
02078         "platform": target_name,
02079         "toolchain": toolchain_name,
02080         "base_path": base_path,
02081         "baud_rate": 9600,
02082         "binary_type": "bootable",
02083         "tests": {}
02084     }
02085 
02086     result = True
02087 
02088     map_outputs_total = list()
02089     for test_name, test_path in tests.iteritems():
02090         test_build_path = os.path.join(build_path, test_path)
02091         src_path = base_source_paths + [test_path]
02092         bin_file = None
02093         test_case_folder_name = os.path.basename(test_path)
02094         
02095         
02096         try:
02097             bin_file = build_project(src_path, test_build_path, target, toolchain_name,
02098                                      options=options,
02099                                      jobs=jobs,
02100                                      clean=clean,
02101                                      macros=macros,
02102                                      name=test_case_folder_name,
02103                                      project_id=test_name,
02104                                      report=report,
02105                                      properties=properties,
02106                                      verbose=verbose,
02107                                      app_config=app_config)
02108 
02109         except Exception, e:
02110             if not isinstance(e, NotSupportedException):
02111                 result = False
02112 
02113                 if continue_on_build_fail:
02114                     continue
02115                 else:
02116                     break
02117 
02118         # If a clean build was carried out last time, disable it for the next build.
02119         # Otherwise the previously built test will be deleted.
02120         if clean:
02121             clean = False
02122 
02123         # Normalize the path
02124         if bin_file:
02125             bin_file = norm_relative_path(bin_file, execution_directory)
02126 
02127             test_build['tests'][test_name] = {
02128                 "binaries": [
02129                     {
02130                         "path": bin_file
02131                     }
02132                 ]
02133             }
02134 
02135             print 'Image: %s'% bin_file
02136 
02137     test_builds = {}
02138     test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
02139     
02140 
02141     return result, test_builds
02142 
02143 
02144 def test_spec_from_test_builds(test_builds):
02145     return {
02146         "builds": test_builds
02147     }
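# Example (illustrative sketch of the typical flow; paths and target are assumptions):
#
#   tests = find_tests('.', 'K64F', 'GCC_ARM')
#   ok, test_builds = build_tests(tests, ['.'], './BUILD/tests', TARGET_MAP['K64F'], 'GCC_ARM')
#   test_spec = test_spec_from_test_builds(test_builds)
#   # test_spec == {'builds': {'K64F-GCC_ARM': {..., 'tests': {...}}}}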