Knight KE / Mbed OS Game_Master

test_api.py Source File

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2014 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 
00017 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
00018 """
00019 from __future__ import print_function
00020 import six
00021 
00022 import os
00023 import re
00024 import sys
00025 import json
00026 import uuid
00027 import pprint
00028 import random
00029 import argparse
00030 import datetime
00031 import threading
00032 import ctypes
00033 import functools
00034 from colorama import Fore, Back, Style
00035 from prettytable import PrettyTable
00036 from copy import copy, deepcopy
00037 
00038 from time import sleep, time
00039 try:
00040     from Queue import Queue, Empty
00041 except ImportError:
00042     from queue import Queue, Empty
00043 from os.path import join, exists, basename, relpath
00044 from threading import Thread, Lock
00045 from multiprocessing import Pool, cpu_count
00046 from subprocess import Popen, PIPE
00047 
00048 # Imports related to mbed build api
00049 from tools.tests import TESTS
00050 from tools.tests import TEST_MAP
00051 from tools.paths import BUILD_DIR
00052 from tools.paths import HOST_TESTS
00053 from tools.utils import ToolException
00054 from tools.utils import NotSupportedException
00055 from tools.utils import construct_enum
00056 from tools.memap import MemapParser
00057 from tools.targets import TARGET_MAP, Target
00058 import tools.test_configs as TestConfig
00059 from tools.test_db import BaseDBAccess
00060 from tools.build_api import build_project, build_mbed_libs, build_lib
00061 from tools.build_api import get_target_supported_toolchains
00062 from tools.build_api import write_build_report
00063 from tools.build_api import prep_report
00064 from tools.build_api import prep_properties
00065 from tools.build_api import create_result
00066 from tools.build_api import add_result_to_report
00067 from tools.build_api import prepare_toolchain
00068 from tools.build_api import scan_resources
00069 from tools.build_api import get_config
00070 from tools.libraries import LIBRARIES, LIBRARY_MAP
00071 from tools.options import extract_profile
00072 from tools.toolchains import TOOLCHAIN_PATHS
00073 from tools.toolchains import TOOLCHAINS
00074 from tools.test_exporters import ReportExporter, ResultExporterType
00075 from tools.utils import argparse_filestring_type
00076 from tools.utils import argparse_uppercase_type
00077 from tools.utils import argparse_lowercase_type
00078 from tools.utils import argparse_many
00079 from tools.notifier.mock import MockNotifier
00080 from tools.notifier.term import TerminalNotifier
00081 
00082 import tools.host_tests.host_tests_plugins as host_tests_plugins
00083 
00084 try:
00085     import mbed_lstools
00086     from tools.compliance.ioper_runner import get_available_oper_test_scopes
00087 except ImportError:
00088     pass
00089 
00090 
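# ProcessObserver (below) reads the host-test subprocess's stdout one character
# at a time on a background thread and exposes it through a Queue, so that
# run_host_test() further down can poll the output without blocking on the pipe.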
00091 class ProcessObserver(Thread):
00092     def __init__(self, proc):
00093         Thread.__init__(self)
00094         self.proc = proc
00095         self.queue = Queue()
00096         self.daemon = True
00097         self.active = True
00098         self.start()
00099 
00100     def run(self):
00101         while self.active:
00102             c = self.proc.stdout.read(1)
00103             self.queue.put(c)
00104 
00105     def stop(self):
00106         self.active = False
00107         try:
00108             self.proc.terminate()
00109         except Exception:
00110             pass
00111 
00112 
00113 class SingleTestExecutor (threading.Thread):
00114     """ Example: Single test class in separate thread usage
00115     """
00116     def __init__(self, single_test):
00117         self.single_test  = single_test
00118         threading.Thread.__init__(self)
00119 
00120     def run(self):
00121         start = time()
00122         # Execute tests depending on options and filter applied
00123         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = self.single_test.execute()
00124         elapsed_time = time() - start
00125 
00126         # Human readable summary
00127         if not self.single_test.opts_suppress_summary:
00128             # prints well-formed summary with results (SQL table like)
00129             print(self.single_test.generate_test_summary(test_summary,
00130                                                          shuffle_seed))
00131         if self.single_test.opts_test_x_toolchain_summary:
00132             # prints well-formed summary with results (SQL table like)
00133             # table shows test x toolchain test result matrix
00134             print(self.single_test.generate_test_summary_by_target(
00135                 test_summary, shuffle_seed))
00136         print("Completed in %.2f sec" % elapsed_time)
00137 
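# Illustrative usage of SingleTestExecutor above (a minimal sketch, assuming
# `single_test` is an already configured SingleTestRunner built from parsed
# command-line options):
#
#     executor = SingleTestExecutor(single_test)
#     executor.start()
#     executor.join()   # run() prints the summaries and the elapsed time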
00138 
00139 class SingleTestRunner (object):
00140     """ Object wrapper for single test run which may involve multiple MUTs
00141     """
00142     RE_DETECT_TESTCASE_RESULT = None
00143 
00144     # Return codes for test script
00145     TEST_RESULT_OK = "OK"
00146     TEST_RESULT_FAIL = "FAIL"
00147     TEST_RESULT_ERROR = "ERROR"
00148     TEST_RESULT_UNDEF = "UNDEF"
00149     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
00150     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
00151     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
00152     TEST_RESULT_TIMEOUT = "TIMEOUT"
00153     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
00154     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
00155     TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
00156     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
00157 
00158     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
00159     TEST_LOOPS_LIST = []    # Redefines the number of loops per test_id
00160     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
00161 
00162     muts = {} # MUTs descriptor (from external file)
00163     test_spec = {} # Test specification (from external file)
00164 
00165     # mbed test suite -> SingleTestRunner
00166     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
00167                            "failure" : TEST_RESULT_FAIL,
00168                            "error" : TEST_RESULT_ERROR,
00169                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
00170                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
00171                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
00172                            "timeout" : TEST_RESULT_TIMEOUT,
00173                            "no_image" : TEST_RESULT_NO_IMAGE,
00174                            "end" : TEST_RESULT_UNDEF,
00175                            "mbed_assert" : TEST_RESULT_MBED_ASSERT,
00176                            "build_failed" : TEST_RESULT_BUILD_FAILED,
00177                            "not_supproted" : TEST_RESULT_NOT_SUPPORTED
00178     }
00179 
00180     def __init__ (self,
00181                  _global_loops_count=1,
00182                  _test_loops_list=None,
00183                  _muts={},
00184                  _clean=False,
00185                  _parser=None,
00186                  _opts=None,
00187                  _opts_db_url=None,
00188                  _opts_log_file_name=None,
00189                  _opts_report_html_file_name=None,
00190                  _opts_report_junit_file_name=None,
00191                  _opts_report_build_file_name=None,
00192                  _opts_report_text_file_name=None,
00193                  _opts_build_report={},
00194                  _opts_build_properties={},
00195                  _test_spec={},
00196                  _opts_goanna_for_mbed_sdk=None,
00197                  _opts_goanna_for_tests=None,
00198                  _opts_shuffle_test_order=False,
00199                  _opts_shuffle_test_seed=None,
00200                  _opts_test_by_names=None,
00201                  _opts_peripheral_by_names=None,
00202                  _opts_test_only_peripheral=False,
00203                  _opts_test_only_common=False,
00204                  _opts_verbose_skipped_tests=False,
00205                  _opts_verbose_test_result_only=False,
00206                  _opts_verbose=False,
00207                  _opts_firmware_global_name=None,
00208                  _opts_only_build_tests=False,
00209                  _opts_parallel_test_exec=False,
00210                  _opts_suppress_summary=False,
00211                  _opts_test_x_toolchain_summary=False,
00212                  _opts_copy_method=None,
00213                  _opts_mut_reset_type=None,
00214                  _opts_jobs=None,
00215                  _opts_waterfall_test=None,
00216                  _opts_consolidate_waterfall_test=None,
00217                  _opts_extend_test_timeout=None,
00218                  _opts_auto_detect=None,
00219                  _opts_include_non_automated=False):
00220         """ Let's try hard to init this object
00221         """
00222         from colorama import init
00223         init()
00224 
00225         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
00226         self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
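        # The compiled pattern matches result tokens such as "{success}" or
        # "{failure}" printed by the test runner; get_test_result() inside
        # run_host_test() maps them back to TEST_RESULT_* codes via
        # TEST_RESULT_MAPPING.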
00227         # Settings related to test loops counters
00228         try:
00229             _global_loops_count = int(_global_loops_count)
00230         except:
00231             _global_loops_count = 1
00232         if _global_loops_count < 1:
00233             _global_loops_count = 1
00234         self.GLOBAL_LOOPS_COUNT  = _global_loops_count
00235         self.TEST_LOOPS_LIST  = _test_loops_list if _test_loops_list else []
00236         self.TEST_LOOPS_DICT  = self.test_loop_list_to_dict (_test_loops_list)
00237 
00238         self.shuffle_random_seed  = 0.0
00239         self.SHUFFLE_SEED_ROUND  = 10
00240 
00241         # MUT list and test specification storage
00242         self.muts  = _muts
00243         self.test_spec  = _test_spec
00244 
00245         # Settings passed e.g. from command line
00246         self.opts_db_url  = _opts_db_url
00247         self.opts_log_file_name  = _opts_log_file_name
00248         self.opts_report_html_file_name  = _opts_report_html_file_name
00249         self.opts_report_junit_file_name  = _opts_report_junit_file_name
00250         self.opts_report_build_file_name  = _opts_report_build_file_name
00251         self.opts_report_text_file_name  = _opts_report_text_file_name
00252         self.opts_goanna_for_mbed_sdk  = _opts_goanna_for_mbed_sdk
00253         self.opts_goanna_for_tests  = _opts_goanna_for_tests
00254         self.opts_shuffle_test_order  = _opts_shuffle_test_order
00255         self.opts_shuffle_test_seed  = _opts_shuffle_test_seed
00256         self.opts_test_by_names  = _opts_test_by_names
00257         self.opts_peripheral_by_names  = _opts_peripheral_by_names
00258         self.opts_test_only_peripheral  = _opts_test_only_peripheral
00259         self.opts_test_only_common  = _opts_test_only_common
00260         self.opts_verbose_skipped_tests  = _opts_verbose_skipped_tests
00261         self.opts_verbose_test_result_only  = _opts_verbose_test_result_only
00262         self.opts_verbose  = _opts_verbose
00263         self.opts_firmware_global_name  = _opts_firmware_global_name
00264         self.opts_only_build_tests  = _opts_only_build_tests
00265         self.opts_parallel_test_exec  = _opts_parallel_test_exec
00266         self.opts_suppress_summary  = _opts_suppress_summary
00267         self.opts_test_x_toolchain_summary  = _opts_test_x_toolchain_summary
00268         self.opts_copy_method  = _opts_copy_method
00269         self.opts_mut_reset_type  = _opts_mut_reset_type
00270         self.opts_jobs  = _opts_jobs if _opts_jobs is not None else 1
00271         self.opts_waterfall_test  = _opts_waterfall_test
00272         self.opts_consolidate_waterfall_test  = _opts_consolidate_waterfall_test
00273         self.opts_extend_test_timeout  = _opts_extend_test_timeout
00274         self.opts_clean  = _clean
00275         self.opts_parser  = _parser
00276         self.opts  = _opts
00277         self.opts_auto_detect  = _opts_auto_detect
00278         self.opts_include_non_automated  = _opts_include_non_automated
00279 
00280         self.build_report  = _opts_build_report
00281         self.build_properties  = _opts_build_properties
00282 
00283         # File / screen logger initialization
00284         self.logger  = CLITestLogger(file_name=self.opts_log_file_name )  # Default test logger
00285 
00286         # Database related initializations
00287         self.db_logger  = factory_db_logger(self.opts_db_url )
00288         self.db_logger_build_id  = None # Build ID (database index of build_id table)
00289         # Let's connect to database to set up credentials and confirm database is ready
00290         if self.db_logger :
00291             self.db_logger .connect_url(self.opts_db_url ) # Save db access info inside db_logger object
00292             if self.db_logger .is_connected():
00293                 # Get hostname and uname so we can use it as build description
00294                 # when creating new build_id in external database
00295                 (_hostname, _uname) = self.db_logger .get_hostname()
00296                 _host_location = os.path.dirname(os.path.abspath(__file__))
00297                 build_id_type = None if self.opts_only_build_tests  is None else self.db_logger .BUILD_ID_TYPE_BUILD_ONLY
00298                 self.db_logger_build_id  = self.db_logger .get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
00299                 self.db_logger .disconnect()
00300 
00301     def dump_options (self):
00302         """ Function returns data structure with common settings passed to SingelTestRunner
00303             It can be used for example to fill _extra fields in database storing test suite single run data
00304             Example:
00305             data = self.dump_options()
00306             or
00307             data_str = json.dumps(self.dump_options())
00308         """
00309         result = {"db_url" : str(self.opts_db_url ),
00310                   "log_file_name" :  str(self.opts_log_file_name ),
00311                   "shuffle_test_order" : str(self.opts_shuffle_test_order ),
00312                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed ),
00313                   "test_by_names" :  str(self.opts_test_by_names ),
00314                   "peripheral_by_names" : str(self.opts_peripheral_by_names ),
00315                   "test_only_peripheral" :  str(self.opts_test_only_peripheral ),
00316                   "test_only_common" :  str(self.opts_test_only_common ),
00317                   "verbose" :  str(self.opts_verbose ),
00318                   "firmware_global_name" :  str(self.opts_firmware_global_name ),
00319                   "only_build_tests" :  str(self.opts_only_build_tests ),
00320                   "copy_method" :  str(self.opts_copy_method ),
00321                   "mut_reset_type" :  str(self.opts_mut_reset_type ),
00322                   "jobs" :  str(self.opts_jobs ),
00323                   "extend_test_timeout" :  str(self.opts_extend_test_timeout ),
00324                   "_dummy" : ''
00325         }
00326         return result
00327 
00328     def shuffle_random_func(self):
00329         return self.shuffle_random_seed 
00330 
00331     def is_shuffle_seed_float (self):
00332         """ return true if function parameter can be converted to float
00333         """
00334         result = True
00335         try:
00336             float(self.shuffle_random_seed )
00337         except ValueError:
00338             result = False
00339         return result
00340 
00341     # This will store target / toolchain specific properties
00342     test_suite_properties_ext = {}  # target : toolchain
00343     # Here we store test results
00344     test_summary = []
00345     # Here we store test results in extended data structure
00346     test_summary_ext = {}
00347     execute_thread_slice_lock = Lock()
00348 
00349     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
00350         for toolchain in toolchains:
00351             tt_id = "%s::%s" % (toolchain, target)
00352 
00353             # Skip targets that are not present in the target map
00354             if target not in TARGET_MAP:
00355                 print(self.logger.log_line(
00356                     self.logger.LogType.NOTIF,
00357                     'Skipped tests for %s target. Target platform not found' %
00358                     (target)))
00359                 continue
00360 
00361             T = TARGET_MAP[target]
00362 
00363             # Test suite properties returned to external tools like CI
00364             test_suite_properties = {
00365                 'jobs': self.opts_jobs,
00366                 'clean': clean,
00367                 'target': target,
00368                 'vendor': T.extra_labels[0],
00369                 'test_ids': ', '.join(test_ids),
00370                 'toolchain': toolchain,
00371                 'shuffle_random_seed': self.shuffle_random_seed
00372             }
00373 
00374 
00375             # Let's build our test
00376 
00377             clean_mbed_libs_options = (self.opts_goanna_for_mbed_sdk  or
00378                                        self.opts_clean  or clean)
00379 
00380             profile = extract_profile(self.opts_parser , self.opts , toolchain)
00381             stats_depth = self.opts .stats_depth or 2
00382 
00383             try:
00384                 build_mbed_libs_result = build_mbed_libs(
00385                     T, toolchain,
00386                     clean=clean_mbed_libs_options,
00387                     jobs=self.opts_jobs ,
00388                     report=build_report,
00389                     properties=build_properties,
00390                     build_profile=profile,
00391                     notify=TerminalNotifier())
00392 
00393                 if not build_mbed_libs_result:
00394                     print(self.logger .log_line(
00395                         self.logger .LogType.NOTIF,
00396                         'Skipped tests for %s target. Toolchain %s is not '
00397                         'supported for this target'% (T.name, toolchain)))
00398                     continue
00399 
00400             except ToolException:
00401                 print(self.logger .log_line(
00402                     self.logger .LogType.ERROR,
00403                     'There were errors while building MBED libs for %s using %s'
00404                     % (target, toolchain)))
00405                 continue
00406 
00407             build_dir = join(BUILD_DIR, "test", target, toolchain)
00408 
00409             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
00410             test_suite_properties['build_dir'] = build_dir
00411             test_suite_properties['skipped'] = []
00412 
00413             # Enumerate through all tests and shuffle test order if requested
00414             test_map_keys = sorted(TEST_MAP.keys())
00415 
00416             if self.opts_shuffle_test_order :
00417                 random.shuffle(test_map_keys, self.shuffle_random_func )
00418                 # Update database with shuffle seed if applicable
00419                 if self.db_logger :
00420                     self.db_logger .reconnect();
00421                     if self.db_logger .is_connected():
00422                         self.db_logger .update_build_id_info(
00423                             self.db_logger_build_id ,
00424                             _shuffle_seed=self.shuffle_random_func ())
00425                         self.db_logger .disconnect();
00426 
00427             if self.db_logger :
00428                 self.db_logger .reconnect();
00429                 if self.db_logger .is_connected():
00430                     # Update MUTs and Test Specification in database
00431                     self.db_logger .update_build_id_info(
00432                         self.db_logger_build_id ,
00433                         _muts=self.muts , _test_spec=self.test_spec )
00434                     # Update Extra information in database (some options passed to test suite)
00435                     self.db_logger .update_build_id_info(
00436                         self.db_logger_build_id ,
00437                         _extra=json.dumps(self.dump_options ()))
00438                     self.db_logger .disconnect();
00439 
00440             valid_test_map_keys = self.get_valid_tests (test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated )
00441             skipped_test_map_keys = self.get_skipped_tests (test_map_keys, valid_test_map_keys)
00442 
00443             for skipped_test_id in skipped_test_map_keys:
00444                 test_suite_properties['skipped'].append(skipped_test_id)
00445 
00446 
00447             # First pass through all tests and determine which libraries need to be built
00448             libraries = []
00449             for test_id in valid_test_map_keys:
00450                 test = TEST_MAP[test_id]
00451 
00452                 # Detect which lib should be added to test
00453                 # Some libs have to be compiled, like RTOS or ETH
00454                 for lib in LIBRARIES:
00455                     if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
00456                         libraries.append(lib['id'])
00457 
00458 
00459             clean_project_options = True if self.opts_goanna_for_tests  or clean or self.opts_clean  else None
00460 
00461             # Build all required libraries
00462             for lib_id in libraries:
00463                 try:
00464                     build_lib(lib_id,
00465                               T,
00466                               toolchain,
00467                               clean=clean_mbed_libs_options,
00468                               jobs=self.opts_jobs ,
00469                               report=build_report,
00470                               properties=build_properties,
00471                               build_profile=profile,
00472                               notify=TerminalNotifier())
00473 
00474                 except ToolException:
00475                     print(self.logger .log_line(
00476                         self.logger .LogType.ERROR,
00477                         'There were errors while building library %s' % lib_id))
00478                     continue
00479 
00480 
00481             for test_id in valid_test_map_keys:
00482                 test = TEST_MAP[test_id]
00483 
00484                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
00485 
00486                 # TODO: move the two loops below to a separate function
00487                 INC_DIRS = []
00488                 for lib_id in libraries:
00489                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
00490                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
00491 
00492                 MACROS = []
00493                 for lib_id in libraries:
00494                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
00495                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
00496                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
00497                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
00498                 test_uuid = uuid.uuid4()
00499                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
00500 
00501                 # Prepare extended test results data structure (it can be used to generate detailed test report)
00502                 if target not in self.test_summary_ext :
00503                     self.test_summary_ext [target] = {}  # test_summary_ext : toolchain
00504                 if toolchain not in self.test_summary_ext [target]:
00505                     self.test_summary_ext [target][toolchain] = {}    # test_summary_ext : toolchain : target
00506 
00507                 tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)    # For logging only
00508 
00509                 project_name = self.opts_firmware_global_name  if self.opts_firmware_global_name  else None
00510                 try:
00511                     path = build_project(
00512                         test.source_dir, join(build_dir, test_id), T,
00513                         toolchain, test.dependencies, clean=clean_project_options,
00514                         name=project_name, macros=MACROS,
00515                         inc_dirs=INC_DIRS, jobs=self.opts_jobs , report=build_report,
00516                         properties=build_properties, project_id=test_id,
00517                         project_description=test.get_description(),
00518                         build_profile=profile, stats_depth=stats_depth,
00519                         notify=TerminalNotifier(),
00520                     )
00521 
00522                 except Exception as e:
00523                     project_name_str = project_name if project_name is not None else test_id
00524 
00525 
00526                     test_result = self.TEST_RESULT_FAIL 
00527 
00528                     if isinstance(e, ToolException):
00529                         print(self.logger .log_line(
00530                             self.logger .LogType.ERROR,
00531                             'There were errors while building project %s' %
00532                             project_name_str))
00533                         test_result = self.TEST_RESULT_BUILD_FAILED 
00534                     elif isinstance(e, NotSupportedException):
00535                         print(self.logger .log_line(
00536                             self.logger .LogType.INFO,
00537                             'Project %s is not supported' % project_name_str))
00538                         test_result = self.TEST_RESULT_NOT_SUPPORTED 
00539 
00540 
00541                     # Append test results to global test summary
00542                     self.test_summary .append(
00543                         (test_result, target, toolchain, test_id,
00544                          test.get_description(), 0, 0, '-')
00545                     )
00546 
00547                     # Add detailed test result to test summary structure
00548                     if test_id not in self.test_summary_ext [target][toolchain]:
00549                         self.test_summary_ext [target][toolchain][test_id] = []
00550 
00551                     self.test_summary_ext [target][toolchain][test_id].append({ 0: {
00552                         'result' : test_result,
00553                         'output' : '',
00554                         'target_name' : target,
00555                         'target_name_unique': target,
00556                         'toolchain_name' : toolchain,
00557                         'id' : test_id,
00558                         'description' : test.get_description(),
00559                         'elapsed_time' : 0,
00560                         'duration' : 0,
00561                         'copy_method' : None
00562                     }})
00563                     continue
00564 
00565                 if self.opts_only_build_tests :
00566                     # With this option we are skipping testing phase
00567                     continue
00568 
00569                 # Test duration can be increased by global value
00570                 test_duration = test.duration
00571                 if self.opts_extend_test_timeout  is not None:
00572                     test_duration += self.opts_extend_test_timeout 
00573 
00574                 # For an automated test the duration acts as a timeout after
00575                 # which the test gets interrupted
00576                 test_spec = self.shape_test_request (target, path, test_id, test_duration)
00577                 test_loops = self.get_test_loop_count (test_id)
00578 
00579                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
00580                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
00581                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
00582 
00583                 # read MUTs, test specification and perform tests
00584                 handle_results = self.handle (test_spec, target, toolchain, test_loops=test_loops)
00585 
00586                 if handle_results is None:
00587                     continue
00588 
00589                 for handle_result in handle_results:
00590                     if handle_result:
00591                         single_test_result, detailed_test_results = handle_result
00592                     else:
00593                         continue
00594 
00595                     # Append test results to global test summary
00596                     if single_test_result is not None:
00597                         self.test_summary .append(single_test_result)
00598 
00599                     # Add detailed test result to test summary structure
00600                     if test_id not in self.test_summary_ext[target][toolchain]:
00601                         self.test_summary_ext[target][toolchain][test_id] = []
00602 
00603                     append_test_result = detailed_test_results
00604 
00605                     # If waterfall and consolidate-waterfall options are enabled,
00606                     # only include the last test result in the report, as the
00607                     # preceding attempts are retries of the same test.
00608                     if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
00609                         append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
00610 
00611                     self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
00612 
00613             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
00614             self.test_suite_properties_ext [target][toolchain] = test_suite_properties
00615 
00616         q.put(target + '_'.join(toolchains))
00617         return
00618 
00619     def execute(self):
00620         clean = self.test_spec .get('clean', False)
00621         test_ids = self.test_spec .get('test_ids', [])
00622         q = Queue()
00623 
00624         # Generate a seed for shuffling if one is not provided in the options
00625         self.shuffle_random_seed  = round(random.random(), self.SHUFFLE_SEED_ROUND )
00626         if self.opts_shuffle_test_seed  is not None and self.is_shuffle_seed_float ():
00627             self.shuffle_random_seed  = round(float(self.opts_shuffle_test_seed ), self.SHUFFLE_SEED_ROUND )
00628 
00629 
00630         if self.opts_parallel_test_exec :
00631             ###################################################################
00632             # Experimental, parallel test execution per singletest instance.
00633             ###################################################################
00634             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
00635             # Note: We are building here in parallel for each target separately!
00636             # So we are not building the same thing multiple times and compilers
00637             # in separate threads do not collide.
00638             # Inside the execute_thread_slice() function, handle() will be called to
00639             # get information about available MUTs (per target).
00640             for target, toolchains in self.test_spec ['targets'].items():
00641                 self.test_suite_properties_ext [target] = {}
00642                 t = threading.Thread(target=self.execute_thread_slice , args = (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties ))
00643                 t.daemon = True
00644                 t.start()
00645                 execute_threads.append(t)
00646 
00647             for t in execute_threads:
00648                 q.get() # Wait for each thread to signal completion; t.join() would impose an arbitrary wait order
00649         else:
00650             # Serialized (not parallel) test execution
00651             for target, toolchains in self.test_spec ['targets'].items():
00652                 if target not in self.test_suite_properties_ext :
00653                     self.test_suite_properties_ext [target] = {}
00654 
00655                 self.execute_thread_slice (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties )
00656                 q.get()
00657 
00658         if self.db_logger :
00659             self.db_logger .reconnect();
00660             if self.db_logger .is_connected():
00661                 self.db_logger .update_build_id_info(self.db_logger_build_id , _status_fk=self.db_logger .BUILD_ID_STATUS_COMPLETED)
00662                 self.db_logger .disconnect();
00663 
00664         return self.test_summary , self.shuffle_random_seed , self.test_summary_ext , self.test_suite_properties_ext , self.build_report , self.build_properties 
00665 
00666     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
00667         valid_test_map_keys = []
00668 
00669         for test_id in test_map_keys:
00670             test = TEST_MAP[test_id]
00671             if self.opts_test_by_names  and test_id not in self.opts_test_by_names :
00672                 continue
00673 
00674             if test_ids and test_id not in test_ids:
00675                 continue
00676 
00677             if self.opts_test_only_peripheral  and not test.peripherals:
00678                 if self.opts_verbose_skipped_tests :
00679                     print(self.logger .log_line(
00680                         self.logger .LogType.INFO,
00681                         'Common test skipped for target %s' % target))
00682                 continue
00683 
00684             if (self.opts_peripheral_by_names  and test.peripherals and
00685                 not any((i in self.opts_peripheral_by_names )
00686                         for i in test.peripherals)):
00687                 # We will skip tests not forced with -p option
00688                 if self.opts_verbose_skipped_tests :
00689                     print(self.logger .log_line(
00690                         self.logger .LogType.INFO,
00691                         'Common test skipped for target %s' % target))
00692                 continue
00693 
00694             if self.opts_test_only_common  and test.peripherals:
00695                 if self.opts_verbose_skipped_tests :
00696                     print(self.logger .log_line(
00697                         self.logger .LogType.INFO,
00698                         'Peripheral test skipped for target %s' % target))
00699                 continue
00700 
00701             if not include_non_automated and not test.automated:
00702                 if self.opts_verbose_skipped_tests :
00703                     print(self.logger .log_line(
00704                         self.logger .LogType.INFO,
00705                         'Non automated test skipped for target %s' % target))
00706                 continue
00707 
00708             if test.is_supported(target, toolchain):
00709                 if test.peripherals is None and self.opts_only_build_tests :
00710                     # When the user passes the 'build only' flag and the test does not
00711                     # specify peripherals, we allow building the test by default
00712                     pass
00713                 elif self.opts_peripheral_by_names  and test_id not in self.opts_peripheral_by_names :
00714                     # If we force peripheral with option -p we expect test
00715                     # to pass even if peripheral is not in MUTs file.
00716                     pass
00717                 elif not self.is_peripherals_available (target, test.peripherals):
00718                     if self.opts_verbose_skipped_tests :
00719                         if test.peripherals:
00720                             print(self.logger .log_line(
00721                                 self.logger .LogType.INFO,
00722                                 'Peripheral %s test skipped for target %s' %
00723                                 (",".join(test.peripherals), target)))
00724                         else:
00725                             print(self.logger .log_line(
00726                                 self.logger .LogType.INFO,
00727                                 'Test %s skipped for target %s' %
00728                                 (test_id, target)))
00729                     continue
00730 
00731                 # The test has made it through all the filters, so add it to the valid tests list
00732                 valid_test_map_keys.append(test_id)
00733 
00734         return valid_test_map_keys
00735 
00736     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
00737         # NOTE: This will not preserve order
00738         return list(set(all_test_map_keys) - set(valid_test_map_keys))
00739 
00740     def generate_test_summary_by_target (self, test_summary, shuffle_seed=None):
00741         """ Prints well-formed summary with results (SQL table like)
00742             table shows test x toolchain test result matrix
00743         """
00744         RESULT_INDEX = 0
00745         TARGET_INDEX = 1
00746         TOOLCHAIN_INDEX = 2
00747         TEST_INDEX = 3
00748         DESC_INDEX = 4
00749 
00750         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
00751         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
00752         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
00753         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
00754 
00755         result = "Test summary:\n"
00756         for target in unique_targets:
00757             result_dict = {} # test : { toolchain : result }
00758             unique_target_toolchains = []
00759             for test in test_summary:
00760                 if test[TARGET_INDEX] == target:
00761                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
00762                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
00763                     if test[TEST_INDEX] not in result_dict:
00764                         result_dict[test[TEST_INDEX]] = {}
00765                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
00766 
00767             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
00768             pt = PrettyTable(pt_cols)
00769             for col in pt_cols:
00770                 pt.align[col] = "l"
00771             pt.padding_width = 1 # One space between column edges and contents (default)
00772 
00773             for test in unique_tests:
00774                 if test in result_dict:
00775                     test_results = result_dict[test]
00776                     if test in unique_test_desc:
00777                         row = [target, test, unique_test_desc[test]]
00778                         for toolchain in unique_toolchains:
00779                             if toolchain in test_results:
00780                                 row.append(test_results[toolchain])
00781                         pt.add_row(row)
00782             result += pt.get_string()
00783             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND ,
00784                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00785             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00786         return result
00787 
00788     def generate_test_summary (self, test_summary, shuffle_seed=None):
00789         """ Prints well-formed summary with results (SQL table like)
00790             table lists each test result across targets and toolchains
00791         """
00792         success_code = 0    # Success code that can be later returned to the caller
00793         result = "Test summary:\n"
00794         # Pretty table package is used to print results
00795         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
00796                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
00797         pt.align["Result"] = "l" # Left align
00798         pt.align["Target"] = "l" # Left align
00799         pt.align["Toolchain"] = "l" # Left align
00800         pt.align["Test ID"] = "l" # Left align
00801         pt.align["Test Description"] = "l" # Left align
00802         pt.padding_width = 1 # One space between column edges and contents (default)
00803 
00804         result_dict = {self.TEST_RESULT_OK  : 0,
00805                        self.TEST_RESULT_FAIL  : 0,
00806                        self.TEST_RESULT_ERROR  : 0,
00807                        self.TEST_RESULT_UNDEF  : 0,
00808                        self.TEST_RESULT_IOERR_COPY  : 0,
00809                        self.TEST_RESULT_IOERR_DISK  : 0,
00810                        self.TEST_RESULT_IOERR_SERIAL  : 0,
00811                        self.TEST_RESULT_NO_IMAGE  : 0,
00812                        self.TEST_RESULT_TIMEOUT  : 0,
00813                        self.TEST_RESULT_MBED_ASSERT  : 0,
00814                        self.TEST_RESULT_BUILD_FAILED  : 0,
00815                        self.TEST_RESULT_NOT_SUPPORTED  : 0
00816         }
00817 
00818         for test in test_summary:
00819             if test[0] in result_dict:
00820                 result_dict[test[0]] += 1
00821             pt.add_row(test)
00822         result += pt.get_string()
00823         result += "\n"
00824 
00825         # Print result count
00826         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
00827         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND ,
00828                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00829         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00830         return result
00831 
00832     def test_loop_list_to_dict (self, test_loops_str):
00833         """ Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
00834         """
00835         result = {}
00836         if test_loops_str:
00837             test_loops = test_loops_str
00838             for test_loop in test_loops:
00839                 test_loop_count = test_loop.split('=')
00840                 if len(test_loop_count) == 2:
00841                     _test_id, _test_loops = test_loop_count
00842                     try:
00843                         _test_loops = int(_test_loops)
00844                     except:
00845                         continue
00846                     result[_test_id] = _test_loops
00847         return result
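    # For example (hypothetical ids), test_loop_list_to_dict(['MBED_1=10', 'MBED_2=5'])
    # returns {'MBED_1': 10, 'MBED_2': 5}; entries without a valid integer loop
    # count are skipped.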
00848 
00849     def get_test_loop_count (self, test_id):
00850         """ This function returns no. of loops per test (deducted by test_id_.
00851             If test is not in list of redefined loop counts it will use default value.
00852         """
00853         result = self.GLOBAL_LOOPS_COUNT 
00854         if test_id in self.TEST_LOOPS_DICT :
00855             result = self.TEST_LOOPS_DICT [test_id]
00856         return result
00857 
00858     def delete_file (self, file_path):
00859         """ Remove file from the system
00860         """
00861         result = True
00862         result_msg = ""
00863         try:
00864             os.remove(file_path)
00865         except Exception as e:
00866             result_msg = e
00867             result = False
00868         return result, result_msg
00869 
00870     def handle_mut (self, mut, data, target_name, toolchain_name, test_loops=1):
00871         """ Test is being invoked for given MUT.
00872         """
00873         # Get test information, image and test timeout
00874         test_id = data['test_id']
00875         test = TEST_MAP[test_id]
00876         test_description = TEST_MAP[test_id].get_description()
00877         image = data["image"]
00878         duration = data.get("duration", 10)
00879 
00880         if mut is None:
00881             print("Error: No Mbed available: MUT[%s]" % data['mcu'])
00882             return None
00883 
00884         mcu = mut['mcu']
00885         copy_method = mut.get('copy_method')        # Available board configuration selection e.g. core selection etc.
00886 
00887         if self.db_logger :
00888             self.db_logger .reconnect()
00889 
00890         selected_copy_method = self.opts_copy_method  if copy_method is None else copy_method
00891 
00892         # Tests can be looped so test results must be stored for the same test
00893         test_all_result = []
00894         # Test results for one test ran few times
00895         detailed_test_results = {}  # { Loop_number: { results ... } }
00896 
00897         for test_index in range(test_loops):
00898 
00899             # If mbedls is available and we are auto detecting MUT info,
00900             # update MUT info (the mount point may have changed)
00901             if get_module_avail('mbed_lstools') and self.opts_auto_detect :
00902                 platform_name_filter = [mcu]
00903                 muts_list = {}
00904                 found = False
00905 
00906                 for i in range(0, 60):
00907                     print('Looking for %s with MBEDLS' % mcu)
00908                     muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
00909 
00910                     if 1 not in muts_list:
00911                         sleep(3)
00912                     else:
00913                         found = True
00914                         break
00915 
00916                 if not found:
00917                     print("Error: mbed not found with MBEDLS: %s" % data['mcu'])
00918                     return None
00919                 else:
00920                     mut = muts_list[1]
00921 
00922             disk = mut.get('disk')
00923             port = mut.get('port')
00924 
00925             if disk is None or port is None:
00926                 return None
00927 
00928             target_by_mcu = TARGET_MAP[mut['mcu']]
00929             target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
00930             # Some extra stuff can be declared in MUTs structure
00931             reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
00932             reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
00933 
00934             # When the build and test systems were separate, this was relative to a
00935             # base network folder path: join(NETWORK_BASE_PATH, ...)
00936             image_path = image
00937 
00938             # Host test execution
00939             start_host_exec_time = time()
00940 
00941             single_test_result = self.TEST_RESULT_UNDEF  # single test run result
00942             _copy_method = selected_copy_method
00943 
00944             if not exists(image_path):
00945                 single_test_result = self.TEST_RESULT_NO_IMAGE 
00946                 elapsed_time = 0
00947                 single_test_output = self.logger .log_line(self.logger .LogType.ERROR, 'Image file does not exist: %s'% image_path)
00948                 print(single_test_output)
00949             else:
00950                 # Host test execution
00951                 start_host_exec_time = time()
00952 
00953                 host_test_verbose = self.opts_verbose_test_result_only  or self.opts_verbose 
00954                 host_test_reset = self.opts_mut_reset_type  if reset_type is None else reset_type
00955                 host_test_result = self.run_host_test (test.host_test,
00956                                                       image_path, disk, port, duration,
00957                                                       micro=target_name,
00958                                                       verbose=host_test_verbose,
00959                                                       reset=host_test_reset,
00960                                                       reset_tout=reset_tout,
00961                                                       copy_method=selected_copy_method,
00962                                                       program_cycle_s=target_by_mcu.program_cycle_s)
00963                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
00964 
00965             # Store test result
00966             test_all_result.append(single_test_result)
00967             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
00968             elapsed_time = single_testduration  # Time of single test case execution after reset
00969 
00970             detailed_test_results[test_index] = {
00971                 'result' : single_test_result,
00972                 'output' : single_test_output,
00973                 'target_name' : target_name,
00974                 'target_name_unique' : target_name_unique,
00975                 'toolchain_name' : toolchain_name,
00976                 'id' : test_id,
00977                 'description' : test_description,
00978                 'elapsed_time' : round(elapsed_time, 2),
00979                 'duration' : single_timeout,
00980                 'copy_method' : _copy_method,
00981             }
00982 
00983             print(self.print_test_result (
00984                 single_test_result, target_name_unique, toolchain_name, test_id,
00985                 test_description, elapsed_time, single_timeout))
00986 
00987             # Update database entries for ongoing test
00988             if self.db_logger  and self.db_logger .is_connected():
00989                 test_type = 'SingleTest'
00990                 self.db_logger .insert_test_entry(self.db_logger_build_id ,
00991                                                  target_name,
00992                                                  toolchain_name,
00993                                                  test_type,
00994                                                  test_id,
00995                                                  single_test_result,
00996                                                  single_test_output,
00997                                                  elapsed_time,
00998                                                  single_timeout,
00999                                                  test_index)
01000 
01001             # In waterfall mode we repeat the test until it returns OK, then stop testing
01002             if self.opts_waterfall_test  and single_test_result == self.TEST_RESULT_OK :
01003                 break
01004 
01005         if self.db_logger :
01006             self.db_logger .disconnect()
01007 
01008         return (self.shape_global_test_loop_result (test_all_result, self.opts_waterfall_test  and self.opts_consolidate_waterfall_test ),
01009                 target_name_unique,
01010                 toolchain_name,
01011                 test_id,
01012                 test_description,
01013                 round(elapsed_time, 2),
01014                 single_timeout,
01015                 self.shape_test_loop_ok_result_count (test_all_result)), detailed_test_results
01016 
01017     def handle (self, test_spec, target_name, toolchain_name, test_loops=1):
01018         """ Function determines MUT's mbed disk/port and copies binary to
01019             target.
01020         """
01021         handle_results = []
01022         data = json.loads(test_spec)
01023 
01024         # Find a suitable MUT:
01025         mut = None
01026         for id, m in self.muts .items():
01027             if m['mcu'] == data['mcu']:
01028                 mut = m
01029                 handle_result = self.handle_mut (mut, data, target_name, toolchain_name, test_loops=test_loops)
01030                 handle_results.append(handle_result)
01031 
01032         return handle_results
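    # shape_test_request() builds the test_spec JSON string consumed here; the
    # fields read above and in handle_mut() are at least 'mcu', 'image',
    # 'test_id' and 'duration', e.g. (illustrative values only):
    #     {"mcu": "K64F", "image": "<path to built binary>",
    #      "test_id": "MBED_1", "duration": 20}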
01033 
01034     def print_test_result (self, test_result, target_name, toolchain_name,
01035                           test_id, test_description, elapsed_time, duration):
01036         """ Use specific convention to print test result and related data
01037         """
01038         tokens = []
01039         tokens.append("TargetTest")
01040         tokens.append(target_name)
01041         tokens.append(toolchain_name)
01042         tokens.append(test_id)
01043         tokens.append(test_description)
01044         separator = "::"
01045         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
01046         result = separator.join(tokens) + " [" + test_result +"]" + time_info
01047         return Fore.MAGENTA + result + Fore.RESET
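    # Example of the produced line (values are illustrative):
    #   TargetTest::K64F::GCC_ARM::MBED_1::Basic [OK] in 2.15 of 20 sec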
01048 
01049     def shape_test_loop_ok_result_count (self, test_all_result):
01050         """ Reformats list of results to simple string
01051         """
01052         test_loop_count = len(test_all_result)
01053         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK )
01054         return "%d/%d"% (test_loop_ok_result, test_loop_count)
01055 
01056     def shape_global_test_loop_result (self, test_all_result, waterfall_and_consolidate):
01057         """ Reformats list of results to simple string
01058         """
01059         result = self.TEST_RESULT_FAIL 
01060 
01061         if all(test_all_result[0] == res for res in test_all_result):
01062             result = test_all_result[0]
01063         elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK  for res in test_all_result):
01064             result = self.TEST_RESULT_OK 
01065 
01066         return result
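    # For example, shape_global_test_loop_result(['OK', 'OK'], False) -> 'OK',
    # and ['FAIL', 'OK'] -> 'FAIL' unless waterfall_and_consolidate is set, in
    # which case any 'OK' among the loop results yields 'OK'.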
01067 
01068     def run_host_test (self, name, image_path, disk, port, duration,
01069                       micro=None, reset=None, reset_tout=None,
01070                       verbose=False, copy_method=None, program_cycle_s=None):
01071         """ Function creates new process with host test configured with particular test case.
01072             The function also polls the process output (serial port activity) to catch all data
01073             printed by the test runner and host test during test execution.
01074         """
01075 
01076         def get_char_from_queue(obs):
01077             """ Get character from queue safe way
01078             """
01079             try:
01080                 c = obs.queue.get(block=True, timeout=0.5)
01081             except Empty:
01082                 c = None
01083             return c
01084 
01085         def filter_queue_char(c):
01086             """ Filters out non ASCII characters from serial port
01087             """
01088             if ord(c) not in range(128):
01089                 c = ' '
01090             return c
01091 
01092         def get_test_result(output):
01093             """ Parse test 'output' data
01094             """
01095             result = self.TEST_RESULT_TIMEOUT 
01096             for line in "".join(output).splitlines():
01097                 search_result = self.RE_DETECT_TESTCASE_RESULT .search(line)
01098                 if search_result and len(search_result.groups()):
01099                     result = self.TEST_RESULT_MAPPING [search_result.groups(0)[0]]
01100                     break
01101             return result
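        # For example, output containing a line with "{success}" yields
        # TEST_RESULT_OK; if no recognised token is found, the default
        # TEST_RESULT_TIMEOUT is returned.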
01102 
01103         def get_auto_property_value(property_name, line):
01104             """ Scans auto detection line from MUT and returns scanned parameter 'property_name'
01105                 Returns string
01106             """
01107             result = None
01108             if re.search(r"HOST: Property '%s'" % property_name, line) is not None:
01109                 property_match = re.search(r"HOST: Property '%s' = '([\w\d _]+)'" % property_name, line)
01110                 if property_match is not None and len(property_match.groups()) == 1:
01111                     result = property_match.groups()[0]
01112             return result
01113 
01114         cmd = ["python",
01115                '%s.py'% name,
01116                '-d', disk,
01117                '-f', '"%s"'% image_path,
01118                '-p', port,
01119                '-t', str(duration),
01120                '-C', str(program_cycle_s)]
01121 
01122         if get_module_avail('mbed_lstools') and self.opts_auto_detect:
01123             cmd += ['--auto']
01124 
01125         # Add extra parameters to host_test
01126         if copy_method is not None:
01127             cmd += ["-c", copy_method]
01128         if micro is not None:
01129             cmd += ["-m", micro]
01130         if reset is not None:
01131             cmd += ["-r", reset]
01132         if reset_tout is not None:
01133             cmd += ["-R", str(reset_tout)]
01134 
01135         if verbose:
01136             print(Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET)
01137             print("Test::Output::Start")
01138 
01139         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
01140         obs = ProcessObserver(proc)
01141         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
01142         line = ''
01143         output = []
01144         start_time = time()
01145         while (time() - start_time) < (2 * duration):
01146             c = get_char_from_queue(obs)
01147             if c:
01148                 if verbose:
01149                     sys.stdout.write(c)
01150                 c = filter_queue_char(c)
01151                 output.append(c)
01152                 # Give the mbed under test a way to communicate the end of the test
01153                 if c in ['\n', '\r']:
01154 
01155                     # Checking for auto-detection information from the test about MUT reset moment
01156                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
01157                         # We will update this marker only once to prevent multiple time resets
01158                         update_once_flag['reset_target'] = True
01159                         start_time = time()
01160 
01161                     # Checking for auto-detection information from the test about timeout
01162                     auto_timeout_val = get_auto_property_value('timeout', line)
01163                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
01164                         # We will update this marker only once to prevent multiple time resets
01165                         update_once_flag['timeout'] = True
01166                         duration = int(auto_timeout_val)
01167 
01168                     # Detect mbed assert:
01169                     if 'mbed assertation failed: ' in line:
01170                         output.append('{{mbed_assert}}')
01171                         break
01172 
01173                     # Check for test end
01174                     if '{end}' in line:
01175                         break
01176                     line = ''
01177                 else:
01178                     line += c
01179         end_time = time()
01180         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
01181 
01182         c = get_char_from_queue(obs)
01183 
01184         if c:
01185             if verbose:
01186                 sys.stdout.write(c)
01187             c = filter_queue_char(c)
01188             output.append(c)
01189 
01190         if verbose:
01191             print("Test::Output::Finish")
01192         # Stop test process
01193         obs.stop()
01194 
01195         result = get_test_result(output)
01196         return (result, "".join(output), testcase_duration, duration)
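    # Illustrative only (hypothetical name, paths and port): for name='echo', disk='E:',
    # port='COM3' and duration=20 the spawned command resembles
    #   python echo.py -d E: -f "./build/echo.bin" -p COM3 -t 20 -C 4
    # and the loop above keeps reading serial output until '{end}' is seen or
    # 2 * duration seconds elapse.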
01197 
01198     def is_peripherals_available (self, target_mcu_name, peripherals=None):
01199         """ Checks if specified target should run specific peripheral test case defined in MUTs file
01200         """
01201         if peripherals is not None:
01202             peripherals = set(peripherals)
01203         for id, mut in self.muts.items():
01204             # Target MCU name check
01205             if mut["mcu"] != target_mcu_name:
01206                 continue
01207             # Peripherals check
01208             if peripherals is not None:
01209                 if 'peripherals' not in mut:
01210                     continue
01211                 if not peripherals.issubset(set(mut['peripherals'])):
01212                     continue
01213             return True
01214         return False
01215 
01216     def shape_test_request (self, mcu, image_path, test_id, duration=10):
01217         """ Function prepares JSON structure describing test specification
01218         """
01219         test_spec = {
01220             "mcu": mcu,
01221             "image": image_path,
01222             "duration": duration,
01223             "test_id": test_id,
01224         }
01225         return json.dumps(test_spec)
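    # Illustrative example (hypothetical values): shape_test_request('K64F', './build/test.bin', 'MBED_A1', 20)
    # returns a JSON string roughly like:
    #   {"mcu": "K64F", "image": "./build/test.bin", "duration": 20, "test_id": "MBED_A1"}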
01226 
01227 
01228 def get_unique_value_from_summary (test_summary, index):
01229     """ Gets list of unique target names
01230     """
01231     result = []
01232     for test in test_summary:
01233         target_name = test[index]
01234         if target_name not in result:
01235             result.append(target_name)
01236     return sorted(result)
01237 
01238 
01239 def get_unique_value_from_summary_ext (test_summary, index_key, index_val):
01240     """ Gets list of unique target names and return dictionary
01241     """
01242     result = {}
01243     for test in test_summary:
01244         key = test[index_key]
01245         val = test[index_val]
01246         if key not in result:
01247             result[key] = val
01248     return result
01249 
01250 
01251 def show_json_file_format_error (json_spec_filename, line, column):
01252     """ Prints JSON broken content
01253     """
01254     with open(json_spec_filename) as data_file:
01255         line_no = 1
01256         for json_line in data_file:
01257             if line_no + 5 >= line: # Print last few lines before error
01258                 print('Line %d:\t'%line_no + json_line)
01259             if line_no == line:
01260                 print('%s\t%s^' % (' ' * len('Line %d:' % line_no),
01261                                    '-' * (column - 1)))
01262                 break
01263             line_no += 1
01264 
01265 
01266 def json_format_error_defect_pos (json_error_msg):
01267     """ Gets first error line and column in JSON file format.
01268         Parsed from exception thrown by json.loads() string
01269     """
01270     result = None
01271     line, column = 0, 0
01272     # Line value search
01273     line_search = re.search('line [0-9]+', json_error_msg)
01274     if line_search is not None:
01275         ls = line_search.group().split(' ')
01276         if len(ls) == 2:
01277             line = int(ls[1])
01278             # Column position search
01279             column_search = re.search('column [0-9]+', json_error_msg)
01280             if column_search is not None:
01281                 cs = column_search.group().split(' ')
01282                 if len(cs) == 2:
01283                     column = int(cs[1])
01284                     result = [line, column]
01285     return result
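# Illustrative example (hypothetical message): for a json.loads() error string such as
#   "Expecting ',' delimiter: line 4 column 12 (char 45)"
# this returns [4, 12]; a message without a 'line <n>' fragment yields None.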
01286 
01287 
01288 def get_json_data_from_file (json_spec_filename, verbose=False):
01289     """ Loads from file JSON formatted string to data structure
01290     """
01291     result = None
01292     try:
01293         with open(json_spec_filename) as data_file:
01294             try:
01295                 result = json.load(data_file)
01296             except ValueError as json_error_msg:
01297                 result = None
01298                 print('JSON file %s parsing failed. Reason: %s' %
01299                       (json_spec_filename, json_error_msg))
01300                 # We can print where error occurred inside JSON file if we can parse exception msg
01301                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
01302                 if json_format_defect_pos is not None:
01303                     line = json_format_defect_pos[0]
01304                     column = json_format_defect_pos[1]
01305                     print()
01306                     show_json_file_format_error(json_spec_filename, line, column)
01307 
01308     except IOError as fileopen_error_msg:
01309         print('JSON file %s not opened. Reason: %s\n'%
01310               (json_spec_filename, fileopen_error_msg))
01311     if verbose and result:
01312         pp = pprint.PrettyPrinter(indent=4)
01313         pp.pprint(result)
01314     return result
01315 
01316 
01317 def print_muts_configuration_from_json (json_data, join_delim=", ", platform_filter=None):
01318     """ Prints MUTs configuration passed to test script for verboseness
01319     """
01320     muts_info_cols = []
01321     # We need to check all unique properties for each defined MUT
01322     for k in json_data:
01323         mut_info = json_data[k]
01324         for mut_property in mut_info:
01325             if mut_property not in muts_info_cols:
01326                 muts_info_cols.append(mut_property)
01327 
01328     # Prepare pretty table object to display all MUTs
01329     pt_cols = ["index"] + muts_info_cols
01330     pt = PrettyTable(pt_cols)
01331     for col in pt_cols:
01332         pt.align[col] = "l"
01333 
01334     # Add rows to pretty print object
01335     for k in json_data:
01336         row = [k]
01337         mut_info = json_data[k]
01338 
01339         add_row = True
01340         if platform_filter and 'mcu' in mut_info:
01341             add_row = re.search(platform_filter, mut_info['mcu']) is not None
01342         if add_row:
01343             for col in muts_info_cols:
01344                 cell_val = mut_info[col] if col in mut_info else None
01345                 if isinstance(cell_val, list):
01346                     cell_val = join_delim.join(cell_val)
01347                 row.append(cell_val)
01348             pt.add_row(row)
01349     return pt.get_string()
01350 
01351 
01352 def print_test_configuration_from_json (json_data, join_delim=", "):
01353     """ Prints test specification configuration passed to test script for verboseness
01354     """
01355     toolchains_info_cols = []
01356     # We need to check all toolchains for each device
01357     for k in json_data:
01358         # k should be 'targets'
01359         targets = json_data[k]
01360         for target in targets:
01361             toolchains = targets[target]
01362             for toolchain in toolchains:
01363                 if toolchain not in toolchains_info_cols:
01364                     toolchains_info_cols.append(toolchain)
01365 
01366     # Prepare pretty table object to display test specification
01367     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
01368     pt = PrettyTable(pt_cols)
01369     for col in pt_cols:
01370         pt.align[col] = "l"
01371 
01372     # { target : [conflicted toolchains] }
01373     toolchain_conflicts = {}
01374     toolchain_path_conflicts = []
01375     for k in json_data:
01376         # k should be 'targets'
01377         targets = json_data[k]
01378         for target in targets:
01379             target_supported_toolchains = get_target_supported_toolchains(target)
01380             if not target_supported_toolchains:
01381                 target_supported_toolchains = []
01382             target_name = target if target in TARGET_MAP else "%s*"% target
01383             row = [target_name]
01384             toolchains = targets[target]
01385 
01386             for toolchain in sorted(toolchains_info_cols):
01387                 # Check for conflicts: target vs toolchain
01388                 conflict = False
01389                 conflict_path = False
01390                 if toolchain in toolchains:
01391                     if toolchain not in target_supported_toolchains:
01392                         conflict = True
01393                         if target not in toolchain_conflicts:
01394                             toolchain_conflicts[target] = []
01395                         toolchain_conflicts[target].append(toolchain)
01396                 # Add marker inside table about target usage / conflict
01397                 cell_val = 'Yes' if toolchain in toolchains else '-'
01398                 if conflict:
01399                     cell_val += '*'
01400                 # Check for conflicts: toolchain vs toolchain path
01401                 if toolchain in TOOLCHAIN_PATHS:
01402                     toolchain_path = TOOLCHAIN_PATHS[toolchain]
01403                     if not os.path.isdir(toolchain_path):
01404                         conflict_path = True
01405                         if toolchain not in toolchain_path_conflicts:
01406                             toolchain_path_conflicts.append(toolchain)
01407                 if conflict_path:
01408                     cell_val += '#'
01409                 row.append(cell_val)
01410             pt.add_row(row)
01411 
01412     # generate result string
01413     result = pt.get_string()    # Test specification table
01414     if toolchain_conflicts or toolchain_path_conflicts:
01415         result += "\n"
01416         result += "Toolchain conflicts:\n"
01417         for target in toolchain_conflicts:
01418             if target not in TARGET_MAP:
01419                 result += "\t* Target %s unknown\n" % target
01420             conflict_target_list = join_delim.join(toolchain_conflicts[target])
01421             suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
01422             result += "\t* Target %s does not support %s toolchain%s\n" % (target, conflict_target_list, suffix)
01423 
01424         for toolchain in toolchain_path_conflicts:
01425             # Let's check the toolchain configuration
01426             if toolchain in TOOLCHAIN_PATHS:
01427                 toolchain_path = TOOLCHAIN_PATHS[toolchain]
01428                 if not os.path.isdir(toolchain_path):
01429                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
01430     return result
01431 
01432 
01433 def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',', platform_filter=None):
01434     """ Generates a summary table of all test cases and additional test case
01435         information using pretty-print functionality. Allows the test suite user to
01436         see the available test cases
01437     """
01438     # get all unique test ID prefixes
01439     unique_test_id = []
01440     for test in TESTS:
01441         split = test['id'].split('_')[:-1]
01442         test_id_prefix = '_'.join(split)
01443         if test_id_prefix not in unique_test_id:
01444             unique_test_id.append(test_id_prefix)
01445     unique_test_id.sort()
01446     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
01447     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
01448 
01449     test_properties = ['id',
01450                        'automated',
01451                        'description',
01452                        'peripherals',
01453                        'host_test',
01454                        'duration'] if cols is None else cols
01455 
01456     # All tests status table print
01457     pt = PrettyTable(test_properties)
01458     for col in test_properties:
01459         pt.align[col] = "l"
01460     pt.align['duration'] = "r"
01461 
01462     counter_all = 0
01463     counter_automated = 0
01464     pt.padding_width = 1 # One space between column edges and contents (default)
01465 
01466     for test_id in sorted(TEST_MAP.keys()):
01467         if platform_filter is not None:
01468             # Filter out platforms using regex
01469             if re.search(platform_filter, test_id) is None:
01470                 continue
01471         row = []
01472         test = TEST_MAP[test_id]
01473         split = test_id.split('_')[:-1]
01474         test_id_prefix = '_'.join(split)
01475 
01476         for col in test_properties:
01477             col_value = test[col]
01478             if isinstance(test[col], list):
01479                 col_value = join_delim.join(test[col])
01480             elif test[col] is None:
01481                 col_value = "-"
01482 
01483             row.append(col_value)
01484         if test['automated'] == True:
01485             counter_dict_test_id_types[test_id_prefix] += 1
01486             counter_automated += 1
01487         pt.add_row(row)
01488         # Update counters
01489         counter_all += 1
01490         counter_dict_test_id_types_all[test_id_prefix] += 1
01491     result = pt.get_string()
01492     result += "\n\n"
01493 
01494     if result_summary and not platform_filter:
01495         # Automation result summary
01496         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
01497         pt = PrettyTable(test_id_cols)
01498         pt.align['automated'] = "r"
01499         pt.align['all'] = "r"
01500         pt.align['percent [%]'] = "r"
01501 
01502         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
01503         str_progress = progress_bar(percent_progress, 75)
01504         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
01505         result += "Automation coverage:\n"
01506         result += pt.get_string()
01507         result += "\n\n"
01508 
01509         # Test automation coverage table print
01510         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
01511         pt = PrettyTable(test_id_cols)
01512         pt.align['id'] = "l"
01513         pt.align['automated'] = "r"
01514         pt.align['all'] = "r"
01515         pt.align['percent [%]'] = "r"
01516         for unique_id in unique_test_id:
01517             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
01518             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
01519             str_progress = progress_bar(percent_progress, 75)
01520             row = [unique_id,
01521                    counter_dict_test_id_types[unique_id],
01522                    counter_dict_test_id_types_all[unique_id],
01523                    percent_progress,
01524                    "[" + str_progress + "]"]
01525             pt.add_row(row)
01526         result += "Test automation coverage:\n"
01527         result += pt.get_string()
01528         result += "\n\n"
01529     return result
01530 
01531 
01532 def progress_bar(percent_progress, saturation=0):
01533     """ This function creates a progress bar with an optional simple saturation mark
01534     """
01535     step = int(percent_progress / 2)    # Scale percentage to bar width (scale: 1 - 50)
01536     str_progress = '#' * step + '.' * int(50 - step)
01537     c = '!' if str_progress[38] == '.' else '|'
01538     if saturation > 0:
01539         saturation = int(saturation / 2)
01540         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
01541     return str_progress
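# Illustrative example (hypothetical values): progress_bar(50, 75) renders 25 '#' marks
# and 25 '.' marks, with the saturation character spliced in at index 37 (75 / 2), i.e.
# roughly '#' * 25 + '.' * 12 + '!' + '.' * 13.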
01542 
01543 
01544 def singletest_in_cli_mode (single_test):
01545     """ Runs SingleTestRunner object in CLI (Command line interface) mode
01546 
01547         @return returns success code (0 == success) for building and running tests
01548     """
01549     start = time()
01550     # Execute tests depending on options and filter applied
01551     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
01552     elapsed_time = time() - start
01553 
01554     # Human readable summary
01555     if not single_test.opts_suppress_summary:
01556         # prints well-formed summary with results (SQL table like)
01557         print(single_test.generate_test_summary(test_summary, shuffle_seed))
01558     if single_test.opts_test_x_toolchain_summary:
01559         # prints well-formed summary with results (SQL table like)
01560         # table shows text x toolchain test result matrix
01561         print(single_test.generate_test_summary_by_target(test_summary,
01562                                                           shuffle_seed))
01563 
01564     print("Completed in %.2f sec" % elapsed_time)
01565     print()
01566     # Write summary of the builds
01567 
01568     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
01569     status = print_report_exporter.report(build_report)
01570 
01571     # Store extra reports in files
01572     if single_test.opts_report_html_file_name:
01573         # Export results in form of HTML report to separate file
01574         report_exporter = ReportExporter(ResultExporterType.HTML)
01575         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
01576     if single_test.opts_report_junit_file_name:
01577         # Export results in form of JUnit XML report to separate file
01578         report_exporter = ReportExporter(ResultExporterType.JUNIT)
01579         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
01580     if single_test.opts_report_text_file_name:
01581         # Export results in form of a text file
01582         report_exporter = ReportExporter(ResultExporterType.TEXT)
01583         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
01584     if single_test.opts_report_build_file_name:
01585         # Export build results as JUnit XML report to separate file
01586         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
01587         report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
01588 
01589     # Returns True if no build failures of the test projects or their dependencies
01590     return status
01591 
01592 class TestLogger():
01593     """ Super-class for logging and printing ongoing events for a test suite pass
01594     """
01595     def __init__(self, store_log=True):
01596         """ We can control whether the logger actually stores the log in memory
01597             or just handles all log entries immediately
01598         """
01599         self.log = []
01600         self.log_to_file = False
01601         self.log_file_name = None
01602         self.store_log = store_log
01603 
01604         self.LogType = construct_enum(INFO='Info',
01605                                       WARN='Warning',
01606                                       NOTIF='Notification',
01607                                       ERROR='Error',
01608                                       EXCEPT='Exception')
01609 
01610         self.LogToFileAttr = construct_enum(CREATE=1,    # Create or overwrite existing log file
01611                                             APPEND=2)    # Append to existing log file
01612 
01613     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01614         """ Log one line of text
01615         """
01616         log_timestamp = time()
01617         log_entry = {'log_type' : LogType,
01618                      'log_timestamp' : log_timestamp,
01619                      'log_line' : log_line,
01620                      '_future' : None
01621         }
01622         # Store log in memory
01623         if self.store_log:
01624             self.log.append(log_entry)
01625         return log_entry
01626 
01627 
01628 class CLITestLogger(TestLogger):
01629     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
01630     """
01631     def __init__(self, store_log=True, file_name=None):
01632         TestLogger.__init__(self)
01633         self.log_file_name = file_name
01634         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
01635         self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
01636 
01637     def log_print(self, log_entry, timestamp=True):
01638         """ Returns a formatted log entry string suitable for printing on screen
01639         """
01640         ts = log_entry['log_timestamp']
01641         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] " % self.TIMESTAMP_FORMAT) if timestamp else ''
01642         log_line_str = "%(log_type)s: %(log_line)s" % log_entry
01643         return timestamp_str + log_line_str
01644 
01645     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01646         """ Logs line, if log file output was specified log line will be appended
01647             at the end of log file
01648         """
01649         log_entry = TestLogger.log_line(self, LogType, log_line)
01650         log_line_str = self.log_print(log_entry, timestamp)
01651         if self.log_file_name is not None:
01652             try:
01653                 with open(self.log_file_name, 'a') as f:
01654                     f.write(log_line_str + line_delim)
01655             except IOError:
01656                 pass
01657         return log_line_str
01658 
01659 
01660 def factory_db_logger (db_url):
01661     """ Factory database driver depending on database type supplied in database connection string db_url
01662     """
01663     if db_url is not None:
01664         from tools.test_mysql import MySQLDBAccess
01665         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
01666         if connection_info is not None:
01667             (db_type, username, password, host, db_name) = connection_info
01668             if db_type == 'mysql':
01669                 return MySQLDBAccess()
01670     return None
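# Illustrative example (hypothetical credentials, matching the --db help text below): a URL such as
#   'mysql://username:password@127.0.0.1/db_name'
# parses to db_type 'mysql' and yields a MySQLDBAccess instance; any other db_type
# (or an unparsable URL) returns None.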
01671 
01672 
01673 def detect_database_verbose (db_url):
01674     """ uses verbose mode (prints) database detection sequence to check it database connection string is valid
01675     """
01676     result = BaseDBAccess().parse_db_connection_string(db_url)
01677     if result is not None:
01678         # Parsing passed
01679         (db_type, username, password, host, db_name) = result
01680         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
01681         # Let's try to connect
01682         db_ = factory_db_logger(db_url)
01683         if db_ is not None:
01684             print("Connecting to database '%s'..." % db_url)
01685             db_.connect(host, username, password, db_name)
01686             if db_.is_connected():
01687                 print("ok")
01688                 print("Detecting database...")
01689                 print(db_.detect_database(verbose=True))
01690                 print("Disconnecting...")
01691                 db_.disconnect()
01692                 print("done")
01693         else:
01694             print("Database type '%s' unknown" % db_type)
01695     else:
01696         print("Parse error: '%s' - DB Url error" % db_url)
01697 
01698 
01699 def get_module_avail (module_name):
01700     """ This function returns True if module_name is already imported module
01701     """
01702     return module_name in sys.modules.keys()
01703 
01704 def get_autodetected_MUTS_list(platform_name_filter=None):
01705     oldError = None
01706     if os.name == 'nt':
01707         # Disable Windows error box temporarily
01708         oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
01709 
01710     mbeds = mbed_lstools.create()
01711     detect_muts_list = mbeds.list_mbeds()
01712 
01713     if os.name == 'nt':
01714         ctypes.windll.kernel32.SetErrorMode(oldError)
01715 
01716     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
01717 
01718 def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
01719     """ Function detects all mbed-enabled devices connected to the host and generates an artificial MUTs file.
01720         If the function fails to auto-detect devices it will return an empty dictionary.
01721 
01722         if get_module_avail('mbed_lstools'):
01723             mbeds = mbed_lstools.create()
01724             mbeds_list = mbeds.list_mbeds()
01725 
01726         @param mbeds_list list of mbeds captured from mbed_lstools
01727         @param platform_name_filter Optional list of platform names used to filter returned MUTs by 'platform_name'
01728     """
01729     result = {}   # Should be in muts_all.json format
01730     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
01731     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
01732     index = 1
01733     for mut in mbeds_list:
01734         # Filter the MUTS if a filter is specified
01735 
01736         if platform_name_filter and not mut['platform_name'] in platform_name_filter:
01737             continue
01738 
01739         # For mcu_unique - we assign the 'platform_name_unique' value from mbedls output (if it exists);
01740         # if not, we create our own unique value (last few chars of the platform's target_id).
01741         m = {'mcu': mut['platform_name'],
01742              'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
01743              'port': mut['serial_port'],
01744              'disk': mut['mount_point'],
01745              'peripherals': []     # No peripheral detection
01746              }
01747         if index not in result:
01748             result[index] = {}
01749         result[index] = m
01750         index += 1
01751     return result
01752 
01753 
01754 def get_autodetected_TEST_SPEC (mbeds_list,
01755                                use_default_toolchain=True,
01756                                use_supported_toolchains=False,
01757                                toolchain_filter=None,
01758                                platform_name_filter=None):
01759     """ Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
01760         If function fails to auto-detect devices it will return empty 'targets' test_spec description.
01761 
01762         use_default_toolchain - if True add default toolchain to test_spec
01763         use_supported_toolchains - if True add all supported toolchains to test_spec
01764         toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
01765     """
01766     result = {'targets': {} }
01767 
01768     for mut in mbeds_list:
01769         mcu = mut['mcu']
01770         if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
01771             if mcu in TARGET_MAP:
01772                 default_toolchain = TARGET_MAP[mcu].default_toolchain
01773                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
01774 
01775                 # Decide which toolchains should be added to test specification toolchain pool for each target
01776                 toolchains = []
01777                 if use_default_toolchain:
01778                     toolchains.append(default_toolchain)
01779                 if use_supported_toolchains:
01780                     toolchains += supported_toolchains
01781                 if toolchain_filter is not None:
01782                     all_toolchains = supported_toolchains + [default_toolchain]
01783                     for toolchain in toolchain_filter:
01784                         if toolchain in all_toolchains:
01785                             toolchains.append(toolchain)
01786 
01787                 result['targets'][mcu] = list(set(toolchains))
01788     return result
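# Illustrative example (hypothetical board/toolchains): for a single detected K64F with
# use_default_toolchain=True and use_supported_toolchains=False the result is roughly
#   {'targets': {'K64F': ['ARM']}}
# i.e. only that target's default toolchain is added to the test specification.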
01789 
01790 
01791 def get_default_test_options_parser ():
01792     """ Get common test script options used by CLI, web services etc.
01793     """
01794     parser = argparse.ArgumentParser()
01795     parser.add_argument('-i', '--tests',
01796                         dest='test_spec_filename',
01797                         metavar="FILE",
01798                         type=argparse_filestring_type,
01799                         help='Points to file with test specification')
01800 
01801     parser.add_argument('-M', '--MUTS',
01802                         dest='muts_spec_filename',
01803                         metavar="FILE",
01804                         type=argparse_filestring_type,
01805                         help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
01806 
01807     parser.add_argument("-j", "--jobs",
01808                         dest='jobs',
01809                         metavar="NUMBER",
01810                         type=int,
01811                         help="Define number of compilation jobs. Default value is 1")
01812 
01813     if get_module_avail('mbed_lstools'):
01814         # Additional features available when mbed_lstools is installed on host and imported
01815         # mbed_lstools allow users to detect connected to host mbed-enabled devices
01816         parser.add_argument('--auto',
01817                             dest='auto_detect',
01818                             action="store_true",
01819                             help='Use mbed-ls module to detect all connected mbed devices')
01820 
01821         toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
01822         parser.add_argument('--tc',
01823                             dest='toolchains_filter',
01824                             type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
01825                             help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
01826 
01827         test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
01828         parser.add_argument('--oper',
01829                             dest='operability_checks',
01830                             type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
01831                             help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
01832 
01833     parser.add_argument('--clean',
01834                         dest='clean',
01835                         action="store_true",
01836                         help='Clean the build directory')
01837 
01838     parser.add_argument('-P', '--only-peripherals',
01839                         dest='test_only_peripheral',
01840                         default=False,
01841                         action="store_true",
01842                         help='Test only peripherals declared for the MUT and skip common tests')
01843 
01844     parser.add_argument("--profile", dest="profile", action="append",
01845                         type=argparse_filestring_type,
01846                         default=[])
01847 
01848     parser.add_argument('-C', '--only-commons',
01849                         dest='test_only_common',
01850                         default=False,
01851                         action="store_true",
01852                         help='Test only board internals. Skip peripheral tests and perform common tests')
01853 
01854     parser.add_argument('-n', '--test-by-names',
01855                         dest='test_by_names',
01856                         type=argparse_many(str),
01857                         help='Runs only tests enumerated in this switch. Use comma to separate test case names')
01858 
01859     parser.add_argument('-p', '--peripheral-by-names',
01860                         dest='peripheral_by_names',
01861                         type=argparse_many(str),
01862                         help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
01863 
01864     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
01865     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
01866 
01867     parser.add_argument('-c', '--copy-method',
01868                         dest='copy_method',
01869                         type=argparse_uppercase_type(copy_methods, "flash method"),
01870                         help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
01871 
01872     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
01873     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
01874 
01875     parser.add_argument('-r', '--reset-type',
01876                         dest='mut_reset_type',
01877                         default=None,
01878                         type=argparse_uppercase_type(reset_methods, "reset method"),
01879                         help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
01880 
01881     parser.add_argument('-g', '--goanna-for-tests',
01882                         dest='goanna_for_tests',
01883                         action="store_true",
01884                         help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')
01885 
01886     parser.add_argument('-G', '--goanna-for-sdk',
01887                         dest='goanna_for_mbed_sdk',
01888                         action="store_true",
01889                         help='Run Goanna static analysis tool for mbed SDK (Project will be rebuilt)')
01890 
01891     parser.add_argument('-s', '--suppress-summary',
01892                         dest='suppress_summary',
01893                         default=False,
01894                         action="store_true",
01895                         help='Suppresses display of well-formatted table with test results')
01896 
01897     parser.add_argument('-t', '--test-summary',
01898                         dest='test_x_toolchain_summary',
01899                         default=False,
01900                         action="store_true",
01901                         help='Displays well-formatted table with test x toolchain test result per target')
01902 
01903     parser.add_argument('-A', '--test-automation-report',
01904                         dest='test_automation_report',
01905                         default=False,
01906                         action="store_true",
01907                         help='Prints information about all tests and exits')
01908 
01909     parser.add_argument('-R', '--test-case-report',
01910                         dest='test_case_report',
01911                         default=False,
01912                         action="store_true",
01913                         help='Prints information about all test cases and exits')
01914 
01915     parser.add_argument("-S", "--supported-toolchains",
01916                         action="store_true",
01917                         dest="supported_toolchains",
01918                         default=False,
01919                         help="Displays supported matrix of MCUs and toolchains")
01920 
01921     parser.add_argument("-O", "--only-build",
01922                         action="store_true",
01923                         dest="only_build_tests",
01924                         default=False,
01925                         help="Only build tests, skips actual test procedures (flashing etc.)")
01926 
01927     parser.add_argument('--parallel',
01928                         dest='parallel_test_exec',
01929                         default=False,
01930                         action="store_true",
01931                         help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')
01932 
01933     parser.add_argument('--config',
01934                         dest='verbose_test_configuration_only',
01935                         default=False,
01936                         action="store_true",
01937                         help='Displays full test specification and MUTs configuration and exits')
01938 
01939     parser.add_argument('--loops',
01940                         dest='test_loops_list',
01941                         type=argparse_many(str),
01942                         help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
01943 
01944     parser.add_argument('--global-loops',
01945                         dest='test_global_loops_value',
01946                         type=int,
01947                         help='Set global number of test loops per test. Default value is 1')
01948 
01949     parser.add_argument('--consolidate-waterfall',
01950                         dest='consolidate_waterfall_test',
01951                         default=False,
01952                         action="store_true",
01953                         help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
01954 
01955     parser.add_argument('-W', '--waterfall',
01956                         dest='waterfall_test',
01957                         default=False,
01958                         action="store_true",
01959                         help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
01960 
01961     parser.add_argument('-N', '--firmware-name',
01962                         dest='firmware_global_name',
01963                         help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')
01964 
01965     parser.add_argument('-u', '--shuffle',
01966                         dest='shuffle_test_order',
01967                         default=False,
01968                         action="store_true",
01969                         help='Shuffles test execution order')
01970 
01971     parser.add_argument('--shuffle-seed',
01972                         dest='shuffle_test_seed',
01973                         default=None,
01974                         help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
01975 
01976     parser.add_argument('-f', '--filter',
01977                         dest='general_filter_regex',
01978                         type=argparse_many(str),
01979                         default=None,
01980                         help='For some commands you can use a filter to narrow down results')
01981 
01982     parser.add_argument('--inc-timeout',
01983                         dest='extend_test_timeout',
01984                         metavar="NUMBER",
01985                         type=int,
01986                         help='You can increase global timeout for each test by specifying additional test timeout in seconds')
01987 
01988     parser.add_argument('--db',
01989                         dest='db_url',
01990                         help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
01991 
01992     parser.add_argument('-l', '--log',
01993                         dest='log_file_name',
01994                         help='Log events to external file (note not all console entries may be visible in log file)')
01995 
01996     parser.add_argument('--report-html',
01997                         dest='report_html_file_name',
01998                         help='You can log test suite results in form of HTML report')
01999 
02000     parser.add_argument('--report-junit',
02001                         dest='report_junit_file_name',
02002                         help='You can log test suite results in form of JUnit compliant XML report')
02003 
02004     parser.add_argument("--report-build",
02005                         dest="report_build_file_name",
02006                         help="Output the build results to a junit xml file")
02007 
02008     parser.add_argument("--report-text",
02009                         dest="report_text_file_name",
02010                         help="Output the build results to a text file")
02011 
02012     parser.add_argument('--verbose-skipped',
02013                         dest='verbose_skipped_tests',
02014                         default=False,
02015                         action="store_true",
02016                         help='Prints some extra information about skipped tests')
02017 
02018     parser.add_argument('-V', '--verbose-test-result',
02019                         dest='verbose_test_result_only',
02020                         default=False,
02021                         action="store_true",
02022                         help='Prints test serial output')
02023 
02024     parser.add_argument('-v', '--verbose',
02025                         dest='verbose',
02026                         default=False,
02027                         action="store_true",
02028                         help='Verbose mode (prints some extra information)')
02029 
02030     parser.add_argument('--version',
02031                         dest='version',
02032                         default=False,
02033                         action="store_true",
02034                         help='Prints script version and exits')
02035 
02036     parser.add_argument('--stats-depth',
02037                         dest='stats_depth',
02038                         default=2,
02039                         type=int,
02040                         help="Depth level for static memory report")
02041     return parser
02042 
02043 def test_path_to_name (path, base):
02044     """Change all slashes in a path into hyphens
02045     This creates a unique cross-platform test name based on the path
02046     This can eventually be overridden by a to-be-determined meta-data mechanism"""
02047     name_parts = []
02048     head, tail = os.path.split(relpath(path,base))
02049     while (tail and tail != "."):
02050         name_parts.insert(0, tail)
02051         head, tail = os.path.split(head)
02052 
02053     return "-".join(name_parts).lower()
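# Illustrative example (hypothetical layout): with base='.' and path='./TESTS/network/tcp_echo',
# the relative path 'TESTS/network/tcp_echo' becomes the test name 'tests-network-tcp_echo'.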
02054 
02055 def get_test_config (config_name, target_name):
02056     """Finds the path to a test configuration file
02057     config_name: path to a custom configuration file OR an mbed OS interface ("ethernet", "wifi_odin", etc.)
02058     target_name: name of target used to determine if the mbed OS interface given is valid
02059     returns path to config, will return None if no valid config is found
02060     """
02061     # If they passed in a full path
02062     if exists(config_name):
02063         # This is a module config
02064         return config_name
02065     # Otherwise find the path to configuration file based on mbed OS interface
02066     return TestConfig.get_config_path(config_name, target_name)
02067 
02068 def find_tests (base_dir, target_name, toolchain_name, app_config=None):
02069     """ Finds all tests in a directory recursively
02070     base_dir: path to the directory to scan for tests (ex. 'path/to/project')
02071     target_name: name of the target to use for scanning (ex. 'K64F')
02072     toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
02074     app_config: location of a chosen mbed_app.json file
02075 
02076     returns a dictionary where keys are the test name, and the values are
02077     lists of paths needed to build the test.
02078     """
02079 
02080     # Temporary structure: tests referenced by (name, base, group, case) tuple
02081     tests = {}
02082     # List of common folders: (predicate function, path) tuple
02083     commons = []
02084 
02085     # Prepare the toolchain
02086     toolchain = prepare_toolchain([base_dir], None, target_name, toolchain_name,
02087                                   app_config=app_config)
02088 
02089     # Scan the directory for paths to probe for 'TESTS' folders
02090     base_resources = scan_resources([base_dir], toolchain)
02091 
02092     dirs = base_resources.inc_dirs
02093     for directory in dirs:
02094         subdirs = os.listdir(directory)
02095 
02096         # If the directory contains a subdirectory called 'TESTS', scan it for test cases
02097         if 'TESTS' in subdirs:
02098             walk_base_dir = join(directory, 'TESTS')
02099             test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir)
02100 
02101             # Loop through all subdirectories
02102             for d in test_resources.inc_dirs:
02103 
02104                 # If the test case folder is not called 'host_tests' or 'COMMON' and it is
02105                 # located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
02106                 # then add it to the tests
02107                 relative_path = relpath(d, walk_base_dir)
02108                 relative_path_parts = os.path.normpath(relative_path).split(os.sep)
02109                 if len(relative_path_parts) == 2:
02110                     test_group_directory_path, test_case_directory = os.path.split(d)
02111                     test_group_directory = os.path.basename(test_group_directory_path)
02112 
02113                     # Check to make sure the discovered folder is not in a host test directory or common directory
02114                     special_dirs = ['host_tests', 'COMMON']
02115                     if test_group_directory not in special_dirs and test_case_directory not in special_dirs:
02116                         test_name = test_path_to_name(d, base_dir)
02117                         tests[(test_name, walk_base_dir, test_group_directory, test_case_directory)] = [d]
02118 
02119                 # Also find any COMMON paths, we'll add these later once we find all the base tests
02120                 if 'COMMON' in relative_path_parts:
02121                     if relative_path_parts[0] != 'COMMON':
02122                         def predicate(base_pred, group_pred, name_base_group_case):
02123                             (name, base, group, case) = name_base_group_case
02124                             return base == base_pred and group == group_pred
02125                         commons.append((functools.partial(predicate, walk_base_dir, relative_path_parts[0]), d))
02126                     else:
02127                         def predicate(base_pred, name_base_group_case):
02128                             (name, base, group, case) = name_base_group_case
02129                             return base == base_pred
02130                         commons.append((functools.partial(predicate, walk_base_dir), d))
02131 
02132     # Apply common directories
02133     for pred, path in commons:
02134         for test_identity, test_paths in six.iteritems(tests):
02135             if pred(test_identity):
02136                 test_paths.append(path)
02137 
02138     # Drop identity besides name
02139     return {name: paths for (name, _, _, _), paths in six.iteritems(tests)}
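# Illustrative example (hypothetical project layout): a tree containing
#   TESTS/network/tcp_echo/ and TESTS/network/COMMON/
# yields roughly {'tests-network-tcp_echo': ['.../TESTS/network/tcp_echo', '.../TESTS/network/COMMON']},
# i.e. each discovered test maps to its own folder plus any matching COMMON folders.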
02140 
02141 def print_tests (tests, format="list", sort=True):
02142     """Given a dictionary of tests (as returned from "find_tests"), print them
02143     in the specified format"""
02144     if format == "list":
02145         for test_name in sorted(tests.keys()):
02146             test_path = tests[test_name][0]
02147             print("Test Case:")
02148             print("    Name: %s" % test_name)
02149             print("    Path: %s" % test_path)
02150     elif format == "json":
02151         print(json.dumps({test_name: test_paths[0] for test_name, test_paths
02152                           in tests.items()}, indent=2))
02153     else:
02154         print("Unknown format '%s'" % format)
02155         sys.exit(1)
02156 
02157 def norm_relative_path (path, start):
02158     """This function will create a normalized, relative path. It mimics the
02159     python os.path.relpath function, but also normalizes a Windows-style path
02160     that uses backslashes to a Unix-style path that uses forward slashes."""
02161     path = os.path.normpath(path)
02162     path = os.path.relpath(path, start)
02163     path = path.replace("\\", "/")
02164     return path
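# Illustrative example (hypothetical paths): on a Windows host,
# norm_relative_path('C:\\work\\proj\\BUILD\\tests', 'C:\\work\\proj') returns 'BUILD/tests';
# on POSIX hosts it behaves like os.path.relpath() since no backslashes are present.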
02165 
02166 
02167 def build_test_worker (*args, **kwargs):
02168     """This is a worker function for the parallel building of tests. The `args`
02169     and `kwargs` are passed directly to `build_project`. It returns a dictionary
02170     with the following structure:
02171 
02172     {
02173         'result': `True` if no exceptions were thrown, `False` otherwise
02174         'reason': Instance of exception that was thrown on failure
02175         'bin_file': Path to the created binary if `build_project` was
02176                     successful. Not present otherwise
02177         'kwargs': The keyword arguments that were passed to `build_project`.
02178                   This includes arguments that were modified (ex. report)
02179     }
02180     """
02181     bin_file = None
02182     ret = {
02183         'result': False,
02184         'args': args,
02185         'kwargs': kwargs
02186     }
02187 
02188     # Use parent TOOLCHAIN_PATHS variable
02189     for key, value in kwargs['toolchain_paths'].items():
02190         TOOLCHAIN_PATHS[key] = value
02191 
02192     del kwargs['toolchain_paths']
02193 
02194     try:
02195         bin_file = build_project(*args, **kwargs)
02196         ret['result'] = True
02197         ret['bin_file'] = bin_file
02198         ret['kwargs'] = kwargs
02199 
02200     except NotSupportedException as e:
02201         ret['reason'] = e
02202     except ToolException as e:
02203         ret['reason'] = e
02204     except KeyboardInterrupt as e:
02205         ret['reason'] = e
02206     except:
02207         # Print unhandled exceptions here
02208         import traceback
02209         traceback.print_exc(file=sys.stdout)
02210 
02211     return ret
02212 
02213 
02214 def build_tests (tests, base_source_paths, build_path, target, toolchain_name,
02215                 clean=False, notify=None, jobs=1, macros=None,
02216                 silent=False, report=None, properties=None,
02217                 continue_on_build_fail=False, app_config=None,
02218                 build_profile=None, stats_depth=None, ignore=None):
02219     """Given the data structure from 'find_tests' and the typical build parameters,
02220     build all the tests
02221 
02222     Returns a tuple of the build result (True or False) followed by the test
02223     build data structure"""
02224 
02225     execution_directory = "."
02226     base_path = norm_relative_path(build_path, execution_directory)
02227 
02228     target_name = target.name if isinstance(target, Target) else target
02229     cfg, _, _ = get_config(base_source_paths, target_name, toolchain_name, app_config=app_config)
02230 
02231     baud_rate = 9600
02232     if 'platform.stdio-baud-rate' in cfg:
02233         baud_rate = cfg['platform.stdio-baud-rate'].value
02234 
02235     test_build = {
02236         "platform": target_name,
02237         "toolchain": toolchain_name,
02238         "base_path": base_path,
02239         "baud_rate": baud_rate,
02240         "binary_type": "bootable",
02241         "tests": {}
02242     }
02243 
02244     result = True
02245 
02246     jobs_count = int(jobs if jobs else cpu_count())
02247     p = Pool(processes=jobs_count)
02248     results = []
02249     for test_name, test_paths in tests.items():
02250         if not isinstance(test_paths, list):
02251             test_paths = [test_paths]
02252 
02253         test_build_path = os.path.join(build_path, test_paths[0])
02254         src_paths = base_source_paths + test_paths
02255         bin_file = None
02256         test_case_folder_name = os.path.basename(test_paths[0])
02257 
02258         args = (src_paths, test_build_path, target, toolchain_name)
02259         kwargs = {
02260             'jobs': 1,
02261             'clean': clean,
02262             'macros': macros,
02263             'name': test_case_folder_name,
02264             'project_id': test_name,
02265             'report': report,
02266             'properties': properties,
02267             'app_config': app_config,
02268             'build_profile': build_profile,
02269             'toolchain_paths': TOOLCHAIN_PATHS,
02270             'stats_depth': stats_depth,
02271             'notify': MockNotifier()
02272         }
02273 
02274         results.append(p.apply_async(build_test_worker, args, kwargs))
02275 
02276     p.close()
02277     result = True
02278     itr = 0
02279     while len(results):
02280         itr += 1
02281         if itr > 360000:  # roughly 60 minutes at the 10 ms poll interval below
02282             p.terminate()
02283             p.join()
02284             raise ToolException("Compile did not finish in 60 minutes")
02285         else:
02286             sleep(0.01)
02287             pending = 0
02288             for r in results:
02289                 if r.ready():
02290                     try:
02291                         worker_result = r.get()
02292                         results.remove(r)
02293 
02294                         # Push all deferred notifications out to the actual notifier
02295                         new_notify = deepcopy(notify)
02296                         for message in worker_result['kwargs']['notify'].messages:
02297                             new_notify.notify(message)
02298 
02299                         # Take report from the kwargs and merge it into existing report
02300                         if report:
02301                             report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
02302                             report_entry[worker_result['kwargs']['project_id'].upper()][0][0]['output'] = new_notify.get_output()
02303                             for test_key in report_entry.keys():
02304                                 report[target_name][toolchain_name][test_key] = report_entry[test_key]
02305 
02306                         # Set the overall result to a failure if a build failure occurred
02307                         if ('reason' in worker_result and
02308                             worker_result['reason'] and
02309                             not isinstance(worker_result['reason'], NotSupportedException)):
02310                             result = False
02311                             break
02312 
02313 
02314                         # Adding binary path to test build result
02315                         if ('result' in worker_result and
02316                             worker_result['result'] and
02317                             'bin_file' in worker_result):
02318                             bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
02319 
02320                             test_build['tests'][worker_result['kwargs']['project_id']] = {
02321                                 "binaries": [
02322                                     {
02323                                         "path": bin_file
02324                                     }
02325                                 ]
02326                             }
02327 
02328                             test_key = worker_result['kwargs']['project_id'].upper()
02329                             print('Image: %s\n' % bin_file)
02330 
02331                     except:  # shut the pool down cleanly, then re-raise whatever went wrong in the worker
02332                         if p._taskqueue.queue:
02333                             p._taskqueue.queue.clear()
02334                             sleep(0.5)
02335                         p.terminate()
02336                         p.join()
02337                         raise
02338                 else:
02339                     pending += 1
02340                     if pending >= jobs_count:
02341                         break
02342 
02343             # Break as soon as possible if there is a failure and we are not
02344             # continuing on build failures
02345             if not result and not continue_on_build_fail:
02346                 if p._taskqueue.queue:
02347                     p._taskqueue.queue.clear()
02348                     sleep(0.5)
02349                 p.terminate()
02350                 break
02351 
02352     p.join()
02353 
02354     test_builds = {}
02355     test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
02356 
02357     return result, test_builds
02358 
02359 
02360 def test_spec_from_test_builds(test_builds):
02361     return {
02362         "builds": test_builds
02363     }
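

# Illustrative sketch (hypothetical helper, not part of the original API): the
# test spec returned above is usually written out as JSON so that an external
# test runner (e.g. Greentea via its --test-spec option) can locate the built
# binaries. The function name and file name below are assumptions for the
# example only.
def _write_test_spec_file(test_builds, path="test_spec.json"):
    """Serialize the test spec produced by test_spec_from_test_builds to disk."""
    with open(path, "w") as fd:
        json.dump(test_spec_from_test_builds(test_builds), fd, indent=2)
    return path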