takashi kadono / Mbed OS Nucleo_446

Dependencies:   ssd1331


test_api.py

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2014 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 
00017 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
00018 """
00019 from __future__ import print_function
00020 import six
00021 
00022 import os
00023 import re
00024 import sys
00025 import json
00026 import uuid
00027 import pprint
00028 import random
00029 import argparse
00030 import datetime
00031 import threading
00032 import ctypes
00033 import functools
00034 from colorama import Fore, Back, Style
00035 from prettytable import PrettyTable, HEADER
00036 from copy import copy, deepcopy
00037 
00038 from time import sleep, time
00039 try:
00040     from Queue import Queue, Empty
00041 except ImportError:
00042     from queue import Queue, Empty
00043 from os.path import join, exists, basename, relpath, isdir, isfile
00044 from threading import Thread, Lock
00045 from multiprocessing import Pool, cpu_count
00046 from subprocess import Popen, PIPE
00047 
00048 # Imports related to mbed build api
00049 from tools.tests import TESTS
00050 from tools.tests import TEST_MAP
00051 from tools.paths import BUILD_DIR
00052 from tools.paths import HOST_TESTS
00053 from tools.utils import ToolException
00054 from tools.utils import NotSupportedException
00055 from tools.utils import construct_enum
00056 from tools.memap import MemapParser
00057 from tools.targets import TARGET_MAP, Target
00058 from tools.config import Config
00059 import tools.test_configs as TestConfig
00060 from tools.test_db import BaseDBAccess
00061 from tools.build_api import build_project, build_mbed_libs, build_lib
00062 from tools.build_api import get_target_supported_toolchains
00063 from tools.build_api import write_build_report
00064 from tools.build_api import prep_report
00065 from tools.build_api import prep_properties
00066 from tools.build_api import create_result
00067 from tools.build_api import add_result_to_report
00068 from tools.build_api import prepare_toolchain
00069 from tools.build_api import get_config
00070 from tools.resources import Resources, MbedIgnoreSet, IGNORE_FILENAME
00071 from tools.libraries import LIBRARIES, LIBRARY_MAP
00072 from tools.options import extract_profile
00073 from tools.toolchains import TOOLCHAIN_PATHS
00074 from tools.toolchains import TOOLCHAINS
00075 from tools.test_exporters import ReportExporter, ResultExporterType
00076 from tools.utils import argparse_filestring_type
00077 from tools.utils import argparse_uppercase_type
00078 from tools.utils import argparse_lowercase_type
00079 from tools.utils import argparse_many
00080 from tools.notifier.mock import MockNotifier
00081 from tools.notifier.term import TerminalNotifier
00082 
00083 import tools.host_tests.host_tests_plugins as host_tests_plugins
00084 
00085 try:
00086     import mbed_lstools
00087     from tools.compliance.ioper_runner import get_available_oper_test_scopes
00088 except:
00089     pass
00090 
00091 
00092 class ProcessObserver(Thread):
00093     def __init__(self, proc):
00094         Thread.__init__(self)
00095         self.proc = proc
00096         self.queue = Queue()
00097         self.daemon = True
00098         self.active = True
00099         self.start()
00100 
00101     def run(self):
00102         while self.active:
00103             c = self.proc.stdout.read(1)
00104             self.queue.put(c)
00105 
00106     def stop(self):
00107         self.active = False
00108         try:
00109             self.proc.terminate()
00110         except Exception:
00111             pass
00112 
00113 
00114 class SingleTestExecutor(threading.Thread):
00115     """ Example: single test suite executed in a separate thread
00116     """
00117     def __init__(self, single_test):
00118         self.single_test = single_test
00119         threading.Thread.__init__(self)
00120 
00121     def run(self):
00122         start = time()
00123         # Execute tests depending on options and filter applied
00124         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = self.single_test.execute()
00125         elapsed_time = time() - start
00126 
00127         # Human readable summary
00128         if not self.single_test.opts_suppress_summary:
00129             # prints well-formed summary with results (SQL table like)
00130             print(self.single_test.generate_test_summary(test_summary,
00131                                                          shuffle_seed))
00132         if self.single_test.opts_test_x_toolchain_summary:
00133             # prints well-formed summary with results (SQL table like)
00134             # table shows test x toolchain result matrix
00135             print(self.single_test.generate_test_summary_by_target(
00136                 test_summary, shuffle_seed))
00137         print("Completed in %.2f sec" % elapsed_time)
00138 
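# --- Illustrative usage sketch (annotation, not part of the original source) ---
# The class above wraps a full test pass in a worker thread; it does not start
# itself. Assuming `single_test` is an already-configured SingleTestRunner
# (defined below), a minimal usage could look like this:
#
#     executor = SingleTestExecutor(single_test)
#     executor.start()
#     executor.join()   # wait until the summary has been printed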
00139 
00140 class SingleTestRunner(object):
00141     """ Object wrapper for single test run which may involve multiple MUTs
00142     """
00143     RE_DETECT_TESTCASE_RESULT = None
00144 
00145     # Return codes for test script
00146     TEST_RESULT_OK = "OK"
00147     TEST_RESULT_FAIL = "FAIL"
00148     TEST_RESULT_ERROR = "ERROR"
00149     TEST_RESULT_UNDEF = "UNDEF"
00150     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
00151     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
00152     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
00153     TEST_RESULT_TIMEOUT = "TIMEOUT"
00154     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
00155     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
00156     TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
00157     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
00158 
00159     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
00160     TEST_LOOPS_LIST = []    # We redefine no. of loops per test_id
00161     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
00162 
00163     muts = {} # MUTs descriptor (from external file)
00164     test_spec = {} # Test specification (from external file)
00165 
00166     # mbed test suite -> SingleTestRunner
00167     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
00168                            "failure" : TEST_RESULT_FAIL,
00169                            "error" : TEST_RESULT_ERROR,
00170                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
00171                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
00172                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
00173                            "timeout" : TEST_RESULT_TIMEOUT,
00174                            "no_image" : TEST_RESULT_NO_IMAGE,
00175                            "end" : TEST_RESULT_UNDEF,
00176                            "mbed_assert" : TEST_RESULT_MBED_ASSERT,
00177                            "build_failed" : TEST_RESULT_BUILD_FAILED,
00178                            "not_supproted" : TEST_RESULT_NOT_SUPPORTED
00179     }
00180 
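# --- Illustrative note (annotation, not part of the original source) ---
# The mapping above translates tokens emitted by the host test (wrapped in
# curly braces, e.g. "{success}") into the TEST_RESULT_* codes. __init__
# below compiles RE_DETECT_TESTCASE_RESULT from these keys, so a serial
# output line containing "{success}" is reported as TEST_RESULT_OK ("OK").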
00181     def __init__(self,
00182                  _global_loops_count=1,
00183                  _test_loops_list=None,
00184                  _muts={},
00185                  _clean=False,
00186                  _parser=None,
00187                  _opts=None,
00188                  _opts_db_url=None,
00189                  _opts_log_file_name=None,
00190                  _opts_report_html_file_name=None,
00191                  _opts_report_junit_file_name=None,
00192                  _opts_report_build_file_name=None,
00193                  _opts_report_text_file_name=None,
00194                  _opts_build_report={},
00195                  _opts_build_properties={},
00196                  _test_spec={},
00197                  _opts_goanna_for_mbed_sdk=None,
00198                  _opts_goanna_for_tests=None,
00199                  _opts_shuffle_test_order=False,
00200                  _opts_shuffle_test_seed=None,
00201                  _opts_test_by_names=None,
00202                  _opts_peripheral_by_names=None,
00203                  _opts_test_only_peripheral=False,
00204                  _opts_test_only_common=False,
00205                  _opts_verbose_skipped_tests=False,
00206                  _opts_verbose_test_result_only=False,
00207                  _opts_verbose=False,
00208                  _opts_firmware_global_name=None,
00209                  _opts_only_build_tests=False,
00210                  _opts_parallel_test_exec=False,
00211                  _opts_suppress_summary=False,
00212                  _opts_test_x_toolchain_summary=False,
00213                  _opts_copy_method=None,
00214                  _opts_mut_reset_type=None,
00215                  _opts_jobs=None,
00216                  _opts_waterfall_test=None,
00217                  _opts_consolidate_waterfall_test=None,
00218                  _opts_extend_test_timeout=None,
00219                  _opts_auto_detect=None,
00220                  _opts_include_non_automated=False):
00221         """ Let's try hard to init this object
00222         """
00223         from colorama import init
00224         init()
00225 
00226         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
00227         self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
00228         # Settings related to test loops counters
00229         try:
00230             _global_loops_count = int(_global_loops_count)
00231         except:
00232             _global_loops_count = 1
00233         if _global_loops_count < 1:
00234             _global_loops_count = 1
00235         self.GLOBAL_LOOPS_COUNT = _global_loops_count
00236         self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
00237         self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
00238 
00239         self.shuffle_random_seed = 0.0
00240         self.SHUFFLE_SEED_ROUND = 10
00241 
00242         # MUT list and test specification storage
00243         self.muts = _muts
00244         self.test_spec = _test_spec
00245 
00246         # Settings passed e.g. from command line
00247         self.opts_db_url = _opts_db_url
00248         self.opts_log_file_name = _opts_log_file_name
00249         self.opts_report_html_file_name = _opts_report_html_file_name
00250         self.opts_report_junit_file_name = _opts_report_junit_file_name
00251         self.opts_report_build_file_name = _opts_report_build_file_name
00252         self.opts_report_text_file_name = _opts_report_text_file_name
00253         self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
00254         self.opts_goanna_for_tests = _opts_goanna_for_tests
00255         self.opts_shuffle_test_order = _opts_shuffle_test_order
00256         self.opts_shuffle_test_seed = _opts_shuffle_test_seed
00257         self.opts_test_by_names = _opts_test_by_names
00258         self.opts_peripheral_by_names = _opts_peripheral_by_names
00259         self.opts_test_only_peripheral = _opts_test_only_peripheral
00260         self.opts_test_only_common = _opts_test_only_common
00261         self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
00262         self.opts_verbose_test_result_only = _opts_verbose_test_result_only
00263         self.opts_verbose = _opts_verbose
00264         self.opts_firmware_global_name = _opts_firmware_global_name
00265         self.opts_only_build_tests = _opts_only_build_tests
00266         self.opts_parallel_test_exec = _opts_parallel_test_exec
00267         self.opts_suppress_summary = _opts_suppress_summary
00268         self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
00269         self.opts_copy_method = _opts_copy_method
00270         self.opts_mut_reset_type = _opts_mut_reset_type
00271         self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
00272         self.opts_waterfall_test = _opts_waterfall_test
00273         self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
00274         self.opts_extend_test_timeout = _opts_extend_test_timeout
00275         self.opts_clean = _clean
00276         self.opts_parser = _parser
00277         self.opts = _opts
00278         self.opts_auto_detect = _opts_auto_detect
00279         self.opts_include_non_automated = _opts_include_non_automated
00280 
00281         self.build_report = _opts_build_report
00282         self.build_properties = _opts_build_properties
00283 
00284         # File / screen logger initialization
00285         self.logger = CLITestLogger(file_name=self.opts_log_file_name)  # Default test logger
00286 
00287         # Database related initializations
00288         self.db_logger = factory_db_logger(self.opts_db_url)
00289         self.db_logger_build_id = None  # Build ID (database index of build_id table)
00290         # Let's connect to database to set up credentials and confirm database is ready
00291         if self.db_logger:
00292             self.db_logger.connect_url(self.opts_db_url)  # Save db access info inside db_logger object
00293             if self.db_logger.is_connected():
00294                 # Get hostname and uname so we can use it as build description
00295                 # when creating new build_id in external database
00296                 (_hostname, _uname) = self.db_logger.get_hostname()
00297                 _host_location = os.path.dirname(os.path.abspath(__file__))
00298                 build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
00299                 self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
00300                 self.db_logger.disconnect()
00301 
00302     def dump_options(self):
00303         """ Returns a data structure with the common settings passed to SingleTestRunner.
00304             It can be used for example to fill _extra fields in database storing test suite single run data
00305             Example:
00306             data = self.dump_options()
00307             or
00308             data_str = json.dumps(self.dump_options())
00309         """
00310         result = {"db_url" : str(self.opts_db_url),
00311                   "log_file_name" : str(self.opts_log_file_name),
00312                   "shuffle_test_order" : str(self.opts_shuffle_test_order),
00313                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed),
00314                   "test_by_names" : str(self.opts_test_by_names),
00315                   "peripheral_by_names" : str(self.opts_peripheral_by_names),
00316                   "test_only_peripheral" : str(self.opts_test_only_peripheral),
00317                   "test_only_common" : str(self.opts_test_only_common),
00318                   "verbose" : str(self.opts_verbose),
00319                   "firmware_global_name" : str(self.opts_firmware_global_name),
00320                   "only_build_tests" : str(self.opts_only_build_tests),
00321                   "copy_method" : str(self.opts_copy_method),
00322                   "mut_reset_type" : str(self.opts_mut_reset_type),
00323                   "jobs" : str(self.opts_jobs),
00324                   "extend_test_timeout" : str(self.opts_extend_test_timeout),
00325                   "_dummy" : ''
00326         }
00327         return result
00328 
00329     def shuffle_random_func(self):
00330         return self.shuffle_random_seed
00331 
00332     def is_shuffle_seed_float(self):
00333         """ Returns True if the shuffle seed can be converted to float
00334         """
00335         result = True
00336         try:
00337             float(self.shuffle_random_seed)
00338         except ValueError:
00339             result = False
00340         return result
00341 
00342     # This will store target / toolchain specific properties
00343     test_suite_properties_ext = {}  # target : toolchain
00344     # Here we store test results
00345     test_summary = []
00346     # Here we store test results in extended data structure
00347     test_summary_ext = {}
00348     execute_thread_slice_lock = Lock()
00349 
00350     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
00351         for toolchain in toolchains:
00352             tt_id = "%s::%s" % (toolchain, target)
00353 
00354             # Let's build our test
00355             # Check the target is known before indexing TARGET_MAP
00356             if target not in TARGET_MAP:
00357                 print(self.logger.log_line(
00358                     self.logger.LogType.NOTIF,
00359                     'Skipped tests for %s target. Target platform not found' %
00360                     (target)))
00361                 continue
00362 
00363             T = TARGET_MAP[target]
00364 
00365             # print target, toolchain
00366             # Test suite properties returned to external tools like CI
00367             test_suite_properties = {
00368                 'jobs': self.opts_jobs,
00369                 'clean': clean,
00370                 'target': target,
00371                 'vendor': T.extra_labels[0],
00372                 'test_ids': ', '.join(test_ids),
00373                 'toolchain': toolchain,
00374                 'shuffle_random_seed': self.shuffle_random_seed
00375             }
00376 
00377 
00378             clean_mbed_libs_options = (self.opts_goanna_for_mbed_sdk  or
00379                                        self.opts_clean  or clean)
00380 
00381             profile = extract_profile(self.opts_parser , self.opts , toolchain)
00382             stats_depth = self.opts .stats_depth or 2
00383 
00384             try:
00385                 build_mbed_libs_result = build_mbed_libs(
00386                     T, toolchain,
00387                     clean=clean_mbed_libs_options,
00388                     jobs=self.opts_jobs ,
00389                     report=build_report,
00390                     properties=build_properties,
00391                     build_profile=profile,
00392                     notify=TerminalNotifier())
00393 
00394                 if not build_mbed_libs_result:
00395                     print(self.logger .log_line(
00396                         self.logger .LogType.NOTIF,
00397                         'Skipped tests for %s target. Toolchain %s is not '
00398                         'supported for this target'% (T.name, toolchain)))
00399                     continue
00400 
00401             except ToolException:
00402                 print(self.logger .log_line(
00403                     self.logger .LogType.ERROR,
00404                     'There were errors while building MBED libs for %s using %s'
00405                     % (target, toolchain)))
00406                 continue
00407 
00408             build_dir = join(BUILD_DIR, "test", target, toolchain)
00409 
00410             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
00411             test_suite_properties['build_dir'] = build_dir
00412             test_suite_properties['skipped'] = []
00413 
00414             # Enumerate through all tests and shuffle test order if requested
00415             test_map_keys = sorted(TEST_MAP.keys())
00416 
00417             if self.opts_shuffle_test_order :
00418                 random.shuffle(test_map_keys, self.shuffle_random_func )
00419                 # Update database with shuffle seed if applicable
00420                 if self.db_logger :
00421                     self.db_logger .reconnect();
00422                     if self.db_logger .is_connected():
00423                         self.db_logger .update_build_id_info(
00424                             self.db_logger_build_id ,
00425                             _shuffle_seed=self.shuffle_random_func ())
00426                         self.db_logger .disconnect();
00427 
00428             if self.db_logger :
00429                 self.db_logger .reconnect();
00430                 if self.db_logger .is_connected():
00431                     # Update MUTs and Test Specification in database
00432                     self.db_logger .update_build_id_info(
00433                         self.db_logger_build_id ,
00434                         _muts=self.muts , _test_spec=self.test_spec )
00435                     # Update Extra information in database (some options passed to test suite)
00436                     self.db_logger .update_build_id_info(
00437                         self.db_logger_build_id ,
00438                         _extra=json.dumps(self.dump_options ()))
00439                     self.db_logger .disconnect();
00440 
00441             valid_test_map_keys = self.get_valid_tests (test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated )
00442             skipped_test_map_keys = self.get_skipped_tests (test_map_keys, valid_test_map_keys)
00443 
00444             for skipped_test_id in skipped_test_map_keys:
00445                 test_suite_properties['skipped'].append(skipped_test_id)
00446 
00447 
00448             # First pass through all tests and determine which libraries need to be built
00449             libraries = []
00450             for test_id in valid_test_map_keys:
00451                 test = TEST_MAP[test_id]
00452 
00453                 # Detect which lib should be added to test
00454                 # Some libs have to be compiled, like RTOS or ETH
00455                 for lib in LIBRARIES:
00456                     if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
00457                         libraries.append(lib['id'])
00458 
00459 
00460             clean_project_options = True if self.opts_goanna_for_tests  or clean or self.opts_clean  else None
00461 
00462             # Build all required libraries
00463             for lib_id in libraries:
00464                 try:
00465                     build_lib(lib_id,
00466                               T,
00467                               toolchain,
00468                               clean=clean_mbed_libs_options,
00469                               jobs=self.opts_jobs ,
00470                               report=build_report,
00471                               properties=build_properties,
00472                               build_profile=profile,
00473                               notify=TerminalNotifier())
00474 
00475                 except ToolException:
00476                     print(self.logger .log_line(
00477                         self.logger .LogType.ERROR,
00478                         'There were errors while building library %s' % lib_id))
00479                     continue
00480 
00481 
00482             for test_id in valid_test_map_keys:
00483                 test = TEST_MAP[test_id]
00484 
00485                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
00486 
00487                 # TODO: move the two loops below to a separate function
00488                 INC_DIRS = []
00489                 for lib_id in libraries:
00490                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
00491                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
00492 
00493                 MACROS = []
00494                 for lib_id in libraries:
00495                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
00496                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
00497                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
00498                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
00499                 test_uuid = uuid.uuid4()
00500                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
00501 
00502                 # Prepare extended test results data structure (it can be used to generate detailed test report)
00503                 if target not in self.test_summary_ext :
00504                     self.test_summary_ext [target] = {}  # test_summary_ext : toolchain
00505                 if toolchain not in self.test_summary_ext [target]:
00506                     self.test_summary_ext [target][toolchain] = {}    # test_summary_ext : toolchain : target
00507 
00508                 tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)    # For logging only
00509 
00510                 project_name = self.opts_firmware_global_name  if self.opts_firmware_global_name  else None
00511                 try:
00512                     path = build_project(
00513                         test.source_dir, join(build_dir, test_id), T,
00514                         toolchain, test.dependencies, clean=clean_project_options,
00515                         name=project_name, macros=MACROS,
00516                         inc_dirs=INC_DIRS, jobs=self.opts_jobs , report=build_report,
00517                         properties=build_properties, project_id=test_id,
00518                         project_description=test.get_description(),
00519                         build_profile=profile, stats_depth=stats_depth,
00520                         notify=TerminalNotifier(),
00521                     )
00522 
00523                 except Exception as e:
00524                     project_name_str = project_name if project_name is not None else test_id
00525 
00526 
00527                     test_result = self.TEST_RESULT_FAIL 
00528 
00529                     if isinstance(e, ToolException):
00530                         print(self.logger .log_line(
00531                             self.logger .LogType.ERROR,
00532                             'There were errors while building project %s' %
00533                             project_name_str))
00534                         test_result = self.TEST_RESULT_BUILD_FAILED 
00535                     elif isinstance(e, NotSupportedException):
00536                         print(self.logger .log_line(
00537                             self.logger .LogType.INFO,
00538                             'Project %s is not supported' % project_name_str))
00539                         test_result = self.TEST_RESULT_NOT_SUPPORTED 
00540 
00541 
00542                     # Append test results to global test summary
00543                     self.test_summary .append(
00544                         (test_result, target, toolchain, test_id,
00545                          test.get_description(), 0, 0, '-')
00546                     )
00547 
00548                     # Add detailed test result to test summary structure
00549                     if test_id not in self.test_summary_ext [target][toolchain]:
00550                         self.test_summary_ext [target][toolchain][test_id] = []
00551 
00552                     self.test_summary_ext [target][toolchain][test_id].append({ 0: {
00553                         'result' : test_result,
00554                         'output' : '',
00555                         'target_name' : target,
00556                         'target_name_unique': target,
00557                         'toolchain_name' : toolchain,
00558                         'id' : test_id,
00559                         'description' : test.get_description(),
00560                         'elapsed_time' : 0,
00561                         'duration' : 0,
00562                         'copy_method' : None
00563                     }})
00564                     continue
00565 
00566                 if self.opts_only_build_tests :
00567                     # With this option we are skipping the testing phase
00568                     continue
00569 
00570                 # Test duration can be increased by global value
00571                 test_duration = test.duration
00572                 if self.opts_extend_test_timeout  is not None:
00573                     test_duration += self.opts_extend_test_timeout 
00574 
00575                 # For an automated test the duration acts as a timeout after
00576                 # which the test gets interrupted
00577                 test_spec = self.shape_test_request (target, path, test_id, test_duration)
00578                 test_loops = self.get_test_loop_count (test_id)
00579 
00580                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
00581                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
00582                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
00583 
00584                 # read MUTs, test specification and perform tests
00585                 handle_results = self.handle (test_spec, target, toolchain, test_loops=test_loops)
00586 
00587                 if handle_results is None:
00588                     continue
00589 
00590                 for handle_result in handle_results:
00591                     if handle_result:
00592                         single_test_result, detailed_test_results = handle_result
00593                     else:
00594                         continue
00595 
00596                     # Append test results to global test summary
00597                     if single_test_result is not None:
00598                         self.test_summary .append(single_test_result)
00599 
00600                     # Add detailed test result to test summary structure
00601                     if test_id not in self.test_summary_ext[target][toolchain]:
00602                         self.test_summary_ext[target][toolchain][test_id] = []
00603 
00604                     append_test_result = detailed_test_results
00605 
00606                     # If waterfall and consolidate-waterfall options are enabled,
00607                     # only include the last test result in the report.
00608                     if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
00609                         append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
00610 
00611                     self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
00612 
00613 
00614             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
00615             self.test_suite_properties_ext [target][toolchain] = test_suite_properties
00616 
00617         q.put(target + '_'.join(toolchains))
00618         return
00619 
00620     def execute(self):
00621         clean = self.test_spec .get('clean', False)
00622         test_ids = self.test_spec .get('test_ids', [])
00623         q = Queue()
00624 
00625         # Generate seed for shuffle if one is not provided in options
00626         self.shuffle_random_seed  = round(random.random(), self.SHUFFLE_SEED_ROUND )
00627         if self.opts_shuffle_test_seed  is not None and self.is_shuffle_seed_float ():
00628             self.shuffle_random_seed  = round(float(self.opts_shuffle_test_seed ), self.SHUFFLE_SEED_ROUND )
00629 
00630 
00631         if self.opts_parallel_test_exec :
00632             ###################################################################
00633             # Experimental, parallel test execution per singletest instance.
00634             ###################################################################
00635             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
00636             # Note: We are building here in parallel for each target separately!
00637             # So we are not building the same thing multiple times and compilers
00638             # in separate threads do not collide.
00639             # Inside the execute_thread_slice() function, handle() will be called to
00640             # get information about available MUTs (per target).
00641             for target, toolchains in self.test_spec ['targets'].items():
00642                 self.test_suite_properties_ext [target] = {}
00643                 t = threading.Thread(target=self.execute_thread_slice , args = (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties ))
00644                 t.daemon = True
00645                 t.start()
00646                 execute_threads.append(t)
00647 
00648             for t in execute_threads:
00649                 q.get()  # Wait via the queue instead of t.join(), so we do not block on threads in a fixed order
00650         else:
00651             # Serialized (not parallel) test execution
00652             for target, toolchains in self.test_spec ['targets'].items():
00653                 if target not in self.test_suite_properties_ext :
00654                     self.test_suite_properties_ext [target] = {}
00655 
00656                 self.execute_thread_slice (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties )
00657                 q.get()
00658 
00659         if self.db_logger :
00660             self.db_logger .reconnect();
00661             if self.db_logger .is_connected():
00662                 self.db_logger .update_build_id_info(self.db_logger_build_id , _status_fk=self.db_logger .BUILD_ID_STATUS_COMPLETED)
00663                 self.db_logger .disconnect();
00664 
00665         return self.test_summary , self.shuffle_random_seed , self.test_summary_ext , self.test_suite_properties_ext , self.build_report , self.build_properties 
00666 
00667     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
00668         valid_test_map_keys = []
00669 
00670         for test_id in test_map_keys:
00671             test = TEST_MAP[test_id]
00672             if self.opts_test_by_names  and test_id not in self.opts_test_by_names :
00673                 continue
00674 
00675             if test_ids and test_id not in test_ids:
00676                 continue
00677 
00678             if self.opts_test_only_peripheral  and not test.peripherals:
00679                 if self.opts_verbose_skipped_tests :
00680                     print(self.logger .log_line(
00681                         self.logger .LogType.INFO,
00682                         'Common test skipped for target %s' % target))
00683                 continue
00684 
00685             if (self.opts_peripheral_by_names  and test.peripherals and
00686                 not any((i in self.opts_peripheral_by_names )
00687                         for i in test.peripherals)):
00688                 # We will skip tests not forced with -p option
00689                 if self.opts_verbose_skipped_tests :
00690                     print(self.logger .log_line(
00691                         self.logger .LogType.INFO,
00692                         'Common test skipped for target %s' % target))
00693                 continue
00694 
00695             if self.opts_test_only_common  and test.peripherals:
00696                 if self.opts_verbose_skipped_tests :
00697                     print(self.logger .log_line(
00698                         self.logger .LogType.INFO,
00699                         'Peripheral test skipped for target %s' % target))
00700                 continue
00701 
00702             if not include_non_automated and not test.automated:
00703                 if self.opts_verbose_skipped_tests :
00704                     print(self.logger .log_line(
00705                         self.logger .LogType.INFO,
00706                         'Non automated test skipped for target %s' % target))
00707                 continue
00708 
00709             if test.is_supported(target, toolchain):
00710                 if test.peripherals is None and self.opts_only_build_tests :
00711                     # When users are using the 'build only' flag and the test does not
00712                     # have specified peripherals, we can allow test building by default
00713                     pass
00714                 elif self.opts_peripheral_by_names  and test_id not in self.opts_peripheral_by_names :
00715                     # If we force peripheral with option -p we expect test
00716                     # to pass even if peripheral is not in MUTs file.
00717                     pass
00718                 elif not self.is_peripherals_available (target, test.peripherals):
00719                     if self.opts_verbose_skipped_tests :
00720                         if test.peripherals:
00721                             print(self.logger .log_line(
00722                                 self.logger .LogType.INFO,
00723                                 'Peripheral %s test skipped for target %s' %
00724                                 (",".join(test.peripherals), target)))
00725                         else:
00726                             print(self.logger .log_line(
00727                                 self.logger .LogType.INFO,
00728                                 'Test %s skipped for target %s' %
00729                                 (test_id, target)))
00730                     continue
00731 
00732                 # The test has made it through all the filters, so add it to the valid tests list
00733                 valid_test_map_keys.append(test_id)
00734 
00735         return valid_test_map_keys
00736 
00737     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
00738         # NOTE: This will not preserve order
00739         return list(set(all_test_map_keys) - set(valid_test_map_keys))
00740 
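# --- Illustrative note (annotation, not part of the original source) ---
# Example of the set difference above: with all tests ['A', 'B', 'C'] and
# valid tests ['B'], get_skipped_tests() returns ['A', 'C'] in arbitrary
# order (as the NOTE warns, ordering is not preserved).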
00741     def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
00742         """ Prints well-formed summary with results (SQL table like)
00743             table shows test x toolchain test result matrix
00744         """
00745         RESULT_INDEX = 0
00746         TARGET_INDEX = 1
00747         TOOLCHAIN_INDEX = 2
00748         TEST_INDEX = 3
00749         DESC_INDEX = 4
00750 
00751         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
00752         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
00753         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
00754         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
00755 
00756         result = "Test summary:\n"
00757         for target in unique_targets:
00758             result_dict = {} # test : { toolchain : result }
00759             unique_target_toolchains = []
00760             for test in test_summary:
00761                 if test[TARGET_INDEX] == target:
00762                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
00763                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
00764                     if test[TEST_INDEX] not in result_dict:
00765                         result_dict[test[TEST_INDEX]] = {}
00766                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
00767 
00768             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
00769             pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
00770             for col in pt_cols:
00771                 pt.align[col] = "l"
00772             pt.padding_width = 1 # One space between column edges and contents (default)
00773 
00774             for test in unique_tests:
00775                 if test in result_dict:
00776                     test_results = result_dict[test]
00777                     if test in unique_test_desc:
00778                         row = [target, test, unique_test_desc[test]]
00779                         for toolchain in unique_toolchains:
00780                             if toolchain in test_results:
00781                                 row.append(test_results[toolchain])
00782                         pt.add_row(row)
00783             result += pt.get_string()
00784             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND ,
00785                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00786             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00787         return result
00788 
00789     def generate_test_summary(self, test_summary, shuffle_seed=None):
00790         """ Prints well-formed summary with results (SQL table like)
00791             table shows target x test result matrix across all toolchains
00792         """
00793         success_code = 0    # Success code that can be later returned to
00794         result = "Test summary:\n"
00795         # Pretty table package is used to print results
00796         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
00797                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"], junction_char="|", hrules=HEADER)
00798         pt.align["Result"] = "l" # Left align
00799         pt.align["Target"] = "l" # Left align
00800         pt.align["Toolchain"] = "l" # Left align
00801         pt.align["Test ID"] = "l" # Left align
00802         pt.align["Test Description"] = "l" # Left align
00803         pt.padding_width = 1 # One space between column edges and contents (default)
00804 
00805         result_dict = {self.TEST_RESULT_OK  : 0,
00806                        self.TEST_RESULT_FAIL  : 0,
00807                        self.TEST_RESULT_ERROR  : 0,
00808                        self.TEST_RESULT_UNDEF  : 0,
00809                        self.TEST_RESULT_IOERR_COPY  : 0,
00810                        self.TEST_RESULT_IOERR_DISK  : 0,
00811                        self.TEST_RESULT_IOERR_SERIAL  : 0,
00812                        self.TEST_RESULT_NO_IMAGE  : 0,
00813                        self.TEST_RESULT_TIMEOUT  : 0,
00814                        self.TEST_RESULT_MBED_ASSERT  : 0,
00815                        self.TEST_RESULT_BUILD_FAILED  : 0,
00816                        self.TEST_RESULT_NOT_SUPPORTED  : 0
00817         }
00818 
00819         for test in test_summary:
00820             if test[0] in result_dict:
00821                 result_dict[test[0]] += 1
00822             pt.add_row(test)
00823         result += pt.get_string()
00824         result += "\n"
00825 
00826         # Print result count
00827         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
00828         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND ,
00829                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00830         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00831         return result
00832 
00833     def test_loop_list_to_dict(self, test_loops_str):
00834         """ Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
00835         """
00836         result = {}
00837         if test_loops_str:
00838             test_loops = test_loops_str
00839             for test_loop in test_loops:
00840                 test_loop_count = test_loop.split('=')
00841                 if len(test_loop_count) == 2:
00842                     _test_id, _test_loops = test_loop_count
00843                     try:
00844                         _test_loops = int(_test_loops)
00845                     except:
00846                         continue
00847                     result[_test_id] = _test_loops
00848         return result
00849 
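# --- Illustrative sketch (annotation, not part of the original source; the
# test ids below are placeholder values) ---
# Given a loops option such as ['MBED_10=3', 'MBED_16=2'],
# test_loop_list_to_dict() returns {'MBED_10': 3, 'MBED_16': 2};
# get_test_loop_count() below then falls back to GLOBAL_LOOPS_COUNT for any
# test_id missing from that dict.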
00850     def get_test_loop_count(self, test_id):
00851         """ Returns the number of loops for a test (looked up by test_id).
00852             If the test is not in the list of redefined loop counts it will use the default value.
00853         """
00854         result = self.GLOBAL_LOOPS_COUNT 
00855         if test_id in self.TEST_LOOPS_DICT :
00856             result = self.TEST_LOOPS_DICT [test_id]
00857         return result
00858 
00859     def delete_file(self, file_path):
00860         """ Remove file from the system
00861         """
00862         result = True
00863         result_msg = ""
00864         try:
00865             os.remove(file_path)
00866         except Exception as e:
00867             result_msg = e
00868             result = False
00869         return result, result_msg
00870 
00871     def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
00872         """ Test is being invoked for given MUT.
00873         """
00874         # Get test information, image and test timeout
00875         test_id = data['test_id']
00876         test = TEST_MAP[test_id]
00877         test_description = TEST_MAP[test_id].get_description()
00878         image = data["image"]
00879         duration = data.get("duration", 10)
00880 
00881         if mut is None:
00882             print("Error: No Mbed available: MUT[%s]" % data['mcu'])
00883             return None
00884 
00885         mcu = mut['mcu']
00886         copy_method = mut.get('copy_method')        # Available board configuration selection e.g. core selection etc.
00887 
00888         if self.db_logger :
00889             self.db_logger .reconnect()
00890 
00891         selected_copy_method = self.opts_copy_method  if copy_method is None else copy_method
00892 
00893         # Tests can be looped so test results must be stored for the same test
00894         test_all_result = []
00895         # Test results for one test ran few times
00896         detailed_test_results = {}  # { Loop_number: { results ... } }
00897 
00898         for test_index in range(test_loops):
00899 
00900             # If mbedls is available and we are auto detecting MUT info,
00901             # update MUT info (mounting may have changed)
00902             if get_module_avail('mbed_lstools') and self.opts_auto_detect :
00903                 platform_name_filter = [mcu]
00904                 muts_list = {}
00905                 found = False
00906 
00907                 for i in range(0, 60):
00908                     print('Looking for %s with MBEDLS' % mcu)
00909                     muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
00910 
00911                     if 1 not in muts_list:
00912                         sleep(3)
00913                     else:
00914                         found = True
00915                         break
00916 
00917                 if not found:
00918                     print("Error: mbed not found with MBEDLS: %s" % data['mcu'])
00919                     return None
00920                 else:
00921                     mut = muts_list[1]
00922 
00923             disk = mut.get('disk')
00924             port = mut.get('port')
00925 
00926             if disk is None or port is None:
00927                 return None
00928 
00929             target_by_mcu = TARGET_MAP[mut['mcu']]
00930             target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
00931             # Some extra stuff can be declared in MUTs structure
00932             reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
00933             reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
00934 
00935             # When the build and test system were separate, this was relative to a
00936             # base network folder base path: join(NETWORK_BASE_PATH, )
00937             image_path = image
00938 
00939             # Host test execution
00940             start_host_exec_time = time()
00941 
00942             single_test_result = self.TEST_RESULT_UNDEF  # single test run result
00943             _copy_method = selected_copy_method
00944 
00945             if not exists(image_path):
00946                 single_test_result = self.TEST_RESULT_NO_IMAGE 
00947                 elapsed_time = 0
00948                 single_test_output = self.logger .log_line(self.logger .LogType.ERROR, 'Image file does not exist: %s'% image_path)
00949                 print(single_test_output)
00950             else:
00951                 # Host test execution
00952                 start_host_exec_time = time()
00953 
00954                 host_test_verbose = self.opts_verbose_test_result_only  or self.opts_verbose 
00955                 host_test_reset = self.opts_mut_reset_type  if reset_type is None else reset_type
00956                 host_test_result = self.run_host_test (test.host_test,
00957                                                       image_path, disk, port, duration,
00958                                                       micro=target_name,
00959                                                       verbose=host_test_verbose,
00960                                                       reset=host_test_reset,
00961                                                       reset_tout=reset_tout,
00962                                                       copy_method=selected_copy_method,
00963                                                       program_cycle_s=target_by_mcu.program_cycle_s)
00964                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
00965 
00966             # Store test result
00967             test_all_result.append(single_test_result)
00968             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
00969             elapsed_time = single_testduration  # Time of single test case execution after reset
00970 
00971             detailed_test_results[test_index] = {
00972                 'result' : single_test_result,
00973                 'output' : single_test_output,
00974                 'target_name' : target_name,
00975                 'target_name_unique' : target_name_unique,
00976                 'toolchain_name' : toolchain_name,
00977                 'id' : test_id,
00978                 'description' : test_description,
00979                 'elapsed_time' : round(elapsed_time, 2),
00980                 'duration' : single_timeout,
00981                 'copy_method' : _copy_method,
00982             }
00983 
00984             print(self.print_test_result (
00985                 single_test_result, target_name_unique, toolchain_name, test_id,
00986                 test_description, elapsed_time, single_timeout))
00987 
00988             # Update database entries for ongoing test
00989             if self.db_logger  and self.db_logger .is_connected():
00990                 test_type = 'SingleTest'
00991                 self.db_logger .insert_test_entry(self.db_logger_build_id ,
00992                                                  target_name,
00993                                                  toolchain_name,
00994                                                  test_type,
00995                                                  test_id,
00996                                                  single_test_result,
00997                                                  single_test_output,
00998                                                  elapsed_time,
00999                                                  single_timeout,
01000                                                  test_index)
01001 
01002             # If we perform a waterfall test we keep testing until we get an OK result, then stop
01003             if self.opts_waterfall_test  and single_test_result == self.TEST_RESULT_OK :
01004                 break
01005 
01006         if self.db_logger :
01007             self.db_logger .disconnect()
01008 
01009         return (self.shape_global_test_loop_result (test_all_result, self.opts_waterfall_test  and self.opts_consolidate_waterfall_test ),
01010                 target_name_unique,
01011                 toolchain_name,
01012                 test_id,
01013                 test_description,
01014                 round(elapsed_time, 2),
01015                 single_timeout,
01016                 self.shape_test_loop_ok_result_count (test_all_result)), detailed_test_results
01017 
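# --- Illustrative note (annotation, not part of the original source) ---
# handle_mut() therefore returns a pair: a flat summary tuple of the form
# (result, target, toolchain, test_id, description, elapsed_time, timeout,
# "ok_count/loop_count"), plus the per-loop detailed_test_results dict keyed
# by loop index.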
01018     def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
01019         """ Function determines MUT's mbed disk/port and copies binary to
01020             target.
01021         """
01022         handle_results = []
01023         data = json.loads(test_spec)
01024 
01025         # Find a suitable MUT:
01026         mut = None
01027         for id, m in self.muts .items():
01028             if m['mcu'] == data['mcu']:
01029                 mut = m
01030                 handle_result = self.handle_mut (mut, data, target_name, toolchain_name, test_loops=test_loops)
01031                 handle_results.append(handle_result)
01032 
01033         return handle_results
01034 
01035     def print_test_result (self, test_result, target_name, toolchain_name,
01036                           test_id, test_description, elapsed_time, duration):
01037         """ Use specific convention to print test result and related data
01038         """
01039         tokens = []
01040         tokens.append("TargetTest")
01041         tokens.append(target_name)
01042         tokens.append(toolchain_name)
01043         tokens.append(test_id)
01044         tokens.append(test_description)
01045         separator = "::"
01046         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
01047         result = separator.join(tokens) + " [" + test_result +"]" + time_info
01048         return Fore.MAGENTA + result + Fore.RESET
01049 
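# --- Illustrative sketch (annotation, not part of the original source; the
# target, toolchain, test id and timings are placeholder values) ---
# A line produced by print_test_result() looks roughly like (colour codes omitted):
#     TargetTest::NUCLEO_F446RE::GCC_ARM::EXAMPLE_TEST_ID::Example description [OK] in 11.54 of 20 sec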
01050     def shape_test_loop_ok_result_count (self, test_all_result):
01051         """ Reformats list of results to simple string
01052         """
01053         test_loop_count = len(test_all_result)
01054         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK )
01055         return "%d/%d"% (test_loop_ok_result, test_loop_count)
01056 
01057     def shape_global_test_loop_result (self, test_all_result, waterfall_and_consolidate):
01058         """ Reformats list of results to simple string
01059         """
01060         result = self.TEST_RESULT_FAIL 
01061 
01062         if all(test_all_result[0] == res for res in test_all_result):
01063             result = test_all_result[0]
01064         elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK  for res in test_all_result):
01065             result = self.TEST_RESULT_OK 
01066 
01067         return result
01068 
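# --- Illustrative note (annotation, not part of the original source) ---
# Example for the two helpers above: with loop results ['OK', 'FAIL', 'OK'],
# shape_test_loop_ok_result_count() returns "2/3", and, when both the
# waterfall and consolidate-waterfall options are set,
# shape_global_test_loop_result() reports TEST_RESULT_OK because at least
# one loop passed.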
01069     def run_host_test(self, name, image_path, disk, port, duration,
01070                       micro=None, reset=None, reset_tout=None,
01071                       verbose=False, copy_method=None, program_cycle_s=None):
01072         """ Creates a new process with the host test configured for a particular test case.
01073             It also polls the process for serial port activity to catch all data
01074             printed by the test runner and the host test during test execution.
01075         """
01076 
01077         def get_char_from_queue(obs):
01078             """ Get character from queue safe way
01079             """
01080             try:
01081                 c = obs.queue.get(block=True, timeout=0.5)
01082             except Empty:
01083                 c = None
01084             return c
01085 
01086         def filter_queue_char(c):
01087             """ Filters out non ASCII characters from serial port
01088             """
01089             if ord(c) not in range(128):
01090                 c = ' '
01091             return c
01092 
01093         def get_test_result(output):
01094             """ Parse test 'output' data
01095             """
01096             result = self.TEST_RESULT_TIMEOUT 
01097             for line in "".join(output).splitlines():
01098                 search_result = self.RE_DETECT_TESTCASE_RESULT .search(line)
01099                 if search_result and len(search_result.groups()):
01100                     result = self.TEST_RESULT_MAPPING [search_result.groups(0)[0]]
01101                     break
01102             return result
01103 
01104         def get_auto_property_value(property_name, line):
01105             """ Scans an auto-detection line from the MUT and returns the value of parameter 'property_name'
01106                 Returns a string, or None if the property is not present
01107             """
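            # For example, an auto-detection line such as (illustrative):
            #     HOST: Property 'timeout' = '15'
            # yields '15' when called with property_name == 'timeout'.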
01108             result = None
01109             if re.search(r"HOST: Property '%s'" % property_name, line) is not None:
01110                 prop = re.search(r"HOST: Property '%s' = '([\w\d _]+)'" % property_name, line)
01111                 if prop is not None and len(prop.groups()) == 1:
01112                     result = prop.groups()[0]
01113             return result
01114 
01115         cmd = ["python",
01116                '%s.py'% name,
01117                '-d', disk,
01118                '-f', '"%s"'% image_path,
01119                '-p', port,
01120                '-t', str(duration),
01121                '-C', str(program_cycle_s)]
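        # With typical values this resolves to a command line similar to
        # (host test name, paths and port are illustrative):
        #   python echo.py -d E: -f "./build/K64F/GCC_ARM/echo.bin" -p COM3 -t 20 -C 4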
01122 
01123         if get_module_avail('mbed_lstools') and self.opts_auto_detect :
01124             cmd += ['--auto']
01125 
01126         # Add extra parameters to host_test
01127         if copy_method is not None:
01128             cmd += ["-c", copy_method]
01129         if micro is not None:
01130             cmd += ["-m", micro]
01131         if reset is not None:
01132             cmd += ["-r", reset]
01133         if reset_tout is not None:
01134             cmd += ["-R", str(reset_tout)]
01135 
01136         if verbose:
01137             print(Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET)
01138             print("Test::Output::Start")
01139 
01140         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
01141         obs = ProcessObserver(proc)
01142         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
01143         line = ''
01144         output = []
01145         start_time = time()
01146         while (time() - start_time) < (2 * duration):
01147             c = get_char_from_queue(obs)
01148             if c:
01149                 if verbose:
01150                     sys.stdout.write(c)
01151                 c = filter_queue_char(c)
01152                 output.append(c)
01153                 # Give the mbed under test a way to communicate the end of the test
01154                 if c in ['\n', '\r']:
01155 
01156                     # Checking for auto-detection information from the test about MUT reset moment
01157                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
01158                         # We will update this marker only once to prevent resetting the start time repeatedly
01159                         update_once_flag['reset_target'] = True
01160                         start_time = time()
01161 
01162                     # Checking for auto-detection information from the test about timeout
01163                     auto_timeout_val = get_auto_property_value('timeout', line)
01164                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
01165                         # We will update this marker only once to prevent multiple timeout updates
01166                         update_once_flag['timeout'] = True
01167                         duration = int(auto_timeout_val)
01168 
01169                     # Detect mbed assert:
01170                     if 'mbed assertation failed: ' in line:
01171                         output.append('{{mbed_assert}}')
01172                         break
01173 
01174                     # Check for test end
01175                     if '{end}' in line:
01176                         break
01177                     line = ''
01178                 else:
01179                     line += c
01180         end_time = time()
01181         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
01182 
01183         c = get_char_from_queue(obs)
01184 
01185         if c:
01186             if verbose:
01187                 sys.stdout.write(c)
01188             c = filter_queue_char(c)
01189             output.append(c)
01190 
01191         if verbose:
01192             print("Test::Output::Finish")
01193         # Stop test process
01194         obs.stop()
01195 
01196         result = get_test_result(output)
01197         return (result, "".join(output), testcase_duration, duration)
01198 
01199     def is_peripherals_available (self, target_mcu_name, peripherals=None):
01200         """ Checks if specified target should run specific peripheral test case defined in MUTs file
01201         """
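        # A MUT entry that satisfies a check for peripherals=['SD'] on target
        # 'K64F' would look like this in the MUTs file (values illustrative):
        #   {"1": {"mcu": "K64F", "port": "COM3", "disk": "E:",
        #          "peripherals": ["SD", "RTC"]}}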
01202         if peripherals is not None:
01203             peripherals = set(peripherals)
01204         for id, mut in self.muts .items():
01205             # Target MCU name check
01206             if mut["mcu"] != target_mcu_name:
01207                 continue
01208             # Peripherals check
01209             if peripherals is not None:
01210                 if 'peripherals' not in mut:
01211                     continue
01212                 if not peripherals.issubset(set(mut['peripherals'])):
01213                     continue
01214             return True
01215         return False
01216 
01217     def shape_test_request (self, mcu, image_path, test_id, duration=10):
01218         """ Function prepares JSON structure describing test specification
01219         """
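        # The returned string is a JSON document of the form (values illustrative):
        #   {"mcu": "K64F", "image": "./build/test.bin", "duration": 10,
        #    "test_id": "MBED_A1"}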
01220         test_spec = {
01221             "mcu": mcu,
01222             "image": image_path,
01223             "duration": duration,
01224             "test_id": test_id,
01225         }
01226         return json.dumps(test_spec)
01227 
01228 
01229 def get_unique_value_from_summary (test_summary, index):
01230     """ Gets a sorted list of unique values from the given column of the test summary (e.g. target names)
01231     """
01232     result = []
01233     for test in test_summary:
01234         target_name = test[index]
01235         if target_name not in result:
01236             result.append(target_name)
01237     return sorted(result)
01238 
01239 
01240 def get_unique_value_from_summary_ext (test_summary, index_key, index_val):
01241     """ Maps each unique value of the 'index_key' column to the first corresponding 'index_val' column value
01242     """
01243     result = {}
01244     for test in test_summary:
01245         key = test[index_key]
01246         val = test[index_val]
01247         if key not in result:
01248             result[key] = val
01249     return result
01250 
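# For both helpers above, each row in test_summary is an indexable sequence of
# result columns. With rows shaped like (result, target, toolchain), an
# illustrative layout, the calls behave as follows:
#
#     rows = [("OK", "K64F", "GCC_ARM"), ("FAIL", "K64F", "ARM")]
#     get_unique_value_from_summary(rows, 1)         # -> ['K64F']
#     get_unique_value_from_summary_ext(rows, 1, 2)  # -> {'K64F': 'GCC_ARM'}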
01251 
01252 def show_json_file_format_error (json_spec_filename, line, column):
01253     """ Prints the broken JSON content around the reported error position
01254     """
01255     with open(json_spec_filename) as data_file:
01256         line_no = 1
01257         for json_line in data_file:
01258             if line_no + 5 >= line: # Print last few lines before error
01259                 print('Line %d:\t'%line_no + json_line)
01260             if line_no == line:
01261                 print('%s\t%s^' % (' ' * len('Line %d:' % line_no),
01262                                    '-' * (column - 1)))
01263                 break
01264             line_no += 1
01265 
01266 
01267 def json_format_error_defect_pos (json_error_msg):
01268     """ Gets the line and column of the first error in a JSON file.
01269         Parsed from the exception message thrown by json.loads()
01270     """
01271     result = None
01272     line, column = 0, 0
01273     # Line value search
01274     line_search = re.search('line [0-9]+', json_error_msg)
01275     if line_search is not None:
01276         ls = line_search.group().split(' ')
01277         if len(ls) == 2:
01278             line = int(ls[1])
01279             # Column position search
01280             column_search = re.search('column [0-9]+', json_error_msg)
01281             if column_search is not None:
01282                 cs = column_search.group().split(' ')
01283                 if len(cs) == 2:
01284                     column = int(cs[1])
01285                     result = [line, column]
01286     return result
01287 
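# A minimal sketch of the expected parsing for the helper above, using an
# illustrative json.loads() error message (not produced by this module):
#
#     json_format_error_defect_pos(
#         "Expecting ',' delimiter: line 3 column 14 (char 41)")  # -> [3, 14]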
01288 
01289 def get_json_data_from_file (json_spec_filename, verbose=False):
01290     """ Loads a JSON formatted file into a data structure
01291     """
01292     result = None
01293     try:
01294         with open(json_spec_filename) as data_file:
01295             try:
01296                 result = json.load(data_file)
01297             except ValueError as json_error_msg:
01298                 result = None
01299                 print('JSON file %s parsing failed. Reason: %s' %
01300                       (json_spec_filename, json_error_msg))
01301                 # We can print where error occurred inside JSON file if we can parse exception msg
01302                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
01303                 if json_format_defect_pos is not None:
01304                     line = json_format_defect_pos[0]
01305                     column = json_format_defect_pos[1]
01306                     print()
01307                     show_json_file_format_error(json_spec_filename, line, column)
01308 
01309     except IOError as fileopen_error_msg:
01310         print('JSON file %s not opened. Reason: %s\n'%
01311               (json_spec_filename, fileopen_error_msg))
01312     if verbose and result:
01313         pp = pprint.PrettyPrinter(indent=4)
01314         pp.pprint(result)
01315     return result
01316 
01317 
01318 def print_muts_configuration_from_json (json_data, join_delim=", ", platform_filter=None):
01319     """ Prints the MUTs configuration passed to the test script (for verbose output)
01320     """
01321     muts_info_cols = []
01322     # We need to check all unique properties for each defined MUT
01323     for k in json_data:
01324         mut_info = json_data[k]
01325         for mut_property in mut_info:
01326             if mut_property not in muts_info_cols:
01327                 muts_info_cols.append(mut_property)
01328 
01329     # Prepare pretty table object to display all MUTs
01330     pt_cols = ["index"] + muts_info_cols
01331     pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
01332     for col in pt_cols:
01333         pt.align[col] = "l"
01334 
01335     # Add rows to pretty print object
01336     for k in json_data:
01337         row = [k]
01338         mut_info = json_data[k]
01339 
01340         add_row = True
01341         if platform_filter and 'mcu' in mut_info:
01342             add_row = re.search(platform_filter, mut_info['mcu']) is not None
01343         if add_row:
01344             for col in muts_info_cols:
01345                 cell_val = mut_info[col] if col in mut_info else None
01346                 if isinstance(cell_val, list):
01347                     cell_val = join_delim.join(cell_val)
01348                 row.append(cell_val)
01349             pt.add_row(row)
01350     return pt.get_string()
01351 
01352 
01353 def print_test_configuration_from_json (json_data, join_delim=", "):
01354     """ Prints the test specification configuration passed to the test script (for verbose output)
01355     """
01356     toolchains_info_cols = []
01357     # We need to check all toolchains for each device
01358     for k in json_data:
01359         # k should be 'targets'
01360         targets = json_data[k]
01361         for target in targets:
01362             toolchains = targets[target]
01363             for toolchain in toolchains:
01364                 if toolchain not in toolchains_info_cols:
01365                     toolchains_info_cols.append(toolchain)
01366 
01367     # Prepare pretty table object to display test specification
01368     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
01369     pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
01370     for col in pt_cols:
01371         pt.align[col] = "l"
01372 
01373     # { target : [conflicted toolchains] }
01374     toolchain_conflicts = {}
01375     toolchain_path_conflicts = []
01376     for k in json_data:
01377         # k should be 'targets'
01378         targets = json_data[k]
01379         for target in targets:
01380             target_supported_toolchains = get_target_supported_toolchains(target)
01381             if not target_supported_toolchains:
01382                 target_supported_toolchains = []
01383             target_name = target if target in TARGET_MAP else "%s*"% target
01384             row = [target_name]
01385             toolchains = targets[target]
01386 
01387             for toolchain in sorted(toolchains_info_cols):
01388                 # Check for conflicts: target vs toolchain
01389                 conflict = False
01390                 conflict_path = False
01391                 if toolchain in toolchains:
01392                     if toolchain not in target_supported_toolchains:
01393                         conflict = True
01394                         if target not in toolchain_conflicts:
01395                             toolchain_conflicts[target] = []
01396                         toolchain_conflicts[target].append(toolchain)
01397                 # Add marker inside table about target usage / conflict
01398                 cell_val = 'Yes' if toolchain in toolchains else '-'
01399                 if conflict:
01400                     cell_val += '*'
01401                 # Check for conflicts: toolchain vs toolchain path
01402                 if toolchain in TOOLCHAIN_PATHS:
01403                     toolchain_path = TOOLCHAIN_PATHS[toolchain]
01404                     if not os.path.isdir(toolchain_path):
01405                         conflict_path = True
01406                         if toolchain not in toolchain_path_conflicts:
01407                             toolchain_path_conflicts.append(toolchain)
01408                 if conflict_path:
01409                     cell_val += '#'
01410                 row.append(cell_val)
01411             pt.add_row(row)
01412 
01413     # generate result string
01414     result = pt.get_string()    # Test specification table
01415     if toolchain_conflicts or toolchain_path_conflicts:
01416         result += "\n"
01417         result += "Toolchain conflicts:\n"
01418         for target in toolchain_conflicts:
01419             if target not in TARGET_MAP:
01420                 result += "\t* Target %s unknown\n"% (target)
01421             conflict_target_list = join_delim.join(toolchain_conflicts[target])
01422             suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
01423             result += "\t* Target %s does not support %s toolchain%s\n" % (target, conflict_target_list, suffix)
01424 
01425         for toolchain in toolchain_path_conflicts:
01426             # Let's check toolchain configuration
01427             if toolchain in TOOLCHAIN_PATHS:
01428                 toolchain_path = TOOLCHAIN_PATHS[toolchain]
01429                 if not os.path.isdir(toolchain_path):
01430                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
01431     return result
01432 
01433 
01434 def get_avail_tests_summary_table (cols=None, result_summary=True, join_delim=',', platform_filter=None):
01435     """ Generates table summary with all test cases and additional test cases
01436         information using pretty print functionality. Allows test suite user to
01437         see test cases
01438     """
01439     # get all unique test ID prefixes
01440     unique_test_id = []
01441     for test in TESTS:
01442         split = test['id'].split('_')[:-1]
01443         test_id_prefix = '_'.join(split)
01444         if test_id_prefix not in unique_test_id:
01445             unique_test_id.append(test_id_prefix)
01446     unique_test_id.sort()
01447     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
01448     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
01449 
01450     test_properties = ['id',
01451                        'automated',
01452                        'description',
01453                        'peripherals',
01454                        'host_test',
01455                        'duration'] if cols is None else cols
01456 
01457     # All tests status table print
01458     pt = PrettyTable(test_properties, junction_char="|", hrules=HEADER)
01459     for col in test_properties:
01460         pt.align[col] = "l"
01461     pt.align['duration'] = "r"
01462 
01463     counter_all = 0
01464     counter_automated = 0
01465     pt.padding_width = 1 # One space between column edges and contents (default)
01466 
01467     for test_id in sorted(TEST_MAP.keys()):
01468         if platform_filter is not None:
01469             # Filter out tests using regex
01470             if re.search(platform_filter, test_id) is None:
01471                 continue
01472         row = []
01473         test = TEST_MAP[test_id]
01474         split = test_id.split('_')[:-1]
01475         test_id_prefix = '_'.join(split)
01476 
01477         for col in test_properties:
01478             col_value = test[col]
01479             if isinstance(test[col], list):
01480                 col_value = join_delim.join(test[col])
01481             elif test[col] is None:
01482                 col_value = "-"
01483 
01484             row.append(col_value)
01485         if test['automated'] == True:
01486             counter_dict_test_id_types[test_id_prefix] += 1
01487             counter_automated += 1
01488         pt.add_row(row)
01489         # Update counters
01490         counter_all += 1
01491         counter_dict_test_id_types_all[test_id_prefix] += 1
01492     result = pt.get_string()
01493     result += "\n\n"
01494 
01495     if result_summary and not platform_filter:
01496         # Automation result summary
01497         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
01498         pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
01499         pt.align['automated'] = "r"
01500         pt.align['all'] = "r"
01501         pt.align['percent [%]'] = "r"
01502 
01503         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
01504         str_progress = progress_bar(percent_progress, 75)
01505         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
01506         result += "Automation coverage:\n"
01507         result += pt.get_string()
01508         result += "\n\n"
01509 
01510         # Test automation coverage table print
01511         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
01512         pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
01513         pt.align['id'] = "l"
01514         pt.align['automated'] = "r"
01515         pt.align['all'] = "r"
01516         pt.align['percent [%]'] = "r"
01517         for unique_id in unique_test_id:
01518             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
01519             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
01520             str_progress = progress_bar(percent_progress, 75)
01521             row = [unique_id,
01522                    counter_dict_test_id_types[unique_id],
01523                    counter_dict_test_id_types_all[unique_id],
01524                    percent_progress,
01525                    "[" + str_progress + "]"]
01526             pt.add_row(row)
01527         result += "Test automation coverage:\n"
01528         result += pt.get_string()
01529         result += "\n\n"
01530     return result
01531 
01532 
01533 def progress_bar (percent_progress, saturation=0):
01534     """ This function creates progress bar with optional simple saturation mark
01535     """
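    # e.g. progress_bar(40.0, saturation=75) returns a bar of 50 '#'/'.'
    # characters, with a single saturation marker ('|' or '!') inserted near
    # the saturation position when saturation > 0 (exact output depends on
    # rounding).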
01536     step = int(percent_progress / 2)    # Scale percentage to a 0-50 character range
01537     str_progress = '#' * step + '.' * int(50 - step)
01538     c = '!' if str_progress[38] == '.' else '|'
01539     if saturation > 0:
01540         saturation = int(saturation / 2)
01541         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
01542     return str_progress
01543 
01544 
01545 def singletest_in_cli_mode (single_test):
01546     """ Runs SingleTestRunner object in CLI (Command line interface) mode
01547 
01548         @return returns success code (0 == success) for building and running tests
01549     """
01550     start = time()
01551     # Execute tests depending on options and filter applied
01552     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
01553     elapsed_time = time() - start
01554 
01555     # Human readable summary
01556     if not single_test.opts_suppress_summary:
01557         # prints well-formed summary with results (SQL table like)
01558         print(single_test.generate_test_summary(test_summary, shuffle_seed))
01559     if single_test.opts_test_x_toolchain_summary:
01560         # prints well-formed summary with results (SQL table like)
01561         # table shows test x toolchain test result matrix
01562         print(single_test.generate_test_summary_by_target(test_summary,
01563                                                           shuffle_seed))
01564 
01565     print("Completed in %.2f sec" % elapsed_time)
01566     print()
01567     # Write summary of the builds
01568 
01569     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
01570     status = print_report_exporter.report(build_report)
01571 
01572     # Store extra reports in files
01573     if single_test.opts_report_html_file_name:
01574         # Export results in form of HTML report to separate file
01575         report_exporter = ReportExporter(ResultExporterType.HTML)
01576         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
01577     if single_test.opts_report_junit_file_name:
01578         # Export results in form of JUnit XML report to separate file
01579         report_exporter = ReportExporter(ResultExporterType.JUNIT)
01580         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
01581     if single_test.opts_report_text_file_name:
01582         # Export results in form of a text file
01583         report_exporter = ReportExporter(ResultExporterType.TEXT)
01584         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
01585     if single_test.opts_report_build_file_name:
01586         # Export build results as a JUnit XML report to a separate file
01587         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
01588         report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
01589 
01590     # Returns True if no build failures of the test projects or their dependencies
01591     return status
01592 
01593 class TestLogger ():
01594     """ Base class for logging and printing ongoing events during a test suite run
01595     """
01596     def __init__ (self, store_log=True):
01597         """ We can control whether the logger actually stores the log in memory
01598             or just handles all log entries immediately
01599         """
01600         self.log  = []
01601         self.log_to_file  = False
01602         self.log_file_name  = None
01603         self.store_log  = store_log
01604 
01605         self.LogType  = construct_enum(INFO='Info',
01606                                       WARN='Warning',
01607                                       NOTIF='Notification',
01608                                       ERROR='Error',
01609                                       EXCEPT='Exception')
01610 
01611         self.LogToFileAttr  = construct_enum(CREATE=1,    # Create or overwrite existing log file
01612                                             APPEND=2)    # Append to existing log file
01613 
01614     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01615         """ Log one line of text
01616         """
01617         log_timestamp = time()
01618         log_entry = {'log_type' : LogType,
01619                      'log_timestamp' : log_timestamp,
01620                      'log_line' : log_line,
01621                      '_future' : None
01622         }
01623         # Store log in memory
01624         if self.store_log :
01625             self.log .append(log_entry)
01626         return log_entry
01627 
01628 
01629 class CLITestLogger (TestLogger ):
01630     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
01631     """
01632     def __init__(self, store_log=True, file_name=None):
01633         TestLogger.__init__(self)
01634         self.log_file_name  = file_name
01635         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
01636         self.TIMESTAMP_FORMAT  = '%H:%M:%S' # Time only
01637 
01638     def log_print (self, log_entry, timestamp=True):
01639         """ Formats a log entry for printing on screen
01640         """
01641         ts = log_entry['log_timestamp']
01642         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT ) if timestamp else ''
01643         log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
01644         return timestamp_str + log_line_str
01645 
01646     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01647         """ Logs a line; if a log file was specified the line will be appended
01648             at the end of that log file
01649         """
01650         log_entry = TestLogger.log_line(self, LogType, log_line)
01651         log_line_str = self.log_print (log_entry, timestamp)
01652         if self.log_file_name  is not None:
01653             try:
01654                 with open(self.log_file_name , 'a') as f:
01655                     f.write(log_line_str + line_delim)
01656             except IOError:
01657                 pass
01658         return log_line_str
01659 
01660 
01661 def factory_db_logger (db_url):
01662     """ Factory for a database driver, chosen by the database type given in the connection string db_url
01663     """
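    # Example connection string (credentials illustrative); only 'mysql' URLs
    # currently resolve to a driver, anything else returns None:
    #   factory_db_logger('mysql://username:password@127.0.0.1/db_name')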
01664     if db_url is not None:
01665         from tools.test_mysql import MySQLDBAccess
01666         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
01667         if connection_info is not None:
01668             (db_type, username, password, host, db_name) = connection_info
01669             if db_type == 'mysql':
01670                 return MySQLDBAccess()
01671     return None
01672 
01673 
01674 def detect_database_verbose (db_url):
01675     """ Uses verbose mode (prints) during the database detection sequence to check if the database connection string is valid
01676     """
01677     result = BaseDBAccess().parse_db_connection_string(db_url)
01678     if result is not None:
01679         # Parsing passed
01680         (db_type, username, password, host, db_name) = result
01681         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
01682         # Let's try to connect
01683         db_ = factory_db_logger(db_url)
01684         if db_ is not None:
01685             print("Connecting to database '%s'..." % db_url)
01686             db_.connect(host, username, password, db_name)
01687             if db_.is_connected():
01688                 print("ok")
01689                 print("Detecting database...")
01690                 print(db_.detect_database(verbose=True))
01691                 print("Disconnecting...")
01692                 db_.disconnect()
01693                 print("done")
01694         else:
01695             print("Database type '%s' unknown" % db_type)
01696     else:
01697         print("Parse error: '%s' - DB Url error" % db_url)
01698 
01699 
01700 def get_module_avail (module_name):
01701     """ Returns True if module_name has already been imported
01702     """
01703     return module_name in sys.modules
01704 
01705 def get_autodetected_MUTS_list(platform_name_filter=None):
01706     oldError = None
01707     if os.name == 'nt':
01708         # Disable Windows error box temporarily
01709         oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
01710 
01711     mbeds = mbed_lstools.create()
01712     detect_muts_list = mbeds.list_mbeds()
01713 
01714     if os.name == 'nt':
01715         ctypes.windll.kernel32.SetErrorMode(oldError)
01716 
01717     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
01718 
01719 def get_autodetected_MUTS (mbeds_list, platform_name_filter=None):
01720     """ Detects all mbed-enabled devices connected to the host and generates an artificial MUTs structure.
01721         If the function fails to auto-detect devices it will return an empty dictionary.
01722 
01723         if get_module_avail('mbed_lstools'):
01724             mbeds = mbed_lstools.create()
01725             mbeds_list = mbeds.list_mbeds()
01726 
01727         @param mbeds_list list of mbeds captured from mbed_lstools
01728         @param platform_name_filter Optional list of platform names used to filter the detected MUTs
01729     """
01730     result = {}   # Should be in muts_all.json format
01731     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
01732     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
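    # ...and is converted into entries indexed from 1, e.g. (suffix illustrative):
    #   {1: {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]',
    #        'port': 'COM34', 'disk': 'E:', 'peripherals': []}}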
01733     index = 1
01734     for mut in mbeds_list:
01735         # Filter the MUTS if a filter is specified
01736 
01737         if platform_name_filter and not mut['platform_name'] in platform_name_filter:
01738             continue
01739 
01740         # For mcu_unique we assign the 'platform_name_unique' value from the mbedls output (if it exists);
01741         # if not, we create our own unique value (the last few chars of the platform's target_id).
01742         m = {'mcu': mut['platform_name'],
01743              'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
01744              'port': mut['serial_port'],
01745              'disk': mut['mount_point'],
01746              'peripherals': []     # No peripheral detection
01747              }
01748         if index not in result:
01749             result[index] = {}
01750         result[index] = m
01751         index += 1
01752     return result
01753 
01754 
01755 def get_autodetected_TEST_SPEC (mbeds_list,
01756                                use_default_toolchain=True,
01757                                use_supported_toolchains=False,
01758                                toolchain_filter=None,
01759                                platform_name_filter=None):
01760     """ Detects all mbed-enabled devices connected to the host and generates an artificial test_spec structure.
01761         If the function fails to auto-detect devices it will return an empty 'targets' test_spec description.
01762 
01763         use_default_toolchain - if True add default toolchain to test_spec
01764         use_supported_toolchains - if True add all supported toolchains to test_spec
01765         toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
01766     """
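    # The produced structure mirrors a test_spec 'targets' section, e.g.
    # (target and toolchains illustrative):
    #   {'targets': {'K64F': ['ARM', 'GCC_ARM']}}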
01767     result = {'targets': {} }
01768 
01769     for mut in mbeds_list:
01770         mcu = mut['mcu']
01771         if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
01772             if mcu in TARGET_MAP:
01773                 default_toolchain = TARGET_MAP[mcu].default_toolchain
01774                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
01775 
01776                 # Decide which toolchains should be added to test specification toolchain pool for each target
01777                 toolchains = []
01778                 if use_default_toolchain:
01779                     toolchains.append(default_toolchain)
01780                 if use_supported_toolchains:
01781                     toolchains += supported_toolchains
01782                 if toolchain_filter is not None:
01783                     all_toolchains = supported_toolchains + [default_toolchain]
01784                     for toolchain in toolchain_filter:
01785                         if toolchain in all_toolchains:
01786                             toolchains.append(toolchain)
01787 
01788                 result['targets'][mcu] = list(set(toolchains))
01789     return result
01790 
01791 
01792 def get_default_test_options_parser ():
01793     """ Get common test script options used by CLI, web services etc.
01794     """
01795     parser = argparse.ArgumentParser()
01796     parser.add_argument('-i', '--tests',
01797                         dest='test_spec_filename',
01798                         metavar="FILE",
01799                         type=argparse_filestring_type,
01800                         help='Points to file with test specification')
01801 
01802     parser.add_argument('-M', '--MUTS',
01803                         dest='muts_spec_filename',
01804                         metavar="FILE",
01805                         type=argparse_filestring_type,
01806                         help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
01807 
01808     parser.add_argument("-j", "--jobs",
01809                         dest='jobs',
01810                         metavar="NUMBER",
01811                         type=int,
01812                         help="Define number of compilation jobs. Default value is 1")
01813 
01814     if get_module_avail('mbed_lstools'):
01815         # Additional features available when mbed_lstools is installed on host and imported
01816         # mbed_lstools allows users to detect mbed-enabled devices connected to the host
01817         parser.add_argument('--auto',
01818                             dest='auto_detect',
01819                             action="store_true",
01820                             help='Use mbed-ls module to detect all connected mbed devices')
01821 
01822         toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
01823         parser.add_argument('--tc',
01824                             dest='toolchains_filter',
01825                             type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
01826                             help="Toolchain filter for --auto argument. Use toolchain names separated by commas, 'default' or 'all' to select toolchains")
01827 
01828         test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
01829         parser.add_argument('--oper',
01830                             dest='operability_checks',
01831                             type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
01832                             help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
01833 
01834     parser.add_argument('--clean',
01835                         dest='clean',
01836                         action="store_true",
01837                         help='Clean the build directory')
01838 
01839     parser.add_argument('-P', '--only-peripherals',
01840                         dest='test_only_peripheral',
01841                         default=False,
01842                         action="store_true",
01843                         help='Test only peripherals declared for the MUT and skip common tests')
01844 
01845     parser.add_argument("--profile", dest="profile", action="append",
01846                         type=argparse_filestring_type,
01847                         default=[])
01848 
01849     parser.add_argument('-C', '--only-commons',
01850                         dest='test_only_common',
01851                         default=False,
01852                         action="store_true",
01853                         help='Test only board internals. Skip peripherals tests and perform common tests')
01854 
01855     parser.add_argument('-n', '--test-by-names',
01856                         dest='test_by_names',
01857                         type=argparse_many(str),
01858                         help='Runs only tests enumerated in this switch. Use commas to separate test case names')
01859 
01860     parser.add_argument('-p', '--peripheral-by-names',
01861                         dest='peripheral_by_names',
01862                         type=argparse_many(str),
01863                         help='Forces discovery of particular peripherals. Use commas to separate peripheral names')
01864 
01865     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
01866     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
01867 
01868     parser.add_argument('-c', '--copy-method',
01869                         dest='copy_method',
01870                         type=argparse_uppercase_type(copy_methods, "flash method"),
01871                         help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
01872 
01873     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
01874     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
01875 
01876     parser.add_argument('-r', '--reset-type',
01877                         dest='mut_reset_type',
01878                         default=None,
01879                         type=argparse_uppercase_type(reset_methods, "reset method"),
01880                         help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
01881 
01882     parser.add_argument('-g', '--goanna-for-tests',
01883                         dest='goanna_for_tests',
01884                         action="store_true",
01885                         help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')
01886 
01887     parser.add_argument('-G', '--goanna-for-sdk',
01888                         dest='goanna_for_mbed_sdk',
01889                         action="store_true",
01890                         help='Run Goanna static analysis tool for mbed SDK (Project will be rebuilt)')
01891 
01892     parser.add_argument('-s', '--suppress-summary',
01893                         dest='suppress_summary',
01894                         default=False,
01895                         action="store_true",
01896                         help='Suppresses display of well-formatted table with test results')
01897 
01898     parser.add_argument('-t', '--test-summary',
01899                         dest='test_x_toolchain_summary',
01900                         default=False,
01901                         action="store_true",
01902                         help='Displays well-formatted table with test x toolchain test result per target')
01903 
01904     parser.add_argument('-A', '--test-automation-report',
01905                         dest='test_automation_report',
01906                         default=False,
01907                         action="store_true",
01908                         help='Prints information about all tests and exits')
01909 
01910     parser.add_argument('-R', '--test-case-report',
01911                         dest='test_case_report',
01912                         default=False,
01913                         action="store_true",
01914                         help='Prints information about all test cases and exits')
01915 
01916     parser.add_argument("-S", "--supported-toolchains",
01917                         action="store_true",
01918                         dest="supported_toolchains",
01919                         default=False,
01920                         help="Displays supported matrix of MCUs and toolchains")
01921 
01922     parser.add_argument("-O", "--only-build",
01923                         action="store_true",
01924                         dest="only_build_tests",
01925                         default=False,
01926                         help="Only build tests, skips actual test procedures (flashing etc.)")
01927 
01928     parser.add_argument('--parallel',
01929                         dest='parallel_test_exec',
01930                         default=False,
01931                         action="store_true",
01932                         help='Experimental: executes test runners for MUTs connected to your host in parallel (speeds up test result collection)')
01933 
01934     parser.add_argument('--config',
01935                         dest='verbose_test_configuration_only',
01936                         default=False,
01937                         action="store_true",
01938                         help='Displays full test specification and MUTs configuration and exits')
01939 
01940     parser.add_argument('--loops',
01941                         dest='test_loops_list',
01942                         type=argparse_many(str),
01943                         help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
01944 
01945     parser.add_argument('--global-loops',
01946                         dest='test_global_loops_value',
01947                         type=int,
01948                         help='Set global number of test loops per test. Default value is 1')
01949 
01950     parser.add_argument('--consolidate-waterfall',
01951                         dest='consolidate_waterfall_test',
01952                         default=False,
01953                         action="store_true",
01954                         help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
01955 
01956     parser.add_argument('-W', '--waterfall',
01957                         dest='waterfall_test',
01958                         default=False,
01959                         action="store_true",
01960                         help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
01961 
01962     parser.add_argument('-N', '--firmware-name',
01963                         dest='firmware_global_name',
01964                         help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')
01965 
01966     parser.add_argument('-u', '--shuffle',
01967                         dest='shuffle_test_order',
01968                         default=False,
01969                         action="store_true",
01970                         help='Shuffles test execution order')
01971 
01972     parser.add_argument('--shuffle-seed',
01973                         dest='shuffle_test_seed',
01974                         default=None,
01975                         help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
01976 
01977     parser.add_argument('-f', '--filter',
01978                         dest='general_filter_regex',
01979                         type=argparse_many(str),
01980                         default=None,
01981                         help='For some commands you can use this regular expression to filter results')
01982 
01983     parser.add_argument('--inc-timeout',
01984                         dest='extend_test_timeout',
01985                         metavar="NUMBER",
01986                         type=int,
01987                         help='You can increase global timeout for each test by specifying additional test timeout in seconds')
01988 
01989     parser.add_argument('--db',
01990                         dest='db_url',
01991                         help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
01992 
01993     parser.add_argument('-l', '--log',
01994                         dest='log_file_name',
01995                         help='Log events to external file (note not all console entries may be visible in log file)')
01996 
01997     parser.add_argument('--report-html',
01998                         dest='report_html_file_name',
01999                         help='You can log test suite results in form of HTML report')
02000 
02001     parser.add_argument('--report-junit',
02002                         dest='report_junit_file_name',
02003                         help='You can log test suite results in form of JUnit compliant XML report')
02004 
02005     parser.add_argument("--report-build",
02006                         dest="report_build_file_name",
02007                         help="Output the build results to a junit xml file")
02008 
02009     parser.add_argument("--report-text",
02010                         dest="report_text_file_name",
02011                         help="Output the build results to a text file")
02012 
02013     parser.add_argument('--verbose-skipped',
02014                         dest='verbose_skipped_tests',
02015                         default=False,
02016                         action="store_true",
02017                         help='Prints some extra information about skipped tests')
02018 
02019     parser.add_argument('-V', '--verbose-test-result',
02020                         dest='verbose_test_result_only',
02021                         default=False,
02022                         action="store_true",
02023                         help='Prints test serial output')
02024 
02025     parser.add_argument('-v', '--verbose',
02026                         dest='verbose',
02027                         default=False,
02028                         action="store_true",
02029                         help='Verbose mode (prints some extra information)')
02030 
02031     parser.add_argument('--version',
02032                         dest='version',
02033                         default=False,
02034                         action="store_true",
02035                         help='Prints script version and exits')
02036 
02037     parser.add_argument('--stats-depth',
02038                         dest='stats_depth',
02039                         default=2,
02040                         type=int,
02041                         help="Depth level for static memory report")
02042     return parser
02043 
02044 def test_path_to_name (path, base):
02045     """Change all slashes in a path into hyphens
02046     This creates a unique cross-platform test name based on the path
02047     This can eventually be overridden by a to-be-determined meta-data mechanism"""
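    # e.g. (paths illustrative):
    #   test_path_to_name('TESTS/network/http', '.')  ->  'tests-network-http'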
02048     name_parts = []
02049     head, tail = os.path.split(relpath(path,base))
02050     while (tail and tail != "."):
02051         name_parts.insert(0, tail)
02052         head, tail = os.path.split(head)
02053 
02054     return "-".join(name_parts).lower()
02055 
02056 def get_test_config (config_name, target_name):
02057     """Finds the path to a test configuration file
02058     config_name: path to a custom configuration file OR an mbed OS interface ("ethernet", "wifi_odin", etc.)
02059     target_name: name of target to determine if the mbed OS interface given is valid
02060     returns path to config; will return None if no valid config is found
02061     """
02062     # If they passed in a full path
02063     if exists(config_name):
02064         # This is a module config
02065         return config_name
02066     # Otherwise find the path to configuration file based on mbed OS interface
02067     return TestConfig.get_config_path(config_name, target_name)
02068 
02069 
02070 def find_tests (base_dir, target_name, toolchain_name, icetea, greentea, app_config=None):
02071     """ Finds all tests in a directory recursively
02072     :param base_dir: path to the directory to scan for tests (ex. 'path/to/project')
02073     :param target_name: name of the target to use for scanning (ex. 'K64F')
02074     :param toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
02075     :param icetea: icetea enabled
02076     :param greentea: greentea enabled
02077     :param app_config: location of a chosen mbed_app.json file
02078 
02079     returns a dictionary where keys are the test names, and the values are
02080     lists of paths needed to build the test.
02081     """
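    # The returned mapping looks like (names and paths illustrative):
    #   {'tests-network-http': ['TESTS/network/http', 'TESTS/network/COMMON']}
    # where the first path is the test case directory and any following paths
    # are applicable COMMON directories.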
02082 
02083     # Temporary structure: tests referenced by (name, base, group, case) tuple
02084     tests = {}
02085     # List of common folders: (predicate function, path) tuple
02086     commons = []
02087 
02088     config = Config(target_name, base_dir, app_config)
02089 
02090     # Scan the directory for paths to probe for 'TESTS' folders
02091     base_resources = Resources(MockNotifier(), collect_ignores=True)
02092     base_resources.scan_with_config(base_dir, config)
02093 
02094     if greentea:
02095         dirs = [d for d in base_resources.ignored_dirs if basename(d) == 'TESTS']
02096         ignoreset = MbedIgnoreSet()
02097 
02098         for directory in dirs:
02099             ignorefile = join(directory, IGNORE_FILENAME)
02100             if isfile(ignorefile):
02101                 ignoreset.add_mbedignore(directory, ignorefile)
02102             for test_group_directory in os.listdir(directory):
02103                 grp_dir = join(directory, test_group_directory)
02104                 if not isdir(grp_dir) or ignoreset.is_ignored(grp_dir):
02105                     continue
02106                 grpignorefile = join(grp_dir, IGNORE_FILENAME)
02107                 if isfile(grpignorefile):
02108                     ignoreset.add_mbedignore(grp_dir, grpignorefile)
02109                 for test_case_directory in os.listdir(grp_dir):
02110                     d = join(directory, test_group_directory, test_case_directory)
02111                     if not isdir(d) or ignoreset.is_ignored(d):
02112                         continue
02113                     special_dirs = ['host_tests', 'COMMON']
02114                     if test_group_directory not in special_dirs and test_case_directory not in special_dirs:
02115                         test_name = test_path_to_name(d, base_dir)
02116                         tests[(test_name, directory, test_group_directory, test_case_directory)] = [d]
02117                     if test_case_directory == 'COMMON':
02118                         def predicate(base_pred, group_pred, name_base_group_case):
02119                             (name, base, group, case) = name_base_group_case
02120                             return base == base_pred and group == group_pred
02121 
02122                         commons.append((functools.partial(predicate, directory, test_group_directory), d))
02123                 if test_group_directory == 'COMMON':
02124                     def predicate(base_pred, name_base_group_case):
02125                         (name, base, group, case) = name_base_group_case
02126                         return base == base_pred
02127 
02128                     commons.append((functools.partial(predicate, directory), grp_dir))
02129 
02130     if icetea:
02131         dirs = [d for d in base_resources.ignored_dirs if basename(d) == 'TEST_APPS']
02132         for directory in dirs:
02133             if not isdir(directory):
02134                 continue
02135             for subdir in os.listdir(directory):
02136                 d = join(directory, subdir)
02137                 if not isdir(d):
02138                     continue
02139                 if 'device' == subdir:
02140                     for test_dir in os.listdir(d):
02141                         test_dir_path = join(d, test_dir)
02142                         test_name = test_path_to_name(test_dir_path, base_dir)
02143                         tests[(test_name, directory, subdir, test_dir)] = [test_dir_path]
02144 
02145     # Apply common directories
02146     for pred, path in commons:
02147         for test_identity, test_paths in six.iteritems(tests):
02148             if pred(test_identity):
02149                 test_paths.append(path)
02150 
02151     # Drop identity besides name
02152     return {name: paths for (name, _, _, _), paths in six.iteritems(tests)}
02153 
02154 
02155 def print_tests (tests, format="list", sort=True):
02156     """Given a dictionary of tests (as returned from "find_tests"), print them
02157     in the specified format"""
02158     if format == "list":
02159         for test_name in sorted(tests.keys()):
02160             test_path = tests[test_name][0]
02161             print("Test Case:")
02162             print("    Name: %s" % test_name)
02163             print("    Path: %s" % test_path)
02164     elif format == "json":
02165         print(json.dumps({test_name: test_paths[0] for test_name, test_paths
02166                           in tests.items()}, indent=2))
02167     else:
02168         print("Unknown format '%s'" % format)
02169         sys.exit(1)
02170 
02171 def norm_relative_path (path, start):
02172     """This function will create a normalized, relative path. It mimics the
02173     python os.path.relpath function, but also normalizes a Windows-style path
02174     that uses backslashes to a Unix style path that uses forward slashes."""
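    # e.g. on Windows (paths illustrative):
    #   norm_relative_path('C:\\work\\proj\\BUILD\\tests', 'C:\\work\\proj')
    #   -> 'BUILD/tests'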
02175     path = os.path.normpath(path)
02176     path = os.path.relpath(path, start)
02177     path = path.replace("\\", "/")
02178     return path
02179 
02180 
02181 def build_test_worker (*args, **kwargs):
02182     """This is a worker function for the parallel building of tests. The `args`
02183     and `kwargs` are passed directly to `build_project`. It returns a dictionary
02184     with the following structure:
02185 
02186     {
02187         'result': `True` if no exceptions were thrown, `False` otherwise
02188         'reason': Instance of exception that was thrown on failure
02189         'bin_file': Path to the created binary if `build_project` was
02190                     successful. Not present otherwise
02191         'kwargs': The keyword arguments that were passed to `build_project`.
02192                   This includes arguments that were modified (ex. report)
02193     }
02194     """
02195     bin_file = None
02196     ret = {
02197         'result': False,
02198         'args': args,
02199         'kwargs': kwargs
02200     }
02201 
02202     # Use parent TOOLCHAIN_PATHS variable
02203     for key, value in kwargs['toolchain_paths'].items():
02204         TOOLCHAIN_PATHS[key] = value
02205 
02206     del kwargs['toolchain_paths']
02207 
02208     try:
02209         bin_file, _ = build_project(*args, **kwargs)
02210         ret['result'] = True
02211         ret['bin_file'] = bin_file
02212         ret['kwargs'] = kwargs
02213 
02214     except NotSupportedException as e:
02215         ret['reason'] = e
02216     except ToolException as e:
02217         ret['reason'] = e
02218     except KeyboardInterrupt as e:
02219         ret['reason'] = e
02220     except:
02221         # Print unhandled exceptions here
02222         import traceback
02223         traceback.print_exc(file=sys.stdout)
02224 
02225     return ret
02226 
02227 
02228 def build_tests (tests, base_source_paths, build_path, target, toolchain_name,
02229                 clean=False, notify=None, jobs=1, macros=None,
02230                 silent=False, report=None, properties=None,
02231                 continue_on_build_fail=False, app_config=None,
02232                 build_profile=None, stats_depth=None, ignore=None):
02233     """Given the data structure from 'find_tests' and the typical build parameters,
02234     build all the tests
02235 
02236     Returns a tuple of the build result (True or False) followed by the test
02237     build data structure"""
02238 
02239     execution_directory = "."
02240     base_path = norm_relative_path(build_path, execution_directory)
02241 
02242     if isinstance(target, Target):
02243         target_name = target.name
02244     else:
02245         target_name = target
02246         target = TARGET_MAP[target_name]
02247     cfg, _, _ = get_config(base_source_paths, target, app_config=app_config)
02248 
02249     baud_rate = 9600
02250     if 'platform.stdio-baud-rate' in cfg:
02251         baud_rate = cfg['platform.stdio-baud-rate'].value
02252 
02253     test_build = {
02254         "platform": target_name,
02255         "toolchain": toolchain_name,
02256         "base_path": base_path,
02257         "baud_rate": baud_rate,
02258         "binary_type": "bootable",
02259         "tests": {},
02260         "test_apps": {}
02261     }
02262 
02263     result = True
02264 
02265     jobs_count = int(jobs if jobs else cpu_count())
02266     p = Pool(processes=jobs_count)
02267     results = []
02268     for test_name, test_paths in tests.items():
02269         if not isinstance(test_paths, list):
02270             test_paths = [test_paths]
02271 
02272         test_build_path = os.path.join(build_path, test_paths[0])
02273         src_paths = base_source_paths + test_paths
02274         bin_file = None
02275         test_case_folder_name = os.path.basename(test_paths[0])
02276 
02277         args = (src_paths, test_build_path, deepcopy(target), toolchain_name)
02278         kwargs = {
02279             'jobs': 1,
02280             'clean': clean,
02281             'macros': macros,
02282             'name': test_case_folder_name,
02283             'project_id': test_name,
02284             'report': report,
02285             'properties': properties,
02286             'app_config': app_config,
02287             'build_profile': build_profile,
02288             'toolchain_paths': TOOLCHAIN_PATHS,
02289             'stats_depth': stats_depth,
02290             'notify': MockNotifier()
02291         }
02292 
02293         results.append(p.apply_async(build_test_worker, args, kwargs))
02294 
02295     p.close()
02296     result = True
02297     itr = 0
02298     while len(results):
02299         itr += 1
02300         if itr > 360000:  # ~60 minutes at the 10 ms polling interval below
02301             p.terminate()
02302             p.join()
02303             raise ToolException("Compile did not finish in 60 minutes")
02304         else:
02305             sleep(0.01)
02306             pending = 0
02307             for r in results:
02308                 if r.ready():
02309                     try:
02310                         worker_result = r.get()
02311                         results.remove(r)
02312 
02313                         # Push all deferred notifications out to the actual notifier
02314                         new_notify = deepcopy(notify)
02315                         for message in worker_result['kwargs']['notify'].messages:
02316                             new_notify.notify(message)
02317 
02318                         # Take report from the kwargs and merge it into existing report
02319                         if report:
02320                             report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
02321                             report_entry[worker_result['kwargs']['project_id'].upper()][0][0]['output'] = new_notify.get_output()
02322                             for test_key in report_entry.keys():
02323                                 report[target_name][toolchain_name][test_key] = report_entry[test_key]
02324 
02325                         # Set the overall result to a failure if a build failure occurred
02326                         if ('reason' in worker_result and
02327                             not isinstance(worker_result['reason'],
02328                                            NotSupportedException)):
02329                             result = False
02330                             break
02331 
02332 
02333                         # Adding binary path to test build result
02334                         if ('result' in worker_result and
02335                             worker_result['result'] and
02336                             'bin_file' in worker_result):
02337                             bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
02338 
02339                             test_key = 'test_apps' if 'test_apps-' in worker_result['kwargs']['project_id'] else 'tests'
02340                             test_build[test_key][worker_result['kwargs']['project_id']] = {
02341                                 "binaries": [
02342                                     {
02343                                         "path": bin_file
02344                                     }
02345                                 ]
02346                             }
02347 
02348                             test_key = worker_result['kwargs']['project_id'].upper()
02349                             print('Image: %s\n' % bin_file)
02350 
02351                     except:
02352                         if p._taskqueue.queue:
02353                             p._taskqueue.queue.clear()
02354                             sleep(0.5)
02355                         p.terminate()
02356                         p.join()
02357                         raise
02358                 else:
02359                     pending += 1
02360                     if pending >= jobs_count:
02361                         break
02362 
02363             # Break as soon as possible if there is a failure and we are not
02364             # continuing on build failures
02365             if not result and not continue_on_build_fail:
02366                 if p._taskqueue.queue:
02367                     p._taskqueue.queue.clear()
02368                     sleep(0.5)
02369                 p.terminate()
02370                 break
02371 
02372     p.join()
02373 
02374     test_builds = {}
02375     test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
02376 
02377     return result, test_builds
02378 
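# Illustrative shape of the test build data returned by build_tests(); the
# keys, paths and names below are placeholders:
#
#     {
#         "K64F-GCC_ARM": {
#             "platform": "K64F",
#             "toolchain": "GCC_ARM",
#             "base_path": "BUILD/tests/K64F/GCC_ARM",
#             "baud_rate": 9600,
#             "binary_type": "bootable",
#             "tests": {
#                 "tests-api-basic": {
#                     "binaries": [{"path": "BUILD/tests/K64F/GCC_ARM/api/basic/basic.bin"}]
#                 }
#             },
#             "test_apps": {}
#         }
#     }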
02379 
02380 def test_spec_from_test_builds(test_builds):
02381     return {
02382         "builds": test_builds
02383     }
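

# Illustrative sketch tying the two helpers above together: the dictionary
# produced by test_spec_from_test_builds() follows the "test spec" JSON layout
# consumed by the greentea test runner (e.g. via its --test-spec option). The
# file name and build parameters are placeholders, and `tests` is the
# structure obtained from find_tests() as described in the build_tests()
# docstring.
#
#     ok, test_builds = build_tests(tests, ["mbed-os"], "BUILD/tests",
#                                   "K64F", "GCC_ARM",
#                                   notify=TerminalNotifier())
#     with open("BUILD/tests/test_spec.json", "w") as fd:
#         json.dump(test_spec_from_test_builds(test_builds), fd, indent=2)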