
test_api.py

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2014 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 
00017 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
00018 """
00019 from __future__ import print_function
00020 
00021 import os
00022 import re
00023 import sys
00024 import json
00025 import uuid
00026 import pprint
00027 import random
00028 import argparse
00029 import datetime
00030 import threading
00031 import ctypes
00032 import functools
00033 from colorama import Fore, Back, Style
00034 from prettytable import PrettyTable
00035 from copy import copy
00036 
00037 from time import sleep, time
00038 try:
00039     from Queue import Queue, Empty
00040 except ImportError:
00041     from queue import Queue, Empty
00042 from os.path import join, exists, basename, relpath
00043 from threading import Thread, Lock
00044 from multiprocessing import Pool, cpu_count
00045 from subprocess import Popen, PIPE
00046 
00047 # Imports related to mbed build api
00048 from tools.tests import TESTS
00049 from tools.tests import TEST_MAP
00050 from tools.paths import BUILD_DIR
00051 from tools.paths import HOST_TESTS
00052 from tools.utils import ToolException
00053 from tools.utils import NotSupportedException
00054 from tools.utils import construct_enum
00055 from tools.memap import MemapParser
00056 from tools.targets import TARGET_MAP, Target
00057 import tools.test_configs as TestConfig
00058 from tools.test_db import BaseDBAccess
00059 from tools.build_api import build_project, build_mbed_libs, build_lib
00060 from tools.build_api import get_target_supported_toolchains
00061 from tools.build_api import write_build_report
00062 from tools.build_api import prep_report
00063 from tools.build_api import prep_properties
00064 from tools.build_api import create_result
00065 from tools.build_api import add_result_to_report
00066 from tools.build_api import prepare_toolchain
00067 from tools.build_api import scan_resources
00068 from tools.build_api import get_config
00069 from tools.libraries import LIBRARIES, LIBRARY_MAP
00070 from tools.options import extract_profile
00071 from tools.toolchains import TOOLCHAIN_PATHS
00072 from tools.toolchains import TOOLCHAINS
00073 from tools.test_exporters import ReportExporter, ResultExporterType
00074 from tools.utils import argparse_filestring_type
00075 from tools.utils import argparse_uppercase_type
00076 from tools.utils import argparse_lowercase_type
00077 from tools.utils import argparse_many
00078 
00079 import tools.host_tests.host_tests_plugins as host_tests_plugins
00080 
00081 try:
00082     import mbed_lstools
00083     from tools.compliance.ioper_runner import get_available_oper_test_scopes
00084 except:
00085     pass
00086 
00087 
00088 class ProcessObserver(Thread):
00089     def __init__(self, proc):
00090         Thread.__init__(self)
00091         self.proc = proc
00092         self.queue = Queue()
00093         self.daemon = True
00094         self.active = True
00095         self.start()
00096 
00097     def run(self):
00098         while self.active:
00099             c = self.proc.stdout.read(1)
00100             self.queue.put(c)
00101 
00102     def stop(self):
00103         self.active = False
00104         try:
00105             self.proc.terminate()
00106         except Exception:
00107             pass
00108 
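# Usage sketch: ProcessObserver wraps a Popen handle so its stdout can be
# drained character-by-character without blocking the caller; this is how
# run_host_test() below uses it. The command shown here is illustrative.
#
#     proc = Popen(["python", "host_test.py"], stdout=PIPE)
#     obs = ProcessObserver(proc)      # reader thread starts immediately
#     try:
#         c = obs.queue.get(block=True, timeout=0.5)   # next character of output
#     except Empty:
#         c = None                                     # no output within timeout
#     obs.stop()                       # stop reading and terminate the process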
00109 
00110 class SingleTestExecutor (threading.Thread):
00111     """ Runs a SingleTestRunner instance in a separate thread (see the usage sketch below this class)
00112     """
00113     def __init__(self, single_test):
00114         self.single_test  = single_test
00115         threading.Thread.__init__(self)
00116 
00117     def run(self):
00118         start = time()
00119         # Execute tests depending on options and filter applied
00120         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = self.single_test.execute()
00121         elapsed_time = time() - start
00122 
00123         # Human readable summary
00124         if not self.single_test.opts_suppress_summary:
00125             # prints well-formed summary with results (SQL-table-like)
00126             print(self.single_test.generate_test_summary(test_summary,
00127                                                           shuffle_seed))
00128         if self.single_test.opts_test_x_toolchain_summary:
00129             # prints well-formed summary with results (SQL-table-like)
00130             # table shows a test x toolchain result matrix
00131             print(self.single_test.generate_test_summary_by_target(
00132                 test_summary, shuffle_seed))
00133         print("Completed in %.2f sec" % elapsed_time)
00134 
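# Usage sketch (arguments are illustrative; SingleTestRunner, defined below,
# accepts many more keyword options):
#
#     single_test = SingleTestRunner(_muts=parsed_muts,
#                                    _test_spec=parsed_test_spec,
#                                    _opts_jobs=4)
#     executor = SingleTestExecutor(single_test)
#     executor.start()
#     executor.join()   # run() prints the summary when execute() finishes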
00135 
00136 class SingleTestRunner (object):
00137     """ Object wrapper for single test run which may involve multiple MUTs
00138     """
00139     RE_DETECT_TESTCASE_RESULT = None
00140 
00141     # Return codes for test script
00142     TEST_RESULT_OK = "OK"
00143     TEST_RESULT_FAIL = "FAIL"
00144     TEST_RESULT_ERROR = "ERROR"
00145     TEST_RESULT_UNDEF = "UNDEF"
00146     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
00147     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
00148     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
00149     TEST_RESULT_TIMEOUT = "TIMEOUT"
00150     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
00151     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
00152     TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
00153     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
00154 
00155     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
00156     TEST_LOOPS_LIST = []    # We redefine no.of loops per test_id
00157     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
00158 
00159     muts = {} # MUTs descriptor (from external file)
00160     test_spec = {} # Test specification (from external file)
00161 
00162     # mbed test suite -> SingleTestRunner
00163     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
00164                            "failure" : TEST_RESULT_FAIL,
00165                            "error" : TEST_RESULT_ERROR,
00166                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
00167                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
00168                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
00169                            "timeout" : TEST_RESULT_TIMEOUT,
00170                            "no_image" : TEST_RESULT_NO_IMAGE,
00171                            "end" : TEST_RESULT_UNDEF,
00172                            "mbed_assert" : TEST_RESULT_MBED_ASSERT,
00173                            "build_failed" : TEST_RESULT_BUILD_FAILED,
00174                            "not_supproted" : TEST_RESULT_NOT_SUPPORTED
00175     }
00176 
00177     def __init__ (self,
00178                  _global_loops_count=1,
00179                  _test_loops_list=None,
00180                  _muts={},
00181                  _clean=False,
00182                  _parser=None,
00183                  _opts=None,
00184                  _opts_db_url=None,
00185                  _opts_log_file_name=None,
00186                  _opts_report_html_file_name=None,
00187                  _opts_report_junit_file_name=None,
00188                  _opts_report_build_file_name=None,
00189                  _opts_report_text_file_name=None,
00190                  _opts_build_report={},
00191                  _opts_build_properties={},
00192                  _test_spec={},
00193                  _opts_goanna_for_mbed_sdk=None,
00194                  _opts_goanna_for_tests=None,
00195                  _opts_shuffle_test_order=False,
00196                  _opts_shuffle_test_seed=None,
00197                  _opts_test_by_names=None,
00198                  _opts_peripheral_by_names=None,
00199                  _opts_test_only_peripheral=False,
00200                  _opts_test_only_common=False,
00201                  _opts_verbose_skipped_tests=False,
00202                  _opts_verbose_test_result_only=False,
00203                  _opts_verbose=False,
00204                  _opts_firmware_global_name=None,
00205                  _opts_only_build_tests=False,
00206                  _opts_parallel_test_exec=False,
00207                  _opts_suppress_summary=False,
00208                  _opts_test_x_toolchain_summary=False,
00209                  _opts_copy_method=None,
00210                  _opts_mut_reset_type=None,
00211                  _opts_jobs=None,
00212                  _opts_waterfall_test=None,
00213                  _opts_consolidate_waterfall_test=None,
00214                  _opts_extend_test_timeout=None,
00215                  _opts_auto_detect=None,
00216                  _opts_include_non_automated=False):
00217         """ Let's try hard to init this object
00218         """
00219         from colorama import init
00220         init()
00221 
00222         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
00223         self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
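        # For example, with the mapping keys above PATTERN is equivalent to
        # r"\{(success|failure|error|...)\}", so a line containing "{success}"
        # yields the group "success", which get_test_result() later maps back
        # to TEST_RESULT_OK:
        #
        #     m = self.RE_DETECT_TESTCASE_RESULT.search("... {success}")
        #     if m:
        #         result = self.TEST_RESULT_MAPPING[m.groups(0)[0]]   # "OK"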
00224         # Settings related to test loops counters
00225         try:
00226             _global_loops_count = int(_global_loops_count)
00227         except:
00228             _global_loops_count = 1
00229         if _global_loops_count < 1:
00230             _global_loops_count = 1
00231         self.GLOBAL_LOOPS_COUNT  = _global_loops_count
00232         self.TEST_LOOPS_LIST  = _test_loops_list if _test_loops_list else []
00233         self.TEST_LOOPS_DICT  = self.test_loop_list_to_dict (_test_loops_list)
00234 
00235         self.shuffle_random_seed  = 0.0
00236         self.SHUFFLE_SEED_ROUND  = 10
00237 
00238         # MUT list and test specification storage
00239         self.muts  = _muts
00240         self.test_spec  = _test_spec
00241 
00242         # Settings passed e.g. from command line
00243         self.opts_db_url  = _opts_db_url
00244         self.opts_log_file_name  = _opts_log_file_name
00245         self.opts_report_html_file_name  = _opts_report_html_file_name
00246         self.opts_report_junit_file_name  = _opts_report_junit_file_name
00247         self.opts_report_build_file_name  = _opts_report_build_file_name
00248         self.opts_report_text_file_name  = _opts_report_text_file_name
00249         self.opts_goanna_for_mbed_sdk  = _opts_goanna_for_mbed_sdk
00250         self.opts_goanna_for_tests  = _opts_goanna_for_tests
00251         self.opts_shuffle_test_order  = _opts_shuffle_test_order
00252         self.opts_shuffle_test_seed  = _opts_shuffle_test_seed
00253         self.opts_test_by_names  = _opts_test_by_names
00254         self.opts_peripheral_by_names  = _opts_peripheral_by_names
00255         self.opts_test_only_peripheral  = _opts_test_only_peripheral
00256         self.opts_test_only_common  = _opts_test_only_common
00257         self.opts_verbose_skipped_tests  = _opts_verbose_skipped_tests
00258         self.opts_verbose_test_result_only  = _opts_verbose_test_result_only
00259         self.opts_verbose  = _opts_verbose
00260         self.opts_firmware_global_name  = _opts_firmware_global_name
00261         self.opts_only_build_tests  = _opts_only_build_tests
00262         self.opts_parallel_test_exec  = _opts_parallel_test_exec
00263         self.opts_suppress_summary  = _opts_suppress_summary
00264         self.opts_test_x_toolchain_summary  = _opts_test_x_toolchain_summary
00265         self.opts_copy_method  = _opts_copy_method
00266         self.opts_mut_reset_type  = _opts_mut_reset_type
00267         self.opts_jobs  = _opts_jobs if _opts_jobs is not None else 1
00268         self.opts_waterfall_test  = _opts_waterfall_test
00269         self.opts_consolidate_waterfall_test  = _opts_consolidate_waterfall_test
00270         self.opts_extend_test_timeout  = _opts_extend_test_timeout
00271         self.opts_clean  = _clean
00272         self.opts_parser  = _parser
00273         self.opts  = _opts
00274         self.opts_auto_detect  = _opts_auto_detect
00275         self.opts_include_non_automated  = _opts_include_non_automated
00276 
00277         self.build_report  = _opts_build_report
00278         self.build_properties  = _opts_build_properties
00279 
00280         # File / screen logger initialization
00281         self.logger  = CLITestLogger(file_name=self.opts_log_file_name )  # Default test logger
00282 
00283         # Database related initializations
00284         self.db_logger  = factory_db_logger(self.opts_db_url )
00285         self.db_logger_build_id  = None # Build ID (database index of build_id table)
00286         # Let's connect to database to set up credentials and confirm database is ready
00287         if self.db_logger :
00288             self.db_logger .connect_url(self.opts_db_url ) # Save db access info inside db_logger object
00289             if self.db_logger .is_connected():
00290                 # Get hostname and uname so we can use it as build description
00291                 # when creating new build_id in external database
00292                 (_hostname, _uname) = self.db_logger .get_hostname()
00293                 _host_location = os.path.dirname(os.path.abspath(__file__))
00294                 build_id_type = None if self.opts_only_build_tests  is None else self.db_logger .BUILD_ID_TYPE_BUILD_ONLY
00295                 self.db_logger_build_id  = self.db_logger .get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
00296                 self.db_logger .disconnect()
00297 
00298     def dump_options (self):
00299         """ Returns a data structure with the common settings passed to SingleTestRunner.
00300             It can be used, for example, to fill the _extra fields in the database storing test suite run data.
00301             Example:
00302             data = self.dump_options()
00303             or
00304             data_str = json.dumps(self.dump_options())
00305         """
00306         result = {"db_url" : str(self.opts_db_url ),
00307                   "log_file_name" :  str(self.opts_log_file_name ),
00308                   "shuffle_test_order" : str(self.opts_shuffle_test_order ),
00309                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed ),
00310                   "test_by_names" :  str(self.opts_test_by_names ),
00311                   "peripheral_by_names" : str(self.opts_peripheral_by_names ),
00312                   "test_only_peripheral" :  str(self.opts_test_only_peripheral ),
00313                   "test_only_common" :  str(self.opts_test_only_common ),
00314                   "verbose" :  str(self.opts_verbose ),
00315                   "firmware_global_name" :  str(self.opts_firmware_global_name ),
00316                   "only_build_tests" :  str(self.opts_only_build_tests ),
00317                   "copy_method" :  str(self.opts_copy_method ),
00318                   "mut_reset_type" :  str(self.opts_mut_reset_type ),
00319                   "jobs" :  str(self.opts_jobs ),
00320                   "extend_test_timeout" :  str(self.opts_extend_test_timeout ),
00321                   "_dummy" : ''
00322         }
00323         return result
00324 
00325     def shuffle_random_func(self):
00326         return self.shuffle_random_seed 
00327 
00328     def is_shuffle_seed_float (self):
00329         """ Return True if the shuffle seed can be converted to float
00330         """
00331         result = True
00332         try:
00333             float(self.shuffle_random_seed )
00334         except ValueError:
00335             result = False
00336         return result
00337 
00338     # This will store target / toolchain specific properties
00339     test_suite_properties_ext = {}  # target : toolchain
00340     # Here we store test results
00341     test_summary = []
00342     # Here we store test results in extended data structure
00343     test_summary_ext = {}
00344     execute_thread_slice_lock = Lock()
00345 
00346     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
00347         for toolchain in toolchains:
00348             tt_id = "%s::%s" % (toolchain, target)
00349 
00350             # Skip targets that are not in TARGET_MAP before any lookups
00351             if target not in TARGET_MAP:
00352                 print(self.logger.log_line(
00353                     self.logger.LogType.NOTIF,
00354                     'Skipped tests for %s target. Target platform not found' %
00355                     (target)))
00356                 continue
00357 
00358             T = TARGET_MAP[target]
00359 
00360             # print target, toolchain
00361             # Test suite properties returned to external tools like CI
00362             test_suite_properties = {
00363                 'jobs': self.opts_jobs,
00364                 'clean': clean,
00365                 'target': target,
00366                 'vendor': T.extra_labels[0],
00367                 'test_ids': ', '.join(test_ids),
00368                 'toolchain': toolchain,
00369                 'shuffle_random_seed': self.shuffle_random_seed
00370             }
00371 
00372             # print '=== %s::%s ===' % (target, toolchain)
00373             # Let's build our test
00374             clean_mbed_libs_options = (self.opts_goanna_for_mbed_sdk  or
00375                                        self.opts_clean  or clean)
00376 
00377             profile = extract_profile(self.opts_parser , self.opts , toolchain)
00378             stats_depth = self.opts .stats_depth or 2
00379 
00380             try:
00381                 build_mbed_libs_result = build_mbed_libs(
00382                     T, toolchain,
00383                     clean=clean_mbed_libs_options,
00384                     verbose=self.opts_verbose ,
00385                     jobs=self.opts_jobs ,
00386                     report=build_report,
00387                     properties=build_properties,
00388                     build_profile=profile)
00389 
00390                 if not build_mbed_libs_result:
00391                     print(self.logger .log_line(
00392                         self.logger .LogType.NOTIF,
00393                         'Skipped tests for %s target. Toolchain %s is not '
00394                         'supported for this target'% (T.name, toolchain)))
00395                     continue
00396 
00397             except ToolException:
00398                 print(self.logger .log_line(
00399                     self.logger .LogType.ERROR,
00400                     'There were errors while building MBED libs for %s using %s'
00401                     % (target, toolchain)))
00402                 continue
00403 
00404             build_dir = join(BUILD_DIR, "test", target, toolchain)
00405 
00406             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
00407             test_suite_properties['build_dir'] = build_dir
00408             test_suite_properties['skipped'] = []
00409 
00410             # Enumerate through all tests and shuffle test order if requested
00411             test_map_keys = sorted(TEST_MAP.keys())
00412 
00413             if self.opts_shuffle_test_order :
00414                 random.shuffle(test_map_keys, self.shuffle_random_func )
00415                 # Update database with shuffle seed if applicable
00416                 if self.db_logger :
00417                     self.db_logger .reconnect();
00418                     if self.db_logger .is_connected():
00419                         self.db_logger .update_build_id_info(
00420                             self.db_logger_build_id ,
00421                             _shuffle_seed=self.shuffle_random_func ())
00422                         self.db_logger .disconnect();
00423 
00424             if self.db_logger :
00425                 self.db_logger .reconnect();
00426                 if self.db_logger .is_connected():
00427                     # Update MUTs and Test Specification in database
00428                     self.db_logger .update_build_id_info(
00429                         self.db_logger_build_id ,
00430                         _muts=self.muts , _test_spec=self.test_spec )
00431                     # Update Extra information in database (some options passed to test suite)
00432                     self.db_logger .update_build_id_info(
00433                         self.db_logger_build_id ,
00434                         _extra=json.dumps(self.dump_options ()))
00435                     self.db_logger .disconnect();
00436 
00437             valid_test_map_keys = self.get_valid_tests (test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated )
00438             skipped_test_map_keys = self.get_skipped_tests (test_map_keys, valid_test_map_keys)
00439 
00440             for skipped_test_id in skipped_test_map_keys:
00441                 test_suite_properties['skipped'].append(skipped_test_id)
00442 
00443 
00444             # First pass through all tests and determine which libraries need to be built
00445             libraries = []
00446             for test_id in valid_test_map_keys:
00447                 test = TEST_MAP[test_id]
00448 
00449                 # Detect which lib should be added to test
00450                 # Some libs have to be compiled, like RTOS or ETH
00451                 for lib in LIBRARIES:
00452                     if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
00453                         libraries.append(lib['id'])
00454 
00455 
00456             clean_project_options = True if self.opts_goanna_for_tests  or clean or self.opts_clean  else None
00457 
00458             # Build all required libraries
00459             for lib_id in libraries:
00460                 try:
00461                     build_lib(lib_id,
00462                               T,
00463                               toolchain,
00464                               verbose=self.opts_verbose ,
00465                               clean=clean_mbed_libs_options,
00466                               jobs=self.opts_jobs ,
00467                               report=build_report,
00468                               properties=build_properties,
00469                               build_profile=profile)
00470 
00471                 except ToolException:
00472                     print(self.logger .log_line(
00473                         self.logger .LogType.ERROR,
00474                         'There were errors while building library %s' % lib_id))
00475                     continue
00476 
00477 
00478             for test_id in valid_test_map_keys:
00479                 test = TEST_MAP[test_id]
00480 
00481                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
00482 
00483                 # TODO: move the two loops below to a separate function
00484                 INC_DIRS = []
00485                 for lib_id in libraries:
00486                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
00487                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
00488 
00489                 MACROS = []
00490                 for lib_id in libraries:
00491                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
00492                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
00493                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
00494                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
00495                 test_uuid = uuid.uuid4()
00496                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
00497 
00498                 # Prepare extended test results data structure (it can be used to generate detailed test report)
00499                 if target not in self.test_summary_ext :
00500                     self.test_summary_ext [target] = {}  # test_summary_ext : toolchain
00501                 if toolchain not in self.test_summary_ext [target]:
00502                     self.test_summary_ext [target][toolchain] = {}    # test_summary_ext : toolchain : target
00503 
00504                 tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)    # For logging only
00505 
00506                 project_name = self.opts_firmware_global_name  if self.opts_firmware_global_name  else None
00507                 try:
00508                     path = build_project(test.source_dir, join(build_dir, test_id), T,
00509                         toolchain, test.dependencies, clean=clean_project_options,
00510                         verbose=self.opts_verbose , name=project_name, macros=MACROS,
00511                         inc_dirs=INC_DIRS, jobs=self.opts_jobs , report=build_report,
00512                         properties=build_properties, project_id=test_id,
00513                         project_description=test.get_description(),
00514                         build_profile=profile, stats_depth=stats_depth)
00515 
00516                 except Exception as e:
00517                     project_name_str = project_name if project_name is not None else test_id
00518 
00519 
00520                     test_result = self.TEST_RESULT_FAIL 
00521 
00522                     if isinstance(e, ToolException):
00523                         print(self.logger .log_line(
00524                             self.logger .LogType.ERROR,
00525                             'There were errors while building project %s' %
00526                             project_name_str))
00527                         test_result = self.TEST_RESULT_BUILD_FAILED 
00528                     elif isinstance(e, NotSupportedException):
00529                         print(self.logger.log_line(
00530                             self.logger .LogType.INFO,
00531                             'Project %s is not supported' % project_name_str))
00532                         test_result = self.TEST_RESULT_NOT_SUPPORTED 
00533 
00534 
00535                     # Append test results to global test summary
00536                     self.test_summary .append(
00537                         (test_result, target, toolchain, test_id,
00538                          test.get_description(), 0, 0, '-')
00539                     )
00540 
00541                     # Add detailed test result to test summary structure
00542                     if test_id not in self.test_summary_ext [target][toolchain]:
00543                         self.test_summary_ext [target][toolchain][test_id] = []
00544 
00545                     self.test_summary_ext [target][toolchain][test_id].append({ 0: {
00546                         'result' : test_result,
00547                         'output' : '',
00548                         'target_name' : target,
00549                         'target_name_unique': target,
00550                         'toolchain_name' : toolchain,
00551                         'id' : test_id,
00552                         'description' : test.get_description(),
00553                         'elapsed_time' : 0,
00554                         'duration' : 0,
00555                         'copy_method' : None
00556                     }})
00557                     continue
00558 
00559                 if self.opts_only_build_tests :
00560                     # With this option we are skipping testing phase
00561                     continue
00562 
00563                 # Test duration can be increased by global value
00564                 test_duration = test.duration
00565                 if self.opts_extend_test_timeout  is not None:
00566                     test_duration += self.opts_extend_test_timeout 
00567 
00568                 # For an automated test the duration acts as a timeout after
00569                 # which the test gets interrupted
00570                 test_spec = self.shape_test_request (target, path, test_id, test_duration)
00571                 test_loops = self.get_test_loop_count (test_id)
00572 
00573                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
00574                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
00575                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
00576 
00577                 # read MUTs, test specification and perform tests
00578                 handle_results = self.handle (test_spec, target, toolchain, test_loops=test_loops)
00579 
00580                 if handle_results is None:
00581                     continue
00582 
00583                 for handle_result in handle_results:
00584                     if handle_result:
00585                         single_test_result, detailed_test_results = handle_result
00586                     else:
00587                         continue
00588 
00589                     # Append test results to global test summary
00590                     if single_test_result is not None:
00591                         self.test_summary .append(single_test_result)
00592 
00593                     # Add detailed test result to test summary structure
00594                     if test_id not in self.test_summary_ext[target][toolchain]:
00595                         self.test_summary_ext[target][toolchain][test_id] = []
00596 
00597                     append_test_result = detailed_test_results
00598 
00599                     # If waterfall and consolidate-waterfall options are enabled,
00600                     # only include the last test result in the report.
00601                     if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
00602                         append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
00603 
00604                     self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
00606 
00607             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
00608             self.test_suite_properties_ext [target][toolchain] = test_suite_properties
00609 
00610         q.put(target + '_'.join(toolchains))
00611         return
00612 
00613     def execute(self):
00614         clean = self.test_spec .get('clean', False)
00615         test_ids = self.test_spec .get('test_ids', [])
00616         q = Queue()
00617 
00618         # Generate a seed for shuffling if one was not provided on the command line
00619         self.shuffle_random_seed  = round(random.random(), self.SHUFFLE_SEED_ROUND )
00620         if self.opts_shuffle_test_seed  is not None and self.is_shuffle_seed_float ():
00621             self.shuffle_random_seed  = round(float(self.opts_shuffle_test_seed ), self.SHUFFLE_SEED_ROUND )
00622 
00623 
00624         if self.opts_parallel_test_exec :
00625             ###################################################################
00626             # Experimental, parallel test execution per singletest instance.
00627             ###################################################################
00628             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
00629             # Note: We are building here in parallel for each target separately!
00630             # So we are not building the same thing multiple times and compilers
00631             # in separate threads do not collide.
00632             # Inside the execute_thread_slice() function, handle() will be called to
00633             # get information about available MUTs (per target).
00634             for target, toolchains in self.test_spec ['targets'].items():
00635                 self.test_suite_properties_ext [target] = {}
00636                 t = threading.Thread(target=self.execute_thread_slice , args = (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties ))
00637                 t.daemon = True
00638                 t.start()
00639                 execute_threads.append(t)
00640 
00641             for t in execute_threads:
00642                 q.get() # t.join() would block in a fixed order; q.get() returns as soon as any thread signals completion
00643         else:
00644             # Serialized (not parallel) test execution
00645             for target, toolchains in self.test_spec ['targets'].items():
00646                 if target not in self.test_suite_properties_ext :
00647                     self.test_suite_properties_ext [target] = {}
00648 
00649                 self.execute_thread_slice (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties )
00650                 q.get()
00651 
00652         if self.db_logger :
00653             self.db_logger .reconnect();
00654             if self.db_logger .is_connected():
00655                 self.db_logger .update_build_id_info(self.db_logger_build_id , _status_fk=self.db_logger .BUILD_ID_STATUS_COMPLETED)
00656                 self.db_logger .disconnect();
00657 
00658         return self.test_summary , self.shuffle_random_seed , self.test_summary_ext , self.test_suite_properties_ext , self.build_report , self.build_properties 
00659 
00660     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
00661         valid_test_map_keys = []
00662 
00663         for test_id in test_map_keys:
00664             test = TEST_MAP[test_id]
00665             if self.opts_test_by_names  and test_id not in self.opts_test_by_names :
00666                 continue
00667 
00668             if test_ids and test_id not in test_ids:
00669                 continue
00670 
00671             if self.opts_test_only_peripheral  and not test.peripherals:
00672                 if self.opts_verbose_skipped_tests :
00673                     print(self.logger .log_line(
00674                         self.logger .LogType.INFO,
00675                         'Common test skipped for target %s' % target))
00676                 continue
00677 
00678             if (self.opts_peripheral_by_names  and test.peripherals and
00679                 not any((i in self.opts_peripheral_by_names )
00680                         for i in test.peripherals)):
00681                 # We will skip tests not forced with -p option
00682                 if self.opts_verbose_skipped_tests :
00683                     print(self.logger .log_line(
00684                         self.logger .LogType.INFO,
00685                         'Common test skipped for target %s' % target))
00686                 continue
00687 
00688             if self.opts_test_only_common  and test.peripherals:
00689                 if self.opts_verbose_skipped_tests :
00690                     print(self.logger .log_line(
00691                         self.logger .LogType.INFO,
00692                         'Peripheral test skipped for target %s' % target))
00693                 continue
00694 
00695             if not include_non_automated and not test.automated:
00696                 if self.opts_verbose_skipped_tests :
00697                     print(self.logger .log_line(
00698                         self.logger .LogType.INFO,
00699                         'Non automated test skipped for target %s' % target))
00700                 continue
00701 
00702             if test.is_supported(target, toolchain):
00703                 if test.peripherals is None and self.opts_only_build_tests:
00704                     # When the 'build only' flag is used and the test does not
00705                     # specify peripherals, allow the test to build by default
00706                     pass
00707                 elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names:
00708                     # If we force a peripheral with the -p option we expect the test
00709                     # to pass even if the peripheral is not in the MUTs file.
00710                     pass
00711                 elif not self.is_peripherals_available (target, test.peripherals):
00712                     if self.opts_verbose_skipped_tests :
00713                         if test.peripherals:
00714                             print(self.logger .log_line(
00715                                 self.logger .LogType.INFO,
00716                                 'Peripheral %s test skipped for target %s' %
00717                                 (",".join(test.peripherals), target)))
00718                         else:
00719                             print(self.logger .log_line(
00720                                 self.logger .LogType.INFO,
00721                                 'Test %s skipped for target %s' %
00722                                 (test_id, target)))
00723                     continue
00724 
00725                 # The test has made it through all the filters, so add it to the valid tests list
00726                 valid_test_map_keys.append(test_id)
00727 
00728         return valid_test_map_keys
00729 
00730     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
00731         # NOTE: This will not preserve order
00732         return list(set(all_test_map_keys) - set(valid_test_map_keys))
00733 
00734     def generate_test_summary_by_target (self, test_summary, shuffle_seed=None):
00735         """ Returns a well-formed summary of results (SQL-table-like);
00736             the table shows a test x toolchain result matrix per target
00737         """
00738         RESULT_INDEX = 0
00739         TARGET_INDEX = 1
00740         TOOLCHAIN_INDEX = 2
00741         TEST_INDEX = 3
00742         DESC_INDEX = 4
00743 
00744         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
00745         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
00746         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
00747         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
00748 
00749         result = "Test summary:\n"
00750         for target in unique_targets:
00751             result_dict = {} # test : { toolchain : result }
00752             unique_target_toolchains = []
00753             for test in test_summary:
00754                 if test[TARGET_INDEX] == target:
00755                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
00756                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
00757                     if test[TEST_INDEX] not in result_dict:
00758                         result_dict[test[TEST_INDEX]] = {}
00759                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
00760 
00761             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
00762             pt = PrettyTable(pt_cols)
00763             for col in pt_cols:
00764                 pt.align[col] = "l"
00765             pt.padding_width = 1 # One space between column edges and contents (default)
00766 
00767             for test in unique_tests:
00768                 if test in result_dict:
00769                     test_results = result_dict[test]
00770                     if test in unique_test_desc:
00771                         row = [target, test, unique_test_desc[test]]
00772                         for toolchain in unique_toolchains:
00773                             if toolchain in test_results:
00774                                 row.append(test_results[toolchain])
00775                         pt.add_row(row)
00776             result += pt.get_string()
00777             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND ,
00778                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00779             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00780         return result
00781 
00782     def generate_test_summary (self, test_summary, shuffle_seed=None):
00783         """ Returns a well-formed summary of results (SQL-table-like);
00784             the table lists all results across targets, toolchains and tests
00785         """
00786         success_code = 0    # Success code that can later be returned to the caller
00787         result = "Test summary:\n"
00788         # Pretty table package is used to print results
00789         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
00790                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
00791         pt.align["Result"] = "l" # Left align
00792         pt.align["Target"] = "l" # Left align
00793         pt.align["Toolchain"] = "l" # Left align
00794         pt.align["Test ID"] = "l" # Left align
00795         pt.align["Test Description"] = "l" # Left align
00796         pt.padding_width = 1 # One space between column edges and contents (default)
00797 
00798         result_dict = {self.TEST_RESULT_OK  : 0,
00799                        self.TEST_RESULT_FAIL  : 0,
00800                        self.TEST_RESULT_ERROR  : 0,
00801                        self.TEST_RESULT_UNDEF  : 0,
00802                        self.TEST_RESULT_IOERR_COPY  : 0,
00803                        self.TEST_RESULT_IOERR_DISK  : 0,
00804                        self.TEST_RESULT_IOERR_SERIAL  : 0,
00805                        self.TEST_RESULT_NO_IMAGE  : 0,
00806                        self.TEST_RESULT_TIMEOUT  : 0,
00807                        self.TEST_RESULT_MBED_ASSERT  : 0,
00808                        self.TEST_RESULT_BUILD_FAILED  : 0,
00809                        self.TEST_RESULT_NOT_SUPPORTED  : 0
00810         }
00811 
00812         for test in test_summary:
00813             if test[0] in result_dict:
00814                 result_dict[test[0]] += 1
00815             pt.add_row(test)
00816         result += pt.get_string()
00817         result += "\n"
00818 
00819         # Print result count
00820         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
00821         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND ,
00822                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00823         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00824         return result
00825 
00826     def test_loop_list_to_dict (self, test_loops_str):
00827         """ Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
00828         """
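        # For example, assuming the option was parsed into a list of
        # "test_id=count" strings (test ids are illustrative):
        #
        #     self.test_loop_list_to_dict(['MBED_A1=3', 'MBED_A2=5', 'bogus'])
        #     # -> {'MBED_A1': 3, 'MBED_A2': 5}  (malformed entries are skipped)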
00829         result = {}
00830         if test_loops_str:
00831             test_loops = test_loops_str
00832             for test_loop in test_loops:
00833                 test_loop_count = test_loop.split('=')
00834                 if len(test_loop_count) == 2:
00835                     _test_id, _test_loops = test_loop_count
00836                     try:
00837                         _test_loops = int(_test_loops)
00838                     except:
00839                         continue
00840                     result[_test_id] = _test_loops
00841         return result
00842 
00843     def get_test_loop_count (self, test_id):
00844         """ Returns the number of loops for a test (looked up by test_id).
00845             If the test is not in the list of redefined loop counts, the default value is used.
00846         """
00847         result = self.GLOBAL_LOOPS_COUNT 
00848         if test_id in self.TEST_LOOPS_DICT :
00849             result = self.TEST_LOOPS_DICT [test_id]
00850         return result
00851 
00852     def delete_file (self, file_path):
00853         """ Remove file from the system
00854         """
00855         result = True
00856         result_msg = ""
00857         try:
00858             os.remove(file_path)
00859         except Exception as e:
00860             result_msg = e
00861             result = False
00862         return result, result_msg
00863 
00864     def handle_mut (self, mut, data, target_name, toolchain_name, test_loops=1):
00865         """ Test is being invoked for given MUT.
00866         """
00867         # Get test information, image and test timeout
00868         test_id = data['test_id']
00869         test = TEST_MAP[test_id]
00870         test_description = TEST_MAP[test_id].get_description()
00871         image = data["image"]
00872         duration = data.get("duration", 10)
00873 
00874         if mut is None:
00875             print("Error: No Mbed available: MUT[%s]" % data['mcu'])
00876             return None
00877 
00878         mcu = mut['mcu']
00879         copy_method = mut.get('copy_method')        # Available board configuration selection e.g. core selection etc.
00880 
00881         if self.db_logger :
00882             self.db_logger .reconnect()
00883 
00884         selected_copy_method = self.opts_copy_method  if copy_method is None else copy_method
00885 
00886         # Tests can be looped so test results must be stored for the same test
00887         test_all_result = []
00888         # Test results for one test ran few times
00889         detailed_test_results = {}  # { Loop_number: { results ... } }
00890 
00891         for test_index in range(test_loops):
00892 
00893             # If mbedls is available and we are auto detecting MUT info,
00894             # update MUT info (the mount point may have changed)
00895             if get_module_avail('mbed_lstools') and self.opts_auto_detect :
00896                 platform_name_filter = [mcu]
00897                 muts_list = {}
00898                 found = False
00899 
00900                 for i in range(0, 60):
00901                     print('Looking for %s with MBEDLS' % mcu)
00902                     muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
00903 
00904                     if 1 not in muts_list:
00905                         sleep(3)
00906                     else:
00907                         found = True
00908                         break
00909 
00910                 if not found:
00911                     print("Error: mbed not found with MBEDLS: %s" % data['mcu'])
00912                     return None
00913                 else:
00914                     mut = muts_list[1]
00915 
00916             disk = mut.get('disk')
00917             port = mut.get('port')
00918 
00919             if disk is None or port is None:
00920                 return None
00921 
00922             target_by_mcu = TARGET_MAP[mut['mcu']]
00923             target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
00924             # Some extra stuff can be declared in MUTs structure
00925             reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
00926             reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
00927 
00928             # When the build and test system were separate, this was relative to a
00929             # base network folder path: join(NETWORK_BASE_PATH, )
00930             image_path = image
00931 
00932             # Host test execution
00933             start_host_exec_time = time()
00934 
00935             single_test_result = self.TEST_RESULT_UNDEF  # single test run result
00936             _copy_method = selected_copy_method
00937 
00938             if not exists(image_path):
00939                 single_test_result = self.TEST_RESULT_NO_IMAGE 
00940                 elapsed_time = 0
00941                 single_test_output = self.logger .log_line(self.logger .LogType.ERROR, 'Image file does not exist: %s'% image_path)
00942                 print(single_test_output)
00943             else:
00944                 # Host test execution
00945                 start_host_exec_time = time()
00946 
00947                 host_test_verbose = self.opts_verbose_test_result_only  or self.opts_verbose 
00948                 host_test_reset = self.opts_mut_reset_type  if reset_type is None else reset_type
00949                 host_test_result = self.run_host_test (test.host_test,
00950                                                       image_path, disk, port, duration,
00951                                                       micro=target_name,
00952                                                       verbose=host_test_verbose,
00953                                                       reset=host_test_reset,
00954                                                       reset_tout=reset_tout,
00955                                                       copy_method=selected_copy_method,
00956                                                       program_cycle_s=target_by_mcu.program_cycle_s)
00957                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
00958 
00959             # Store test result
00960             test_all_result.append(single_test_result)
00961             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
00962             elapsed_time = single_testduration  # Time of single test case execution after reset
00963 
00964             detailed_test_results[test_index] = {
00965                 'result' : single_test_result,
00966                 'output' : single_test_output,
00967                 'target_name' : target_name,
00968                 'target_name_unique' : target_name_unique,
00969                 'toolchain_name' : toolchain_name,
00970                 'id' : test_id,
00971                 'description' : test_description,
00972                 'elapsed_time' : round(elapsed_time, 2),
00973                 'duration' : single_timeout,
00974                 'copy_method' : _copy_method,
00975             }
00976 
00977             print(self.print_test_result (
00978                 single_test_result, target_name_unique, toolchain_name, test_id,
00979                 test_description, elapsed_time, single_timeout))
00980 
00981             # Update database entries for ongoing test
00982             if self.db_logger  and self.db_logger .is_connected():
00983                 test_type = 'SingleTest'
00984                 self.db_logger .insert_test_entry(self.db_logger_build_id ,
00985                                                  target_name,
00986                                                  toolchain_name,
00987                                                  test_type,
00988                                                  test_id,
00989                                                  single_test_result,
00990                                                  single_test_output,
00991                                                  elapsed_time,
00992                                                  single_timeout,
00993                                                  test_index)
00994 
00995             # In waterfall mode we keep testing until we get an OK result, then stop testing
00996             if self.opts_waterfall_test  and single_test_result == self.TEST_RESULT_OK :
00997                 break
00998 
00999         if self.db_logger :
01000             self.db_logger .disconnect()
01001 
01002         return (self.shape_global_test_loop_result (test_all_result, self.opts_waterfall_test  and self.opts_consolidate_waterfall_test ),
01003                 target_name_unique,
01004                 toolchain_name,
01005                 test_id,
01006                 test_description,
01007                 round(elapsed_time, 2),
01008                 single_timeout,
01009                 self.shape_test_loop_ok_result_count (test_all_result)), detailed_test_results
01010 
01011     def handle (self, test_spec, target_name, toolchain_name, test_loops=1):
01012         """ Function determines MUT's mbed disk/port and copies binary to
01013             target.
01014         """
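        # test_spec is the JSON string produced by shape_test_request(); judging
        # by the keys read here and in handle_mut(), its shape is roughly the
        # following (values are illustrative):
        #
        #     {
        #         "mcu": "K64F",
        #         "image": "BUILD/test/K64F/ARM/MBED_A1/basic.bin",
        #         "test_id": "MBED_A1",
        #         "duration": 20
        #     }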
01015         handle_results = []
01016         data = json.loads(test_spec)
01017 
01018         # Find a suitable MUT:
01019         mut = None
01020         for id, m in self.muts .items():
01021             if m['mcu'] == data['mcu']:
01022                 mut = m
01023                 handle_result = self.handle_mut (mut, data, target_name, toolchain_name, test_loops=test_loops)
01024                 handle_results.append(handle_result)
01025 
01026         return handle_results
01027 
01028     def print_test_result (self, test_result, target_name, toolchain_name,
01029                           test_id, test_description, elapsed_time, duration):
01030         """ Use specific convention to print test result and related data
01031         """
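        # Example of the returned string (values are illustrative; the result is
        # wrapped in magenta colour codes):
        #
        #     TargetTest::K64F::ARM::MBED_A1::Basic [OK] in 2.31 of 20 sec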
01032         tokens = []
01033         tokens.append("TargetTest")
01034         tokens.append(target_name)
01035         tokens.append(toolchain_name)
01036         tokens.append(test_id)
01037         tokens.append(test_description)
01038         separator = "::"
01039         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
01040         result = separator.join(tokens) + " [" + test_result +"]" + time_info
01041         return Fore.MAGENTA + result + Fore.RESET
01042 
01043     def shape_test_loop_ok_result_count (self, test_all_result):
01044         """ Reformats list of results to simple string
01045         """
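        # For example:
        #
        #     self.shape_test_loop_ok_result_count(['OK', 'FAIL', 'OK'])   # -> '2/3'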
01046         test_loop_count = len(test_all_result)
01047         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK )
01048         return "%d/%d"% (test_loop_ok_result, test_loop_count)
01049 
01050     def shape_global_test_loop_result (self, test_all_result, waterfall_and_consolidate):
01051         """ Reformats list of results to simple string
01052         """
01053         result = self.TEST_RESULT_FAIL 
01054 
01055         if all(test_all_result[0] == res for res in test_all_result):
01056             result = test_all_result[0]
01057         elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK  for res in test_all_result):
01058             result = self.TEST_RESULT_OK 
01059 
01060         return result
01061 
01062     def run_host_test (self, name, image_path, disk, port, duration,
01063                       micro=None, reset=None, reset_tout=None,
01064                       verbose=False, copy_method=None, program_cycle_s=None):
01065         """ Creates a new process running the host test configured for a particular test case.
01066             The function also polls the process output to catch all data
01067             printed by the test runner and the host test during test execution
01068         """
01069 
01070         def get_char_from_queue(obs):
01071             """ Get character from queue safe way
01072             """
01073             try:
01074                 c = obs.queue.get(block=True, timeout=0.5)
01075             except Empty:
01076                 c = None
01077             return c
01078 
01079         def filter_queue_char(c):
01080             """ Filters out non ASCII characters from serial port
01081             """
01082             if ord(c) not in range(128):
01083                 c = ' '
01084             return c
01085 
01086         def get_test_result(output):
01087             """ Parse test 'output' data
01088             """
01089             result = self.TEST_RESULT_TIMEOUT 
01090             for line in "".join(output).splitlines():
01091                 search_result = self.RE_DETECT_TESTCASE_RESULT .search(line)
01092                 if search_result and len(search_result.groups()):
01093                     result = self.TEST_RESULT_MAPPING [search_result.groups(0)[0]]
01094                     break
01095             return result
01096 
01097         def get_auto_property_value(property_name, line):
01098             """ Scans auto detection line from MUT and returns scanned parameter 'property_name'
01099                 Returns string
01100             """
01101             result = None
01102             if re.search("HOST: Property '%s'"% property_name, line) is not None:
01103                 property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
01104                 if property is not None and len(property.groups()) == 1:
01105                     result = property.groups()[0]
01106             return result
01107 
01108         cmd = ["python",
01109                '%s.py'% name,
01110                '-d', disk,
01111                '-f', '"%s"'% image_path,
01112                '-p', port,
01113                '-t', str(duration),
01114                '-C', str(program_cycle_s)]
01115 
01116         if get_module_avail('mbed_lstools') and self.opts_auto_detect:
01117             cmd += ['--auto']
01118 
01119         # Add extra parameters to host_test
01120         if copy_method is not None:
01121             cmd += ["-c", copy_method]
01122         if micro is not None:
01123             cmd += ["-m", micro]
01124         if reset is not None:
01125             cmd += ["-r", reset]
01126         if reset_tout is not None:
01127             cmd += ["-R", str(reset_tout)]
01128 
01129         if verbose:
01130             print(Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET)
01131             print("Test::Output::Start")
01132 
01133         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
01134         obs = ProcessObserver(proc)
01135         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
01136         line = ''
01137         output = []
01138         start_time = time()
01139         while (time() - start_time) < (2 * duration):
01140             c = get_char_from_queue(obs)
01141             if c:
01142                 if verbose:
01143                     sys.stdout.write(c)
01144                 c = filter_queue_char(c)
01145                 output.append(c)
01146                 # Give the mbed under test a way to communicate the end of the test
01147                 if c in ['\n', '\r']:
01148 
01149                     # Checking for auto-detection information from the test about MUT reset moment
01150                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
01151                         # We will update this marker only once to prevent multiple time resets
01152                         update_once_flag['reset_target'] = True
01153                         start_time = time()
01154 
01155                     # Checking for auto-detection information from the test about timeout
01156                     auto_timeout_val = get_auto_property_value('timeout', line)
01157                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
01158                         # We will update this marker only once to prevent multiple timeout updates
01159                         update_once_flag['timeout'] = True
01160                         duration = int(auto_timeout_val)
01161 
01162                     # Detect mbed assert:
01163                     if 'mbed assertation failed: ' in line:
01164                         output.append('{{mbed_assert}}')
01165                         break
01166 
01167                     # Check for test end
01168                     if '{end}' in line:
01169                         break
01170                     line = ''
01171                 else:
01172                     line += c
01173         end_time = time()
01174         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
01175 
01176         c = get_char_from_queue(obs)
01177 
01178         if c:
01179             if verbose:
01180                 sys.stdout.write(c)
01181             c = filter_queue_char(c)
01182             output.append(c)
01183 
01184         if verbose:
01185             print("Test::Output::Finish")
01186         # Stop test process
01187         obs.stop()
01188 
01189         result = get_test_result(output)
01190         return (result, "".join(output), testcase_duration, duration)
01191 
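    # Editorial example: a minimal sketch of how run_host_test() might be invoked
    # elsewhere in this class; the image path, disk, port and copy method below are
    # illustrative only and depend on the local setup and installed plugins:
    #
    #   result, output, testcase_duration, duration = self.run_host_test(
    #       "echo", "./build/K64F/GCC_ARM/echo.bin", "E:", "COM3", 20,
    #       micro="K64F", verbose=True, copy_method="shutil", program_cycle_s=4)
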
01192     def is_peripherals_available (self, target_mcu_name, peripherals=None):
01193         """ Checks if specified target should run specific peripheral test case defined in MUTs file
01194         """
01195         if peripherals is not None:
01196             peripherals = set(peripherals)
01197         for id, mut in self.muts.items():
01198             # Target MCU name check
01199             if mut["mcu"] != target_mcu_name:
01200                 continue
01201             # Peripherals check
01202             if peripherals is not None:
01203                 if 'peripherals' not in mut:
01204                     continue
01205                 if not peripherals.issubset(set(mut['peripherals'])):
01206                     continue
01207             return True
01208         return False
01209 
01210     def shape_test_request (self, mcu, image_path, test_id, duration=10):
01211         """ Function prepares JSON structure describing test specification
01212         """
01213         test_spec = {
01214             "mcu": mcu,
01215             "image": image_path,
01216             "duration": duration,
01217             "test_id": test_id,
01218         }
01219         return json.dumps(test_spec)
01220 
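    # Editorial example: shape_test_request() simply serialises the request as JSON,
    # e.g. (key order may vary):
    #
    #   self.shape_test_request("K64F", "./build/basic.bin", "MBED_A1", 20)
    #     -> '{"mcu": "K64F", "image": "./build/basic.bin", "duration": 20, "test_id": "MBED_A1"}'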
01221 
01222 def get_unique_value_from_summary (test_summary, index):
01223     """ Gets a sorted list of unique values (e.g. target names) from the given test summary column
01224     """
01225     result = []
01226     for test in test_summary:
01227         target_name = test[index]
01228         if target_name not in result:
01229             result.append(target_name)
01230     return sorted(result)
01231 
01232 
01233 def get_unique_value_from_summary_ext (test_summary, index_key, index_val):
01234     """ Builds a dictionary of unique values from the test summary (maps column 'index_key' to column 'index_val')
01235     """
01236     result = {}
01237     for test in test_summary:
01238         key = test[index_key]
01239         val = test[index_val]
01240         if key not in result:
01241             result[key] = val
01242     return result
01243 
01244 
01245 def show_json_file_format_error (json_spec_filename, line, column):
01246     """ Prints the broken JSON content around the reported error position
01247     """
01248     with open(json_spec_filename) as data_file:
01249         line_no = 1
01250         for json_line in data_file:
01251             if line_no + 5 >= line: # Print last few lines before error
01252                 print('Line %d:\t'%line_no + json_line)
01253             if line_no == line:
01254                 print('%s\t%s^' % (' ' * len('Line %d:' % line_no),
01255                                    '-' * (column - 1)))
01256                 break
01257             line_no += 1
01258 
01259 
01260 def json_format_error_defect_pos (json_error_msg):
01261     """ Gets the first error line and column of a broken JSON file,
01262         parsed from the message of the exception thrown by json.loads()
01263     """
01264     result = None
01265     line, column = 0, 0
01266     # Line value search
01267     line_search = re.search('line [0-9]+', json_error_msg)
01268     if line_search is not None:
01269         ls = line_search.group().split(' ')
01270         if len(ls) == 2:
01271             line = int(ls[1])
01272             # Column position search
01273             column_search = re.search('column [0-9]+', json_error_msg)
01274             if column_search is not None:
01275                 cs = column_search.group().split(' ')
01276                 if len(cs) == 2:
01277                     column = int(cs[1])
01278                     result = [line, column]
01279     return result
01280 
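# Editorial example: json_format_error_defect_pos() extracts the position from a
# standard json.loads() error message, e.g.:
#
#   json_format_error_defect_pos("Expecting ',' delimiter: line 7 column 15 (char 142)")
#     -> [7, 15]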
01281 
01282 def get_json_data_from_file (json_spec_filename, verbose=False):
01283     """ Loads a JSON-formatted file into a Python data structure
01284     """
01285     result = None
01286     try:
01287         with open(json_spec_filename) as data_file:
01288             try:
01289                 result = json.load(data_file)
01290             except ValueError as json_error_msg:
01291                 result = None
01292                 print('JSON file %s parsing failed. Reason: %s' %
01293                       (json_spec_filename, json_error_msg))
01294                 # We can print where error occurred inside JSON file if we can parse exception msg
01295                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
01296                 if json_format_defect_pos is not None:
01297                     line = json_format_defect_pos[0]
01298                     column = json_format_defect_pos[1]
01299                     print()
01300                     show_json_file_format_error(json_spec_filename, line, column)
01301 
01302     except IOError as fileopen_error_msg:
01303         print('JSON file %s not opened. Reason: %s\n'%
01304               (json_spec_filename, fileopen_error_msg))
01305     if verbose and result:
01306         pp = pprint.PrettyPrinter(indent=4)
01307         pp.pprint(result)
01308     return result
01309 
01310 
01311 def print_muts_configuration_from_json (json_data, join_delim=", ", platform_filter=None):
01312     """ Prints the MUTs configuration passed to the test script (verbose output)
01313     """
01314     muts_info_cols = []
01315     # We need to check all unique properties for each defined MUT
01316     for k in json_data:
01317         mut_info = json_data[k]
01318         for mut_property in mut_info:
01319             if mut_property not in muts_info_cols:
01320                 muts_info_cols.append(mut_property)
01321 
01322     # Prepare pretty table object to display all MUTs
01323     pt_cols = ["index"] + muts_info_cols
01324     pt = PrettyTable(pt_cols)
01325     for col in pt_cols:
01326         pt.align[col] = "l"
01327 
01328     # Add rows to pretty print object
01329     for k in json_data:
01330         row = [k]
01331         mut_info = json_data[k]
01332 
01333         add_row = True
01334         if platform_filter and 'mcu' in mut_info:
01335             add_row = re.search(platform_filter, mut_info['mcu']) is not None
01336         if add_row:
01337             for col in muts_info_cols:
01338                 cell_val = mut_info[col] if col in mut_info else None
01339                 if isinstance(cell_val, list):
01340                     cell_val = join_delim.join(cell_val)
01341                 row.append(cell_val)
01342             pt.add_row(row)
01343     return pt.get_string()
01344 
01345 
01346 def print_test_configuration_from_json (json_data, join_delim=", "):
01347     """ Prints the test specification configuration passed to the test script (verbose output)
01348     """
01349     toolchains_info_cols = []
01350     # We need to check all toolchains for each device
01351     for k in json_data:
01352         # k should be 'targets'
01353         targets = json_data[k]
01354         for target in targets:
01355             toolchains = targets[target]
01356             for toolchain in toolchains:
01357                 if toolchain not in toolchains_info_cols:
01358                     toolchains_info_cols.append(toolchain)
01359 
01360     # Prepare pretty table object to display test specification
01361     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
01362     pt = PrettyTable(pt_cols)
01363     for col in pt_cols:
01364         pt.align[col] = "l"
01365 
01366     # { target : [conflicted toolchains] }
01367     toolchain_conflicts = {}
01368     toolchain_path_conflicts = []
01369     for k in json_data:
01370         # k should be 'targets'
01371         targets = json_data[k]
01372         for target in targets:
01373             target_supported_toolchains = get_target_supported_toolchains(target)
01374             if not target_supported_toolchains:
01375                 target_supported_toolchains = []
01376             target_name = target if target in TARGET_MAP else "%s*"% target
01377             row = [target_name]
01378             toolchains = targets[target]
01379 
01380             for toolchain in sorted(toolchains_info_cols):
01381                 # Check for conflicts: target vs toolchain
01382                 conflict = False
01383                 conflict_path = False
01384                 if toolchain in toolchains:
01385                     if toolchain not in target_supported_toolchains:
01386                         conflict = True
01387                         if target not in toolchain_conflicts:
01388                             toolchain_conflicts[target] = []
01389                         toolchain_conflicts[target].append(toolchain)
01390                 # Add marker inside table about target usage / conflict
01391                 cell_val = 'Yes' if toolchain in toolchains else '-'
01392                 if conflict:
01393                     cell_val += '*'
01394                 # Check for conflicts: toolchain vs toolchain path
01395                 if toolchain in TOOLCHAIN_PATHS:
01396                     toolchain_path = TOOLCHAIN_PATHS[toolchain]
01397                     if not os.path.isdir(toolchain_path):
01398                         conflict_path = True
01399                         if toolchain not in toolchain_path_conflicts:
01400                             toolchain_path_conflicts.append(toolchain)
01401                 if conflict_path:
01402                     cell_val += '#'
01403                 row.append(cell_val)
01404             pt.add_row(row)
01405 
01406     # generate result string
01407     result = pt.get_string()    # Test specification table
01408     if toolchain_conflicts or toolchain_path_conflicts:
01409         result += "\n"
01410         result += "Toolchain conflicts:\n"
01411         for target in toolchain_conflicts:
01412             if target not in TARGET_MAP:
01413                 result += "\t* Target %s unknown\n"% (target)
01414             conflict_target_list = join_delim.join(toolchain_conflicts[target])
01415             suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
01416             result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
01417 
01418         for toolchain in toolchain_path_conflicts:
01419             # Let's check toolchain configuration
01420             if toolchain in TOOLCHAIN_PATHS:
01421                 toolchain_path = TOOLCHAIN_PATHS[toolchain]
01422                 if not os.path.isdir(toolchain_path):
01423                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
01424     return result
01425 
01426 
01427 def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',', platform_filter=None):
01428     """ Generates a summary table with all test cases and additional information
01429         about them using pretty-print functionality, so the test suite user can
01430         review available test cases
01431     """
01432     # get all unique test ID prefixes
01433     unique_test_id = []
01434     for test in TESTS:
01435         split = test['id'].split('_')[:-1]
01436         test_id_prefix = '_'.join(split)
01437         if test_id_prefix not in unique_test_id:
01438             unique_test_id.append(test_id_prefix)
01439     unique_test_id.sort()
01440     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
01441     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
01442 
01443     test_properties = ['id',
01444                        'automated',
01445                        'description',
01446                        'peripherals',
01447                        'host_test',
01448                        'duration'] if cols is None else cols
01449 
01450     # All tests status table print
01451     pt = PrettyTable(test_properties)
01452     for col in test_properties:
01453         pt.align[col] = "l"
01454     pt.align['duration'] = "r"
01455 
01456     counter_all = 0
01457     counter_automated = 0
01458     pt.padding_width = 1 # One space between column edges and contents (default)
01459 
01460     for test_id in sorted(TEST_MAP.keys()):
01461         if platform_filter is not None:
01462             # Filter out platforms using regex
01463             if re.search(platform_filter, test_id) is None:
01464                 continue
01465         row = []
01466         test = TEST_MAP[test_id]
01467         split = test_id.split('_')[:-1]
01468         test_id_prefix = '_'.join(split)
01469 
01470         for col in test_properties:
01471             col_value = test[col]
01472             if isinstance(test[col], list):
01473                 col_value = join_delim.join(test[col])
01474             elif test[col] is None:
01475                 col_value = "-"
01476 
01477             row.append(col_value)
01478         if test['automated'] == True:
01479             counter_dict_test_id_types[test_id_prefix] += 1
01480             counter_automated += 1
01481         pt.add_row(row)
01482         # Update counters
01483         counter_all += 1
01484         counter_dict_test_id_types_all[test_id_prefix] += 1
01485     result = pt.get_string()
01486     result += "\n\n"
01487 
01488     if result_summary and not platform_filter:
01489         # Automation result summary
01490         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
01491         pt = PrettyTable(test_id_cols)
01492         pt.align['automated'] = "r"
01493         pt.align['all'] = "r"
01494         pt.align['percent [%]'] = "r"
01495 
01496         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
01497         str_progress = progress_bar(percent_progress, 75)
01498         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
01499         result += "Automation coverage:\n"
01500         result += pt.get_string()
01501         result += "\n\n"
01502 
01503         # Test automation coverage table print
01504         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
01505         pt = PrettyTable(test_id_cols)
01506         pt.align['id'] = "l"
01507         pt.align['automated'] = "r"
01508         pt.align['all'] = "r"
01509         pt.align['percent [%]'] = "r"
01510         for unique_id in unique_test_id:
01511             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
01512             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
01513             str_progress = progress_bar(percent_progress, 75)
01514             row = [unique_id,
01515                    counter_dict_test_id_types[unique_id],
01516                    counter_dict_test_id_types_all[unique_id],
01517                    percent_progress,
01518                    "[" + str_progress + "]"]
01519             pt.add_row(row)
01520         result += "Test automation coverage:\n"
01521         result += pt.get_string()
01522         result += "\n\n"
01523     return result
01524 
01525 
01526 def progress_bar(percent_progress, saturation=0):
01527     """ Creates a textual progress bar with an optional simple saturation mark
01528     """
01529     step = int(percent_progress / 2)    # Scale percentage to bar width (0 - 50)
01530     str_progress = '#' * step + '.' * int(50 - step)
01531     c = '!' if str_progress[38] == '.' else '|'
01532     if saturation > 0:
01533         saturation = int(saturation / 2)    # Scale saturation mark position to bar width
01534         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
01535     return str_progress
01536 
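# Editorial example (illustrative): progress_bar() renders a 50-character bar, e.g.:
#
#   progress_bar(40.0)     -> '####################..............................'
#   progress_bar(40.0, 75) -> the same bar with a saturation marker character
#                             inserted at the 75% position (scaled to 50 chars)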
01537 
01538 def singletest_in_cli_mode (single_test):
01539     """ Runs SingleTestRunner object in CLI (Command line interface) mode
01540 
01541         @return returns success code (0 == success) for building and running tests
01542     """
01543     start = time()
01544     # Execute tests depending on options and filter applied
01545     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
01546     elapsed_time = time() - start
01547 
01548     # Human readable summary
01549     if not single_test.opts_suppress_summary:
01550         # prints well-formed summary with results (SQL table like)
01551         print(single_test.generate_test_summary(test_summary, shuffle_seed))
01552     if single_test.opts_test_x_toolchain_summary:
01553         # prints well-formed summary with results (SQL table like)
01554         # table shows test x toolchain test result matrix
01555         print(single_test.generate_test_summary_by_target(test_summary,
01556                                                           shuffle_seed))
01557 
01558     print("Completed in %.2f sec" % elapsed_time)
01559     print()
01560     # Write summary of the builds
01561 
01562     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
01563     status = print_report_exporter.report(build_report)
01564 
01565     # Store extra reports in files
01566     if single_test.opts_report_html_file_name:
01567         # Export results in form of HTML report to separate file
01568         report_exporter = ReportExporter(ResultExporterType.HTML)
01569         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
01570     if single_test.opts_report_junit_file_name:
01571         # Export results in form of JUnit XML report to separate file
01572         report_exporter = ReportExporter(ResultExporterType.JUNIT)
01573         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
01574     if single_test.opts_report_text_file_name:
01575         # Export results in form of a text file
01576         report_exporter = ReportExporter(ResultExporterType.TEXT)
01577         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
01578     if single_test.opts_report_build_file_name:
01579         # Export build results as JUnit XML report to separate file
01580         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
01581         report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
01582 
01583     # Returns True if no build failures of the test projects or their dependencies
01584     return status
01585 
01586 class TestLogger():
01587     """ Super-class for logging and printing ongoing events during a test suite pass
01588     """
01589     def __init__(self, store_log=True):
01590         """ We can control whether the logger actually stores the log in memory
01591             or just handles all log entries immediately
01592         """
01593         self.log = []
01594         self.log_to_file = False
01595         self.log_file_name = None
01596         self.store_log = store_log
01597 
01598         self.LogType = construct_enum(INFO='Info',
01599                                       WARN='Warning',
01600                                       NOTIF='Notification',
01601                                       ERROR='Error',
01602                                       EXCEPT='Exception')
01603 
01604         self.LogToFileAttr = construct_enum(CREATE=1,    # Create or overwrite existing log file
01605                                             APPEND=2)    # Append to existing log file
01606 
01607     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01608         """ Log one line of text
01609         """
01610         log_timestamp = time()
01611         log_entry = {'log_type' : LogType,
01612                      'log_timestamp' : log_timestamp,
01613                      'log_line' : log_line,
01614                      '_future' : None
01615         }
01616         # Store log in memory
01617         if self.store_log:
01618             self.log.append(log_entry)
01619         return log_entry
01620 
01621 
01622 class CLITestLogger(TestLogger):
01623     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
01624     """
01625     def __init__(self, store_log=True, file_name=None):
01626         TestLogger.__init__(self)
01627         self.log_file_name = file_name
01628         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
01629         self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
01630 
01631     def log_print(self, log_entry, timestamp=True):
01632         """ Formats a log entry for printing on screen and returns it as a string
01633         """
01634         ts = log_entry['log_timestamp']
01635         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] " % self.TIMESTAMP_FORMAT) if timestamp else ''
01636         log_line_str = "%(log_type)s: %(log_line)s" % log_entry
01637         return timestamp_str + log_line_str
01638 
01639     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01640         """ Logs a line; if a log file output was specified the log line will also be
01641             appended at the end of the log file
01642         """
01643         log_entry = TestLogger.log_line(self, LogType, log_line)
01644         log_line_str = self.log_print(log_entry, timestamp)
01645         if self.log_file_name is not None:
01646             try:
01647                 with open(self.log_file_name, 'a') as f:
01648                     f.write(log_line_str + line_delim)
01649             except IOError:
01650                 pass
01651         return log_line_str
01652 
01653 
01654 def factory_db_logger (db_url):
01655     """ Returns a database driver object depending on the database type supplied in the connection string db_url
01656     """
01657     if db_url is not None:
01658         from tools.test_mysql import MySQLDBAccess
01659         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
01660         if connection_info is not None:
01661             (db_type, username, password, host, db_name) = connection_info
01662             if db_type == 'mysql':
01663                 return MySQLDBAccess()
01664     return None
01665 
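# Editorial note: db_url is expected to be a database connection string such as
# 'mysql://username:password@127.0.0.1/db_name' (see the --db option below), e.g.:
#
#   db = factory_db_logger('mysql://username:password@127.0.0.1/db_name')  # -> MySQLDBAccess instance
#   db = factory_db_logger(None)                                           # -> None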
01666 
01667 def detect_database_verbose (db_url):
01668     """ Runs the database detection sequence in verbose mode (prints) to check if the database connection string is valid
01669     """
01670     result = BaseDBAccess().parse_db_connection_string(db_url)
01671     if result is not None:
01672         # Parsing passed
01673         (db_type, username, password, host, db_name) = result
01674         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
01675         # Let's try to connect
01676         db_ = factory_db_logger(db_url)
01677         if db_ is not None:
01678             print("Connecting to database '%s'..." % db_url)
01679             db_.connect(host, username, password, db_name)
01680             if db_.is_connected():
01681                 print("ok")
01682                 print("Detecting database...")
01683                 print(db_.detect_database(verbose=True))
01684                 print("Disconnecting...")
01685                 db_.disconnect()
01686                 print("done")
01687         else:
01688             print("Database type '%s' unknown" % db_type)
01689     else:
01690         print("Parse error: '%s' - DB Url error" % db_url)
01691 
01692 
01693 def get_module_avail (module_name):
01694     """ Returns True if 'module_name' has already been imported (module is available)
01695     """
01696     return module_name in sys.modules
01697 
01698 def get_autodetected_MUTS_list(platform_name_filter=None):
01699     oldError = None
01700     if os.name == 'nt':
01701         # Disable Windows error box temporarily
01702         oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
01703 
01704     mbeds = mbed_lstools.create()
01705     detect_muts_list = mbeds.list_mbeds()
01706 
01707     if os.name == 'nt':
01708         ctypes.windll.kernel32.SetErrorMode(oldError)
01709 
01710     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
01711 
01712 def get_autodetected_MUTS (mbeds_list, platform_name_filter=None):
01713     """ Converts the list of mbed-enabled devices connected to the host into an artificial MUTs structure.
01714         If the function fails to auto-detect devices it will return an empty dictionary.
01715 
01716         if get_module_avail('mbed_lstools'):
01717             mbeds = mbed_lstools.create()
01718             mbeds_list = mbeds.list_mbeds()
01719 
01720         @param mbeds_list list of mbeds captured from mbed_lstools
01721         @param platform_name_filter list of platform names used to filter the 'platform_name' of detected devices
01722     """
01723     result = {}   # Should be in muts_all.json format
01724     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
01725     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
01726     index = 1
01727     for mut in mbeds_list:
01728         # Filter the MUTS if a filter is specified
01729 
01730         if platform_name_filter and not mut['platform_name'] in platform_name_filter:
01731             continue
01732 
01733         # For 'mcu_unique' we assign the 'platform_name_unique' value from the mbedls output (if it exists);
01734         # if not, we create our own unique value (last few chars of the platform's target_id).
01735         m = {'mcu': mut['platform_name'],
01736              'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
01737              'port': mut['serial_port'],
01738              'disk': mut['mount_point'],
01739              'peripherals': []     # No peripheral detection
01740              }
01743         result[index] = m
01744         index += 1
01745     return result
01746 
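# Editorial example: for the mbedls entry shown in the comment above, the generated
# MUTs structure would look roughly like:
#
#   {1: {'mcu': 'NUCLEO_F302R8',
#        'mcu_unique': 'NUCLEO_F302R8[F72A]',
#        'port': 'COM34',
#        'disk': 'E:',
#        'peripherals': []}}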
01747 
01748 def get_autodetected_TEST_SPEC (mbeds_list,
01749                                use_default_toolchain=True,
01750                                use_supported_toolchains=False,
01751                                toolchain_filter=None,
01752                                platform_name_filter=None):
01753     """ Converts a list of mbed-enabled devices connected to the host into an artificial test_spec structure.
01754         If the function fails to auto-detect devices it will return an empty 'targets' test_spec description.
01755 
01756         use_default_toolchain - if True add default toolchain to test_spec
01757         use_supported_toolchains - if True add all supported toolchains to test_spec
01758         toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
01759     """
01760     result = {'targets': {} }
01761 
01762     for mut in mbeds_list:
01763         mcu = mut['mcu']
01764         if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
01765             if mcu in TARGET_MAP:
01766                 default_toolchain = TARGET_MAP[mcu].default_toolchain
01767                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
01768 
01769                 # Decide which toolchains should be added to test specification toolchain pool for each target
01770                 toolchains = []
01771                 if use_default_toolchain:
01772                     toolchains.append(default_toolchain)
01773                 if use_supported_toolchains:
01774                     toolchains += supported_toolchains
01775                 if toolchain_filter is not None:
01776                     all_toolchains = supported_toolchains + [default_toolchain]
01777                     for toolchain in toolchain_filter:
01778                         if toolchain in all_toolchains:
01779                             toolchains.append(toolchain)
01780 
01781                 result['targets'][mcu] = list(set(toolchains))
01782     return result
01783 
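# Editorial example (target and toolchain names illustrative): for a detected K64F
# board with use_default_toolchain=True and use_supported_toolchains=False the
# result could look like:
#
#   {'targets': {'K64F': ['ARM']}}
#
# i.e. a 'targets' map from MCU name to a de-duplicated list of toolchains.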
01784 
01785 def get_default_test_options_parser ():
01786     """ Get common test script options used by CLI, web services etc.
01787     """
01788     parser = argparse.ArgumentParser()
01789     parser.add_argument('-i', '--tests',
01790                         dest='test_spec_filename',
01791                         metavar="FILE",
01792                         type=argparse_filestring_type,
01793                         help='Points to file with test specification')
01794 
01795     parser.add_argument('-M', '--MUTS',
01796                         dest='muts_spec_filename',
01797                         metavar="FILE",
01798                         type=argparse_filestring_type,
01799                         help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
01800 
01801     parser.add_argument("-j", "--jobs",
01802                         dest='jobs',
01803                         metavar="NUMBER",
01804                         type=int,
01805                         help="Define number of compilation jobs. Default value is 1")
01806 
01807     if get_module_avail('mbed_lstools'):
01808         # Additional features available when mbed_lstools is installed on host and imported
01809         # mbed_lstools allows users to detect mbed-enabled devices connected to the host
01810         parser.add_argument('--auto',
01811                             dest='auto_detect',
01812                             action="store_true",
01813                             help='Use mbed-ls module to detect all connected mbed devices')
01814 
01815         toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
01816         parser.add_argument('--tc',
01817                             dest='toolchains_filter',
01818                             type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
01819                             help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
01820 
01821         test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
01822         parser.add_argument('--oper',
01823                             dest='operability_checks',
01824                             type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
01825                             help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
01826 
01827     parser.add_argument('--clean',
01828                         dest='clean',
01829                         action="store_true",
01830                         help='Clean the build directory')
01831 
01832     parser.add_argument('-P', '--only-peripherals',
01833                         dest='test_only_peripheral',
01834                         default=False,
01835                         action="store_true",
01836                         help='Test only peripherals declared for MUT and skip common tests')
01837 
01838     parser.add_argument("--profile", dest="profile", action="append",
01839                         type=argparse_filestring_type,
01840                         default=[])
01841 
01842     parser.add_argument('-C', '--only-commons',
01843                         dest='test_only_common',
01844                         default=False,
01845                         action="store_true",
01846                         help='Test only board internals. Skip peripheral tests and perform common tests')
01847 
01848     parser.add_argument('-n', '--test-by-names',
01849                         dest='test_by_names',
01850                         type=argparse_many(str),
01851                         help='Runs only tests enumerated in this switch. Use comma to separate test case names')
01852 
01853     parser.add_argument('-p', '--peripheral-by-names',
01854                         dest='peripheral_by_names',
01855                         type=argparse_many(str),
01856                         help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
01857 
01858     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
01859     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
01860 
01861     parser.add_argument('-c', '--copy-method',
01862                         dest='copy_method',
01863                         type=argparse_uppercase_type(copy_methods, "flash method"),
01864                         help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
01865 
01866     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
01867     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
01868 
01869     parser.add_argument('-r', '--reset-type',
01870                         dest='mut_reset_type',
01871                         default=None,
01872                         type=argparse_uppercase_type(reset_methods, "reset method"),
01873                         help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
01874 
01875     parser.add_argument('-g', '--goanna-for-tests',
01876                         dest='goanna_for_tests',
01877                         action="store_true",
01878                         help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')
01879 
01880     parser.add_argument('-G', '--goanna-for-sdk',
01881                         dest='goanna_for_mbed_sdk',
01882                         action="store_true",
01883                         help='Run Goanna static analysis tool for mbed SDK (Project will be rebuilt)')
01884 
01885     parser.add_argument('-s', '--suppress-summary',
01886                         dest='suppress_summary',
01887                         default=False,
01888                         action="store_true",
01889                         help='Suppresses display of well-formatted table with test results')
01890 
01891     parser.add_argument('-t', '--test-summary',
01892                         dest='test_x_toolchain_summary',
01893                         default=False,
01894                         action="store_true",
01895                         help='Displays well-formatted table with test x toolchain test result per target')
01896 
01897     parser.add_argument('-A', '--test-automation-report',
01898                         dest='test_automation_report',
01899                         default=False,
01900                         action="store_true",
01901                         help='Prints information about all tests and exits')
01902 
01903     parser.add_argument('-R', '--test-case-report',
01904                         dest='test_case_report',
01905                         default=False,
01906                         action="store_true",
01907                         help='Prints information about all test cases and exits')
01908 
01909     parser.add_argument("-S", "--supported-toolchains",
01910                         action="store_true",
01911                         dest="supported_toolchains",
01912                         default=False,
01913                         help="Displays supported matrix of MCUs and toolchains")
01914 
01915     parser.add_argument("-O", "--only-build",
01916                         action="store_true",
01917                         dest="only_build_tests",
01918                         default=False,
01919                         help="Only build tests, skips actual test procedures (flashing etc.)")
01920 
01921     parser.add_argument('--parallel',
01922                         dest='parallel_test_exec',
01923                         default=False,
01924                         action="store_true",
01925                         help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')
01926 
01927     parser.add_argument('--config',
01928                         dest='verbose_test_configuration_only',
01929                         default=False,
01930                         action="store_true",
01931                         help='Displays full test specification and MUTs configuration and exits')
01932 
01933     parser.add_argument('--loops',
01934                         dest='test_loops_list',
01935                         type=argparse_many(str),
01936                         help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
01937 
01938     parser.add_argument('--global-loops',
01939                         dest='test_global_loops_value',
01940                         type=int,
01941                         help='Set global number of test loops per test. Default value is 1')
01942 
01943     parser.add_argument('--consolidate-waterfall',
01944                         dest='consolidate_waterfall_test',
01945                         default=False,
01946                         action="store_true",
01947                         help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
01948 
01949     parser.add_argument('-W', '--waterfall',
01950                         dest='waterfall_test',
01951                         default=False,
01952                         action="store_true",
01953                         help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
01954 
01955     parser.add_argument('-N', '--firmware-name',
01956                         dest='firmware_global_name',
01957                         help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')
01958 
01959     parser.add_argument('-u', '--shuffle',
01960                         dest='shuffle_test_order',
01961                         default=False,
01962                         action="store_true",
01963                         help='Shuffles test execution order')
01964 
01965     parser.add_argument('--shuffle-seed',
01966                         dest='shuffle_test_seed',
01967                         default=None,
01968                         help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
01969 
01970     parser.add_argument('-f', '--filter',
01971                         dest='general_filter_regex',
01972                         type=argparse_many(str),
01973                         default=None,
01974                         help='For some commands you can use this filter to filter out results')
01975 
01976     parser.add_argument('--inc-timeout',
01977                         dest='extend_test_timeout',
01978                         metavar="NUMBER",
01979                         type=int,
01980                         help='You can increase global timeout for each test by specifying additional test timeout in seconds')
01981 
01982     parser.add_argument('--db',
01983                         dest='db_url',
01984                         help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
01985 
01986     parser.add_argument('-l', '--log',
01987                         dest='log_file_name',
01988                         help='Log events to external file (note not all console entries may be visible in log file)')
01989 
01990     parser.add_argument('--report-html',
01991                         dest='report_html_file_name',
01992                         help='You can log test suite results in form of HTML report')
01993 
01994     parser.add_argument('--report-junit',
01995                         dest='report_junit_file_name',
01996                         help='You can log test suite results in form of JUnit compliant XML report')
01997 
01998     parser.add_argument("--report-build",
01999                         dest="report_build_file_name",
02000                         help="Output the build results to a junit xml file")
02001 
02002     parser.add_argument("--report-text",
02003                         dest="report_text_file_name",
02004                         help="Output the build results to a text file")
02005 
02006     parser.add_argument('--verbose-skipped',
02007                         dest='verbose_skipped_tests',
02008                         default=False,
02009                         action="store_true",
02010                         help='Prints some extra information about skipped tests')
02011 
02012     parser.add_argument('-V', '--verbose-test-result',
02013                         dest='verbose_test_result_only',
02014                         default=False,
02015                         action="store_true",
02016                         help='Prints test serial output')
02017 
02018     parser.add_argument('-v', '--verbose',
02019                         dest='verbose',
02020                         default=False,
02021                         action="store_true",
02022                         help='Verbose mode (prints some extra information)')
02023 
02024     parser.add_argument('--version',
02025                         dest='version',
02026                         default=False,
02027                         action="store_true",
02028                         help='Prints script version and exits')
02029 
02030     parser.add_argument('--stats-depth',
02031                         dest='stats_depth',
02032                         default=2,
02033                         type=int,
02034                         help="Depth level for static memory report")
02035     return parser
02036 
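# Editorial example: typical use of the options parser (all flags shown are defined above):
#
#   parser = get_default_test_options_parser()
#   opts = parser.parse_args(['-j', '8', '--clean', '-v'])
#   # opts.jobs == 8, opts.clean is True, opts.verbose is True
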
02037 def test_path_to_name (path, base):
02038     """Change all slashes in a path into hyphens
02039     This creates a unique cross-platform test name based on the path
02040     This can eventually be overridden by a to-be-determined meta-data mechanism"""
02041     name_parts = []
02042     head, tail = os.path.split(relpath(path,base))
02043     while (tail and tail != "."):
02044         name_parts.insert(0, tail)
02045         head, tail = os.path.split(head)
02046 
02047     return "-".join(name_parts).lower()
02048 
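# Editorial example (POSIX-style path, illustrative):
#
#   test_path_to_name('TESTS/network/tcp_echo', '.')  -> 'tests-network-tcp_echo'
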
02049 def get_test_config (config_name, target_name):
02050     """Finds the path to a test configuration file
02051     config_name: path to a custom configuration file OR an mbed OS interface ("ethernet", "wifi_odin", etc.)
02052     target_name: name of target used to determine if the given mbed OS interface is valid
02053     returns path to config; returns None if no valid config is found
02054     """
02055     # If they passed in a full path
02056     if exists(config_name):
02057         # This is a module config
02058         return config_name
02059     # Otherwise find the path to configuration file based on mbed OS interface
02060     return TestConfig.get_config_path(config_name, target_name)
02061 
02062 def find_tests (base_dir, target_name, toolchain_name, app_config=None):
02063     """ Finds all tests in a directory recursively
02064     base_dir: path to the directory to scan for tests (ex. 'path/to/project')
02065     target_name: name of the target to use for scanning (ex. 'K64F')
02066     toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
02068     app_config: location of a chosen mbed_app.json file
02069 
02070     returns a dictionary where keys are the test name, and the values are
02071     lists of paths needed to build the test.
02072     """
02073 
02074     # Temporary structure: tests referenced by (name, base, group, case) tuple
02075     tests = {}
02076     # List of common folders: (predicate function, path) tuple
02077     commons = []
02078 
02079     # Prepare the toolchain
02080     toolchain = prepare_toolchain([base_dir], None, target_name, toolchain_name,
02081                                   silent=True, app_config=app_config)
02082 
02083     # Scan the directory for paths to probe for 'TESTS' folders
02084     base_resources = scan_resources([base_dir], toolchain)
02085 
02086     dirs = base_resources.inc_dirs
02087     for directory in dirs:
02088         subdirs = os.listdir(directory)
02089 
02090         # If the directory contains a subdirectory called 'TESTS', scan it for test cases
02091         if 'TESTS' in subdirs:
02092             walk_base_dir = join(directory, 'TESTS')
02093             test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir)
02094 
02095             # Loop through all subdirectories
02096             for d in test_resources.inc_dirs:
02097 
02098                 # If the test case folder is not called 'host_tests' or 'COMMON' and it is
02099                 # located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
02100                 # then add it to the tests
02101                 relative_path = relpath(d, walk_base_dir)
02102                 relative_path_parts = os.path.normpath(relative_path).split(os.sep)
02103                 if len(relative_path_parts) == 2:
02104                     test_group_directory_path, test_case_directory = os.path.split(d)
02105                     test_group_directory = os.path.basename(test_group_directory_path)
02106 
02107                     # Check to make sure the discovered folder is not a host test directory or common directory
02108                     special_dirs = ['host_tests', 'COMMON']
02109                     if test_group_directory not in special_dirs and test_case_directory not in special_dirs:
02110                         test_name = test_path_to_name(d, base_dir)
02111                         tests[(test_name, walk_base_dir, test_group_directory, test_case_directory)] = [d]
02112 
02113                 # Also find any COMMON paths, we'll add these later once we find all the base tests
02114                 if 'COMMON' in relative_path_parts:
02115                     if relative_path_parts[0] != 'COMMON':
02116                         def predicate(base_pred, group_pred, test_identity):  # test_identity == (name, base, group, case)
02117                             return test_identity[1] == base_pred and test_identity[2] == group_pred
02118                         commons.append((functools.partial(predicate, walk_base_dir, relative_path_parts[0]), d))
02119                     else:
02120                         def predicate(base_pred, test_identity):
02121                             return test_identity[1] == base_pred
02122                         commons.append((functools.partial(predicate, walk_base_dir), d))
02123 
02124     # Apply common directories
02125     for pred, path in commons:
02126         for test_identity, test_paths in tests.items():
02127             if pred(test_identity):
02128                 test_paths.append(path)
02129 
02130     # Drop identity besides name
02131     return {name: paths for (name, _, _, _), paths in tests.items()}
02132 
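# Editorial example: the returned structure maps each test name to the list of
# source paths needed to build it, e.g. (paths illustrative):
#
#   {'tests-network-tcp_echo': ['./TESTS/network/tcp_echo',
#                               './TESTS/network/COMMON']}
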
02133 def print_tests (tests, format="list", sort=True):
02134     """Given a dictionary of tests (as returned from "find_tests"), print them
02135     in the specified format"""
02136     if format == "list":
02137         for test_name in sorted(tests.keys()):
02138             test_path = tests[test_name][0]
02139             print("Test Case:")
02140             print("    Name: %s" % test_name)
02141             print("    Path: %s" % test_path)
02142     elif format == "json":
02143         print(json.dumps({test_name: test_paths[0] for test_name, test_paths
02144                           in tests.items()}, indent=2))
02145     else:
02146         print("Unknown format '%s'" % format)
02147         sys.exit(1)
02148 
02149 def norm_relative_path (path, start):
02150     """This function will create a normalized, relative path. It mimics the
02151     python os.path.relpath function, but also normalizes a Windows-style path
02152     that uses backslashes to a Unix-style path that uses forward slashes."""
02153     path = os.path.normpath(path)
02154     path = os.path.relpath(path, start)
02155     path = path.replace("\\", "/")
02156     return path
02157 
02158 
02159 def build_test_worker (*args, **kwargs):
02160     """This is a worker function for the parallel building of tests. The `args`
02161     and `kwargs` are passed directly to `build_project`. It returns a dictionary
02162     with the following structure:
02163 
02164     {
02165         'result': `True` if no exceptions were thrown, `False` otherwise
02166         'reason': Instance of exception that was thrown on failure
02167         'bin_file': Path to the created binary if `build_project` was
02168                     successful. Not present otherwise
02169         'kwargs': The keyword arguments that were passed to `build_project`.
02170                   This includes arguments that were modified (ex. report)
02171     }
02172     """
02173     bin_file = None
02174     ret = {
02175         'result': False,
02176         'args': args,
02177         'kwargs': kwargs
02178     }
02179 
02180     # Use parent TOOLCHAIN_PATHS variable
02181     for key, value in kwargs['toolchain_paths'].items():
02182         TOOLCHAIN_PATHS[key] = value
02183 
02184     del kwargs['toolchain_paths']
02185 
02186     try:
02187         bin_file = build_project(*args, **kwargs)
02188         ret['result'] = True
02189         ret['bin_file'] = bin_file
02190         ret['kwargs'] = kwargs
02191 
02192     except NotSupportedException as e:
02193         ret['reason'] = e
02194     except ToolException as e:
02195         ret['reason'] = e
02196     except KeyboardInterrupt as e:
02197         ret['reason'] = e
02198     except:
02199         # Print unhandled exceptions here
02200         import traceback
02201         traceback.print_exc(file=sys.stdout)
02202 
02203     return ret
02204 
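# Editorial example: a successful worker result has the shape documented above
# (values illustrative):
#
#   {'result': True,
#    'args': (...),
#    'kwargs': {...},
#    'bin_file': 'BUILD/tests/K64F/GCC_ARM/TESTS/network/tcp_echo/tcp_echo.bin'}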
02205 
02206 def build_tests (tests, base_source_paths, build_path, target, toolchain_name,
02207                 clean=False, notify=None, verbose=False, jobs=1, macros=None,
02208                 silent=False, report=None, properties=None,
02209                 continue_on_build_fail=False, app_config=None,
02210                 build_profile=None, stats_depth=None):
02211     """Given the data structure from 'find_tests' and the typical build parameters,
02212     build all the tests
02213 
02214     Returns a tuple of the build result (True or False) followed by the test
02215     build data structure"""
02216 
02217     execution_directory = "."
02218     base_path = norm_relative_path(build_path, execution_directory)
02219 
02220     target_name = target.name if isinstance(target, Target) else target
02221     cfg, _, _ = get_config(base_source_paths, target_name, toolchain_name)
02222 
02223     baud_rate = 9600
02224     if 'platform.stdio-baud-rate' in cfg:
02225         baud_rate = cfg['platform.stdio-baud-rate'].value
02226 
02227     test_build = {
02228         "platform": target_name,
02229         "toolchain": toolchain_name,
02230         "base_path": base_path,
02231         "baud_rate": baud_rate,
02232         "binary_type": "bootable",
02233         "tests": {}
02234     }
02235 
02236     result = True
02237 
02238     jobs_count = int(jobs if jobs else cpu_count())
02239     p = Pool(processes=jobs_count)
02240     results = []
02241     for test_name, test_paths in tests.items():
02242         if not isinstance(test_paths, list):
02243             test_paths = [test_paths]
02244 
02245         test_build_path = os.path.join(build_path, test_paths[0])
02246         src_paths = base_source_paths + test_paths
02247         bin_file = None
02248         test_case_folder_name = os.path.basename(test_paths[0])
02249 
02250         args = (src_paths, test_build_path, target, toolchain_name)
02251         kwargs = {
02252             'jobs': 1,
02253             'clean': clean,
02254             'macros': macros,
02255             'name': test_case_folder_name,
02256             'project_id': test_name,
02257             'report': report,
02258             'properties': properties,
02259             'verbose': verbose,
02260             'app_config': app_config,
02261             'build_profile': build_profile,
02262             'silent': True,
02263             'toolchain_paths': TOOLCHAIN_PATHS,
02264             'stats_depth': stats_depth
02265         }
02266 
02267         results.append(p.apply_async(build_test_worker, args, kwargs))
02268 
02269     p.close()
02270     result = True
02271     itr = 0
02272     while len(results):
02273         itr += 1
02274         if itr > 360000:
02275             p.terminate()
02276             p.join()
02277             raise ToolException("Compile did not finish in 1 hour")
02278         else:
02279             sleep(0.01)
02280             pending = 0
02281             for r in results:
02282                 if r.ready() is True:
02283                     try:
02284                         worker_result = r.get()
02285                         results.remove(r)
02286 
02287                         # Take report from the kwargs and merge it into existing report
02288                         if report:
02289                             report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
02290                             for test_key in report_entry.keys():
02291                                 report[target_name][toolchain_name][test_key] = report_entry[test_key]
02292 
02293                         # Set the overall result to a failure if a build failure occurred
02294                         if ('reason' in worker_result and
02295                             worker_result['reason'] and
02296                             not isinstance(worker_result['reason'], NotSupportedException)):
02297                             result = False
02298                             break
02299 
02300                         # Adding binary path to test build result
02301                         if ('result' in worker_result and
02302                             worker_result['result'] and
02303                             'bin_file' in worker_result):
02304                             bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
02305 
02306                             test_build['tests'][worker_result['kwargs']['project_id']] = {
02307                                 "binaries": [
02308                                     {
02309                                         "path": bin_file
02310                                     }
02311                                 ]
02312                             }
02313 
02314                             test_key = worker_result['kwargs']['project_id'].upper()
02315                             if report:
02316                                 print(report[target_name][toolchain_name][test_key][0][0]['output'].rstrip())
02317                             print('Image: %s\n' % bin_file)
02318 
02319                     except:
02320                         if p._taskqueue.queue:
02321                             p._taskqueue.queue.clear()
02322                             sleep(0.5)
02323                         p.terminate()
02324                         p.join()
02325                         raise
02326                 else:
02327                     pending += 1
02328                     if pending >= jobs_count:
02329                         break
02330 
02331             # Break as soon as possible if there is a failure and we are not
02332             # continuing on build failures
02333             if not result and not continue_on_build_fail:
02334                 if p._taskqueue.queue:
02335                     p._taskqueue.queue.clear()
02336                     sleep(0.5)
02337                 p.terminate()
02338                 break
02339 
02340     p.join()
02341 
02342     test_builds = {}
02343     test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
02344 
02345     return result, test_builds
02346 
02347 
02348 def test_spec_from_test_builds(test_builds):
02349     return {
02350         "builds": test_builds
02351     }
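
# Editorial example: combined with build_tests() above, the resulting test spec has
# the shape (values illustrative):
#
#   {'builds': {'K64F-GCC_ARM': {'platform': 'K64F',
#                                'toolchain': 'GCC_ARM',
#                                'base_path': 'BUILD/tests/K64F/GCC_ARM',
#                                'baud_rate': 9600,
#                                'binary_type': 'bootable',
#                                'tests': {...}}}}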