the other jimmy / mbed-sdk-tools

Fork of mbed-sdk-tools by mbed official

test_api.py

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2014 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 
00017 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
00018 """
00019 
00020 import os
00021 import re
00022 import sys
00023 import json
00024 import uuid
00025 import pprint
00026 import random
00027 import argparse
00028 import datetime
00029 import threading
00030 import ctypes
00031 from types import ListType
00032 from colorama import Fore, Back, Style
00033 from prettytable import PrettyTable
00034 from copy import copy
00035 
00036 from time import sleep, time
00037 from Queue import Queue, Empty
00038 from os.path import join, exists, basename, relpath
00039 from threading import Thread, Lock
00040 from multiprocessing import Pool, cpu_count
00041 from subprocess import Popen, PIPE
00042 
00043 # Imports related to mbed build api
00044 from tools.tests import TESTS
00045 from tools.tests import TEST_MAP
00046 from tools.paths import BUILD_DIR
00047 from tools.paths import HOST_TESTS
00048 from tools.utils import ToolException
00049 from tools.utils import NotSupportedException
00050 from tools.utils import construct_enum
00051 from tools.memap import MemapParser
00052 from tools.targets import TARGET_MAP
00053 from tools.test_db import BaseDBAccess
00054 from tools.build_api import build_project, build_mbed_libs, build_lib
00055 from tools.build_api import get_target_supported_toolchains
00056 from tools.build_api import write_build_report
00057 from tools.build_api import prep_report
00058 from tools.build_api import prep_properties
00059 from tools.build_api import create_result
00060 from tools.build_api import add_result_to_report
00061 from tools.build_api import prepare_toolchain
00062 from tools.build_api import scan_resources
00063 from tools.build_api import get_config
00064 from tools.libraries import LIBRARIES, LIBRARY_MAP
00065 from tools.options import extract_profile
00066 from tools.toolchains import TOOLCHAIN_PATHS
00067 from tools.toolchains import TOOLCHAINS
00068 from tools.test_exporters import ReportExporter, ResultExporterType
00069 from tools.utils import argparse_filestring_type
00070 from tools.utils import argparse_uppercase_type
00071 from tools.utils import argparse_lowercase_type
00072 from tools.utils import argparse_many
00073 from tools.utils import get_path_depth
00074 
00075 import tools.host_tests.host_tests_plugins as host_tests_plugins
00076 
00077 try:
00078     import mbed_lstools
00079     from tools.compliance.ioper_runner import get_available_oper_test_scopes
00080 except:
00081     pass
00082 
00083 
00084 class ProcessObserver(Thread):
00085     def __init__(self, proc):
00086         Thread.__init__(self)
00087         self.proc = proc
00088         self.queue = Queue()
00089         self.daemon = True
00090         self.active = True
00091         self.start()
00092 
00093     def run(self):
00094         while self.active:
00095             c = self.proc.stdout.read(1)
00096             self.queue.put(c)
00097 
00098     def stop(self):
00099         self.active = False
00100         try:
00101             self.proc.terminate()
00102         except Exception, _:
00103             pass
00104 
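# Editor's note: the function below is an illustrative sketch, not part of the original
# source. It mirrors how run_host_test() further down consumes a ProcessObserver;
# `cmd` is a placeholder command line (list of strings) for a host test process.
def _example_drain_process_output(cmd):
    """Spawn a process and collect its stdout through a ProcessObserver."""
    proc = Popen(cmd, stdout=PIPE)
    obs = ProcessObserver(proc)  # daemon thread reads stdout char by char into obs.queue
    chunks = []
    try:
        while proc.poll() is None:
            try:
                chunks.append(obs.queue.get(block=True, timeout=0.5))
            except Empty:
                pass  # no output in the last 0.5 s, keep polling
    finally:
        obs.stop()  # stops the reader loop and terminates the process
    return "".join(chunks)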
00105 
00106 class SingleTestExecutor (threading.Thread):
00107     """ Example: running a single test suite instance in a separate thread
00108     """
00109     def __init__(self, single_test):
00110         self.single_test  = single_test
00111         threading.Thread.__init__(self)
00112 
00113     def run(self):
00114         start = time()
00115         # Execute tests depending on options and filter applied
00116         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = self.single_test.execute()
00117         elapsed_time = time() - start
00118 
00119         # Human readable summary
00120         if not self.single_test .opts_suppress_summary:
00121             # prints well-formed summary with results (SQL table like)
00122             print self.single_test .generate_test_summary(test_summary, shuffle_seed)
00123         if self.single_test .opts_test_x_toolchain_summary:
00124             # prints well-formed summary with results (SQL table like)
00125             # table shows test x toolchain test result matrix
00126             print self.single_test .generate_test_summary_by_target(test_summary, shuffle_seed)
00127         print "Completed in %.2f sec"% (elapsed_time)
00128 
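# Editor's note: usage sketch, not part of the original source. `single_test` is assumed
# to be a fully configured SingleTestRunner instance (class defined below).
def _example_run_tests_in_background(single_test):
    """Run the whole test suite in a separate thread and wait for it to finish."""
    executor = SingleTestExecutor(single_test)
    executor.start()  # run() calls single_test.execute() and prints the summaries
    executor.join()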
00129 
00130 class SingleTestRunner (object):
00131     """ Object wrapper for single test run which may involve multiple MUTs
00132     """
00133     RE_DETECT_TESTCASE_RESULT = None
00134 
00135     # Return codes for test script
00136     TEST_RESULT_OK = "OK"
00137     TEST_RESULT_FAIL = "FAIL"
00138     TEST_RESULT_ERROR = "ERROR"
00139     TEST_RESULT_UNDEF = "UNDEF"
00140     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
00141     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
00142     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
00143     TEST_RESULT_TIMEOUT = "TIMEOUT"
00144     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
00145     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
00146     TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
00147     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
00148 
00149     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
00150     TEST_LOOPS_LIST = []    # We redefine no. of loops per test_id
00151     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
00152 
00153     muts = {} # MUTs descriptor (from external file)
00154     test_spec = {} # Test specification (from external file)
00155 
00156     # mbed test suite -> SingleTestRunner
00157     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
00158                            "failure" : TEST_RESULT_FAIL,
00159                            "error" : TEST_RESULT_ERROR,
00160                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
00161                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
00162                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
00163                            "timeout" : TEST_RESULT_TIMEOUT,
00164                            "no_image" : TEST_RESULT_NO_IMAGE,
00165                            "end" : TEST_RESULT_UNDEF,
00166                            "mbed_assert" : TEST_RESULT_MBED_ASSERT,
00167                            "build_failed" : TEST_RESULT_BUILD_FAILED,
00168                            "not_supproted" : TEST_RESULT_NOT_SUPPORTED
00169     }
00170 
00171     def __init__ (self,
00172                  _global_loops_count=1,
00173                  _test_loops_list=None,
00174                  _muts={},
00175                  _clean=False,
00176                  _parser=None,
00177                  _opts=None,
00178                  _opts_db_url=None,
00179                  _opts_log_file_name=None,
00180                  _opts_report_html_file_name=None,
00181                  _opts_report_junit_file_name=None,
00182                  _opts_report_build_file_name=None,
00183                  _opts_report_text_file_name=None,
00184                  _opts_build_report={},
00185                  _opts_build_properties={},
00186                  _test_spec={},
00187                  _opts_goanna_for_mbed_sdk=None,
00188                  _opts_goanna_for_tests=None,
00189                  _opts_shuffle_test_order=False,
00190                  _opts_shuffle_test_seed=None,
00191                  _opts_test_by_names=None,
00192                  _opts_peripheral_by_names=None,
00193                  _opts_test_only_peripheral=False,
00194                  _opts_test_only_common=False,
00195                  _opts_verbose_skipped_tests=False,
00196                  _opts_verbose_test_result_only=False,
00197                  _opts_verbose=False,
00198                  _opts_firmware_global_name=None,
00199                  _opts_only_build_tests=False,
00200                  _opts_parallel_test_exec=False,
00201                  _opts_suppress_summary=False,
00202                  _opts_test_x_toolchain_summary=False,
00203                  _opts_copy_method=None,
00204                  _opts_mut_reset_type=None,
00205                  _opts_jobs=None,
00206                  _opts_waterfall_test=None,
00207                  _opts_consolidate_waterfall_test=None,
00208                  _opts_extend_test_timeout=None,
00209                  _opts_auto_detect=None,
00210                  _opts_include_non_automated=False):
00211         """ Let's try hard to init this object
00212         """
00213         from colorama import init
00214         init()
00215 
00216         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING .keys()) + ")\\}"
00217         self.RE_DETECT_TESTCASE_RESULT  = re.compile(PATTERN)
00218         # Settings related to test loops counters
00219         try:
00220             _global_loops_count = int(_global_loops_count)
00221         except:
00222             _global_loops_count = 1
00223         if _global_loops_count < 1:
00224             _global_loops_count = 1
00225         self.GLOBAL_LOOPS_COUNT  = _global_loops_count
00226         self.TEST_LOOPS_LIST  = _test_loops_list if _test_loops_list else []
00227         self.TEST_LOOPS_DICT  = self.test_loop_list_to_dict (_test_loops_list)
00228 
00229         self.shuffle_random_seed  = 0.0
00230         self.SHUFFLE_SEED_ROUND  = 10
00231 
00232         # MUT list and test specification storage
00233         self.muts  = _muts
00234         self.test_spec  = _test_spec
00235 
00236         # Settings passed e.g. from command line
00237         self.opts_db_url  = _opts_db_url
00238         self.opts_log_file_name  = _opts_log_file_name
00239         self.opts_report_html_file_name  = _opts_report_html_file_name
00240         self.opts_report_junit_file_name  = _opts_report_junit_file_name
00241         self.opts_report_build_file_name  = _opts_report_build_file_name
00242         self.opts_report_text_file_name  = _opts_report_text_file_name
00243         self.opts_goanna_for_mbed_sdk  = _opts_goanna_for_mbed_sdk
00244         self.opts_goanna_for_tests  = _opts_goanna_for_tests
00245         self.opts_shuffle_test_order  = _opts_shuffle_test_order
00246         self.opts_shuffle_test_seed  = _opts_shuffle_test_seed
00247         self.opts_test_by_names  = _opts_test_by_names
00248         self.opts_peripheral_by_names  = _opts_peripheral_by_names
00249         self.opts_test_only_peripheral  = _opts_test_only_peripheral
00250         self.opts_test_only_common  = _opts_test_only_common
00251         self.opts_verbose_skipped_tests  = _opts_verbose_skipped_tests
00252         self.opts_verbose_test_result_only  = _opts_verbose_test_result_only
00253         self.opts_verbose  = _opts_verbose
00254         self.opts_firmware_global_name  = _opts_firmware_global_name
00255         self.opts_only_build_tests  = _opts_only_build_tests
00256         self.opts_parallel_test_exec  = _opts_parallel_test_exec
00257         self.opts_suppress_summary  = _opts_suppress_summary
00258         self.opts_test_x_toolchain_summary  = _opts_test_x_toolchain_summary
00259         self.opts_copy_method  = _opts_copy_method
00260         self.opts_mut_reset_type  = _opts_mut_reset_type
00261         self.opts_jobs  = _opts_jobs if _opts_jobs is not None else 1
00262         self.opts_waterfall_test  = _opts_waterfall_test
00263         self.opts_consolidate_waterfall_test  = _opts_consolidate_waterfall_test
00264         self.opts_extend_test_timeout  = _opts_extend_test_timeout
00265         self.opts_clean  = _clean
00266         self.opts_parser  = _parser
00267         self.opts  = _opts
00268         self.opts_auto_detect  = _opts_auto_detect
00269         self.opts_include_non_automated  = _opts_include_non_automated
00270 
00271         self.build_report  = _opts_build_report
00272         self.build_properties  = _opts_build_properties
00273 
00274         # File / screen logger initialization
00275         self.logger  = CLITestLogger(file_name=self.opts_log_file_name )  # Default test logger
00276 
00277         # Database related initializations
00278         self.db_logger  = factory_db_logger(self.opts_db_url )
00279         self.db_logger_build_id  = None # Build ID (database index of build_id table)
00280         # Let's connect to database to set up credentials and confirm database is ready
00281         if self.db_logger :
00282             self.db_logger .connect_url(self.opts_db_url ) # Save db access info inside db_logger object
00283             if self.db_logger .is_connected():
00284                 # Get hostname and uname so we can use it as build description
00285                 # when creating new build_id in external database
00286                 (_hostname, _uname) = self.db_logger .get_hostname()
00287                 _host_location = os.path.dirname(os.path.abspath(__file__))
00288                 build_id_type = None if self.opts_only_build_tests  is None else self.db_logger .BUILD_ID_TYPE_BUILD_ONLY
00289                 self.db_logger_build_id  = self.db_logger .get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
00290                 self.db_logger .disconnect()
00291 
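    # Editor's note (not in the original source): most constructor arguments have usable
    # defaults, so a minimal construction might look like:
    #   single_test = SingleTestRunner(_muts=muts, _test_spec=test_spec,
    #                                  _opts_jobs=4, _opts_verbose=True)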
00292     def dump_options (self):
00293         """ Function returns a data structure with common settings passed to SingleTestRunner.
00294             It can be used, for example, to fill the _extra fields in a database storing test suite run data.
00295             Example:
00296             data = self.dump_options()
00297             or
00298             data_str = json.dumps(self.dump_options())
00299         """
00300         result = {"db_url" : str(self.opts_db_url ),
00301                   "log_file_name" :  str(self.opts_log_file_name ),
00302                   "shuffle_test_order" : str(self.opts_shuffle_test_order ),
00303                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed ),
00304                   "test_by_names" :  str(self.opts_test_by_names ),
00305                   "peripheral_by_names" : str(self.opts_peripheral_by_names ),
00306                   "test_only_peripheral" :  str(self.opts_test_only_peripheral ),
00307                   "test_only_common" :  str(self.opts_test_only_common ),
00308                   "verbose" :  str(self.opts_verbose ),
00309                   "firmware_global_name" :  str(self.opts_firmware_global_name ),
00310                   "only_build_tests" :  str(self.opts_only_build_tests ),
00311                   "copy_method" :  str(self.opts_copy_method ),
00312                   "mut_reset_type" :  str(self.opts_mut_reset_type ),
00313                   "jobs" :  str(self.opts_jobs ),
00314                   "extend_test_timeout" :  str(self.opts_extend_test_timeout ),
00315                   "_dummy" : ''
00316         }
00317         return result
00318 
00319     def shuffle_random_func(self):
00320         return self.shuffle_random_seed 
00321 
00322     def is_shuffle_seed_float (self):
00323         """ Return True if the shuffle seed can be converted to float
00324         """
00325         result = True
00326         try:
00327             float(self.shuffle_random_seed )
00328         except ValueError:
00329             result = False
00330         return result
00331 
00332     # This will store target / toolchain specific properties
00333     test_suite_properties_ext = {}  # target : toolchain
00334     # Here we store test results
00335     test_summary = []
00336     # Here we store test results in extended data structure
00337     test_summary_ext = {}
00338     execute_thread_slice_lock = Lock()
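    # Shape sketch (editor's note), as populated by execute_thread_slice() below:
    #   test_summary              -> list of 8-tuples: (result, target, toolchain, test_id,
    #                                description, elapsed_time, duration, loops)
    #   test_summary_ext          -> { target : { toolchain : { test_id : [ {loop_no : result_dict} ] } } }
    #   test_suite_properties_ext -> { target : { toolchain : test_suite_properties } }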
00339 
00340     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
00341         for toolchain in toolchains:
00342             tt_id = "%s::%s" % (toolchain, target)
00343 
00344             T = TARGET_MAP[target]
00345 
00346             # print target, toolchain
00347             # Test suite properties returned to external tools like CI
00348             test_suite_properties = {
00349                 'jobs': self.opts_jobs ,
00350                 'clean': clean,
00351                 'target': target,
00352                 'vendor': T.extra_labels[0],
00353                 'test_ids': ', '.join(test_ids),
00354                 'toolchain': toolchain,
00355                 'shuffle_random_seed': self.shuffle_random_seed 
00356             }
00357 
00358 
00359             # print '=== %s::%s ===' % (target, toolchain)
00360             # Let's build our test
00361             if target not in TARGET_MAP:
00362                 print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
00363                 continue
00364 
00365             clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk  or clean or self.opts_clean  else None
00366 
00367             profile = extract_profile(self.opts_parser , self.opts , toolchain)
00368 
00369 
00370             try:
00371                 build_mbed_libs_result = build_mbed_libs(T,
00372                                                          toolchain,
00373                                                          clean=clean_mbed_libs_options,
00374                                                          verbose=self.opts_verbose ,
00375                                                          jobs=self.opts_jobs ,
00376                                                          report=build_report,
00377                                                          properties=build_properties,
00378                                                          build_profile=profile)
00379 
00380                 if not build_mbed_libs_result:
00381                     print self.logger .log_line(self.logger .LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
00382                     continue
00383 
00384             except ToolException:
00385                 print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
00386                 continue
00387 
00388             build_dir = join(BUILD_DIR, "test", target, toolchain)
00389 
00390             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
00391             test_suite_properties['build_dir'] = build_dir
00392             test_suite_properties['skipped'] = []
00393 
00394             # Enumerate through all tests and shuffle test order if requested
00395             test_map_keys = sorted(TEST_MAP.keys())
00396 
00397             if self.opts_shuffle_test_order :
00398                 random.shuffle(test_map_keys, self.shuffle_random_func )
00399                 # Update database with shuffle seed if applicable
00400                 if self.db_logger :
00401                     self.db_logger .reconnect();
00402                     if self.db_logger .is_connected():
00403                         self.db_logger .update_build_id_info(self.db_logger_build_id , _shuffle_seed=self.shuffle_random_func ())
00404                         self.db_logger .disconnect();
00405 
00406             if self.db_logger :
00407                 self.db_logger .reconnect();
00408                 if self.db_logger .is_connected():
00409                     # Update MUTs and Test Specification in database
00410                     self.db_logger .update_build_id_info(self.db_logger_build_id , _muts=self.muts , _test_spec=self.test_spec )
00411                     # Update Extra information in database (some options passed to test suite)
00412                     self.db_logger .update_build_id_info(self.db_logger_build_id , _extra=json.dumps(self.dump_options ()))
00413                     self.db_logger .disconnect();
00414 
00415             valid_test_map_keys = self.get_valid_tests (test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated )
00416             skipped_test_map_keys = self.get_skipped_tests (test_map_keys, valid_test_map_keys)
00417 
00418             for skipped_test_id in skipped_test_map_keys:
00419                 test_suite_properties['skipped'].append(skipped_test_id)
00420 
00421 
00422             # First pass through all tests and determine which libraries need to be built
00423             libraries = []
00424             for test_id in valid_test_map_keys:
00425                 test = TEST_MAP[test_id]
00426 
00427                 # Detect which lib should be added to test
00428                 # Some libs have to be compiled, like RTOS or ETH
00429                 for lib in LIBRARIES:
00430                     if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
00431                         libraries.append(lib['id'])
00432 
00433 
00434             clean_project_options = True if self.opts_goanna_for_tests  or clean or self.opts_clean  else None
00435 
00436             # Build all required libraries
00437             for lib_id in libraries:
00438                 try:
00439                     build_lib(lib_id,
00440                               T,
00441                               toolchain,
00442                               verbose=self.opts_verbose ,
00443                               clean=clean_mbed_libs_options,
00444                               jobs=self.opts_jobs ,
00445                               report=build_report,
00446                               properties=build_properties,
00447                               build_profile=profile)
00448 
00449                 except ToolException:
00450                     print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building library %s'% (lib_id))
00451                     continue
00452 
00453 
00454             for test_id in valid_test_map_keys:
00455                 test = TEST_MAP[test_id]
00456 
00457                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
00458 
00459                 # TODO: move the 2 loops below to a separate function
00460                 INC_DIRS = []
00461                 for lib_id in libraries:
00462                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
00463                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
00464 
00465                 MACROS = []
00466                 for lib_id in libraries:
00467                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
00468                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
00469                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
00470                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
00471                 test_uuid = uuid.uuid4()
00472                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
00473 
00474                 # Prepare extended test results data structure (it can be used to generate detailed test report)
00475                 if target not in self.test_summary_ext :
00476                     self.test_summary_ext [target] = {}  # test_summary_ext : toolchain
00477                 if toolchain not in self.test_summary_ext [target]:
00478                     self.test_summary_ext [target][toolchain] = {}    # test_summary_ext : toolchain : target
00479 
00480                 tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)    # For logging only
00481 
00482                 project_name = self.opts_firmware_global_name  if self.opts_firmware_global_name  else None
00483                 try:
00484                     path = build_project(test.source_dir,
00485                                      join(build_dir, test_id),
00486                                      T,
00487                                      toolchain,
00488                                      test.dependencies,
00489                                      clean=clean_project_options,
00490                                      verbose=self.opts_verbose ,
00491                                      name=project_name,
00492                                      macros=MACROS,
00493                                      inc_dirs=INC_DIRS,
00494                                      jobs=self.opts_jobs ,
00495                                      report=build_report,
00496                                      properties=build_properties,
00497                                      project_id=test_id,
00498                                      project_description=test.get_description(),
00499                                      build_profile=profile)
00500 
00501                 except Exception, e:
00502                     project_name_str = project_name if project_name is not None else test_id
00503 
00504 
00505                     test_result = self.TEST_RESULT_FAIL 
00506 
00507                     if isinstance(e, ToolException):
00508                         print self.logger .log_line(self.logger .LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
00509                         test_result = self.TEST_RESULT_BUILD_FAILED 
00510                     elif isinstance(e, NotSupportedException):
00511                         print self.logger .log_line(self.logger .LogType.INFO, 'The project %s is not supported'% (project_name_str))
00512                         test_result = self.TEST_RESULT_NOT_SUPPORTED 
00513 
00514 
00515                     # Append test results to global test summary
00516                     self.test_summary .append(
00517                         (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
00518                     )
00519 
00520                     # Add detailed test result to test summary structure
00521                     if test_id not in self.test_summary_ext [target][toolchain]:
00522                         self.test_summary_ext [target][toolchain][test_id] = []
00523 
00524                     self.test_summary_ext [target][toolchain][test_id].append({ 0: {
00525                         'result' : test_result,
00526                         'output' : '',
00527                         'target_name' : target,
00528                         'target_name_unique': target,
00529                         'toolchain_name' : toolchain,
00530                         'id' : test_id,
00531                         'description' : test.get_description(),
00532                         'elapsed_time' : 0,
00533                         'duration' : 0,
00534                         'copy_method' : None
00535                     }})
00536                     continue
00537 
00538                 if self.opts_only_build_tests :
00539                     # With this option we are skipping testing phase
00540                     continue
00541 
00542                 # Test duration can be increased by global value
00543                 test_duration = test.duration
00544                 if self.opts_extend_test_timeout  is not None:
00545                     test_duration += self.opts_extend_test_timeout 
00546 
00547                 # For an automated test the duration acts as a timeout after
00548                 # which the test gets interrupted
00549                 test_spec = self.shape_test_request (target, path, test_id, test_duration)
00550                 test_loops = self.get_test_loop_count (test_id)
00551 
00552                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
00553                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
00554                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
00555 
00556                 # read MUTs, test specification and perform tests
00557                 handle_results = self.handle (test_spec, target, toolchain, test_loops=test_loops)
00558 
00559                 if handle_results is None:
00560                     continue
00561 
00562                 for handle_result in handle_results:
00563                     if handle_result:
00564                         single_test_result, detailed_test_results = handle_result
00565                     else:
00566                         continue
00567 
00568                     # Append test results to global test summary
00569                     if single_test_result is not None:
00570                         self.test_summary .append(single_test_result)
00571 
00572                     # Add detailed test result to test summary structure
00573                     if target not in self.test_summary_ext [target][toolchain]:
00574                         if test_id not in self.test_summary_ext [target][toolchain]:
00575                             self.test_summary_ext [target][toolchain][test_id] = []
00576 
00577                         append_test_result = detailed_test_results
00578 
00579                         # If waterfall and consolidate-waterfall options are enabled,
00580                         # only include the last test result in the report.
00581                         if self.opts_waterfall_test  and self.opts_consolidate_waterfall_test :
00582                             append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
00583 
00584                         self.test_summary_ext [target][toolchain][test_id].append(append_test_result)
00585 
00586             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
00587             self.test_suite_properties_ext [target][toolchain] = test_suite_properties
00588 
00589         q.put(target + '_'.join(toolchains))
00590         return
00591 
00592     def execute(self):
00593         clean = self.test_spec .get('clean', False)
00594         test_ids = self.test_spec .get('test_ids', [])
00595         q = Queue()
00596 
00597         # Generate a seed for shuffling if one was not provided
00598         self.shuffle_random_seed  = round(random.random(), self.SHUFFLE_SEED_ROUND )
00599         if self.opts_shuffle_test_seed  is not None and self.is_shuffle_seed_float ():
00600             self.shuffle_random_seed  = round(float(self.opts_shuffle_test_seed ), self.SHUFFLE_SEED_ROUND )
00601 
00602 
00603         if self.opts_parallel_test_exec :
00604             ###################################################################
00605             # Experimental, parallel test execution per singletest instance.
00606             ###################################################################
00607             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
00608             # Note: We are building here in parallel for each target separately!
00609             # So we are not building the same thing multiple times and compilers
00610             # in separate threads do not collide.
00611             # Inside the execute_thread_slice() function, handle() will be called to
00612             # get information about available MUTs (per target).
00613             for target, toolchains in self.test_spec ['targets'].iteritems():
00614                 self.test_suite_properties_ext [target] = {}
00615                 t = threading.Thread(target=self.execute_thread_slice , args = (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties ))
00616                 t.daemon = True
00617                 t.start()
00618                 execute_threads.append(t)
00619 
00620             for t in execute_threads:
00621                 q.get() # t.join() would block; we should not wait for threads to finish in any particular order
00622         else:
00623             # Serialized (not parallel) test execution
00624             for target, toolchains in self.test_spec ['targets'].iteritems():
00625                 if target not in self.test_suite_properties_ext :
00626                     self.test_suite_properties_ext [target] = {}
00627 
00628                 self.execute_thread_slice (q, target, toolchains, clean, test_ids, self.build_report , self.build_properties )
00629                 q.get()
00630 
00631         if self.db_logger :
00632             self.db_logger .reconnect();
00633             if self.db_logger .is_connected():
00634                 self.db_logger .update_build_id_info(self.db_logger_build_id , _status_fk=self.db_logger .BUILD_ID_STATUS_COMPLETED)
00635                 self.db_logger .disconnect();
00636 
00637         return self.test_summary , self.shuffle_random_seed , self.test_summary_ext , self.test_suite_properties_ext , self.build_report , self.build_properties 
00638 
00639     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
00640         valid_test_map_keys = []
00641 
00642         for test_id in test_map_keys:
00643             test = TEST_MAP[test_id]
00644             if self.opts_test_by_names  and test_id not in self.opts_test_by_names :
00645                 continue
00646 
00647             if test_ids and test_id not in test_ids:
00648                 continue
00649 
00650             if self.opts_test_only_peripheral  and not test.peripherals:
00651                 if self.opts_verbose_skipped_tests :
00652                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00653                 continue
00654 
00655             if self.opts_peripheral_by_names  and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names ]):
00656                 # We will skip tests not forced with -p option
00657                 if self.opts_verbose_skipped_tests :
00658                     print self.logger .log_line(self.logger .LogType.INFO, 'Common test skipped for target %s'% (target))
00659                 continue
00660 
00661             if self.opts_test_only_common  and test.peripherals:
00662                 if self.opts_verbose_skipped_tests :
00663                     print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral test skipped for target %s'% (target))
00664                 continue
00665 
00666             if not include_non_automated and not test.automated:
00667                 if self.opts_verbose_skipped_tests :
00668                     print self.logger .log_line(self.logger .LogType.INFO, 'Non automated test skipped for target %s'% (target))
00669                 continue
00670 
00671             if test.is_supported(target, toolchain):
00672                 if test.peripherals is None and self.opts_only_build_tests :
00673                     # When the user passes the 'build only' flag and the test does not
00674                     # specify peripherals, we allow building the test by default
00675                     pass
00676                 elif self.opts_peripheral_by_names  and test_id not in self.opts_peripheral_by_names :
00677                     # If we force a peripheral with option -p, we let the test
00678                     # through even if the peripheral is not in the MUTs file.
00679                     pass
00680                 elif not self.is_peripherals_available (target, test.peripherals):
00681                     if self.opts_verbose_skipped_tests :
00682                         if test.peripherals:
00683                             print self.logger .log_line(self.logger .LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
00684                         else:
00685                             print self.logger .log_line(self.logger .LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
00686                     continue
00687 
00688                 # The test has made it through all the filters, so add it to the valid tests list
00689                 valid_test_map_keys.append(test_id)
00690 
00691         return valid_test_map_keys
00692 
00693     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
00694         # NOTE: This will not preserve order
00695         return list(set(all_test_map_keys) - set(valid_test_map_keys))
00696 
00697     def generate_test_summary_by_target (self, test_summary, shuffle_seed=None):
00698         """ Prints well-formed summary with results (SQL table like)
00699             table shows test x toolchain test result matrix
00700         """
00701         RESULT_INDEX = 0
00702         TARGET_INDEX = 1
00703         TOOLCHAIN_INDEX = 2
00704         TEST_INDEX = 3
00705         DESC_INDEX = 4
00706 
00707         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
00708         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
00709         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
00710         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
00711 
00712         result = "Test summary:\n"
00713         for target in unique_targets:
00714             result_dict = {} # test : { toolchain : result }
00715             unique_target_toolchains = []
00716             for test in test_summary:
00717                 if test[TARGET_INDEX] == target:
00718                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
00719                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
00720                     if test[TEST_INDEX] not in result_dict:
00721                         result_dict[test[TEST_INDEX]] = {}
00722                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
00723 
00724             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
00725             pt = PrettyTable(pt_cols)
00726             for col in pt_cols:
00727                 pt.align[col] = "l"
00728             pt.padding_width = 1 # One space between column edges and contents (default)
00729 
00730             for test in unique_tests:
00731                 if test in result_dict:
00732                     test_results = result_dict[test]
00733                     if test in unique_test_desc:
00734                         row = [target, test, unique_test_desc[test]]
00735                         for toolchain in unique_toolchains:
00736                             if toolchain in test_results:
00737                                 row.append(test_results[toolchain])
00738                         pt.add_row(row)
00739             result += pt.get_string()
00740             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND ,
00741                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00742             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00743         return result
00744 
00745     def generate_test_summary (self, test_summary, shuffle_seed=None):
00746         """ Prints well-formed summary with results (SQL table like)
00747             table shows target x test result matrix
00748         """
00749         success_code = 0    # Success code that can later be returned to the caller
00750         result = "Test summary:\n"
00751         # Pretty table package is used to print results
00752         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
00753                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
00754         pt.align["Result"] = "l" # Left align
00755         pt.align["Target"] = "l" # Left align
00756         pt.align["Toolchain"] = "l" # Left align
00757         pt.align["Test ID"] = "l" # Left align
00758         pt.align["Test Description"] = "l" # Left align
00759         pt.padding_width = 1 # One space between column edges and contents (default)
00760 
00761         result_dict = {self.TEST_RESULT_OK  : 0,
00762                        self.TEST_RESULT_FAIL  : 0,
00763                        self.TEST_RESULT_ERROR  : 0,
00764                        self.TEST_RESULT_UNDEF  : 0,
00765                        self.TEST_RESULT_IOERR_COPY  : 0,
00766                        self.TEST_RESULT_IOERR_DISK  : 0,
00767                        self.TEST_RESULT_IOERR_SERIAL  : 0,
00768                        self.TEST_RESULT_NO_IMAGE  : 0,
00769                        self.TEST_RESULT_TIMEOUT  : 0,
00770                        self.TEST_RESULT_MBED_ASSERT  : 0,
00771                        self.TEST_RESULT_BUILD_FAILED  : 0,
00772                        self.TEST_RESULT_NOT_SUPPORTED  : 0
00773         }
00774 
00775         for test in test_summary:
00776             if test[0] in result_dict:
00777                 result_dict[test[0]] += 1
00778             pt.add_row(test)
00779         result += pt.get_string()
00780         result += "\n"
00781 
00782         # Print result count
00783         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
00784         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND ,
00785                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed )
00786         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order  else '')
00787         return result
00788 
00789     def test_loop_list_to_dict (self, test_loops_str):
00790         """ Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
00791         """
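        # Example (editor's note): ['MBED_A1=2', 'MBED_A2=5'] -> {'MBED_A1': 2, 'MBED_A2': 5}
        # Entries without '=' or with a non-integer loop count are skipped.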
00792         result = {}
00793         if test_loops_str:
00794             test_loops = test_loops_str
00795             for test_loop in test_loops:
00796                 test_loop_count = test_loop.split('=')
00797                 if len(test_loop_count) == 2:
00798                     _test_id, _test_loops = test_loop_count
00799                     try:
00800                         _test_loops = int(_test_loops)
00801                     except:
00802                         continue
00803                     result[_test_id] = _test_loops
00804         return result
00805 
00806     def get_test_loop_count (self, test_id):
00807         """ Returns the number of loops per test (deduced from test_id).
00808             If the test is not in the list of redefined loop counts, the default value is used.
00809         """
00810         result = self.GLOBAL_LOOPS_COUNT 
00811         if test_id in self.TEST_LOOPS_DICT :
00812             result = self.TEST_LOOPS_DICT [test_id]
00813         return result
00814 
00815     def delete_file (self, file_path):
00816         """ Remove file from the system
00817         """
00818         result = True
00819         result_msg = ""
00820         try:
00821             os.remove(file_path)
00822         except Exception, e:
00823             result_msg = e
00824             result = False
00825         return result, result_msg
00826 
00827     def handle_mut (self, mut, data, target_name, toolchain_name, test_loops=1):
00828         """ Test is being invoked for given MUT.
00829         """
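        # MUT descriptor fields read below (editor's note): 'mcu' (required) plus the
        # optional 'copy_method', 'disk', 'port', 'reset_type', 'reset_tout', 'mcu_unique'.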
00830         # Get test information, image and test timeout
00831         test_id = data['test_id']
00832         test = TEST_MAP[test_id]
00833         test_description = TEST_MAP[test_id].get_description()
00834         image = data["image"]
00835         duration = data.get("duration", 10)
00836 
00837         if mut is None:
00838             print "Error: No Mbed available: MUT[%s]" % data['mcu']
00839             return None
00840 
00841         mcu = mut['mcu']
00842         copy_method = mut.get('copy_method')        # Optional copy method override from the MUTs file (board configuration selection)
00843 
00844         if self.db_logger :
00845             self.db_logger .reconnect()
00846 
00847         selected_copy_method = self.opts_copy_method  if copy_method is None else copy_method
00848 
00849         # Tests can be looped so test results must be stored for the same test
00850         test_all_result = []
00851         # Test results for one test run several times
00852         detailed_test_results = {}  # { Loop_number: { results ... } }
00853 
00854         for test_index in range(test_loops):
00855 
00856             # If mbedls is available and we are auto detecting MUT info,
00857             # update MUT info (mount point may have changed)
00858             if get_module_avail('mbed_lstools') and self.opts_auto_detect :
00859                 platform_name_filter = [mcu]
00860                 muts_list = {}
00861                 found = False
00862 
00863                 for i in range(0, 60):
00864                     print('Looking for %s with MBEDLS' % mcu)
00865                     muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
00866 
00867                     if 1 not in muts_list:
00868                         sleep(3)
00869                     else:
00870                         found = True
00871                         break
00872 
00873                 if not found:
00874                     print "Error: mbed not found with MBEDLS: %s" % data['mcu']
00875                     return None
00876                 else:
00877                     mut = muts_list[1]
00878 
00879             disk = mut.get('disk')
00880             port = mut.get('port')
00881 
00882             if disk is None or port is None:
00883                 return None
00884 
00885             target_by_mcu = TARGET_MAP[mut['mcu']]
00886             target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
00887             # Some extra stuff can be declared in MUTs structure
00888             reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
00889             reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
00890 
00891             # When the build and test system were separate, this was relative to a
00892             # network base folder path: join(NETWORK_BASE_PATH, )
00893             image_path = image
00894 
00895             # Host test execution
00896             start_host_exec_time = time()
00897 
00898             single_test_result = self.TEST_RESULT_UNDEF  # single test run result
00899             _copy_method = selected_copy_method
00900 
00901             if not exists(image_path):
00902                 single_test_result = self.TEST_RESULT_NO_IMAGE 
00903                 elapsed_time = 0
00904                 single_test_output = self.logger .log_line(self.logger .LogType.ERROR, 'Image file does not exist: %s'% image_path)
00905                 print single_test_output
00906             else:
00907                 # Host test execution
00908                 start_host_exec_time = time()
00909 
00910                 host_test_verbose = self.opts_verbose_test_result_only  or self.opts_verbose 
00911                 host_test_reset = self.opts_mut_reset_type  if reset_type is None else reset_type
00912                 host_test_result = self.run_host_test (test.host_test,
00913                                                       image_path, disk, port, duration,
00914                                                       micro=target_name,
00915                                                       verbose=host_test_verbose,
00916                                                       reset=host_test_reset,
00917                                                       reset_tout=reset_tout,
00918                                                       copy_method=selected_copy_method,
00919                                                       program_cycle_s=target_by_mcu.program_cycle_s)
00920                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
00921 
00922             # Store test result
00923             test_all_result.append(single_test_result)
00924             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
00925             elapsed_time = single_testduration  # Time of single test case execution after reset
00926 
00927             detailed_test_results[test_index] = {
00928                 'result' : single_test_result,
00929                 'output' : single_test_output,
00930                 'target_name' : target_name,
00931                 'target_name_unique' : target_name_unique,
00932                 'toolchain_name' : toolchain_name,
00933                 'id' : test_id,
00934                 'description' : test_description,
00935                 'elapsed_time' : round(elapsed_time, 2),
00936                 'duration' : single_timeout,
00937                 'copy_method' : _copy_method,
00938             }
00939 
00940             print self.print_test_result (single_test_result, target_name_unique, toolchain_name,
00941                                          test_id, test_description, elapsed_time, single_timeout)
00942 
00943             # Update database entries for ongoing test
00944             if self.db_logger  and self.db_logger .is_connected():
00945                 test_type = 'SingleTest'
00946                 self.db_logger .insert_test_entry(self.db_logger_build_id ,
00947                                                  target_name,
00948                                                  toolchain_name,
00949                                                  test_type,
00950                                                  test_id,
00951                                                  single_test_result,
00952                                                  single_test_output,
00953                                                  elapsed_time,
00954                                                  single_timeout,
00955                                                  test_index)
00956 
00957             # If we perform waterfall test we test until we get OK and we stop testing
00958             if self.opts_waterfall_test  and single_test_result == self.TEST_RESULT_OK :
00959                 break
00960 
00961         if self.db_logger :
00962             self.db_logger .disconnect()
00963 
00964         return (self.shape_global_test_loop_result (test_all_result, self.opts_waterfall_test  and self.opts_consolidate_waterfall_test ),
00965                 target_name_unique,
00966                 toolchain_name,
00967                 test_id,
00968                 test_description,
00969                 round(elapsed_time, 2),
00970                 single_timeout,
00971                 self.shape_test_loop_ok_result_count (test_all_result)), detailed_test_results
00972 
00973     def handle (self, test_spec, target_name, toolchain_name, test_loops=1):
00974         """ Function determines MUT's mbed disk/port and copies binary to
00975             target.
00976         """
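        # test_spec is a JSON string (editor's note); the keys consumed here and in
        # handle_mut() are 'mcu', 'test_id', 'image' and (optionally) 'duration', e.g.
        # '{"mcu": "K64F", "test_id": "MBED_A1", "image": "<path>", "duration": 10}'
        # (illustrative values).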
00977         handle_results = []
00978         data = json.loads(test_spec)
00979 
00980         # Find a suitable MUT:
00981         mut = None
00982         for id, m in self.muts .iteritems():
00983             if m['mcu'] == data['mcu']:
00984                 mut = m
00985                 handle_result = self.handle_mut (mut, data, target_name, toolchain_name, test_loops=test_loops)
00986                 handle_results.append(handle_result)
00987 
00988         return handle_results
00989 
00990     def print_test_result (self, test_result, target_name, toolchain_name,
00991                           test_id, test_description, elapsed_time, duration):
00992         """ Use specific convention to print test result and related data
00993         """
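        # Example of the returned line (editor's note; names are illustrative):
        #   TargetTest::K64F::ARM::MBED_A1::Basic [OK] in 1.25 of 20 sec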
00994         tokens = []
00995         tokens.append("TargetTest")
00996         tokens.append(target_name)
00997         tokens.append(toolchain_name)
00998         tokens.append(test_id)
00999         tokens.append(test_description)
01000         separator = "::"
01001         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
01002         result = separator.join(tokens) + " [" + test_result +"]" + time_info
01003         return Fore.MAGENTA + result + Fore.RESET
01004 
01005     def shape_test_loop_ok_result_count (self, test_all_result):
01006         """ Reformats list of results to simple string
01007         """
01008         test_loop_count = len(test_all_result)
01009         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK )
01010         return "%d/%d"% (test_loop_ok_result, test_loop_count)
01011 
01012     def shape_global_test_loop_result (self, test_all_result, waterfall_and_consolidate):
01013         """ Reformats list of results to simple string
01014         """
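        # Examples (editor's note):
        #   ['OK', 'OK']                              -> 'OK'   (all results identical)
        #   ['FAIL', 'OK'] with waterfall+consolidate -> 'OK'
        #   ['FAIL', 'OK'] otherwise                  -> 'FAIL'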
01015         result = self.TEST_RESULT_FAIL 
01016 
01017         if all(test_all_result[0] == res for res in test_all_result):
01018             result = test_all_result[0]
01019         elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK  for res in test_all_result):
01020             result = self.TEST_RESULT_OK 
01021 
01022         return result
01023 
01024     def run_host_test (self, name, image_path, disk, port, duration,
01025                       micro=None, reset=None, reset_tout=None,
01026                       verbose=False, copy_method=None, program_cycle_s=None):
01027         """ Creates a new process running the host test configured for a particular test case.
01028             The function also polls the process for serial port activity to catch all data
01029             printed by the test runner and host test during test execution
01030         """
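        # Resulting host test invocation sketch (editor's note; placeholders in <>):
        #   python <host_test>.py -d <disk> -f "<image>" -p <port> -t <duration> -C <program_cycle_s>
        #          [--auto] [-c <copy_method>] [-m <micro>] [-r <reset>] [-R <reset_tout>]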
01031 
01032         def get_char_from_queue(obs):
01033             """ Get character from queue safe way
01034             """
01035             try:
01036                 c = obs.queue.get(block=True, timeout=0.5)
01037             except Empty, _:
01038                 c = None
01039             return c
01040 
01041         def filter_queue_char(c):
01042             """ Filters out non-ASCII characters from the serial port
01043             """
01044             if ord(c) not in range(128):
01045                 c = ' '
01046             return c
01047 
01048         def get_test_result(output):
01049             """ Parse test 'output' data
01050             """
01051             result = self.TEST_RESULT_TIMEOUT 
01052             for line in "".join(output).splitlines():
01053                 search_result = self.RE_DETECT_TESTCASE_RESULT .search(line)
01054                 if search_result and len(search_result.groups()):
01055                     result = self.TEST_RESULT_MAPPING [search_result.groups(0)[0]]
01056                     break
01057             return result
01058 
01059         def get_auto_property_value(property_name, line):
01060             """ Scans auto detection line from MUT and returns scanned parameter 'property_name'
01061                 Returns string
01062             """
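            # Example (editor's note): for property_name 'timeout' the line
            # "HOST: Property 'timeout' = '20'" yields '20'.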
01063             result = None
01064             if re.search("HOST: Property '%s'"% property_name, line) is not None:
01065                 property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
01066                 if property is not None and len(property.groups()) == 1:
01067                     result = property.groups()[0]
01068             return result
01069 
01070         # print "{%s} port:%s disk:%s"  % (name, port, disk),
01071         cmd = ["python",
01072                '%s.py'% name,
01073                '-d', disk,
01074                '-f', '"%s"'% image_path,
01075                '-p', port,
01076                '-t', str(duration),
01077                '-C', str(program_cycle_s)]
01078 
01079         if get_module_avail('mbed_lstools') and self.opts_auto_detect :
01080             cmd += ['--auto']
01081 
01082         # Add extra parameters to host_test
01083         if copy_method is not None:
01084             cmd += ["-c", copy_method]
01085         if micro is not None:
01086             cmd += ["-m", micro]
01087         if reset is not None:
01088             cmd += ["-r", reset]
01089         if reset_tout is not None:
01090             cmd += ["-R", str(reset_tout)]
01091 
01092         if verbose:
01093             print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
01094             print "Test::Output::Start"
01095 
01096         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
01097         obs = ProcessObserver(proc)
01098         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
01099         line = ''
01100         output = []
01101         start_time = time()
01102         while (time() - start_time) < (2 * duration):
01103             c = get_char_from_queue(obs)
01104             if c:
01105                 if verbose:
01106                     sys.stdout.write(c)
01107                 c = filter_queue_char(c)
01108                 output.append(c)
01109                 # Give the mbed under test a way to communicate the end of the test
01110                 if c in ['\n', '\r']:
01111 
01112                     # Checking for auto-detection information from the test about MUT reset moment
01113                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
01114                         # Update this flag only once so we do not keep resetting the timer
01115                         update_once_flag['reset_target'] = True
01116                         start_time = time()
01117 
01118                     # Checking for auto-detection information from the test about timeout
01119                     auto_timeout_val = get_auto_property_value('timeout', line)
01120                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
01121                         # Update this flag only once so the timeout is not overridden repeatedly
01122                         update_once_flag['timeout'] = True
01123                         duration = int(auto_timeout_val)
01124 
01125                     # Detect mbed assert:
01126                     if 'mbed assertation failed: ' in line:
01127                         output.append('{{mbed_assert}}')
01128                         break
01129 
01130                     # Check for test end
01131                     if '{end}' in line:
01132                         break
01133                     line = ''
01134                 else:
01135                     line += c
01136         end_time = time()
01137         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
01138 
01139         c = get_char_from_queue(obs)
01140 
01141         if c:
01142             if verbose:
01143                 sys.stdout.write(c)
01144             c = filter_queue_char(c)
01145             output.append(c)
01146 
01147         if verbose:
01148             print "Test::Output::Finish"
01149         # Stop test process
01150         obs.stop()
01151 
01152         result = get_test_result(output)
01153         return (result, "".join(output), testcase_duration, duration)
01154 
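    # Illustrative sketch (not part of the original source): the host test
    # command assembled above, for hypothetical example values (test name
    # 'echo', image 'build/echo.bin', copy method 'shutil', reset 'default'),
    # would look roughly like:
    #
    #     ['python', 'echo.py',
    #      '-d', 'E:', '-f', '"build/echo.bin"', '-p', 'COM3',
    #      '-t', '10', '-C', '1.5',
    #      '-c', 'shutil', '-r', 'default']
    #
    # i.e. the per-test host script from HOST_TESTS is run in a child process
    # and its stdout is consumed through the ProcessObserver queue.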
01155     def is_peripherals_available (self, target_mcu_name, peripherals=None):
01156         """ Checks if specified target should run specific peripheral test case defined in MUTs file
01157         """
01158         if peripherals is not None:
01159             peripherals = set(peripherals)
01160         for id, mut in self.muts.iteritems():
01161             # Target MCU name check
01162             if mut["mcu"] != target_mcu_name:
01163                 continue
01164             # Peripherals check
01165             if peripherals is not None:
01166                 if 'peripherals' not in mut:
01167                     continue
01168                 if not peripherals.issubset(set(mut['peripherals'])):
01169                     continue
01170             return True
01171         return False
01172 
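    # Illustrative sketch (not part of the original source): with a hypothetical
    # MUTs entry like the one below, the peripheral check behaves as shown.
    #
    #     self.muts = {"1": {"mcu": "K64F", "peripherals": ["SD", "RTC"]}}
    #     # is_peripherals_available("K64F")                -> True
    #     # is_peripherals_available("K64F", ["SD"])        -> True
    #     # is_peripherals_available("K64F", ["SD", "CAN"]) -> False (not a subset)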
01173     def shape_test_request (self, mcu, image_path, test_id, duration=10):
01174         """ Function prepares JSON structure describing test specification
01175         """
01176         test_spec = {
01177             "mcu": mcu,
01178             "image": image_path,
01179             "duration": duration,
01180             "test_id": test_id,
01181         }
01182         return json.dumps(test_spec)
01183 
01184 
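# Illustrative sketch (not part of the original source): shape_test_request(),
# called on the test runner instance, simply serialises the test descriptor,
# e.g. (key order may differ):
#
#     shape_test_request('K64F', 'build/echo.bin', 'MBED_A1', duration=20)
#     # -> '{"mcu": "K64F", "image": "build/echo.bin",
#     #      "duration": 20, "test_id": "MBED_A1"}'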
01185 def get_unique_value_from_summary (test_summary, index):
01186     """ Gets list of unique target names
01187     """
01188     result = []
01189     for test in test_summary:
01190         target_name = test[index]
01191         if target_name not in result:
01192             result.append(target_name)
01193     return sorted(result)
01194 
01195 
01196 def get_unique_value_from_summary_ext (test_summary, index_key, index_val):
01197     """ Gets list of unique target names and return dictionary
01198     """
01199     result = {}
01200     for test in test_summary:
01201         key = test[index_key]
01202         val = test[index_val]
01203         if key not in result:
01204             result[key] = val
01205     return result
01206 
01207 
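# Illustrative sketch (not part of the original source): assuming each test
# summary row is a list whose columns are indexed positionally, the two helpers
# above behave like this for hypothetical rows:
#
#     summary = [['OK', 'K64F', 'GCC_ARM'],
#                ['FAIL', 'LPC1768', 'ARM'],
#                ['OK', 'K64F', 'ARM']]
#     get_unique_value_from_summary(summary, 1)        # -> ['K64F', 'LPC1768']
#     get_unique_value_from_summary_ext(summary, 1, 2) # -> {'K64F': 'GCC_ARM',
#                                                      #     'LPC1768': 'ARM'}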
01208 def show_json_file_format_error (json_spec_filename, line, column):
01209     """ Prints JSON broken content
01210     """
01211     with open(json_spec_filename) as data_file:
01212         line_no = 1
01213         for json_line in data_file:
01214             if line_no + 5 >= line: # Print last few lines before error
01215                 print 'Line %d:\t'%line_no + json_line, # Prints line
01216             if line_no == line:
01217                 print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
01218                 break
01219             line_no += 1
01220 
01221 
01222 def json_format_error_defect_pos (json_error_msg):
01223     """ Gets first error line and column in JSON file format.
01224         Parsed from exception thrown by json.loads() string
01225     """
01226     result = None
01227     line, column = 0, 0
01228     # Line value search
01229     line_search = re.search('line [0-9]+', json_error_msg)
01230     if line_search is not None:
01231         ls = line_search.group().split(' ')
01232         if len(ls) == 2:
01233             line = int(ls[1])
01234             # Column position search
01235             column_search = re.search('column [0-9]+', json_error_msg)
01236             if column_search is not None:
01237                 cs = column_search.group().split(' ')
01238                 if len(cs) == 2:
01239                     column = int(cs[1])
01240                     result = [line, column]
01241     return result
01242 
01243 
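# Illustrative sketch (not part of the original source): the position parser
# above expects the standard json.loads() error wording, e.g.:
#
#     msg = "Expecting ',' delimiter: line 6 column 12 (char 81)"
#     json_format_error_defect_pos(msg)                      # -> [6, 12]
#     json_format_error_defect_pos("no position info here")  # -> None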
01244 def get_json_data_from_file (json_spec_filename, verbose=False):
01245     """ Loads from file JSON formatted string to data structure
01246     """
01247     result = None
01248     try:
01249         with open(json_spec_filename) as data_file:
01250             try:
01251                 result = json.load(data_file)
01252             except ValueError as json_error_msg:
01253                 result = None
01254                 print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
01255                 # We can print where error occurred inside JSON file if we can parse exception msg
01256                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
01257                 if json_format_defect_pos is not None:
01258                     line = json_format_defect_pos[0]
01259                     column = json_format_defect_pos[1]
01260                     print
01261                     show_json_file_format_error(json_spec_filename, line, column)
01262 
01263     except IOError as fileopen_error_msg:
01264         print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
01265         print
01266     if verbose and result:
01267         pp = pprint.PrettyPrinter(indent=4)
01268         pp.pprint(result)
01269     return result
01270 
01271 
01272 def print_muts_configuration_from_json (json_data, join_delim=", ", platform_filter=None):
01273     """ Prints MUTs configuration passed to test script for verboseness
01274     """
01275     muts_info_cols = []
01276     # We need to check all unique properties for each defined MUT
01277     for k in json_data:
01278         mut_info = json_data[k]
01279         for mut_property in mut_info:
01280             if mut_property not in muts_info_cols:
01281                 muts_info_cols.append(mut_property)
01282 
01283     # Prepare pretty table object to display all MUTs
01284     pt_cols = ["index"] + muts_info_cols
01285     pt = PrettyTable(pt_cols)
01286     for col in pt_cols:
01287         pt.align[col] = "l"
01288 
01289     # Add rows to pretty print object
01290     for k in json_data:
01291         row = [k]
01292         mut_info = json_data[k]
01293 
01294         add_row = True
01295         if platform_filter and 'mcu' in mut_info:
01296             add_row = re.search(platform_filter, mut_info['mcu']) is not None
01297         if add_row:
01298             for col in muts_info_cols:
01299                 cell_val = mut_info[col] if col in mut_info else None
01300                 if type(cell_val) == ListType:
01301                     cell_val = join_delim.join(cell_val)
01302                 row.append(cell_val)
01303             pt.add_row(row)
01304     return pt.get_string()
01305 
01306 
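# Illustrative sketch (not part of the original source): a hypothetical MUTs
# JSON structure accepted by print_muts_configuration_from_json() above.
# Every property name found across the MUTs becomes a table column; list
# values (such as 'peripherals') are joined with 'join_delim'.
#
#     {
#         "1": {"mcu": "K64F", "port": "COM3", "disk": "E:",
#               "peripherals": ["SD"]},
#         "2": {"mcu": "LPC1768", "port": "COM5", "disk": "F:"}
#     }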
01307 def print_test_configuration_from_json (json_data, join_delim=", "):
01308     """ Prints test specification configuration passed to test script for verboseness
01309     """
01310     toolchains_info_cols = []
01311     # We need to check all toolchains for each device
01312     for k in json_data:
01313         # k should be 'targets'
01314         targets = json_data[k]
01315         for target in targets:
01316             toolchains = targets[target]
01317             for toolchain in toolchains:
01318                 if toolchain not in toolchains_info_cols:
01319                     toolchains_info_cols.append(toolchain)
01320 
01321     # Prepare pretty table object to display test specification
01322     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
01323     pt = PrettyTable(pt_cols)
01324     for col in pt_cols:
01325         pt.align[col] = "l"
01326 
01327     # { target : [conflicted toolchains] }
01328     toolchain_conflicts = {}
01329     toolchain_path_conflicts = []
01330     for k in json_data:
01331         # k should be 'targets'
01332         targets = json_data[k]
01333         for target in targets:
01334             target_supported_toolchains = get_target_supported_toolchains(target)
01335             if not target_supported_toolchains:
01336                 target_supported_toolchains = []
01337             target_name = target if target in TARGET_MAP else "%s*"% target
01338             row = [target_name]
01339             toolchains = targets[target]
01340 
01341             for toolchain in sorted(toolchains_info_cols):
01342                 # Check for conflicts: target vs toolchain
01343                 conflict = False
01344                 conflict_path = False
01345                 if toolchain in toolchains:
01346                     if toolchain not in target_supported_toolchains:
01347                         conflict = True
01348                         if target not in toolchain_conflicts:
01349                             toolchain_conflicts[target] = []
01350                         toolchain_conflicts[target].append(toolchain)
01351                 # Add marker inside table about target usage / conflict
01352                 cell_val = 'Yes' if toolchain in toolchains else '-'
01353                 if conflict:
01354                     cell_val += '*'
01355                 # Check for conflicts: toolchain vs toolchain path
01356                 if toolchain in TOOLCHAIN_PATHS:
01357                     toolchain_path = TOOLCHAIN_PATHS[toolchain]
01358                     if not os.path.isdir(toolchain_path):
01359                         conflict_path = True
01360                         if toolchain not in toolchain_path_conflicts:
01361                             toolchain_path_conflicts.append(toolchain)
01362                 if conflict_path:
01363                     cell_val += '#'
01364                 row.append(cell_val)
01365             pt.add_row(row)
01366 
01367     # generate result string
01368     result = pt.get_string()    # Test specification table
01369     if toolchain_conflicts or toolchain_path_conflicts:
01370         result += "\n"
01371         result += "Toolchain conflicts:\n"
01372         for target in toolchain_conflicts:
01373             if target not in TARGET_MAP:
01374                 result += "\t* Target %s unknown\n"% (target)
01375             conflict_target_list = join_delim.join(toolchain_conflicts[target])
01376             suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
01377             result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
01378 
01379         for toolchain in toolchain_path_conflicts:
01380             # Let's check the toolchain configuration
01381             if toolchain in TOOLCHAIN_PATHS:
01382                 toolchain_path = TOOLCHAIN_PATHS[toolchain]
01383                 if not os.path.isdir(toolchain_path):
01384                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
01385     return result
01386 
01387 
01388 def get_avail_tests_summary_table (cols=None, result_summary=True, join_delim=',', platform_filter=None):
01389     """ Generates a summary table with all test cases and additional test case
01390         information using pretty print functionality. Allows the test suite user
01391         to review the available test cases
01392     """
01393     # get all unique test ID prefixes
01394     unique_test_id = []
01395     for test in TESTS:
01396         split = test['id'].split('_')[:-1]
01397         test_id_prefix = '_'.join(split)
01398         if test_id_prefix not in unique_test_id:
01399             unique_test_id.append(test_id_prefix)
01400     unique_test_id.sort()
01401     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
01402     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
01403 
01404     test_properties = ['id',
01405                        'automated',
01406                        'description',
01407                        'peripherals',
01408                        'host_test',
01409                        'duration'] if cols is None else cols
01410 
01411     # All tests status table print
01412     pt = PrettyTable(test_properties)
01413     for col in test_properties:
01414         pt.align[col] = "l"
01415     pt.align['duration'] = "r"
01416 
01417     counter_all = 0
01418     counter_automated = 0
01419     pt.padding_width = 1 # One space between column edges and contents (default)
01420 
01421     for test_id in sorted(TEST_MAP.keys()):
01422         if platform_filter is not None:
01423             # Filter out platforms using regex
01424             if re.search(platform_filter, test_id) is None:
01425                 continue
01426         row = []
01427         test = TEST_MAP[test_id]
01428         split = test_id.split('_')[:-1]
01429         test_id_prefix = '_'.join(split)
01430 
01431         for col in test_properties:
01432             col_value = test[col]
01433             if type(test[col]) == ListType:
01434                 col_value = join_delim.join(test[col])
01435             elif test[col] is None:
01436                 col_value = "-"
01437 
01438             row.append(col_value)
01439         if test['automated'] == True:
01440             counter_dict_test_id_types[test_id_prefix] += 1
01441             counter_automated += 1
01442         pt.add_row(row)
01443         # Update counters
01444         counter_all += 1
01445         counter_dict_test_id_types_all[test_id_prefix] += 1
01446     result = pt.get_string()
01447     result += "\n\n"
01448 
01449     if result_summary and not platform_filter:
01450         # Automation result summary
01451         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
01452         pt = PrettyTable(test_id_cols)
01453         pt.align['automated'] = "r"
01454         pt.align['all'] = "r"
01455         pt.align['percent [%]'] = "r"
01456 
01457         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
01458         str_progress = progress_bar(percent_progress, 75)
01459         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
01460         result += "Automation coverage:\n"
01461         result += pt.get_string()
01462         result += "\n\n"
01463 
01464         # Test automation coverage table print
01465         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
01466         pt = PrettyTable(test_id_cols)
01467         pt.align['id'] = "l"
01468         pt.align['automated'] = "r"
01469         pt.align['all'] = "r"
01470         pt.align['percent [%]'] = "r"
01471         for unique_id in unique_test_id:
01472             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
01473             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
01474             str_progress = progress_bar(percent_progress, 75)
01475             row = [unique_id,
01476                    counter_dict_test_id_types[unique_id],
01477                    counter_dict_test_id_types_all[unique_id],
01478                    percent_progress,
01479                    "[" + str_progress + "]"]
01480             pt.add_row(row)
01481         result += "Test automation coverage:\n"
01482         result += pt.get_string()
01483         result += "\n\n"
01484     return result
01485 
01486 
01487 def progress_bar (percent_progress, saturation=0):
01488     """ This function creates progress bar with optional simple saturation mark
01489     """
01490     step = int(percent_progress / 2)    # Scale percentage down to bar width (scale: 1 - 50)
01491     str_progress = '#' * step + '.' * int(50 - step)
01492     c = '!' if str_progress[38] == '.' else '|'
01493     if saturation > 0:
01494         saturation = saturation / 2
01495         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
01496     return str_progress
01497 
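# Illustrative sketch (not part of the original source): the bar is always 50
# characters wide before the optional saturation marker is inserted, e.g.:
#
#     progress_bar(50.0)      # -> '#' * 25 + '.' * 25
#     progress_bar(100.0)     # -> '#' * 50
#     progress_bar(40.0, 75)  # same bar with a marker character spliced in at
#                             # position 75/2 to show the saturation point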
01498 
01499 def singletest_in_cli_mode (single_test):
01500     """ Runs SingleTestRunner object in CLI (Command line interface) mode
01501 
01502         @return returns success code (0 == success) for building and running tests
01503     """
01504     start = time()
01505     # Execute tests depending on options and filter applied
01506     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
01507     elapsed_time = time() - start
01508 
01509     # Human readable summary
01510     if not single_test.opts_suppress_summary:
01511         # prints well-formed summary with results (SQL table like)
01512         print single_test.generate_test_summary(test_summary, shuffle_seed)
01513     if single_test.opts_test_x_toolchain_summary:
01514         # prints well-formed summary with results (SQL table like)
01515         # table shows test x toolchain test result matrix
01516         print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
01517 
01518     print "Completed in %.2f sec"% (elapsed_time)
01519     print
01520     # Write summary of the builds
01521 
01522     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
01523     status = print_report_exporter.report(build_report)
01524 
01525     # Store extra reports in files
01526     if single_test.opts_report_html_file_name:
01527         # Export results in form of HTML report to separate file
01528         report_exporter = ReportExporter(ResultExporterType.HTML)
01529         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
01530     if single_test.opts_report_junit_file_name:
01531         # Export results in form of JUnit XML report to separate file
01532         report_exporter = ReportExporter(ResultExporterType.JUNIT)
01533         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
01534     if single_test.opts_report_text_file_name:
01535         # Export results in form of a text file
01536         report_exporter = ReportExporter(ResultExporterType.TEXT)
01537         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
01538     if single_test.opts_report_build_file_name:
01539         # Export build results as JUnit XML report to separate file
01540         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
01541         report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
01542 
01543     # Returns a success status if there were no build failures of the test projects or their dependencies
01544     return status
01545 
01546 class TestLogger ():
01547     """ Super-class for logging and printing ongoing events for test suite pass
01548     """
01549     def __init__ (self, store_log=True):
01550         """ We can control if logger actually stores log in memory
01551             or just handled all log entries immediately
01552         """
01553         self.log = []
01554         self.log_to_file = False
01555         self.log_file_name = None
01556         self.store_log = store_log
01557 
01558         self.LogType = construct_enum(INFO='Info',
01559                                       WARN='Warning',
01560                                       NOTIF='Notification',
01561                                       ERROR='Error',
01562                                       EXCEPT='Exception')
01563 
01564         self.LogToFileAttr = construct_enum(CREATE=1,    # Create or overwrite existing log file
01565                                             APPEND=2)    # Append to existing log file
01566 
01567     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01568         """ Log one line of text
01569         """
01570         log_timestamp = time()
01571         log_entry = {'log_type' : LogType,
01572                      'log_timestamp' : log_timestamp,
01573                      'log_line' : log_line,
01574                      '_future' : None
01575         }
01576         # Store log in memory
01577         if self.store_log:
01578             self.log.append(log_entry)
01579         return log_entry
01580 
01581 
01582 class CLITestLogger (TestLogger ):
01583     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
01584     """
01585     def __init__(self, store_log=True, file_name=None):
01586         TestLogger.__init__(self)
01587         self.log_file_name = file_name
01588         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
01589         self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
01590 
01591     def log_print (self, log_entry, timestamp=True):
01592         """ Prints on screen formatted log entry
01593         """
01594         ts = log_entry['log_timestamp']
01595         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
01596         log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
01597         return timestamp_str + log_line_str
01598 
01599     def log_line (self, LogType, log_line, timestamp=True, line_delim='\n'):
01600         """ Logs line, if log file output was specified log line will be appended
01601             at the end of log file
01602         """
01603         log_entry = TestLogger.log_line(self, LogType, log_line)
01604         log_line_str = self.log_print(log_entry, timestamp)
01605         if self.log_file_name is not None:
01606             try:
01607                 with open(self.log_file_name, 'a') as f:
01608                     f.write(log_line_str + line_delim)
01609             except IOError:
01610                 pass
01611         return log_line_str
01612 
01613 
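# Illustrative sketch (not part of the original source): typical CLI logger
# usage, assuming the default '%H:%M:%S' timestamp format; 'suite.log' is a
# hypothetical file name.
#
#     logger = CLITestLogger(file_name='suite.log')
#     logger.log_line(logger.LogType.INFO, 'MUT flashing started')
#     # returns something like "[14:05:31] Info: MUT flashing started"
#     # and appends the same line to suite.log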
01614 def factory_db_logger (db_url):
01615     """ Factory database driver depending on database type supplied in database connection string db_url
01616     """
01617     if db_url is not None:
01618         from tools.test_mysql import MySQLDBAccess
01619         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
01620         if connection_info is not None:
01621             (db_type, username, password, host, db_name) = connection_info
01622             if db_type == 'mysql':
01623                 return MySQLDBAccess()
01624     return None
01625 
01626 
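# Illustrative sketch (not part of the original source): the connection string
# format expected above is the same one shown for the --db option, e.g.:
#
#     factory_db_logger('mysql://username:password@127.0.0.1/db_name')
#     # -> MySQLDBAccess instance
#     factory_db_logger('sqlite://somewhere/else')
#     # -> None (only the 'mysql' driver is handled here)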
01627 def detect_database_verbose (db_url):
01628     """ uses verbose mode (prints) database detection sequence to check it database connection string is valid
01629     """
01630     result = BaseDBAccess().parse_db_connection_string(db_url)
01631     if result is not None:
01632         # Parsing passed
01633         (db_type, username, password, host, db_name) = result
01634         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
01635         # Let's try to connect
01636         db_ = factory_db_logger(db_url)
01637         if db_ is not None:
01638             print "Connecting to database '%s'..."% db_url,
01639             db_.connect(host, username, password, db_name)
01640             if db_.is_connected():
01641                 print "ok"
01642                 print "Detecting database..."
01643                 print db_.detect_database(verbose=True)
01644                 print "Disconnecting...",
01645                 db_.disconnect()
01646                 print "done"
01647         else:
01648             print "Database type '%s' unknown"% db_type
01649     else:
01650         print "Parse error: '%s' - DB Url error"% (db_url)
01651 
01652 
01653 def get_module_avail (module_name):
01654     """ This function returns True if module_name is already impored module
01655     """
01656     return module_name in sys.modules.keys()
01657 
01658 
01659 def get_autodetected_MUTS_list(platform_name_filter=None):
01660     oldError = None
01661     if os.name == 'nt':
01662         # Disable Windows error box temporarily
01663         oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
01664 
01665     mbeds = mbed_lstools.create()
01666     detect_muts_list = mbeds.list_mbeds()
01667 
01668     if os.name == 'nt':
01669         ctypes.windll.kernel32.SetErrorMode(oldError)
01670 
01671     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
01672 
01673 def get_autodetected_MUTS (mbeds_list, platform_name_filter=None):
01674     """ Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
01675         If function fails to auto-detect devices it will return empty dictionary.
01676 
01677         if get_module_avail('mbed_lstools'):
01678             mbeds = mbed_lstools.create()
01679             mbeds_list = mbeds.list_mbeds()
01680 
01681         @param mbeds_list list of mbeds captured from mbed_lstools
01682         @param platform_name_filter optional list of platform names used to filter the detected MUTs by their 'platform_name'
01683     """
01684     result = {}   # Should be in muts_all.json format
01685     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
01686     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
01687     index = 1
01688     for mut in mbeds_list:
01689         # Filter the MUTS if a filter is specified
01690 
01691         if platform_name_filter and not mut['platform_name'] in platform_name_filter:
01692             continue
01693 
01694         # For 'mcu_unique' we assign the 'platform_name_unique' value from the mbed-ls output (if it exists);
01695         # if not, we create our own unique value (last few characters of the platform's target_id).
01696         m = {'mcu': mut['platform_name'],
01697              'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
01698              'port': mut['serial_port'],
01699              'disk': mut['mount_point'],
01700              'peripherals': []     # No peripheral detection
01701              }
01702         if index not in result:
01703             result[index] = {}
01704         result[index] = m
01705         index += 1
01706     return result
01707 
01708 
01709 def get_autodetected_TEST_SPEC (mbeds_list,
01710                                use_default_toolchain=True,
01711                                use_supported_toolchains=False,
01712                                toolchain_filter=None,
01713                                platform_name_filter=None):
01714     """ Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
01715         If function fails to auto-detect devices it will return empty 'targets' test_spec description.
01716 
01717         use_default_toolchain - if True add default toolchain to test_spec
01718         use_supported_toolchains - if True add all supported toolchains to test_spec
01719         toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
01720     """
01721     result = {'targets': {} }
01722 
01723     for mut in mbeds_list:
01724         mcu = mut['mcu']
01725         if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
01726             if mcu in TARGET_MAP:
01727                 default_toolchain = TARGET_MAP[mcu].default_toolchain
01728                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
01729 
01730                 # Decide which toolchains should be added to test specification toolchain pool for each target
01731                 toolchains = []
01732                 if use_default_toolchain:
01733                     toolchains.append(default_toolchain)
01734                 if use_supported_toolchains:
01735                     toolchains += supported_toolchains
01736                 if toolchain_filter is not None:
01737                     all_toolchains = supported_toolchains + [default_toolchain]
01738                     for toolchain in toolchain_filter:
01739                         if toolchain in all_toolchains:
01740                             toolchains.append(toolchain)
01741 
01742                 result['targets'][mcu] = list(set(toolchains))
01743     return result
01744 
01745 
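# Illustrative sketch (not part of the original source): for a detected K64F
# board the generated test spec fragment would look roughly like the structure
# below (assuming 'ARM' is the default toolchain and GCC_ARM/IAR are also
# supported; the toolchain list order is not guaranteed):
#
#     {
#         "targets": {
#             "K64F": ["ARM", "GCC_ARM", "IAR"]
#         }
#     }
#
# with the toolchain list driven by use_default_toolchain,
# use_supported_toolchains and toolchain_filter.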
01746 def get_default_test_options_parser ():
01747     """ Get common test script options used by CLI, web services etc.
01748     """
01749     parser = argparse.ArgumentParser()
01750     parser.add_argument('-i', '--tests',
01751                         dest='test_spec_filename',
01752                         metavar="FILE",
01753                         type=argparse_filestring_type,
01754                         help='Points to file with test specification')
01755 
01756     parser.add_argument('-M', '--MUTS',
01757                         dest='muts_spec_filename',
01758                         metavar="FILE",
01759                         type=argparse_filestring_type,
01760                         help='Points to file with MUTs specification (overrides settings.py and private_settings.py)')
01761 
01762     parser.add_argument("-j", "--jobs",
01763                         dest='jobs',
01764                         metavar="NUMBER",
01765                         type=int,
01766                         help="Define number of compilation jobs. Default value is 1")
01767 
01768     if get_module_avail('mbed_lstools'):
01769         # Additional features available when mbed_lstools is installed on host and imported
01770         # mbed_lstools allows users to detect mbed-enabled devices connected to the host
01771         parser.add_argument('--auto',
01772                             dest='auto_detect',
01773                             action="store_true",
01774                             help='Use mbed-ls module to detect all connected mbed devices')
01775 
01776         toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
01777         parser.add_argument('--tc',
01778                             dest='toolchains_filter',
01779                             type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
01780                             help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
01781 
01782         test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
01783         parser.add_argument('--oper',
01784                             dest='operability_checks',
01785                             type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
01786                             help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
01787 
01788     parser.add_argument('--clean',
01789                         dest='clean',
01790                         action="store_true",
01791                         help='Clean the build directory')
01792 
01793     parser.add_argument('-P', '--only-peripherals',
01794                         dest='test_only_peripheral',
01795                         default=False,
01796                         action="store_true",
01797                         help='Test only peripherals declared for the MUT and skip common tests')
01798 
01799     parser.add_argument("--profile", dest="profile", action="append",
01800                         type=argparse_filestring_type,
01801                         default=[])
01802 
01803     parser.add_argument('-C', '--only-commons',
01804                         dest='test_only_common',
01805                         default=False,
01806                         action="store_true",
01807                         help='Test only board internals. Skip peripheral tests and perform common tests')
01808 
01809     parser.add_argument('-n', '--test-by-names',
01810                         dest='test_by_names',
01811                         type=argparse_many(str),
01812                         help='Runs only tests enumerated in this switch. Use comma to separate test case names')
01813 
01814     parser.add_argument('-p', '--peripheral-by-names',
01815                         dest='peripheral_by_names',
01816                         type=argparse_many(str),
01817                         help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
01818 
01819     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
01820     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
01821 
01822     parser.add_argument('-c', '--copy-method',
01823                         dest='copy_method',
01824                         type=argparse_uppercase_type(copy_methods, "flash method"),
01825                         help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
01826 
01827     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
01828     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
01829 
01830     parser.add_argument('-r', '--reset-type',
01831                         dest='mut_reset_type',
01832                         default=None,
01833                         type=argparse_uppercase_type(reset_methods, "reset method"),
01834                         help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
01835 
01836     parser.add_argument('-g', '--goanna-for-tests',
01837                         dest='goanna_for_tests',
01838                         action="store_true",
01839                         help='Run Goanna static analysis tool for tests (project will be rebuilt)')
01840 
01841     parser.add_argument('-G', '--goanna-for-sdk',
01842                         dest='goanna_for_mbed_sdk',
01843                         action="store_true",
01844                         help='Run Goanna static analysis tool for mbed SDK (project will be rebuilt)')
01845 
01846     parser.add_argument('-s', '--suppress-summary',
01847                         dest='suppress_summary',
01848                         default=False,
01849                         action="store_true",
01850                         help='Suppresses display of well-formatted table with test results')
01851 
01852     parser.add_argument('-t', '--test-summary',
01853                         dest='test_x_toolchain_summary',
01854                         default=False,
01855                         action="store_true",
01856                         help='Displays well-formatted table with test x toolchain test result per target')
01857 
01858     parser.add_argument('-A', '--test-automation-report',
01859                         dest='test_automation_report',
01860                         default=False,
01861                         action="store_true",
01862                         help='Prints information about all tests and exits')
01863 
01864     parser.add_argument('-R', '--test-case-report',
01865                         dest='test_case_report',
01866                         default=False,
01867                         action="store_true",
01868                         help='Prints information about all test cases and exits')
01869 
01870     parser.add_argument("-S", "--supported-toolchains",
01871                         action="store_true",
01872                         dest="supported_toolchains",
01873                         default=False,
01874                         help="Displays supported matrix of MCUs and toolchains")
01875 
01876     parser.add_argument("-O", "--only-build",
01877                         action="store_true",
01878                         dest="only_build_tests",
01879                         default=False,
01880                         help="Only build tests, skips actual test procedures (flashing etc.)")
01881 
01882     parser.add_argument('--parallel',
01883                         dest='parallel_test_exec',
01884                         default=False,
01885                         action="store_true",
01886                         help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')
01887 
01888     parser.add_argument('--config',
01889                         dest='verbose_test_configuration_only',
01890                         default=False,
01891                         action="store_true",
01892                         help='Displays full test specification and MUTs configuration and exits')
01893 
01894     parser.add_argument('--loops',
01895                         dest='test_loops_list',
01896                         type=argparse_many(str),
01897                         help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
01898 
01899     parser.add_argument('--global-loops',
01900                         dest='test_global_loops_value',
01901                         type=int,
01902                         help='Set global number of test loops per test. Default value is 1')
01903 
01904     parser.add_argument('--consolidate-waterfall',
01905                         dest='consolidate_waterfall_test',
01906                         default=False,
01907                         action="store_true",
01908                         help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
01909 
01910     parser.add_argument('-W', '--waterfall',
01911                         dest='waterfall_test',
01912                         default=False,
01913                         action="store_true",
01914                         help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
01915 
01916     parser.add_argument('-N', '--firmware-name',
01917                         dest='firmware_global_name',
01918                         help='Set global name for all produced projects. Note: the proper file extension will be added by the build scripts')
01919 
01920     parser.add_argument('-u', '--shuffle',
01921                         dest='shuffle_test_order',
01922                         default=False,
01923                         action="store_true",
01924                         help='Shuffles test execution order')
01925 
01926     parser.add_argument('--shuffle-seed',
01927                         dest='shuffle_test_seed',
01928                         default=None,
01929                         help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
01930 
01931     parser.add_argument('-f', '--filter',
01932                         dest='general_filter_regex',
01933                         type=argparse_many(str),
01934                         default=None,
01935                         help='For some commands you can use filter to filter out results')
01936 
01937     parser.add_argument('--inc-timeout',
01938                         dest='extend_test_timeout',
01939                         metavar="NUMBER",
01940                         type=int,
01941                         help='You can increase global timeout for each test by specifying additional test timeout in seconds')
01942 
01943     parser.add_argument('--db',
01944                         dest='db_url',
01945                         help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
01946 
01947     parser.add_argument('-l', '--log',
01948                         dest='log_file_name',
01949                         help='Log events to external file (note not all console entries may be visible in log file)')
01950 
01951     parser.add_argument('--report-html',
01952                         dest='report_html_file_name',
01953                         help='You can log test suite results in form of HTML report')
01954 
01955     parser.add_argument('--report-junit',
01956                         dest='report_junit_file_name',
01957                         help='You can log test suite results in form of JUnit compliant XML report')
01958 
01959     parser.add_argument("--report-build",
01960                         dest="report_build_file_name",
01961                         help="Output the build results to a junit xml file")
01962 
01963     parser.add_argument("--report-text",
01964                         dest="report_text_file_name",
01965                         help="Output the build results to a text file")
01966 
01967     parser.add_argument('--verbose-skipped',
01968                         dest='verbose_skipped_tests',
01969                         default=False,
01970                         action="store_true",
01971                         help='Prints some extra information about skipped tests')
01972 
01973     parser.add_argument('-V', '--verbose-test-result',
01974                         dest='verbose_test_result_only',
01975                         default=False,
01976                         action="store_true",
01977                         help='Prints test serial output')
01978 
01979     parser.add_argument('-v', '--verbose',
01980                         dest='verbose',
01981                         default=False,
01982                         action="store_true",
01983                         help='Verbose mode (prints some extra information)')
01984 
01985     parser.add_argument('--version',
01986                         dest='version',
01987                         default=False,
01988                         action="store_true",
01989                         help='Prints script version and exits')
01990     return parser
01991 
01992 def test_path_to_name (path, base):
01993     """Change all slashes in a path into hyphens
01994     This creates a unique cross-platform test name based on the path
01995     This can eventually be overridden by a to-be-determined meta-data mechanism"""
01996     name_parts = []
01997     head, tail = os.path.split(relpath(path,base))
01998     while (tail and tail != "."):
01999         name_parts.insert(0, tail)
02000         head, tail = os.path.split(head)
02001 
02002     return "-".join(name_parts).lower()
02003 
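# Illustrative sketch (not part of the original source): path-to-name mapping
# for a hypothetical project layout.
#
#     test_path_to_name('proj/TESTS/network/http', 'proj')
#     # -> 'tests-network-http'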
02004 def find_tests (base_dir, target_name, toolchain_name, app_config=None):
02005     """ Finds all tests in a directory recursively
02006     base_dir: path to the directory to scan for tests (ex. 'path/to/project')
02007     target_name: name of the target to use for scanning (ex. 'K64F')
02008     toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
02010     app_config: location of a chosen mbed_app.json file
02011     """
02012 
02013     tests = {}
02014 
02015     # Prepare the toolchain
02016     toolchain = prepare_toolchain([base_dir], target_name, toolchain_name,
02017                                   silent=True, app_config=app_config)
02018 
02019     # Scan the directory for paths to probe for 'TESTS' folders
02020     base_resources = scan_resources([base_dir], toolchain)
02021 
02022     dirs = base_resources.inc_dirs
02023     for directory in dirs:
02024         subdirs = os.listdir(directory)
02025 
02026         # If the directory contains a subdirectory called 'TESTS', scan it for test cases
02027         if 'TESTS' in subdirs:
02028             walk_base_dir = join(directory, 'TESTS')
02029             test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir)
02030 
02031             # Loop through all subdirectories
02032             for d in test_resources.inc_dirs:
02033 
02034                 # If the test case folder is not called 'host_tests' and it is
02035                 # located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
02036                 # then add it to the tests
02037                 path_depth = get_path_depth(relpath(d, walk_base_dir))
02038                 if path_depth == 2:
02039                     test_group_directory_path, test_case_directory = os.path.split(d)
02040                     test_group_directory = os.path.basename(test_group_directory_path)
02041                     
02042                     # Check to make sure the discovered folder is not in a host test directory
02043                     if test_case_directory != 'host_tests' and test_group_directory != 'host_tests':
02044                         test_name = test_path_to_name(d, base_dir)
02045                         tests[test_name] = d
02046 
02047     return tests
02048 
02049 def print_tests (tests, format="list", sort=True):
02050     """Given a dictionary of tests (as returned from "find_tests"), print them
02051     in the specified format"""
02052     if format == "list":
02053         for test_name in sorted(tests.keys()):
02054             test_path = tests[test_name]
02055             print "Test Case:"
02056             print "    Name: %s" % test_name
02057             print "    Path: %s" % test_path
02058     elif format == "json":
02059         print json.dumps(tests, indent=2)
02060     else:
02061         print "Unknown format '%s'" % format
02062         sys.exit(1)
02063 
02064 def norm_relative_path (path, start):
02065     """This function will create a normalized, relative path. It mimics the
02066     python os.path.relpath function, but also normalizes a Windows-style path
02067     that uses backslashes to a Unix-style path that uses forward slashes."""
02068     path = os.path.normpath(path)
02069     path = os.path.relpath(path, start)
02070     path = path.replace("\\", "/")
02071     return path
02072 
02073 
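# Illustrative sketch (not part of the original source): on Windows the helper
# above turns backslash paths into forward-slash relative paths, e.g.:
#
#     norm_relative_path('C:\\work\\proj\\BUILD\\tests\\bin', 'C:\\work\\proj')
#     # -> 'BUILD/tests/bin'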
02074 def build_test_worker (*args, **kwargs):
02075     """This is a worker function for the parallel building of tests. The `args`
02076     and `kwargs` are passed directly to `build_project`. It returns a dictionary
02077     with the following structure:
02078 
02079     {
02080         'result': `True` if no exceptions were thrown, `False` otherwise
02081         'reason': Instance of exception that was thrown on failure
02082         'bin_file': Path to the created binary if `build_project` was
02083                     successful. Not present otherwise
02084         'kwargs': The keyword arguments that were passed to `build_project`.
02085                   This includes arguments that were modified (ex. report)
02086     }
02087     """
02088     bin_file = None
02089     ret = {
02090         'result': False,
02091         'args': args,
02092         'kwargs': kwargs
02093     }
02094 
02095     # Use parent TOOLCHAIN_PATHS variable
02096     for key, value in kwargs['toolchain_paths'].iteritems():
02097         TOOLCHAIN_PATHS[key] = value
02098 
02099     del kwargs['toolchain_paths']
02100 
02101     try:
02102         bin_file = build_project(*args, **kwargs)
02103         ret['result'] = True
02104         ret['bin_file'] = bin_file
02105         ret['kwargs'] = kwargs
02106 
02107     except NotSupportedException, e:
02108         ret['reason'] = e
02109     except ToolException, e:
02110         ret['reason'] = e
02111     except KeyboardInterrupt, e:
02112         ret['reason'] = e
02113     except:
02114         # Print unhandled exceptions here
02115         import traceback
02116         traceback.print_exc(file=sys.stdout)
02117 
02118     return ret
02119 
02120 
02121 def build_tests (tests, base_source_paths, build_path, target, toolchain_name,
02122                 clean=False, notify=None, verbose=False, jobs=1, macros=None,
02123                 silent=False, report=None, properties=None,
02124                 continue_on_build_fail=False, app_config=None,
02125                 build_profile=None):
02126     """Given the data structure from 'find_tests' and the typical build parameters,
02127     build all the tests
02128 
02129     Returns a tuple of the build result (True or False) followed by the test
02130     build data structure"""
02131 
02132     execution_directory = "."
02133     base_path = norm_relative_path(build_path, execution_directory)
02134 
02135     target_name = target if isinstance(target, str) else target.name
02136     cfg, macros, features = get_config(base_source_paths, target_name, toolchain_name)
02137 
02138     baud_rate = 9600
02139     if 'platform.stdio-baud-rate' in cfg:
02140         baud_rate = cfg['platform.stdio-baud-rate'].value
02141 
02142     test_build = {
02143         "platform": target_name,
02144         "toolchain": toolchain_name,
02145         "base_path": base_path,
02146         "baud_rate": baud_rate,
02147         "binary_type": "bootable",
02148         "tests": {}
02149     }
02150 
02151     result = True
02152 
02153     jobs_count = int(jobs if jobs else cpu_count())
02154     p = Pool(processes=jobs_count)
02155     results = []
02156     for test_name, test_path in tests.iteritems():
02157         test_build_path = os.path.join(build_path, test_path)
02158         src_path = base_source_paths + [test_path]
02159         bin_file = None
02160         test_case_folder_name = os.path.basename(test_path)
02161         
02162         args = (src_path, test_build_path, target, toolchain_name)
02163         kwargs = {
02164             'jobs': 1,
02165             'clean': clean,
02166             'macros': macros,
02167             'name': test_case_folder_name,
02168             'project_id': test_name,
02169             'report': report,
02170             'properties': properties,
02171             'verbose': verbose,
02172             'app_config': app_config,
02173             'build_profile': build_profile,
02174             'silent': True,
02175             'toolchain_paths': TOOLCHAIN_PATHS
02176         }
02177         
02178         results.append(p.apply_async(build_test_worker, args, kwargs))
02179 
02180     p.close()
02181     result = True
02182     itr = 0
02183     while len(results):
02184         itr += 1
02185         if itr > 360000:
02186             p.terminate()
02187             p.join()
02188             raise ToolException("Compile did not finish in 10 minutes")
02189         else:
02190             sleep(0.01)
02191             pending = 0
02192             for r in results:
02193                 if r.ready() is True:
02194                     try:
02195                         worker_result = r.get()
02196                         results.remove(r)
02197 
02198                         # Take report from the kwargs and merge it into existing report
02199                         report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
02200                         for test_key in report_entry.keys():
02201                             report[target_name][toolchain_name][test_key] = report_entry[test_key]
02202                         
02203                         # Set the overall result to a failure if a build failure occurred
02204                         if not worker_result['result'] and not isinstance(worker_result['reason'], NotSupportedException):
02205                             result = False
02206                             break
02207 
02208                         # Adding binary path to test build result
02209                         if worker_result['result'] and 'bin_file' in worker_result:
02210                             bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
02211 
02212                             test_build['tests'][worker_result['kwargs']['project_id']] = {
02213                                 "binaries": [
02214                                     {
02215                                         "path": bin_file
02216                                     }
02217                                 ]
02218                             }
02219 
02220                             test_key = worker_result['kwargs']['project_id'].upper()
02221                             print report[target_name][toolchain_name][test_key][0][0]['output'].rstrip()
02222                             print 'Image: %s\n' % bin_file
02223 
02224                     except:
02225                         if p._taskqueue.queue:
02226                             p._taskqueue.queue.clear()
02227                             sleep(0.5)
02228                         p.terminate()
02229                         p.join()
02230                         raise
02231                 else:
02232                     pending += 1
02233                     if pending >= jobs_count:
02234                         break
02235 
02236             # Break as soon as possible if there is a failure and we are not
02237             # continuing on build failures
02238             if not result and not continue_on_build_fail:
02239                 if p._taskqueue.queue:
02240                     p._taskqueue.queue.clear()
02241                     sleep(0.5)
02242                 p.terminate()
02243                 break
02244 
02245     p.join()
02246 
02247     test_builds = {}
02248     test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
02249 
02250     return result, test_builds
02251 
02252 
02253 def test_spec_from_test_builds(test_builds):
02254     return {
02255         "builds": test_builds
02256     }
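# Illustrative sketch (not part of the original source): combining the helpers
# above, the final test spec passed to the test tools has roughly this shape
# (target, toolchain and paths below are hypothetical):
#
#     {
#         "builds": {
#             "K64F-GCC_ARM": {
#                 "platform": "K64F",
#                 "toolchain": "GCC_ARM",
#                 "base_path": "BUILD/tests/K64F/GCC_ARM",
#                 "baud_rate": 9600,
#                 "binary_type": "bootable",
#                 "tests": {
#                     "tests-network-http": {
#                         "binaries": [{"path": "BUILD/tests/K64F/GCC_ARM/http.bin"}]
#                     }
#                 }
#             }
#         }
#     }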