mbed os with nrf51 internal bandgap enabled to read battery level
Dependents: BLE_file_test BLE_Blink ExternalEncoder
tools/test_api.py@0:f269e3021894, 2016-10-23 (annotated)
- Committer: elessair
- Date: Sun Oct 23 15:10:02 2016 +0000
- Revision: 0:f269e3021894

Initial commit
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
elessair | 0:f269e3021894 | 1 | """ |
elessair | 0:f269e3021894 | 2 | mbed SDK |
elessair | 0:f269e3021894 | 3 | Copyright (c) 2011-2014 ARM Limited |
elessair | 0:f269e3021894 | 4 | |
elessair | 0:f269e3021894 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); |
elessair | 0:f269e3021894 | 6 | you may not use this file except in compliance with the License. |
elessair | 0:f269e3021894 | 7 | You may obtain a copy of the License at |
elessair | 0:f269e3021894 | 8 | |
elessair | 0:f269e3021894 | 9 | http://www.apache.org/licenses/LICENSE-2.0 |
elessair | 0:f269e3021894 | 10 | |
elessair | 0:f269e3021894 | 11 | Unless required by applicable law or agreed to in writing, software |
elessair | 0:f269e3021894 | 12 | distributed under the License is distributed on an "AS IS" BASIS, |
elessair | 0:f269e3021894 | 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
elessair | 0:f269e3021894 | 14 | See the License for the specific language governing permissions and |
elessair | 0:f269e3021894 | 15 | limitations under the License. |
elessair | 0:f269e3021894 | 16 | |
elessair | 0:f269e3021894 | 17 | Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com> |
elessair | 0:f269e3021894 | 18 | """ |
elessair | 0:f269e3021894 | 19 | |
elessair | 0:f269e3021894 | 20 | import os |
elessair | 0:f269e3021894 | 21 | import re |
elessair | 0:f269e3021894 | 22 | import sys |
elessair | 0:f269e3021894 | 23 | import json |
elessair | 0:f269e3021894 | 24 | import uuid |
elessair | 0:f269e3021894 | 25 | import pprint |
elessair | 0:f269e3021894 | 26 | import random |
elessair | 0:f269e3021894 | 27 | import argparse |
elessair | 0:f269e3021894 | 28 | import datetime |
elessair | 0:f269e3021894 | 29 | import threading |
elessair | 0:f269e3021894 | 30 | import ctypes |
elessair | 0:f269e3021894 | 31 | from types import ListType |
elessair | 0:f269e3021894 | 32 | from colorama import Fore, Back, Style |
elessair | 0:f269e3021894 | 33 | from prettytable import PrettyTable |
elessair | 0:f269e3021894 | 34 | from copy import copy |
elessair | 0:f269e3021894 | 35 | |
elessair | 0:f269e3021894 | 36 | from time import sleep, time |
elessair | 0:f269e3021894 | 37 | from Queue import Queue, Empty |
elessair | 0:f269e3021894 | 38 | from os.path import join, exists, basename, relpath |
elessair | 0:f269e3021894 | 39 | from threading import Thread, Lock |
elessair | 0:f269e3021894 | 40 | from multiprocessing import Pool, cpu_count |
elessair | 0:f269e3021894 | 41 | from subprocess import Popen, PIPE |
elessair | 0:f269e3021894 | 42 | |
elessair | 0:f269e3021894 | 43 | # Imports related to mbed build api |
elessair | 0:f269e3021894 | 44 | from tools.tests import TESTS |
elessair | 0:f269e3021894 | 45 | from tools.tests import TEST_MAP |
elessair | 0:f269e3021894 | 46 | from tools.paths import BUILD_DIR |
elessair | 0:f269e3021894 | 47 | from tools.paths import HOST_TESTS |
elessair | 0:f269e3021894 | 48 | from tools.utils import ToolException |
elessair | 0:f269e3021894 | 49 | from tools.utils import NotSupportedException |
elessair | 0:f269e3021894 | 50 | from tools.utils import construct_enum |
elessair | 0:f269e3021894 | 51 | from tools.memap import MemapParser |
elessair | 0:f269e3021894 | 52 | from tools.targets import TARGET_MAP |
elessair | 0:f269e3021894 | 53 | from tools.test_db import BaseDBAccess |
elessair | 0:f269e3021894 | 54 | from tools.build_api import build_project, build_mbed_libs, build_lib |
elessair | 0:f269e3021894 | 55 | from tools.build_api import get_target_supported_toolchains |
elessair | 0:f269e3021894 | 56 | from tools.build_api import write_build_report |
elessair | 0:f269e3021894 | 57 | from tools.build_api import prep_report |
elessair | 0:f269e3021894 | 58 | from tools.build_api import prep_properties |
elessair | 0:f269e3021894 | 59 | from tools.build_api import create_result |
elessair | 0:f269e3021894 | 60 | from tools.build_api import add_result_to_report |
elessair | 0:f269e3021894 | 61 | from tools.build_api import prepare_toolchain |
elessair | 0:f269e3021894 | 62 | from tools.build_api import scan_resources |
elessair | 0:f269e3021894 | 63 | from tools.libraries import LIBRARIES, LIBRARY_MAP |
elessair | 0:f269e3021894 | 64 | from tools.options import extract_profile |
elessair | 0:f269e3021894 | 65 | from tools.toolchains import TOOLCHAIN_PATHS |
elessair | 0:f269e3021894 | 66 | from tools.toolchains import TOOLCHAINS |
elessair | 0:f269e3021894 | 67 | from tools.test_exporters import ReportExporter, ResultExporterType |
elessair | 0:f269e3021894 | 68 | from tools.utils import argparse_filestring_type |
elessair | 0:f269e3021894 | 69 | from tools.utils import argparse_uppercase_type |
elessair | 0:f269e3021894 | 70 | from tools.utils import argparse_lowercase_type |
elessair | 0:f269e3021894 | 71 | from tools.utils import argparse_many |
elessair | 0:f269e3021894 | 72 | from tools.utils import get_path_depth |
elessair | 0:f269e3021894 | 73 | |
elessair | 0:f269e3021894 | 74 | import tools.host_tests.host_tests_plugins as host_tests_plugins |
elessair | 0:f269e3021894 | 75 | |
elessair | 0:f269e3021894 | 76 | try: |
elessair | 0:f269e3021894 | 77 | import mbed_lstools |
elessair | 0:f269e3021894 | 78 | from tools.compliance.ioper_runner import get_available_oper_test_scopes |
elessair | 0:f269e3021894 | 79 | except: |
elessair | 0:f269e3021894 | 80 | pass |
elessair | 0:f269e3021894 | 81 | |
elessair | 0:f269e3021894 | 82 | |
elessair | 0:f269e3021894 | 83 | class ProcessObserver(Thread): |
elessair | 0:f269e3021894 | 84 | def __init__(self, proc): |
elessair | 0:f269e3021894 | 85 | Thread.__init__(self) |
elessair | 0:f269e3021894 | 86 | self.proc = proc |
elessair | 0:f269e3021894 | 87 | self.queue = Queue() |
elessair | 0:f269e3021894 | 88 | self.daemon = True |
elessair | 0:f269e3021894 | 89 | self.active = True |
elessair | 0:f269e3021894 | 90 | self.start() |
elessair | 0:f269e3021894 | 91 | |
elessair | 0:f269e3021894 | 92 | def run(self): |
elessair | 0:f269e3021894 | 93 | while self.active: |
elessair | 0:f269e3021894 | 94 | c = self.proc.stdout.read(1) |
elessair | 0:f269e3021894 | 95 | self.queue.put(c) |
elessair | 0:f269e3021894 | 96 | |
elessair | 0:f269e3021894 | 97 | def stop(self): |
elessair | 0:f269e3021894 | 98 | self.active = False |
elessair | 0:f269e3021894 | 99 | try: |
elessair | 0:f269e3021894 | 100 | self.proc.terminate() |
elessair | 0:f269e3021894 | 101 | except Exception, _: |
elessair | 0:f269e3021894 | 102 | pass |
elessair | 0:f269e3021894 | 103 | |
elessair | 0:f269e3021894 | 104 | |
elessair | 0:f269e3021894 | 105 | class SingleTestExecutor(threading.Thread): |
elessair | 0:f269e3021894 | 106 | """ Example: run the single test class in a separate thread |
elessair | 0:f269e3021894 | 107 | """ |
elessair | 0:f269e3021894 | 108 | def __init__(self, single_test): |
elessair | 0:f269e3021894 | 109 | self.single_test = single_test |
elessair | 0:f269e3021894 | 110 | threading.Thread.__init__(self) |
elessair | 0:f269e3021894 | 111 | |
elessair | 0:f269e3021894 | 112 | def run(self): |
elessair | 0:f269e3021894 | 113 | start = time() |
elessair | 0:f269e3021894 | 114 | # Execute tests depending on options and filter applied |
elessair | 0:f269e3021894 | 115 | test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute() |
elessair | 0:f269e3021894 | 116 | elapsed_time = time() - start |
elessair | 0:f269e3021894 | 117 | |
elessair | 0:f269e3021894 | 118 | # Human readable summary |
elessair | 0:f269e3021894 | 119 | if not self.single_test.opts_suppress_summary: |
elessair | 0:f269e3021894 | 120 | # prints well-formed summary with results (SQL table like) |
elessair | 0:f269e3021894 | 121 | print self.single_test.generate_test_summary(test_summary, shuffle_seed) |
elessair | 0:f269e3021894 | 122 | if self.single_test.opts_test_x_toolchain_summary: |
elessair | 0:f269e3021894 | 123 | # prints well-formed summary with results (SQL table like) |
elessair | 0:f269e3021894 | 124 | # table shows test x toolchain test result matrix |
elessair | 0:f269e3021894 | 125 | print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed) |
elessair | 0:f269e3021894 | 126 | print "Completed in %.2f sec"% (elapsed_time) |
elessair | 0:f269e3021894 | 127 | |
elessair | 0:f269e3021894 | 128 | |
elessair | 0:f269e3021894 | 129 | class SingleTestRunner(object): |
elessair | 0:f269e3021894 | 130 | """ Object wrapper for single test run which may involve multiple MUTs |
elessair | 0:f269e3021894 | 131 | """ |
elessair | 0:f269e3021894 | 132 | RE_DETECT_TESTCASE_RESULT = None |
elessair | 0:f269e3021894 | 133 | |
elessair | 0:f269e3021894 | 134 | # Return codes for test script |
elessair | 0:f269e3021894 | 135 | TEST_RESULT_OK = "OK" |
elessair | 0:f269e3021894 | 136 | TEST_RESULT_FAIL = "FAIL" |
elessair | 0:f269e3021894 | 137 | TEST_RESULT_ERROR = "ERROR" |
elessair | 0:f269e3021894 | 138 | TEST_RESULT_UNDEF = "UNDEF" |
elessair | 0:f269e3021894 | 139 | TEST_RESULT_IOERR_COPY = "IOERR_COPY" |
elessair | 0:f269e3021894 | 140 | TEST_RESULT_IOERR_DISK = "IOERR_DISK" |
elessair | 0:f269e3021894 | 141 | TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL" |
elessair | 0:f269e3021894 | 142 | TEST_RESULT_TIMEOUT = "TIMEOUT" |
elessair | 0:f269e3021894 | 143 | TEST_RESULT_NO_IMAGE = "NO_IMAGE" |
elessair | 0:f269e3021894 | 144 | TEST_RESULT_MBED_ASSERT = "MBED_ASSERT" |
elessair | 0:f269e3021894 | 145 | TEST_RESULT_BUILD_FAILED = "BUILD_FAILED" |
elessair | 0:f269e3021894 | 146 | TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED" |
elessair | 0:f269e3021894 | 147 | |
elessair | 0:f269e3021894 | 148 | GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated |
elessair | 0:f269e3021894 | 149 | TEST_LOOPS_LIST = [] # We redefine the no. of loops per test_id |
elessair | 0:f269e3021894 | 150 | TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count} |
elessair | 0:f269e3021894 | 151 | |
elessair | 0:f269e3021894 | 152 | muts = {} # MUTs descriptor (from external file) |
elessair | 0:f269e3021894 | 153 | test_spec = {} # Test specification (from external file) |
elessair | 0:f269e3021894 | 154 | |
elessair | 0:f269e3021894 | 155 | # mbed test suite -> SingleTestRunner |
elessair | 0:f269e3021894 | 156 | TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK, |
elessair | 0:f269e3021894 | 157 | "failure" : TEST_RESULT_FAIL, |
elessair | 0:f269e3021894 | 158 | "error" : TEST_RESULT_ERROR, |
elessair | 0:f269e3021894 | 159 | "ioerr_copy" : TEST_RESULT_IOERR_COPY, |
elessair | 0:f269e3021894 | 160 | "ioerr_disk" : TEST_RESULT_IOERR_DISK, |
elessair | 0:f269e3021894 | 161 | "ioerr_serial" : TEST_RESULT_IOERR_SERIAL, |
elessair | 0:f269e3021894 | 162 | "timeout" : TEST_RESULT_TIMEOUT, |
elessair | 0:f269e3021894 | 163 | "no_image" : TEST_RESULT_NO_IMAGE, |
elessair | 0:f269e3021894 | 164 | "end" : TEST_RESULT_UNDEF, |
elessair | 0:f269e3021894 | 165 | "mbed_assert" : TEST_RESULT_MBED_ASSERT, |
elessair | 0:f269e3021894 | 166 | "build_failed" : TEST_RESULT_BUILD_FAILED, |
elessair | 0:f269e3021894 | 167 | "not_supproted" : TEST_RESULT_NOT_SUPPORTED |
elessair | 0:f269e3021894 | 168 | } |
elessair | 0:f269e3021894 | 169 | |
elessair | 0:f269e3021894 | 170 | def __init__(self, |
elessair | 0:f269e3021894 | 171 | _global_loops_count=1, |
elessair | 0:f269e3021894 | 172 | _test_loops_list=None, |
elessair | 0:f269e3021894 | 173 | _muts={}, |
elessair | 0:f269e3021894 | 174 | _clean=False, |
elessair | 0:f269e3021894 | 175 | _parser=None, |
elessair | 0:f269e3021894 | 176 | _opts=None, |
elessair | 0:f269e3021894 | 177 | _opts_db_url=None, |
elessair | 0:f269e3021894 | 178 | _opts_log_file_name=None, |
elessair | 0:f269e3021894 | 179 | _opts_report_html_file_name=None, |
elessair | 0:f269e3021894 | 180 | _opts_report_junit_file_name=None, |
elessair | 0:f269e3021894 | 181 | _opts_report_build_file_name=None, |
elessair | 0:f269e3021894 | 182 | _opts_report_text_file_name=None, |
elessair | 0:f269e3021894 | 183 | _opts_build_report={}, |
elessair | 0:f269e3021894 | 184 | _opts_build_properties={}, |
elessair | 0:f269e3021894 | 185 | _test_spec={}, |
elessair | 0:f269e3021894 | 186 | _opts_goanna_for_mbed_sdk=None, |
elessair | 0:f269e3021894 | 187 | _opts_goanna_for_tests=None, |
elessair | 0:f269e3021894 | 188 | _opts_shuffle_test_order=False, |
elessair | 0:f269e3021894 | 189 | _opts_shuffle_test_seed=None, |
elessair | 0:f269e3021894 | 190 | _opts_test_by_names=None, |
elessair | 0:f269e3021894 | 191 | _opts_peripheral_by_names=None, |
elessair | 0:f269e3021894 | 192 | _opts_test_only_peripheral=False, |
elessair | 0:f269e3021894 | 193 | _opts_test_only_common=False, |
elessair | 0:f269e3021894 | 194 | _opts_verbose_skipped_tests=False, |
elessair | 0:f269e3021894 | 195 | _opts_verbose_test_result_only=False, |
elessair | 0:f269e3021894 | 196 | _opts_verbose=False, |
elessair | 0:f269e3021894 | 197 | _opts_firmware_global_name=None, |
elessair | 0:f269e3021894 | 198 | _opts_only_build_tests=False, |
elessair | 0:f269e3021894 | 199 | _opts_parallel_test_exec=False, |
elessair | 0:f269e3021894 | 200 | _opts_suppress_summary=False, |
elessair | 0:f269e3021894 | 201 | _opts_test_x_toolchain_summary=False, |
elessair | 0:f269e3021894 | 202 | _opts_copy_method=None, |
elessair | 0:f269e3021894 | 203 | _opts_mut_reset_type=None, |
elessair | 0:f269e3021894 | 204 | _opts_jobs=None, |
elessair | 0:f269e3021894 | 205 | _opts_waterfall_test=None, |
elessair | 0:f269e3021894 | 206 | _opts_consolidate_waterfall_test=None, |
elessair | 0:f269e3021894 | 207 | _opts_extend_test_timeout=None, |
elessair | 0:f269e3021894 | 208 | _opts_auto_detect=None, |
elessair | 0:f269e3021894 | 209 | _opts_include_non_automated=False): |
elessair | 0:f269e3021894 | 210 | """ Let's try hard to init this object |
elessair | 0:f269e3021894 | 211 | """ |
elessair | 0:f269e3021894 | 212 | from colorama import init |
elessair | 0:f269e3021894 | 213 | init() |
elessair | 0:f269e3021894 | 214 | |
elessair | 0:f269e3021894 | 215 | PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}" |
elessair | 0:f269e3021894 | 216 | self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN) |
elessair | 0:f269e3021894 | 217 | # Settings related to test loop counters |
elessair | 0:f269e3021894 | 218 | try: |
elessair | 0:f269e3021894 | 219 | _global_loops_count = int(_global_loops_count) |
elessair | 0:f269e3021894 | 220 | except: |
elessair | 0:f269e3021894 | 221 | _global_loops_count = 1 |
elessair | 0:f269e3021894 | 222 | if _global_loops_count < 1: |
elessair | 0:f269e3021894 | 223 | _global_loops_count = 1 |
elessair | 0:f269e3021894 | 224 | self.GLOBAL_LOOPS_COUNT = _global_loops_count |
elessair | 0:f269e3021894 | 225 | self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else [] |
elessair | 0:f269e3021894 | 226 | self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list) |
elessair | 0:f269e3021894 | 227 | |
elessair | 0:f269e3021894 | 228 | self.shuffle_random_seed = 0.0 |
elessair | 0:f269e3021894 | 229 | self.SHUFFLE_SEED_ROUND = 10 |
elessair | 0:f269e3021894 | 230 | |
elessair | 0:f269e3021894 | 231 | # MUT list and test specification storage |
elessair | 0:f269e3021894 | 232 | self.muts = _muts |
elessair | 0:f269e3021894 | 233 | self.test_spec = _test_spec |
elessair | 0:f269e3021894 | 234 | |
elessair | 0:f269e3021894 | 235 | # Settings passed e.g. from command line |
elessair | 0:f269e3021894 | 236 | self.opts_db_url = _opts_db_url |
elessair | 0:f269e3021894 | 237 | self.opts_log_file_name = _opts_log_file_name |
elessair | 0:f269e3021894 | 238 | self.opts_report_html_file_name = _opts_report_html_file_name |
elessair | 0:f269e3021894 | 239 | self.opts_report_junit_file_name = _opts_report_junit_file_name |
elessair | 0:f269e3021894 | 240 | self.opts_report_build_file_name = _opts_report_build_file_name |
elessair | 0:f269e3021894 | 241 | self.opts_report_text_file_name = _opts_report_text_file_name |
elessair | 0:f269e3021894 | 242 | self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk |
elessair | 0:f269e3021894 | 243 | self.opts_goanna_for_tests = _opts_goanna_for_tests |
elessair | 0:f269e3021894 | 244 | self.opts_shuffle_test_order = _opts_shuffle_test_order |
elessair | 0:f269e3021894 | 245 | self.opts_shuffle_test_seed = _opts_shuffle_test_seed |
elessair | 0:f269e3021894 | 246 | self.opts_test_by_names = _opts_test_by_names |
elessair | 0:f269e3021894 | 247 | self.opts_peripheral_by_names = _opts_peripheral_by_names |
elessair | 0:f269e3021894 | 248 | self.opts_test_only_peripheral = _opts_test_only_peripheral |
elessair | 0:f269e3021894 | 249 | self.opts_test_only_common = _opts_test_only_common |
elessair | 0:f269e3021894 | 250 | self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests |
elessair | 0:f269e3021894 | 251 | self.opts_verbose_test_result_only = _opts_verbose_test_result_only |
elessair | 0:f269e3021894 | 252 | self.opts_verbose = _opts_verbose |
elessair | 0:f269e3021894 | 253 | self.opts_firmware_global_name = _opts_firmware_global_name |
elessair | 0:f269e3021894 | 254 | self.opts_only_build_tests = _opts_only_build_tests |
elessair | 0:f269e3021894 | 255 | self.opts_parallel_test_exec = _opts_parallel_test_exec |
elessair | 0:f269e3021894 | 256 | self.opts_suppress_summary = _opts_suppress_summary |
elessair | 0:f269e3021894 | 257 | self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary |
elessair | 0:f269e3021894 | 258 | self.opts_copy_method = _opts_copy_method |
elessair | 0:f269e3021894 | 259 | self.opts_mut_reset_type = _opts_mut_reset_type |
elessair | 0:f269e3021894 | 260 | self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1 |
elessair | 0:f269e3021894 | 261 | self.opts_waterfall_test = _opts_waterfall_test |
elessair | 0:f269e3021894 | 262 | self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test |
elessair | 0:f269e3021894 | 263 | self.opts_extend_test_timeout = _opts_extend_test_timeout |
elessair | 0:f269e3021894 | 264 | self.opts_clean = _clean |
elessair | 0:f269e3021894 | 265 | self.opts_parser = _parser |
elessair | 0:f269e3021894 | 266 | self.opts = _opts |
elessair | 0:f269e3021894 | 267 | self.opts_auto_detect = _opts_auto_detect |
elessair | 0:f269e3021894 | 268 | self.opts_include_non_automated = _opts_include_non_automated |
elessair | 0:f269e3021894 | 269 | |
elessair | 0:f269e3021894 | 270 | self.build_report = _opts_build_report |
elessair | 0:f269e3021894 | 271 | self.build_properties = _opts_build_properties |
elessair | 0:f269e3021894 | 272 | |
elessair | 0:f269e3021894 | 273 | # File / screen logger initialization |
elessair | 0:f269e3021894 | 274 | self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger |
elessair | 0:f269e3021894 | 275 | |
elessair | 0:f269e3021894 | 276 | # Database related initializations |
elessair | 0:f269e3021894 | 277 | self.db_logger = factory_db_logger(self.opts_db_url) |
elessair | 0:f269e3021894 | 278 | self.db_logger_build_id = None # Build ID (database index of build_id table) |
elessair | 0:f269e3021894 | 279 | # Let's connect to database to set up credentials and confirm database is ready |
elessair | 0:f269e3021894 | 280 | if self.db_logger: |
elessair | 0:f269e3021894 | 281 | self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object |
elessair | 0:f269e3021894 | 282 | if self.db_logger.is_connected(): |
elessair | 0:f269e3021894 | 283 | # Get hostname and uname so we can use it as build description |
elessair | 0:f269e3021894 | 284 | # when creating new build_id in external database |
elessair | 0:f269e3021894 | 285 | (_hostname, _uname) = self.db_logger.get_hostname() |
elessair | 0:f269e3021894 | 286 | _host_location = os.path.dirname(os.path.abspath(__file__)) |
elessair | 0:f269e3021894 | 287 | build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY |
elessair | 0:f269e3021894 | 288 | self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type) |
elessair | 0:f269e3021894 | 289 | self.db_logger.disconnect() |
elessair | 0:f269e3021894 | 290 | |
elessair | 0:f269e3021894 | 291 | def dump_options(self): |
elessair | 0:f269e3021894 | 292 | """ Returns a data structure with the common settings passed to SingleTestRunner. |
elessair | 0:f269e3021894 | 293 | It can be used, for example, to fill _extra fields in the database storing test suite single run data |
elessair | 0:f269e3021894 | 294 | Example: |
elessair | 0:f269e3021894 | 295 | data = self.dump_options() |
elessair | 0:f269e3021894 | 296 | or |
elessair | 0:f269e3021894 | 297 | data_str = json.dumps(self.dump_options()) |
elessair | 0:f269e3021894 | 298 | """ |
elessair | 0:f269e3021894 | 299 | result = {"db_url" : str(self.opts_db_url), |
elessair | 0:f269e3021894 | 300 | "log_file_name" : str(self.opts_log_file_name), |
elessair | 0:f269e3021894 | 301 | "shuffle_test_order" : str(self.opts_shuffle_test_order), |
elessair | 0:f269e3021894 | 302 | "shuffle_test_seed" : str(self.opts_shuffle_test_seed), |
elessair | 0:f269e3021894 | 303 | "test_by_names" : str(self.opts_test_by_names), |
elessair | 0:f269e3021894 | 304 | "peripheral_by_names" : str(self.opts_peripheral_by_names), |
elessair | 0:f269e3021894 | 305 | "test_only_peripheral" : str(self.opts_test_only_peripheral), |
elessair | 0:f269e3021894 | 306 | "test_only_common" : str(self.opts_test_only_common), |
elessair | 0:f269e3021894 | 307 | "verbose" : str(self.opts_verbose), |
elessair | 0:f269e3021894 | 308 | "firmware_global_name" : str(self.opts_firmware_global_name), |
elessair | 0:f269e3021894 | 309 | "only_build_tests" : str(self.opts_only_build_tests), |
elessair | 0:f269e3021894 | 310 | "copy_method" : str(self.opts_copy_method), |
elessair | 0:f269e3021894 | 311 | "mut_reset_type" : str(self.opts_mut_reset_type), |
elessair | 0:f269e3021894 | 312 | "jobs" : str(self.opts_jobs), |
elessair | 0:f269e3021894 | 313 | "extend_test_timeout" : str(self.opts_extend_test_timeout), |
elessair | 0:f269e3021894 | 314 | "_dummy" : '' |
elessair | 0:f269e3021894 | 315 | } |
elessair | 0:f269e3021894 | 316 | return result |
elessair | 0:f269e3021894 | 317 | |
elessair | 0:f269e3021894 | 318 | def shuffle_random_func(self): |
elessair | 0:f269e3021894 | 319 | return self.shuffle_random_seed |
elessair | 0:f269e3021894 | 320 | |
elessair | 0:f269e3021894 | 321 | def is_shuffle_seed_float(self): |
elessair | 0:f269e3021894 | 322 | """ Return True if the shuffle seed can be converted to a float |
elessair | 0:f269e3021894 | 323 | """ |
elessair | 0:f269e3021894 | 324 | result = True |
elessair | 0:f269e3021894 | 325 | try: |
elessair | 0:f269e3021894 | 326 | float(self.shuffle_random_seed) |
elessair | 0:f269e3021894 | 327 | except ValueError: |
elessair | 0:f269e3021894 | 328 | result = False |
elessair | 0:f269e3021894 | 329 | return result |
elessair | 0:f269e3021894 | 330 | |
elessair | 0:f269e3021894 | 331 | # This will store target / toolchain specific properties |
elessair | 0:f269e3021894 | 332 | test_suite_properties_ext = {} # target : toolchain |
elessair | 0:f269e3021894 | 333 | # Here we store test results |
elessair | 0:f269e3021894 | 334 | test_summary = [] |
elessair | 0:f269e3021894 | 335 | # Here we store test results in extended data structure |
elessair | 0:f269e3021894 | 336 | test_summary_ext = {} |
elessair | 0:f269e3021894 | 337 | execute_thread_slice_lock = Lock() |
elessair | 0:f269e3021894 | 338 | |
elessair | 0:f269e3021894 | 339 | def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties): |
elessair | 0:f269e3021894 | 340 | for toolchain in toolchains: |
elessair | 0:f269e3021894 | 341 | tt_id = "%s::%s" % (toolchain, target) |
elessair | 0:f269e3021894 | 342 | |
elessair | 0:f269e3021894 | 343 | T = TARGET_MAP[target] |
elessair | 0:f269e3021894 | 344 | |
elessair | 0:f269e3021894 | 345 | # print target, toolchain |
elessair | 0:f269e3021894 | 346 | # Test suite properties returned to external tools like CI |
elessair | 0:f269e3021894 | 347 | test_suite_properties = { |
elessair | 0:f269e3021894 | 348 | 'jobs': self.opts_jobs, |
elessair | 0:f269e3021894 | 349 | 'clean': clean, |
elessair | 0:f269e3021894 | 350 | 'target': target, |
elessair | 0:f269e3021894 | 351 | 'vendor': T.extra_labels[0], |
elessair | 0:f269e3021894 | 352 | 'test_ids': ', '.join(test_ids), |
elessair | 0:f269e3021894 | 353 | 'toolchain': toolchain, |
elessair | 0:f269e3021894 | 354 | 'shuffle_random_seed': self.shuffle_random_seed |
elessair | 0:f269e3021894 | 355 | } |
elessair | 0:f269e3021894 | 356 | |
elessair | 0:f269e3021894 | 357 | |
elessair | 0:f269e3021894 | 358 | # print '=== %s::%s ===' % (target, toolchain) |
elessair | 0:f269e3021894 | 359 | # Let's build our test |
elessair | 0:f269e3021894 | 360 | if target not in TARGET_MAP: |
elessair | 0:f269e3021894 | 361 | print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target)) |
elessair | 0:f269e3021894 | 362 | continue |
elessair | 0:f269e3021894 | 363 | |
elessair | 0:f269e3021894 | 364 | clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None |
elessair | 0:f269e3021894 | 365 | |
elessair | 0:f269e3021894 | 366 | profile = extract_profile(self.opts_parser, self.opts, toolchain) |
elessair | 0:f269e3021894 | 367 | |
elessair | 0:f269e3021894 | 368 | |
elessair | 0:f269e3021894 | 369 | try: |
elessair | 0:f269e3021894 | 370 | build_mbed_libs_result = build_mbed_libs(T, |
elessair | 0:f269e3021894 | 371 | toolchain, |
elessair | 0:f269e3021894 | 372 | clean=clean_mbed_libs_options, |
elessair | 0:f269e3021894 | 373 | verbose=self.opts_verbose, |
elessair | 0:f269e3021894 | 374 | jobs=self.opts_jobs, |
elessair | 0:f269e3021894 | 375 | report=build_report, |
elessair | 0:f269e3021894 | 376 | properties=build_properties, |
elessair | 0:f269e3021894 | 377 | build_profile=profile) |
elessair | 0:f269e3021894 | 378 | |
elessair | 0:f269e3021894 | 379 | if not build_mbed_libs_result: |
elessair | 0:f269e3021894 | 380 | print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain)) |
elessair | 0:f269e3021894 | 381 | continue |
elessair | 0:f269e3021894 | 382 | |
elessair | 0:f269e3021894 | 383 | except ToolException: |
elessair | 0:f269e3021894 | 384 | print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain)) |
elessair | 0:f269e3021894 | 385 | continue |
elessair | 0:f269e3021894 | 386 | |
elessair | 0:f269e3021894 | 387 | build_dir = join(BUILD_DIR, "test", target, toolchain) |
elessair | 0:f269e3021894 | 388 | |
elessair | 0:f269e3021894 | 389 | test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result |
elessair | 0:f269e3021894 | 390 | test_suite_properties['build_dir'] = build_dir |
elessair | 0:f269e3021894 | 391 | test_suite_properties['skipped'] = [] |
elessair | 0:f269e3021894 | 392 | |
elessair | 0:f269e3021894 | 393 | # Enumerate through all tests and shuffle test order if requested |
elessair | 0:f269e3021894 | 394 | test_map_keys = sorted(TEST_MAP.keys()) |
elessair | 0:f269e3021894 | 395 | |
elessair | 0:f269e3021894 | 396 | if self.opts_shuffle_test_order: |
elessair | 0:f269e3021894 | 397 | random.shuffle(test_map_keys, self.shuffle_random_func) |
elessair | 0:f269e3021894 | 398 | # Update database with shuffle seed if applicable |
elessair | 0:f269e3021894 | 399 | if self.db_logger: |
elessair | 0:f269e3021894 | 400 | self.db_logger.reconnect(); |
elessair | 0:f269e3021894 | 401 | if self.db_logger.is_connected(): |
elessair | 0:f269e3021894 | 402 | self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func()) |
elessair | 0:f269e3021894 | 403 | self.db_logger.disconnect(); |
elessair | 0:f269e3021894 | 404 | |
elessair | 0:f269e3021894 | 405 | if self.db_logger: |
elessair | 0:f269e3021894 | 406 | self.db_logger.reconnect(); |
elessair | 0:f269e3021894 | 407 | if self.db_logger.is_connected(): |
elessair | 0:f269e3021894 | 408 | # Update MUTs and Test Specification in database |
elessair | 0:f269e3021894 | 409 | self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec) |
elessair | 0:f269e3021894 | 410 | # Update Extra information in database (some options passed to test suite) |
elessair | 0:f269e3021894 | 411 | self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options())) |
elessair | 0:f269e3021894 | 412 | self.db_logger.disconnect(); |
elessair | 0:f269e3021894 | 413 | |
elessair | 0:f269e3021894 | 414 | valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated) |
elessair | 0:f269e3021894 | 415 | skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys) |
elessair | 0:f269e3021894 | 416 | |
elessair | 0:f269e3021894 | 417 | for skipped_test_id in skipped_test_map_keys: |
elessair | 0:f269e3021894 | 418 | test_suite_properties['skipped'].append(skipped_test_id) |
elessair | 0:f269e3021894 | 419 | |
elessair | 0:f269e3021894 | 420 | |
elessair | 0:f269e3021894 | 421 | # First pass through all tests and determine which libraries need to be built |
elessair | 0:f269e3021894 | 422 | libraries = [] |
elessair | 0:f269e3021894 | 423 | for test_id in valid_test_map_keys: |
elessair | 0:f269e3021894 | 424 | test = TEST_MAP[test_id] |
elessair | 0:f269e3021894 | 425 | |
elessair | 0:f269e3021894 | 426 | # Detect which lib should be added to test |
elessair | 0:f269e3021894 | 427 | # Some libs have to be compiled, like RTOS or ETH |
elessair | 0:f269e3021894 | 428 | for lib in LIBRARIES: |
elessair | 0:f269e3021894 | 429 | if lib['build_dir'] in test.dependencies and lib['id'] not in libraries: |
elessair | 0:f269e3021894 | 430 | libraries.append(lib['id']) |
elessair | 0:f269e3021894 | 431 | |
elessair | 0:f269e3021894 | 432 | |
elessair | 0:f269e3021894 | 433 | clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None |
elessair | 0:f269e3021894 | 434 | |
elessair | 0:f269e3021894 | 435 | # Build all required libraries |
elessair | 0:f269e3021894 | 436 | for lib_id in libraries: |
elessair | 0:f269e3021894 | 437 | try: |
elessair | 0:f269e3021894 | 438 | build_lib(lib_id, |
elessair | 0:f269e3021894 | 439 | T, |
elessair | 0:f269e3021894 | 440 | toolchain, |
elessair | 0:f269e3021894 | 441 | verbose=self.opts_verbose, |
elessair | 0:f269e3021894 | 442 | clean=clean_mbed_libs_options, |
elessair | 0:f269e3021894 | 443 | jobs=self.opts_jobs, |
elessair | 0:f269e3021894 | 444 | report=build_report, |
elessair | 0:f269e3021894 | 445 | properties=build_properties, |
elessair | 0:f269e3021894 | 446 | build_profile=profile) |
elessair | 0:f269e3021894 | 447 | |
elessair | 0:f269e3021894 | 448 | except ToolException: |
elessair | 0:f269e3021894 | 449 | print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id)) |
elessair | 0:f269e3021894 | 450 | continue |
elessair | 0:f269e3021894 | 451 | |
elessair | 0:f269e3021894 | 452 | |
elessair | 0:f269e3021894 | 453 | for test_id in valid_test_map_keys: |
elessair | 0:f269e3021894 | 454 | test = TEST_MAP[test_id] |
elessair | 0:f269e3021894 | 455 | |
elessair | 0:f269e3021894 | 456 | test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries) |
elessair | 0:f269e3021894 | 457 | |
elessair | 0:f269e3021894 | 458 | # TODO: move the 2 loops below to a separate function |
elessair | 0:f269e3021894 | 459 | INC_DIRS = [] |
elessair | 0:f269e3021894 | 460 | for lib_id in libraries: |
elessair | 0:f269e3021894 | 461 | if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']: |
elessair | 0:f269e3021894 | 462 | INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext']) |
elessair | 0:f269e3021894 | 463 | |
elessair | 0:f269e3021894 | 464 | MACROS = [] |
elessair | 0:f269e3021894 | 465 | for lib_id in libraries: |
elessair | 0:f269e3021894 | 466 | if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']: |
elessair | 0:f269e3021894 | 467 | MACROS.extend(LIBRARY_MAP[lib_id]['macros']) |
elessair | 0:f269e3021894 | 468 | MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target) |
elessair | 0:f269e3021894 | 469 | MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id) |
elessair | 0:f269e3021894 | 470 | test_uuid = uuid.uuid4() |
elessair | 0:f269e3021894 | 471 | MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid)) |
elessair | 0:f269e3021894 | 472 | |
elessair | 0:f269e3021894 | 473 | # Prepare extended test results data structure (it can be used to generate detailed test report) |
elessair | 0:f269e3021894 | 474 | if target not in self.test_summary_ext: |
elessair | 0:f269e3021894 | 475 | self.test_summary_ext[target] = {} # test_summary_ext : toolchain |
elessair | 0:f269e3021894 | 476 | if toolchain not in self.test_summary_ext[target]: |
elessair | 0:f269e3021894 | 477 | self.test_summary_ext[target][toolchain] = {} # test_summary_ext : toolchain : target |
elessair | 0:f269e3021894 | 478 | |
elessair | 0:f269e3021894 | 479 | tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only |
elessair | 0:f269e3021894 | 480 | |
elessair | 0:f269e3021894 | 481 | project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None |
elessair | 0:f269e3021894 | 482 | try: |
elessair | 0:f269e3021894 | 483 | path = build_project(test.source_dir, |
elessair | 0:f269e3021894 | 484 | join(build_dir, test_id), |
elessair | 0:f269e3021894 | 485 | T, |
elessair | 0:f269e3021894 | 486 | toolchain, |
elessair | 0:f269e3021894 | 487 | test.dependencies, |
elessair | 0:f269e3021894 | 488 | clean=clean_project_options, |
elessair | 0:f269e3021894 | 489 | verbose=self.opts_verbose, |
elessair | 0:f269e3021894 | 490 | name=project_name, |
elessair | 0:f269e3021894 | 491 | macros=MACROS, |
elessair | 0:f269e3021894 | 492 | inc_dirs=INC_DIRS, |
elessair | 0:f269e3021894 | 493 | jobs=self.opts_jobs, |
elessair | 0:f269e3021894 | 494 | report=build_report, |
elessair | 0:f269e3021894 | 495 | properties=build_properties, |
elessair | 0:f269e3021894 | 496 | project_id=test_id, |
elessair | 0:f269e3021894 | 497 | project_description=test.get_description(), |
elessair | 0:f269e3021894 | 498 | build_profile=profile) |
elessair | 0:f269e3021894 | 499 | |
elessair | 0:f269e3021894 | 500 | except Exception, e: |
elessair | 0:f269e3021894 | 501 | project_name_str = project_name if project_name is not None else test_id |
elessair | 0:f269e3021894 | 502 | |
elessair | 0:f269e3021894 | 503 | |
elessair | 0:f269e3021894 | 504 | test_result = self.TEST_RESULT_FAIL |
elessair | 0:f269e3021894 | 505 | |
elessair | 0:f269e3021894 | 506 | if isinstance(e, ToolException): |
elessair | 0:f269e3021894 | 507 | print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str)) |
elessair | 0:f269e3021894 | 508 | test_result = self.TEST_RESULT_BUILD_FAILED |
elessair | 0:f269e3021894 | 509 | elif isinstance(e, NotSupportedException): |
elessair | 0:f269e3021894 | 510 | print self.logger.log_line(self.logger.LogType.INFO, 'The project %s is not supported'% (project_name_str)) |
elessair | 0:f269e3021894 | 511 | test_result = self.TEST_RESULT_NOT_SUPPORTED |
elessair | 0:f269e3021894 | 512 | |
elessair | 0:f269e3021894 | 513 | |
elessair | 0:f269e3021894 | 514 | # Append test results to global test summary |
elessair | 0:f269e3021894 | 515 | self.test_summary.append( |
elessair | 0:f269e3021894 | 516 | (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-') |
elessair | 0:f269e3021894 | 517 | ) |
elessair | 0:f269e3021894 | 518 | |
elessair | 0:f269e3021894 | 519 | # Add detailed test result to test summary structure |
elessair | 0:f269e3021894 | 520 | if test_id not in self.test_summary_ext[target][toolchain]: |
elessair | 0:f269e3021894 | 521 | self.test_summary_ext[target][toolchain][test_id] = [] |
elessair | 0:f269e3021894 | 522 | |
elessair | 0:f269e3021894 | 523 | self.test_summary_ext[target][toolchain][test_id].append({ 0: { |
elessair | 0:f269e3021894 | 524 | 'result' : test_result, |
elessair | 0:f269e3021894 | 525 | 'output' : '', |
elessair | 0:f269e3021894 | 526 | 'target_name' : target, |
elessair | 0:f269e3021894 | 527 | 'target_name_unique': target, |
elessair | 0:f269e3021894 | 528 | 'toolchain_name' : toolchain, |
elessair | 0:f269e3021894 | 529 | 'id' : test_id, |
elessair | 0:f269e3021894 | 530 | 'description' : test.get_description(), |
elessair | 0:f269e3021894 | 531 | 'elapsed_time' : 0, |
elessair | 0:f269e3021894 | 532 | 'duration' : 0, |
elessair | 0:f269e3021894 | 533 | 'copy_method' : None |
elessair | 0:f269e3021894 | 534 | }}) |
elessair | 0:f269e3021894 | 535 | continue |
elessair | 0:f269e3021894 | 536 | |
elessair | 0:f269e3021894 | 537 | if self.opts_only_build_tests: |
elessair | 0:f269e3021894 | 538 | # With this option we skip the testing phase |
elessair | 0:f269e3021894 | 539 | continue |
elessair | 0:f269e3021894 | 540 | |
elessair | 0:f269e3021894 | 541 | # Test duration can be increased by global value |
elessair | 0:f269e3021894 | 542 | test_duration = test.duration |
elessair | 0:f269e3021894 | 543 | if self.opts_extend_test_timeout is not None: |
elessair | 0:f269e3021894 | 544 | test_duration += self.opts_extend_test_timeout |
elessair | 0:f269e3021894 | 545 | |
elessair | 0:f269e3021894 | 546 | # For an automated test the duration acts as a timeout after |
elessair | 0:f269e3021894 | 547 | # which the test gets interrupted |
elessair | 0:f269e3021894 | 548 | test_spec = self.shape_test_request(target, path, test_id, test_duration) |
elessair | 0:f269e3021894 | 549 | test_loops = self.get_test_loop_count(test_id) |
elessair | 0:f269e3021894 | 550 | |
elessair | 0:f269e3021894 | 551 | test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration |
elessair | 0:f269e3021894 | 552 | test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops |
elessair | 0:f269e3021894 | 553 | test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path |
elessair | 0:f269e3021894 | 554 | |
elessair | 0:f269e3021894 | 555 | # read MUTs, test specification and perform tests |
elessair | 0:f269e3021894 | 556 | handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops) |
elessair | 0:f269e3021894 | 557 | |
elessair | 0:f269e3021894 | 558 | if handle_results is None: |
elessair | 0:f269e3021894 | 559 | continue |
elessair | 0:f269e3021894 | 560 | |
elessair | 0:f269e3021894 | 561 | for handle_result in handle_results: |
elessair | 0:f269e3021894 | 562 | if handle_result: |
elessair | 0:f269e3021894 | 563 | single_test_result, detailed_test_results = handle_result |
elessair | 0:f269e3021894 | 564 | else: |
elessair | 0:f269e3021894 | 565 | continue |
elessair | 0:f269e3021894 | 566 | |
elessair | 0:f269e3021894 | 567 | # Append test results to global test summary |
elessair | 0:f269e3021894 | 568 | if single_test_result is not None: |
elessair | 0:f269e3021894 | 569 | self.test_summary.append(single_test_result) |
elessair | 0:f269e3021894 | 570 | |
elessair | 0:f269e3021894 | 571 | # Add detailed test result to test summary structure |
elessair | 0:f269e3021894 | 572 | if target not in self.test_summary_ext[target][toolchain]: |
elessair | 0:f269e3021894 | 573 | if test_id not in self.test_summary_ext[target][toolchain]: |
elessair | 0:f269e3021894 | 574 | self.test_summary_ext[target][toolchain][test_id] = [] |
elessair | 0:f269e3021894 | 575 | |
elessair | 0:f269e3021894 | 576 | append_test_result = detailed_test_results |
elessair | 0:f269e3021894 | 577 | |
elessair | 0:f269e3021894 | 578 | # If waterfall and consolidate-waterfall options are enabled, |
elessair | 0:f269e3021894 | 579 | # only include the last test result in the report. |
elessair | 0:f269e3021894 | 580 | if self.opts_waterfall_test and self.opts_consolidate_waterfall_test: |
elessair | 0:f269e3021894 | 581 | append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]} |
elessair | 0:f269e3021894 | 582 | |
elessair | 0:f269e3021894 | 583 | self.test_summary_ext[target][toolchain][test_id].append(append_test_result) |
elessair | 0:f269e3021894 | 584 | |
elessair | 0:f269e3021894 | 585 | test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped']) |
elessair | 0:f269e3021894 | 586 | self.test_suite_properties_ext[target][toolchain] = test_suite_properties |
elessair | 0:f269e3021894 | 587 | |
elessair | 0:f269e3021894 | 588 | q.put(target + '_'.join(toolchains)) |
elessair | 0:f269e3021894 | 589 | return |
elessair | 0:f269e3021894 | 590 | |
elessair | 0:f269e3021894 | 591 | def execute(self): |
elessair | 0:f269e3021894 | 592 | clean = self.test_spec.get('clean', False) |
elessair | 0:f269e3021894 | 593 | test_ids = self.test_spec.get('test_ids', []) |
elessair | 0:f269e3021894 | 594 | q = Queue() |
elessair | 0:f269e3021894 | 595 | |
elessair | 0:f269e3021894 | 596 | # Generate seed for shuffle if seed is not provided in options |
elessair | 0:f269e3021894 | 597 | self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND) |
elessair | 0:f269e3021894 | 598 | if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float(): |
elessair | 0:f269e3021894 | 599 | self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND) |
elessair | 0:f269e3021894 | 600 | |
elessair | 0:f269e3021894 | 601 | |
elessair | 0:f269e3021894 | 602 | if self.opts_parallel_test_exec: |
elessair | 0:f269e3021894 | 603 | ################################################################### |
elessair | 0:f269e3021894 | 604 | # Experimental, parallel test execution per singletest instance. |
elessair | 0:f269e3021894 | 605 | ################################################################### |
elessair | 0:f269e3021894 | 606 | execute_threads = [] # Threads used to build mbed SDK, libs, test cases and execute tests |
elessair | 0:f269e3021894 | 607 | # Note: We are building here in parallel for each target separately! |
elessair | 0:f269e3021894 | 608 | # So we are not building the same thing multiple times and compilers |
elessair | 0:f269e3021894 | 609 | # in separate threads do not collide. |
elessair | 0:f269e3021894 | 610 | # Inside the execute_thread_slice() function, handle() will be called to |
elessair | 0:f269e3021894 | 611 | # get information about available MUTs (per target). |
elessair | 0:f269e3021894 | 612 | for target, toolchains in self.test_spec['targets'].iteritems(): |
elessair | 0:f269e3021894 | 613 | self.test_suite_properties_ext[target] = {} |
elessair | 0:f269e3021894 | 614 | t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)) |
elessair | 0:f269e3021894 | 615 | t.daemon = True |
elessair | 0:f269e3021894 | 616 | t.start() |
elessair | 0:f269e3021894 | 617 | execute_threads.append(t) |
elessair | 0:f269e3021894 | 618 | |
elessair | 0:f269e3021894 | 619 | for t in execute_threads: |
elessair | 0:f269e3021894 | 620 | q.get() # t.join() is not used because we should not wait for threads to end in any fixed order |
elessair | 0:f269e3021894 | 621 | else: |
elessair | 0:f269e3021894 | 622 | # Serialized (not parallel) test execution |
elessair | 0:f269e3021894 | 623 | for target, toolchains in self.test_spec['targets'].iteritems(): |
elessair | 0:f269e3021894 | 624 | if target not in self.test_suite_properties_ext: |
elessair | 0:f269e3021894 | 625 | self.test_suite_properties_ext[target] = {} |
elessair | 0:f269e3021894 | 626 | |
elessair | 0:f269e3021894 | 627 | self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties) |
elessair | 0:f269e3021894 | 628 | q.get() |
elessair | 0:f269e3021894 | 629 | |
elessair | 0:f269e3021894 | 630 | if self.db_logger: |
elessair | 0:f269e3021894 | 631 | self.db_logger.reconnect(); |
elessair | 0:f269e3021894 | 632 | if self.db_logger.is_connected(): |
elessair | 0:f269e3021894 | 633 | self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED) |
elessair | 0:f269e3021894 | 634 | self.db_logger.disconnect(); |
elessair | 0:f269e3021894 | 635 | |
elessair | 0:f269e3021894 | 636 | return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties |
elessair | 0:f269e3021894 | 637 | |
elessair | 0:f269e3021894 | 638 | def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated): |
elessair | 0:f269e3021894 | 639 | valid_test_map_keys = [] |
elessair | 0:f269e3021894 | 640 | |
elessair | 0:f269e3021894 | 641 | for test_id in test_map_keys: |
elessair | 0:f269e3021894 | 642 | test = TEST_MAP[test_id] |
elessair | 0:f269e3021894 | 643 | if self.opts_test_by_names and test_id not in self.opts_test_by_names: |
elessair | 0:f269e3021894 | 644 | continue |
elessair | 0:f269e3021894 | 645 | |
elessair | 0:f269e3021894 | 646 | if test_ids and test_id not in test_ids: |
elessair | 0:f269e3021894 | 647 | continue |
elessair | 0:f269e3021894 | 648 | |
elessair | 0:f269e3021894 | 649 | if self.opts_test_only_peripheral and not test.peripherals: |
elessair | 0:f269e3021894 | 650 | if self.opts_verbose_skipped_tests: |
elessair | 0:f269e3021894 | 651 | print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target)) |
elessair | 0:f269e3021894 | 652 | continue |
elessair | 0:f269e3021894 | 653 | |
elessair | 0:f269e3021894 | 654 | if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names]): |
elessair | 0:f269e3021894 | 655 | # We will skip tests not forced with -p option |
elessair | 0:f269e3021894 | 656 | if self.opts_verbose_skipped_tests: |
elessair | 0:f269e3021894 | 657 | print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target)) |
elessair | 0:f269e3021894 | 658 | continue |
elessair | 0:f269e3021894 | 659 | |
elessair | 0:f269e3021894 | 660 | if self.opts_test_only_common and test.peripherals: |
elessair | 0:f269e3021894 | 661 | if self.opts_verbose_skipped_tests: |
elessair | 0:f269e3021894 | 662 | print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target)) |
elessair | 0:f269e3021894 | 663 | continue |
elessair | 0:f269e3021894 | 664 | |
elessair | 0:f269e3021894 | 665 | if not include_non_automated and not test.automated: |
elessair | 0:f269e3021894 | 666 | if self.opts_verbose_skipped_tests: |
elessair | 0:f269e3021894 | 667 | print self.logger.log_line(self.logger.LogType.INFO, 'Non automated test skipped for target %s'% (target)) |
elessair | 0:f269e3021894 | 668 | continue |
elessair | 0:f269e3021894 | 669 | |
elessair | 0:f269e3021894 | 670 | if test.is_supported(target, toolchain): |
elessair | 0:f269e3021894 | 671 | if test.peripherals is None and self.opts_only_build_tests: |
elessair | 0:f269e3021894 | 672 | # When users use the 'build only' flag and a test does not have |
elessair | 0:f269e3021894 | 673 | # specified peripherals, we allow building the test by default |
elessair | 0:f269e3021894 | 674 | pass |
elessair | 0:f269e3021894 | 675 | elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names: |
elessair | 0:f269e3021894 | 676 | # If we force a peripheral with option -p we expect the test |
elessair | 0:f269e3021894 | 677 | # to pass even if the peripheral is not in the MUTs file. |
elessair | 0:f269e3021894 | 678 | pass |
elessair | 0:f269e3021894 | 679 | elif not self.is_peripherals_available(target, test.peripherals): |
elessair | 0:f269e3021894 | 680 | if self.opts_verbose_skipped_tests: |
elessair | 0:f269e3021894 | 681 | if test.peripherals: |
elessair | 0:f269e3021894 | 682 | print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target)) |
elessair | 0:f269e3021894 | 683 | else: |
elessair | 0:f269e3021894 | 684 | print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target)) |
elessair | 0:f269e3021894 | 685 | continue |
elessair | 0:f269e3021894 | 686 | |
elessair | 0:f269e3021894 | 687 | # The test has made it through all the filters, so add it to the valid tests list |
elessair | 0:f269e3021894 | 688 | valid_test_map_keys.append(test_id) |
elessair | 0:f269e3021894 | 689 | |
elessair | 0:f269e3021894 | 690 | return valid_test_map_keys |
elessair | 0:f269e3021894 | 691 | |
elessair | 0:f269e3021894 | 692 | def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys): |
elessair | 0:f269e3021894 | 693 | # NOTE: This will not preserve order |
elessair | 0:f269e3021894 | 694 | return list(set(all_test_map_keys) - set(valid_test_map_keys)) |
elessair | 0:f269e3021894 | 695 | |
elessair | 0:f269e3021894 | 696 | def generate_test_summary_by_target(self, test_summary, shuffle_seed=None): |
elessair | 0:f269e3021894 | 697 | """ Prints well-formed summary with results (SQL table like) |
elessair | 0:f269e3021894 | 698 | table shows test x toolchain test result matrix |
elessair | 0:f269e3021894 | 699 | """ |
elessair | 0:f269e3021894 | 700 | RESULT_INDEX = 0 |
elessair | 0:f269e3021894 | 701 | TARGET_INDEX = 1 |
elessair | 0:f269e3021894 | 702 | TOOLCHAIN_INDEX = 2 |
elessair | 0:f269e3021894 | 703 | TEST_INDEX = 3 |
elessair | 0:f269e3021894 | 704 | DESC_INDEX = 4 |
elessair | 0:f269e3021894 | 705 | |
elessair | 0:f269e3021894 | 706 | unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX) |
elessair | 0:f269e3021894 | 707 | unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX) |
elessair | 0:f269e3021894 | 708 | unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX) |
elessair | 0:f269e3021894 | 709 | unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX) |
elessair | 0:f269e3021894 | 710 | |
elessair | 0:f269e3021894 | 711 | result = "Test summary:\n" |
elessair | 0:f269e3021894 | 712 | for target in unique_targets: |
elessair | 0:f269e3021894 | 713 | result_dict = {} # test : { toolchain : result } |
elessair | 0:f269e3021894 | 714 | unique_target_toolchains = [] |
elessair | 0:f269e3021894 | 715 | for test in test_summary: |
elessair | 0:f269e3021894 | 716 | if test[TARGET_INDEX] == target: |
elessair | 0:f269e3021894 | 717 | if test[TOOLCHAIN_INDEX] not in unique_target_toolchains: |
elessair | 0:f269e3021894 | 718 | unique_target_toolchains.append(test[TOOLCHAIN_INDEX]) |
elessair | 0:f269e3021894 | 719 | if test[TEST_INDEX] not in result_dict: |
elessair | 0:f269e3021894 | 720 | result_dict[test[TEST_INDEX]] = {} |
elessair | 0:f269e3021894 | 721 | result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX] |
elessair | 0:f269e3021894 | 722 | |
elessair | 0:f269e3021894 | 723 | pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains |
elessair | 0:f269e3021894 | 724 | pt = PrettyTable(pt_cols) |
elessair | 0:f269e3021894 | 725 | for col in pt_cols: |
elessair | 0:f269e3021894 | 726 | pt.align[col] = "l" |
elessair | 0:f269e3021894 | 727 | pt.padding_width = 1 # One space between column edges and contents (default) |
elessair | 0:f269e3021894 | 728 | |
elessair | 0:f269e3021894 | 729 | for test in unique_tests: |
elessair | 0:f269e3021894 | 730 | if test in result_dict: |
elessair | 0:f269e3021894 | 731 | test_results = result_dict[test] |
elessair | 0:f269e3021894 | 732 | if test in unique_test_desc: |
elessair | 0:f269e3021894 | 733 | row = [target, test, unique_test_desc[test]] |
elessair | 0:f269e3021894 | 734 | for toolchain in unique_toolchains: |
elessair | 0:f269e3021894 | 735 | if toolchain in test_results: |
elessair | 0:f269e3021894 | 736 | row.append(test_results[toolchain]) |
elessair | 0:f269e3021894 | 737 | pt.add_row(row) |
elessair | 0:f269e3021894 | 738 | result += pt.get_string() |
elessair | 0:f269e3021894 | 739 | shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND, |
elessair | 0:f269e3021894 | 740 | shuffle_seed if shuffle_seed else self.shuffle_random_seed) |
elessair | 0:f269e3021894 | 741 | result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '') |
elessair | 0:f269e3021894 | 742 | return result |
elessair | 0:f269e3021894 | 743 | |
elessair | 0:f269e3021894 | 744 | def generate_test_summary(self, test_summary, shuffle_seed=None): |
elessair | 0:f269e3021894 | 745 | """ Prints well-formed summary with results (SQL table like) |
elessair | 0:f269e3021894 | 746 | table shows target x test result matrix |
elessair | 0:f269e3021894 | 747 | """ |
elessair | 0:f269e3021894 | 748 | success_code = 0 # Success code that can be later returned to the caller |
elessair | 0:f269e3021894 | 749 | result = "Test summary:\n" |
elessair | 0:f269e3021894 | 750 | # Pretty table package is used to print results |
elessair | 0:f269e3021894 | 751 | pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description", |
elessair | 0:f269e3021894 | 752 | "Elapsed Time (sec)", "Timeout (sec)", "Loops"]) |
elessair | 0:f269e3021894 | 753 | pt.align["Result"] = "l" # Left align |
elessair | 0:f269e3021894 | 754 | pt.align["Target"] = "l" # Left align |
elessair | 0:f269e3021894 | 755 | pt.align["Toolchain"] = "l" # Left align |
elessair | 0:f269e3021894 | 756 | pt.align["Test ID"] = "l" # Left align |
elessair | 0:f269e3021894 | 757 | pt.align["Test Description"] = "l" # Left align |
elessair | 0:f269e3021894 | 758 | pt.padding_width = 1 # One space between column edges and contents (default) |
elessair | 0:f269e3021894 | 759 | |
elessair | 0:f269e3021894 | 760 | result_dict = {self.TEST_RESULT_OK : 0, |
elessair | 0:f269e3021894 | 761 | self.TEST_RESULT_FAIL : 0, |
elessair | 0:f269e3021894 | 762 | self.TEST_RESULT_ERROR : 0, |
elessair | 0:f269e3021894 | 763 | self.TEST_RESULT_UNDEF : 0, |
elessair | 0:f269e3021894 | 764 | self.TEST_RESULT_IOERR_COPY : 0, |
elessair | 0:f269e3021894 | 765 | self.TEST_RESULT_IOERR_DISK : 0, |
elessair | 0:f269e3021894 | 766 | self.TEST_RESULT_IOERR_SERIAL : 0, |
elessair | 0:f269e3021894 | 767 | self.TEST_RESULT_NO_IMAGE : 0, |
elessair | 0:f269e3021894 | 768 | self.TEST_RESULT_TIMEOUT : 0, |
elessair | 0:f269e3021894 | 769 | self.TEST_RESULT_MBED_ASSERT : 0, |
elessair | 0:f269e3021894 | 770 | self.TEST_RESULT_BUILD_FAILED : 0, |
elessair | 0:f269e3021894 | 771 | self.TEST_RESULT_NOT_SUPPORTED : 0 |
elessair | 0:f269e3021894 | 772 | } |
elessair | 0:f269e3021894 | 773 | |
elessair | 0:f269e3021894 | 774 | for test in test_summary: |
elessair | 0:f269e3021894 | 775 | if test[0] in result_dict: |
elessair | 0:f269e3021894 | 776 | result_dict[test[0]] += 1 |
elessair | 0:f269e3021894 | 777 | pt.add_row(test) |
elessair | 0:f269e3021894 | 778 | result += pt.get_string() |
elessair | 0:f269e3021894 | 779 | result += "\n" |
elessair | 0:f269e3021894 | 780 | |
elessair | 0:f269e3021894 | 781 | # Print result count |
elessair | 0:f269e3021894 | 782 | result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()]) |
elessair | 0:f269e3021894 | 783 | shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND, |
elessair | 0:f269e3021894 | 784 | shuffle_seed if shuffle_seed else self.shuffle_random_seed) |
elessair | 0:f269e3021894 | 785 | result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '') |
elessair | 0:f269e3021894 | 786 | return result |
elessair | 0:f269e3021894 | 787 | |
elessair | 0:f269e3021894 | 788 | def test_loop_list_to_dict(self, test_loops_str): |
elessair | 0:f269e3021894 | 789 | """ Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count} |
elessair | 0:f269e3021894 | 790 | """ |
elessair | 0:f269e3021894 | 791 | result = {} |
elessair | 0:f269e3021894 | 792 | if test_loops_str: |
elessair | 0:f269e3021894 | 793 | test_loops = test_loops_str |
elessair | 0:f269e3021894 | 794 | for test_loop in test_loops: |
elessair | 0:f269e3021894 | 795 | test_loop_count = test_loop.split('=') |
elessair | 0:f269e3021894 | 796 | if len(test_loop_count) == 2: |
elessair | 0:f269e3021894 | 797 | _test_id, _test_loops = test_loop_count |
elessair | 0:f269e3021894 | 798 | try: |
elessair | 0:f269e3021894 | 799 | _test_loops = int(_test_loops) |
elessair | 0:f269e3021894 | 800 | except ValueError: |
elessair | 0:f269e3021894 | 801 | continue |
elessair | 0:f269e3021894 | 802 | result[_test_id] = _test_loops |
elessair | 0:f269e3021894 | 803 | return result |
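# Illustrative sketch (editor's addition): the transformation performed by
# test_loop_list_to_dict(), assuming the raw option value has already been split
# on commas into a list:
#
#   self.test_loop_list_to_dict(['MBED_A1=5', 'MBED_A2=3', 'MBED_A3=oops'])
#   # -> {'MBED_A1': 5, 'MBED_A2': 3}   (entries with a non-integer count are skipped)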
elessair | 0:f269e3021894 | 804 | |
elessair | 0:f269e3021894 | 805 | def get_test_loop_count(self, test_id): |
elessair | 0:f269e3021894 | 806 | """ This function returns the number of loops for a given test (looked up by test_id). |
elessair | 0:f269e3021894 | 807 | If the test is not in the dictionary of redefined loop counts, the default value is used. |
elessair | 0:f269e3021894 | 808 | """ |
elessair | 0:f269e3021894 | 809 | result = self.GLOBAL_LOOPS_COUNT |
elessair | 0:f269e3021894 | 810 | if test_id in self.TEST_LOOPS_DICT: |
elessair | 0:f269e3021894 | 811 | result = self.TEST_LOOPS_DICT[test_id] |
elessair | 0:f269e3021894 | 812 | return result |
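# Illustrative sketch (editor's addition, hypothetical values): with
# GLOBAL_LOOPS_COUNT = 1 and TEST_LOOPS_DICT = {'MBED_A1': 5},
# get_test_loop_count('MBED_A1') returns 5, while any test id not listed in
# TEST_LOOPS_DICT falls back to the default of 1.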
elessair | 0:f269e3021894 | 813 | |
elessair | 0:f269e3021894 | 814 | def delete_file(self, file_path): |
elessair | 0:f269e3021894 | 815 | """ Remove file from the system |
elessair | 0:f269e3021894 | 816 | """ |
elessair | 0:f269e3021894 | 817 | result = True |
elessair | 0:f269e3021894 | 818 | result_msg = "" |
elessair | 0:f269e3021894 | 819 | try: |
elessair | 0:f269e3021894 | 820 | os.remove(file_path) |
elessair | 0:f269e3021894 | 821 | except Exception, e: |
elessair | 0:f269e3021894 | 822 | result_msg = e |
elessair | 0:f269e3021894 | 823 | result = False |
elessair | 0:f269e3021894 | 824 | return result, result_msg |
elessair | 0:f269e3021894 | 825 | |
elessair | 0:f269e3021894 | 826 | def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1): |
elessair | 0:f269e3021894 | 827 | """ Invokes the test on the given MUT (mbed under test). |
elessair | 0:f269e3021894 | 828 | """ |
elessair | 0:f269e3021894 | 829 | # Get test information, image and test timeout |
elessair | 0:f269e3021894 | 830 | test_id = data['test_id'] |
elessair | 0:f269e3021894 | 831 | test = TEST_MAP[test_id] |
elessair | 0:f269e3021894 | 832 | test_description = TEST_MAP[test_id].get_description() |
elessair | 0:f269e3021894 | 833 | image = data["image"] |
elessair | 0:f269e3021894 | 834 | duration = data.get("duration", 10) |
elessair | 0:f269e3021894 | 835 | |
elessair | 0:f269e3021894 | 836 | if mut is None: |
elessair | 0:f269e3021894 | 837 | print "Error: No Mbed available: MUT[%s]" % data['mcu'] |
elessair | 0:f269e3021894 | 838 | return None |
elessair | 0:f269e3021894 | 839 | |
elessair | 0:f269e3021894 | 840 | mcu = mut['mcu'] |
elessair | 0:f269e3021894 | 841 | copy_method = mut.get('copy_method') # Copy (flashing) method configured for this MUT, if any |
elessair | 0:f269e3021894 | 842 | |
elessair | 0:f269e3021894 | 843 | if self.db_logger: |
elessair | 0:f269e3021894 | 844 | self.db_logger.reconnect() |
elessair | 0:f269e3021894 | 845 | |
elessair | 0:f269e3021894 | 846 | selected_copy_method = self.opts_copy_method if copy_method is None else copy_method |
elessair | 0:f269e3021894 | 847 | |
elessair | 0:f269e3021894 | 848 | # Tests can be looped so test results must be stored for the same test |
elessair | 0:f269e3021894 | 849 | test_all_result = [] |
elessair | 0:f269e3021894 | 850 | # Test results for one test run multiple times |
elessair | 0:f269e3021894 | 851 | detailed_test_results = {} # { Loop_number: { results ... } } |
elessair | 0:f269e3021894 | 852 | |
elessair | 0:f269e3021894 | 853 | for test_index in range(test_loops): |
elessair | 0:f269e3021894 | 854 | |
elessair | 0:f269e3021894 | 855 | # If mbedls is available and we are auto detecting MUT info, |
elessair | 0:f269e3021894 | 856 | # update MUT info (the mount point may have changed) |
elessair | 0:f269e3021894 | 857 | if get_module_avail('mbed_lstools') and self.opts_auto_detect: |
elessair | 0:f269e3021894 | 858 | platform_name_filter = [mcu] |
elessair | 0:f269e3021894 | 859 | muts_list = {} |
elessair | 0:f269e3021894 | 860 | found = False |
elessair | 0:f269e3021894 | 861 | |
elessair | 0:f269e3021894 | 862 | for i in range(0, 60): |
elessair | 0:f269e3021894 | 863 | print('Looking for %s with MBEDLS' % mcu) |
elessair | 0:f269e3021894 | 864 | muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter) |
elessair | 0:f269e3021894 | 865 | |
elessair | 0:f269e3021894 | 866 | if 1 not in muts_list: |
elessair | 0:f269e3021894 | 867 | sleep(3) |
elessair | 0:f269e3021894 | 868 | else: |
elessair | 0:f269e3021894 | 869 | found = True |
elessair | 0:f269e3021894 | 870 | break |
elessair | 0:f269e3021894 | 871 | |
elessair | 0:f269e3021894 | 872 | if not found: |
elessair | 0:f269e3021894 | 873 | print "Error: mbed not found with MBEDLS: %s" % data['mcu'] |
elessair | 0:f269e3021894 | 874 | return None |
elessair | 0:f269e3021894 | 875 | else: |
elessair | 0:f269e3021894 | 876 | mut = muts_list[1] |
elessair | 0:f269e3021894 | 877 | |
elessair | 0:f269e3021894 | 878 | disk = mut.get('disk') |
elessair | 0:f269e3021894 | 879 | port = mut.get('port') |
elessair | 0:f269e3021894 | 880 | |
elessair | 0:f269e3021894 | 881 | if disk is None or port is None: |
elessair | 0:f269e3021894 | 882 | return None |
elessair | 0:f269e3021894 | 883 | |
elessair | 0:f269e3021894 | 884 | target_by_mcu = TARGET_MAP[mut['mcu']] |
elessair | 0:f269e3021894 | 885 | target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu'] |
elessair | 0:f269e3021894 | 886 | # Some optional extra settings can be declared in the MUTs structure |
elessair | 0:f269e3021894 | 887 | reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt |
elessair | 0:f269e3021894 | 888 | reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT) |
elessair | 0:f269e3021894 | 889 | |
elessair | 0:f269e3021894 | 890 | # When the build and test systems were separate, this path was relative |
elessair | 0:f269e3021894 | 891 | # to a network base folder: join(NETWORK_BASE_PATH, ) |
elessair | 0:f269e3021894 | 892 | image_path = image |
elessair | 0:f269e3021894 | 893 | |
elessair | 0:f269e3021894 | 894 | # Host test execution |
elessair | 0:f269e3021894 | 895 | start_host_exec_time = time() |
elessair | 0:f269e3021894 | 896 | |
elessair | 0:f269e3021894 | 897 | single_test_result = self.TEST_RESULT_UNDEF # single test run result |
elessair | 0:f269e3021894 | 898 | _copy_method = selected_copy_method |
elessair | 0:f269e3021894 | 899 | |
elessair | 0:f269e3021894 | 900 | if not exists(image_path): |
elessair | 0:f269e3021894 | 901 | single_test_result = self.TEST_RESULT_NO_IMAGE |
elessair | 0:f269e3021894 | 902 | elapsed_time = 0 |
elessair | 0:f269e3021894 | 903 | single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path) |
elessair | 0:f269e3021894 | 904 | print single_test_output |
elessair | 0:f269e3021894 | 905 | else: |
elessair | 0:f269e3021894 | 906 | # Host test execution |
elessair | 0:f269e3021894 | 907 | start_host_exec_time = time() |
elessair | 0:f269e3021894 | 908 | |
elessair | 0:f269e3021894 | 909 | host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose |
elessair | 0:f269e3021894 | 910 | host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type |
elessair | 0:f269e3021894 | 911 | host_test_result = self.run_host_test(test.host_test, |
elessair | 0:f269e3021894 | 912 | image_path, disk, port, duration, |
elessair | 0:f269e3021894 | 913 | micro=target_name, |
elessair | 0:f269e3021894 | 914 | verbose=host_test_verbose, |
elessair | 0:f269e3021894 | 915 | reset=host_test_reset, |
elessair | 0:f269e3021894 | 916 | reset_tout=reset_tout, |
elessair | 0:f269e3021894 | 917 | copy_method=selected_copy_method, |
elessair | 0:f269e3021894 | 918 | program_cycle_s=target_by_mcu.program_cycle_s) |
elessair | 0:f269e3021894 | 919 | single_test_result, single_test_output, single_testduration, single_timeout = host_test_result |
elessair | 0:f269e3021894 | 920 | |
elessair | 0:f269e3021894 | 921 | # Store test result |
elessair | 0:f269e3021894 | 922 | test_all_result.append(single_test_result) |
elessair | 0:f269e3021894 | 923 | total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset |
elessair | 0:f269e3021894 | 924 | elapsed_time = single_testduration # Time of single test case execution after reset |
elessair | 0:f269e3021894 | 925 | |
elessair | 0:f269e3021894 | 926 | detailed_test_results[test_index] = { |
elessair | 0:f269e3021894 | 927 | 'result' : single_test_result, |
elessair | 0:f269e3021894 | 928 | 'output' : single_test_output, |
elessair | 0:f269e3021894 | 929 | 'target_name' : target_name, |
elessair | 0:f269e3021894 | 930 | 'target_name_unique' : target_name_unique, |
elessair | 0:f269e3021894 | 931 | 'toolchain_name' : toolchain_name, |
elessair | 0:f269e3021894 | 932 | 'id' : test_id, |
elessair | 0:f269e3021894 | 933 | 'description' : test_description, |
elessair | 0:f269e3021894 | 934 | 'elapsed_time' : round(elapsed_time, 2), |
elessair | 0:f269e3021894 | 935 | 'duration' : single_timeout, |
elessair | 0:f269e3021894 | 936 | 'copy_method' : _copy_method, |
elessair | 0:f269e3021894 | 937 | } |
elessair | 0:f269e3021894 | 938 | |
elessair | 0:f269e3021894 | 939 | print self.print_test_result(single_test_result, target_name_unique, toolchain_name, |
elessair | 0:f269e3021894 | 940 | test_id, test_description, elapsed_time, single_timeout) |
elessair | 0:f269e3021894 | 941 | |
elessair | 0:f269e3021894 | 942 | # Update database entries for ongoing test |
elessair | 0:f269e3021894 | 943 | if self.db_logger and self.db_logger.is_connected(): |
elessair | 0:f269e3021894 | 944 | test_type = 'SingleTest' |
elessair | 0:f269e3021894 | 945 | self.db_logger.insert_test_entry(self.db_logger_build_id, |
elessair | 0:f269e3021894 | 946 | target_name, |
elessair | 0:f269e3021894 | 947 | toolchain_name, |
elessair | 0:f269e3021894 | 948 | test_type, |
elessair | 0:f269e3021894 | 949 | test_id, |
elessair | 0:f269e3021894 | 950 | single_test_result, |
elessair | 0:f269e3021894 | 951 | single_test_output, |
elessair | 0:f269e3021894 | 952 | elapsed_time, |
elessair | 0:f269e3021894 | 953 | single_timeout, |
elessair | 0:f269e3021894 | 954 | test_index) |
elessair | 0:f269e3021894 | 955 | |
elessair | 0:f269e3021894 | 956 | # In waterfall mode we loop the test only until it passes (OK), then stop testing |
elessair | 0:f269e3021894 | 957 | if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK: |
elessair | 0:f269e3021894 | 958 | break |
elessair | 0:f269e3021894 | 959 | |
elessair | 0:f269e3021894 | 960 | if self.db_logger: |
elessair | 0:f269e3021894 | 961 | self.db_logger.disconnect() |
elessair | 0:f269e3021894 | 962 | |
elessair | 0:f269e3021894 | 963 | return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test), |
elessair | 0:f269e3021894 | 964 | target_name_unique, |
elessair | 0:f269e3021894 | 965 | toolchain_name, |
elessair | 0:f269e3021894 | 966 | test_id, |
elessair | 0:f269e3021894 | 967 | test_description, |
elessair | 0:f269e3021894 | 968 | round(elapsed_time, 2), |
elessair | 0:f269e3021894 | 969 | single_timeout, |
elessair | 0:f269e3021894 | 970 | self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results |
elessair | 0:f269e3021894 | 971 | |
elessair | 0:f269e3021894 | 972 | def handle(self, test_spec, target_name, toolchain_name, test_loops=1): |
elessair | 0:f269e3021894 | 973 | """ Function determines MUT's mbed disk/port and copies binary to |
elessair | 0:f269e3021894 | 974 | target. |
elessair | 0:f269e3021894 | 975 | """ |
elessair | 0:f269e3021894 | 976 | handle_results = [] |
elessair | 0:f269e3021894 | 977 | data = json.loads(test_spec) |
elessair | 0:f269e3021894 | 978 | |
elessair | 0:f269e3021894 | 979 | # Find a suitable MUT: |
elessair | 0:f269e3021894 | 980 | mut = None |
elessair | 0:f269e3021894 | 981 | for id, m in self.muts.iteritems(): |
elessair | 0:f269e3021894 | 982 | if m['mcu'] == data['mcu']: |
elessair | 0:f269e3021894 | 983 | mut = m |
elessair | 0:f269e3021894 | 984 | handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops) |
elessair | 0:f269e3021894 | 985 | handle_results.append(handle_result) |
elessair | 0:f269e3021894 | 986 | |
elessair | 0:f269e3021894 | 987 | return handle_results |
elessair | 0:f269e3021894 | 988 | |
elessair | 0:f269e3021894 | 989 | def print_test_result(self, test_result, target_name, toolchain_name, |
elessair | 0:f269e3021894 | 990 | test_id, test_description, elapsed_time, duration): |
elessair | 0:f269e3021894 | 991 | """ Formats a test result line and related data using a fixed '::'-separated convention |
elessair | 0:f269e3021894 | 992 | """ |
elessair | 0:f269e3021894 | 993 | tokens = [] |
elessair | 0:f269e3021894 | 994 | tokens.append("TargetTest") |
elessair | 0:f269e3021894 | 995 | tokens.append(target_name) |
elessair | 0:f269e3021894 | 996 | tokens.append(toolchain_name) |
elessair | 0:f269e3021894 | 997 | tokens.append(test_id) |
elessair | 0:f269e3021894 | 998 | tokens.append(test_description) |
elessair | 0:f269e3021894 | 999 | separator = "::" |
elessair | 0:f269e3021894 | 1000 | time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration) |
elessair | 0:f269e3021894 | 1001 | result = separator.join(tokens) + " [" + test_result +"]" + time_info |
elessair | 0:f269e3021894 | 1002 | return Fore.MAGENTA + result + Fore.RESET |
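# Illustrative sketch (editor's addition): print_test_result() returns a single
# '::'-separated line, wrapped in Fore.MAGENTA / Fore.RESET colour codes; with
# hypothetical values it reads roughly:
#
#   TargetTest::K64F::GCC_ARM::MBED_A1::Basic [OK] in 2.31 of 20 sec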
elessair | 0:f269e3021894 | 1003 | |
elessair | 0:f269e3021894 | 1004 | def shape_test_loop_ok_result_count(self, test_all_result): |
elessair | 0:f269e3021894 | 1005 | """ Reformats the list of loop results into a simple 'passed/total' string |
elessair | 0:f269e3021894 | 1006 | """ |
elessair | 0:f269e3021894 | 1007 | test_loop_count = len(test_all_result) |
elessair | 0:f269e3021894 | 1008 | test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK) |
elessair | 0:f269e3021894 | 1009 | return "%d/%d"% (test_loop_ok_result, test_loop_count) |
elessair | 0:f269e3021894 | 1010 | |
elessair | 0:f269e3021894 | 1011 | def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate): |
elessair | 0:f269e3021894 | 1012 | """ Reduces the list of per-loop results to a single overall result |
elessair | 0:f269e3021894 | 1013 | """ |
elessair | 0:f269e3021894 | 1014 | result = self.TEST_RESULT_FAIL |
elessair | 0:f269e3021894 | 1015 | |
elessair | 0:f269e3021894 | 1016 | if all(test_all_result[0] == res for res in test_all_result): |
elessair | 0:f269e3021894 | 1017 | result = test_all_result[0] |
elessair | 0:f269e3021894 | 1018 | elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result): |
elessair | 0:f269e3021894 | 1019 | result = self.TEST_RESULT_OK |
elessair | 0:f269e3021894 | 1020 | |
elessair | 0:f269e3021894 | 1021 | return result |
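# Illustrative sketch (editor's addition): how looped results are shaped, using
# hypothetical per-loop results:
#
#   results = [self.TEST_RESULT_FAIL, self.TEST_RESULT_OK, self.TEST_RESULT_OK]
#   self.shape_test_loop_ok_result_count(results)        # -> '2/3'
#   self.shape_global_test_loop_result(results, False)   # -> self.TEST_RESULT_FAIL
#   self.shape_global_test_loop_result(results, True)    # -> self.TEST_RESULT_OK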
elessair | 0:f269e3021894 | 1022 | |
elessair | 0:f269e3021894 | 1023 | def run_host_test(self, name, image_path, disk, port, duration, |
elessair | 0:f269e3021894 | 1024 | micro=None, reset=None, reset_tout=None, |
elessair | 0:f269e3021894 | 1025 | verbose=False, copy_method=None, program_cycle_s=None): |
elessair | 0:f269e3021894 | 1026 | """ Creates a new process running the host test configured for a particular test case. |
elessair | 0:f269e3021894 | 1027 | The function also polls the process for serial port activity to catch all data |
elessair | 0:f269e3021894 | 1028 | printed by the test runner and the host test during test execution. |
elessair | 0:f269e3021894 | 1029 | """ |
elessair | 0:f269e3021894 | 1030 | |
elessair | 0:f269e3021894 | 1031 | def get_char_from_queue(obs): |
elessair | 0:f269e3021894 | 1032 | """ Get a character from the queue in a safe way |
elessair | 0:f269e3021894 | 1033 | """ |
elessair | 0:f269e3021894 | 1034 | try: |
elessair | 0:f269e3021894 | 1035 | c = obs.queue.get(block=True, timeout=0.5) |
elessair | 0:f269e3021894 | 1036 | except Empty, _: |
elessair | 0:f269e3021894 | 1037 | c = None |
elessair | 0:f269e3021894 | 1038 | return c |
elessair | 0:f269e3021894 | 1039 | |
elessair | 0:f269e3021894 | 1040 | def filter_queue_char(c): |
elessair | 0:f269e3021894 | 1041 | """ Filters out non-ASCII characters read from the serial port |
elessair | 0:f269e3021894 | 1042 | """ |
elessair | 0:f269e3021894 | 1043 | if ord(c) not in range(128): |
elessair | 0:f269e3021894 | 1044 | c = ' ' |
elessair | 0:f269e3021894 | 1045 | return c |
elessair | 0:f269e3021894 | 1046 | |
elessair | 0:f269e3021894 | 1047 | def get_test_result(output): |
elessair | 0:f269e3021894 | 1048 | """ Parse test 'output' data |
elessair | 0:f269e3021894 | 1049 | """ |
elessair | 0:f269e3021894 | 1050 | result = self.TEST_RESULT_TIMEOUT |
elessair | 0:f269e3021894 | 1051 | for line in "".join(output).splitlines(): |
elessair | 0:f269e3021894 | 1052 | search_result = self.RE_DETECT_TESTCASE_RESULT.search(line) |
elessair | 0:f269e3021894 | 1053 | if search_result and len(search_result.groups()): |
elessair | 0:f269e3021894 | 1054 | result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]] |
elessair | 0:f269e3021894 | 1055 | break |
elessair | 0:f269e3021894 | 1056 | return result |
elessair | 0:f269e3021894 | 1057 | |
elessair | 0:f269e3021894 | 1058 | def get_auto_property_value(property_name, line): |
elessair | 0:f269e3021894 | 1059 | """ Scans an auto-detection line from the MUT and returns the value of parameter 'property_name'. |
elessair | 0:f269e3021894 | 1060 | Returns a string, or None if the property is not found. |
elessair | 0:f269e3021894 | 1061 | """ |
elessair | 0:f269e3021894 | 1062 | result = None |
elessair | 0:f269e3021894 | 1063 | if re.search("HOST: Property '%s'"% property_name, line) is not None: |
elessair | 0:f269e3021894 | 1064 | property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line) |
elessair | 0:f269e3021894 | 1065 | if property is not None and len(property.groups()) == 1: |
elessair | 0:f269e3021894 | 1066 | result = property.groups()[0] |
elessair | 0:f269e3021894 | 1067 | return result |
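# Illustrative sketch (editor's addition): get_auto_property_value() extracts the
# value from host test output such as the hypothetical line below:
#
#   get_auto_property_value('timeout', "HOST: Property 'timeout' = '30'")
#   # -> '30'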
elessair | 0:f269e3021894 | 1068 | |
elessair | 0:f269e3021894 | 1069 | # print "{%s} port:%s disk:%s" % (name, port, disk), |
elessair | 0:f269e3021894 | 1070 | cmd = ["python", |
elessair | 0:f269e3021894 | 1071 | '%s.py'% name, |
elessair | 0:f269e3021894 | 1072 | '-d', disk, |
elessair | 0:f269e3021894 | 1073 | '-f', '"%s"'% image_path, |
elessair | 0:f269e3021894 | 1074 | '-p', port, |
elessair | 0:f269e3021894 | 1075 | '-t', str(duration), |
elessair | 0:f269e3021894 | 1076 | '-C', str(program_cycle_s)] |
elessair | 0:f269e3021894 | 1077 | |
elessair | 0:f269e3021894 | 1078 | if get_module_avail('mbed_lstools') and self.opts_auto_detect: |
elessair | 0:f269e3021894 | 1079 | cmd += ['--auto'] |
elessair | 0:f269e3021894 | 1080 | |
elessair | 0:f269e3021894 | 1081 | # Add extra parameters to host_test |
elessair | 0:f269e3021894 | 1082 | if copy_method is not None: |
elessair | 0:f269e3021894 | 1083 | cmd += ["-c", copy_method] |
elessair | 0:f269e3021894 | 1084 | if micro is not None: |
elessair | 0:f269e3021894 | 1085 | cmd += ["-m", micro] |
elessair | 0:f269e3021894 | 1086 | if reset is not None: |
elessair | 0:f269e3021894 | 1087 | cmd += ["-r", reset] |
elessair | 0:f269e3021894 | 1088 | if reset_tout is not None: |
elessair | 0:f269e3021894 | 1089 | cmd += ["-R", str(reset_tout)] |
elessair | 0:f269e3021894 | 1090 | |
elessair | 0:f269e3021894 | 1091 | if verbose: |
elessair | 0:f269e3021894 | 1092 | print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET |
elessair | 0:f269e3021894 | 1093 | print "Test::Output::Start" |
elessair | 0:f269e3021894 | 1094 | |
elessair | 0:f269e3021894 | 1095 | proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS) |
elessair | 0:f269e3021894 | 1096 | obs = ProcessObserver(proc) |
elessair | 0:f269e3021894 | 1097 | update_once_flag = {} # Stores flags checking if some auto-parameter was already set |
elessair | 0:f269e3021894 | 1098 | line = '' |
elessair | 0:f269e3021894 | 1099 | output = [] |
elessair | 0:f269e3021894 | 1100 | start_time = time() |
elessair | 0:f269e3021894 | 1101 | while (time() - start_time) < (2 * duration): |
elessair | 0:f269e3021894 | 1102 | c = get_char_from_queue(obs) |
elessair | 0:f269e3021894 | 1103 | if c: |
elessair | 0:f269e3021894 | 1104 | if verbose: |
elessair | 0:f269e3021894 | 1105 | sys.stdout.write(c) |
elessair | 0:f269e3021894 | 1106 | c = filter_queue_char(c) |
elessair | 0:f269e3021894 | 1107 | output.append(c) |
elessair | 0:f269e3021894 | 1108 | # Process the output line by line so the mbed under test can signal events (e.g. test end) |
elessair | 0:f269e3021894 | 1109 | if c in ['\n', '\r']: |
elessair | 0:f269e3021894 | 1110 | |
elessair | 0:f269e3021894 | 1111 | # Checking for auto-detection information from the test about MUT reset moment |
elessair | 0:f269e3021894 | 1112 | if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line: |
elessair | 0:f269e3021894 | 1113 | # We will update this marker only once to avoid restarting the timeout more than once |
elessair | 0:f269e3021894 | 1114 | update_once_flag['reset_target'] = True |
elessair | 0:f269e3021894 | 1115 | start_time = time() |
elessair | 0:f269e3021894 | 1116 | |
elessair | 0:f269e3021894 | 1117 | # Checking for auto-detection information from the test about timeout |
elessair | 0:f269e3021894 | 1118 | auto_timeout_val = get_auto_property_value('timeout', line) |
elessair | 0:f269e3021894 | 1119 | if 'timeout' not in update_once_flag and auto_timeout_val is not None: |
elessair | 0:f269e3021894 | 1120 | # We will update this marker only once to avoid changing the timeout more than once |
elessair | 0:f269e3021894 | 1121 | update_once_flag['timeout'] = True |
elessair | 0:f269e3021894 | 1122 | duration = int(auto_timeout_val) |
elessair | 0:f269e3021894 | 1123 | |
elessair | 0:f269e3021894 | 1124 | # Detect mbed assert: |
elessair | 0:f269e3021894 | 1125 | if 'mbed assertation failed: ' in line: |
elessair | 0:f269e3021894 | 1126 | output.append('{{mbed_assert}}') |
elessair | 0:f269e3021894 | 1127 | break |
elessair | 0:f269e3021894 | 1128 | |
elessair | 0:f269e3021894 | 1129 | # Check for test end |
elessair | 0:f269e3021894 | 1130 | if '{end}' in line: |
elessair | 0:f269e3021894 | 1131 | break |
elessair | 0:f269e3021894 | 1132 | line = '' |
elessair | 0:f269e3021894 | 1133 | else: |
elessair | 0:f269e3021894 | 1134 | line += c |
elessair | 0:f269e3021894 | 1135 | end_time = time() |
elessair | 0:f269e3021894 | 1136 | testcase_duration = end_time - start_time # Test case duration from reset to {end} |
elessair | 0:f269e3021894 | 1137 | |
elessair | 0:f269e3021894 | 1138 | c = get_char_from_queue(obs) |
elessair | 0:f269e3021894 | 1139 | |
elessair | 0:f269e3021894 | 1140 | if c: |
elessair | 0:f269e3021894 | 1141 | if verbose: |
elessair | 0:f269e3021894 | 1142 | sys.stdout.write(c) |
elessair | 0:f269e3021894 | 1143 | c = filter_queue_char(c) |
elessair | 0:f269e3021894 | 1144 | output.append(c) |
elessair | 0:f269e3021894 | 1145 | |
elessair | 0:f269e3021894 | 1146 | if verbose: |
elessair | 0:f269e3021894 | 1147 | print "Test::Output::Finish" |
elessair | 0:f269e3021894 | 1148 | # Stop test process |
elessair | 0:f269e3021894 | 1149 | obs.stop() |
elessair | 0:f269e3021894 | 1150 | |
elessair | 0:f269e3021894 | 1151 | result = get_test_result(output) |
elessair | 0:f269e3021894 | 1152 | return (result, "".join(output), testcase_duration, duration) |
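# Illustrative sketch (editor's addition): the cmd list built above corresponds to
# a command line of roughly the following shape, executed with cwd=HOST_TESTS
# (host test name, disk, port and image path values are hypothetical):
#
#   python echo.py -d E: -f "build/tests/echo.bin" -p COM3 -t 10 -C 4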
elessair | 0:f269e3021894 | 1153 | |
elessair | 0:f269e3021894 | 1154 | def is_peripherals_available(self, target_mcu_name, peripherals=None): |
elessair | 0:f269e3021894 | 1155 | """ Checks whether a MUT with the specified target MCU (and the required peripherals, if any) is defined in the MUTs file |
elessair | 0:f269e3021894 | 1156 | """ |
elessair | 0:f269e3021894 | 1157 | if peripherals is not None: |
elessair | 0:f269e3021894 | 1158 | peripherals = set(peripherals) |
elessair | 0:f269e3021894 | 1159 | for id, mut in self.muts.iteritems(): |
elessair | 0:f269e3021894 | 1160 | # Target MCU name check |
elessair | 0:f269e3021894 | 1161 | if mut["mcu"] != target_mcu_name: |
elessair | 0:f269e3021894 | 1162 | continue |
elessair | 0:f269e3021894 | 1163 | # Peripherals check |
elessair | 0:f269e3021894 | 1164 | if peripherals is not None: |
elessair | 0:f269e3021894 | 1165 | if 'peripherals' not in mut: |
elessair | 0:f269e3021894 | 1166 | continue |
elessair | 0:f269e3021894 | 1167 | if not peripherals.issubset(set(mut['peripherals'])): |
elessair | 0:f269e3021894 | 1168 | continue |
elessair | 0:f269e3021894 | 1169 | return True |
elessair | 0:f269e3021894 | 1170 | return False |
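# Illustrative sketch (editor's addition): a hypothetical MUTs entry that would
# make is_peripherals_available('LPC1768', peripherals=['SD']) return True; the
# field names follow the keys read elsewhere in this module (mcu, port, disk,
# peripherals):
#
#   self.muts = {
#       '1': {'mcu': 'LPC1768', 'port': 'COM4', 'disk': 'E:', 'peripherals': ['SD', 'TMP102']}
#   }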
elessair | 0:f269e3021894 | 1171 | |
elessair | 0:f269e3021894 | 1172 | def shape_test_request(self, mcu, image_path, test_id, duration=10): |
elessair | 0:f269e3021894 | 1173 | """ Function prepares JSON structure describing test specification |
elessair | 0:f269e3021894 | 1174 | """ |
elessair | 0:f269e3021894 | 1175 | test_spec = { |
elessair | 0:f269e3021894 | 1176 | "mcu": mcu, |
elessair | 0:f269e3021894 | 1177 | "image": image_path, |
elessair | 0:f269e3021894 | 1178 | "duration": duration, |
elessair | 0:f269e3021894 | 1179 | "test_id": test_id, |
elessair | 0:f269e3021894 | 1180 | } |
elessair | 0:f269e3021894 | 1181 | return json.dumps(test_spec) |
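# Illustrative sketch (editor's addition): the JSON produced by shape_test_request()
# and later consumed by handle() / handle_mut(), with hypothetical values (key
# order in the dumped string may vary):
#
#   self.shape_test_request('K64F', 'build/tests/mbed_a1.bin', 'MBED_A1', duration=20)
#   # -> '{"mcu": "K64F", "image": "build/tests/mbed_a1.bin", "duration": 20, "test_id": "MBED_A1"}'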
elessair | 0:f269e3021894 | 1182 | |
elessair | 0:f269e3021894 | 1183 | |
elessair | 0:f269e3021894 | 1184 | def get_unique_value_from_summary(test_summary, index): |
elessair | 0:f269e3021894 | 1185 | """ Gets a sorted list of unique values from the given column (index) of the test summary |
elessair | 0:f269e3021894 | 1186 | """ |
elessair | 0:f269e3021894 | 1187 | result = [] |
elessair | 0:f269e3021894 | 1188 | for test in test_summary: |
elessair | 0:f269e3021894 | 1189 | target_name = test[index] |
elessair | 0:f269e3021894 | 1190 | if target_name not in result: |
elessair | 0:f269e3021894 | 1191 | result.append(target_name) |
elessair | 0:f269e3021894 | 1192 | return sorted(result) |
elessair | 0:f269e3021894 | 1193 | |
elessair | 0:f269e3021894 | 1194 | |
elessair | 0:f269e3021894 | 1195 | def get_unique_value_from_summary_ext(test_summary, index_key, index_val): |
elessair | 0:f269e3021894 | 1196 | """ Builds a dictionary mapping each unique value at index_key to the first value found at index_val |
elessair | 0:f269e3021894 | 1197 | """ |
elessair | 0:f269e3021894 | 1198 | result = {} |
elessair | 0:f269e3021894 | 1199 | for test in test_summary: |
elessair | 0:f269e3021894 | 1200 | key = test[index_key] |
elessair | 0:f269e3021894 | 1201 | val = test[index_val] |
elessair | 0:f269e3021894 | 1202 | if key not in result: |
elessair | 0:f269e3021894 | 1203 | result[key] = val |
elessair | 0:f269e3021894 | 1204 | return result |
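# Illustrative sketch (editor's addition): with hypothetical summary rows (each row
# is a list of columns), only the first value seen for a given key is kept:
#
#   rows = [['OK', 'K64F', 'GCC_ARM'], ['FAIL', 'K64F', 'ARM'], ['OK', 'LPC1768', 'GCC_ARM']]
#   get_unique_value_from_summary(rows, 1)          # -> ['K64F', 'LPC1768']
#   get_unique_value_from_summary_ext(rows, 1, 2)   # -> {'K64F': 'GCC_ARM', 'LPC1768': 'GCC_ARM'}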
elessair | 0:f269e3021894 | 1205 | |
elessair | 0:f269e3021894 | 1206 | |
elessair | 0:f269e3021894 | 1207 | def show_json_file_format_error(json_spec_filename, line, column): |
elessair | 0:f269e3021894 | 1208 | """ Prints the broken JSON content around the reported error position |
elessair | 0:f269e3021894 | 1209 | """ |
elessair | 0:f269e3021894 | 1210 | with open(json_spec_filename) as data_file: |
elessair | 0:f269e3021894 | 1211 | line_no = 1 |
elessair | 0:f269e3021894 | 1212 | for json_line in data_file: |
elessair | 0:f269e3021894 | 1213 | if line_no + 5 >= line: # Print last few lines before error |
elessair | 0:f269e3021894 | 1214 | print 'Line %d:\t'%line_no + json_line, # Prints line |
elessair | 0:f269e3021894 | 1215 | if line_no == line: |
elessair | 0:f269e3021894 | 1216 | print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^' |
elessair | 0:f269e3021894 | 1217 | break |
elessair | 0:f269e3021894 | 1218 | line_no += 1 |
elessair | 0:f269e3021894 | 1219 | |
elessair | 0:f269e3021894 | 1220 | |
elessair | 0:f269e3021894 | 1221 | def json_format_error_defect_pos(json_error_msg): |
elessair | 0:f269e3021894 | 1222 | """ Gets the first error line and column reported for a malformed JSON file. |
elessair | 0:f269e3021894 | 1223 | Parsed from the message of the exception thrown by json.loads() |
elessair | 0:f269e3021894 | 1224 | """ |
elessair | 0:f269e3021894 | 1225 | result = None |
elessair | 0:f269e3021894 | 1226 | line, column = 0, 0 |
elessair | 0:f269e3021894 | 1227 | # Line value search |
elessair | 0:f269e3021894 | 1228 | line_search = re.search('line [0-9]+', json_error_msg) |
elessair | 0:f269e3021894 | 1229 | if line_search is not None: |
elessair | 0:f269e3021894 | 1230 | ls = line_search.group().split(' ') |
elessair | 0:f269e3021894 | 1231 | if len(ls) == 2: |
elessair | 0:f269e3021894 | 1232 | line = int(ls[1]) |
elessair | 0:f269e3021894 | 1233 | # Column position search |
elessair | 0:f269e3021894 | 1234 | column_search = re.search('column [0-9]+', json_error_msg) |
elessair | 0:f269e3021894 | 1235 | if column_search is not None: |
elessair | 0:f269e3021894 | 1236 | cs = column_search.group().split(' ') |
elessair | 0:f269e3021894 | 1237 | if len(cs) == 2: |
elessair | 0:f269e3021894 | 1238 | column = int(cs[1]) |
elessair | 0:f269e3021894 | 1239 | result = [line, column] |
elessair | 0:f269e3021894 | 1240 | return result |
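# Illustrative sketch (editor's addition): json.loads() error messages embed the
# defect position; the exact wording may differ between Python versions:
#
#   json_format_error_defect_pos("Expecting ',' delimiter: line 3 column 9 (char 25)")
#   # -> [3, 9]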
elessair | 0:f269e3021894 | 1241 | |
elessair | 0:f269e3021894 | 1242 | |
elessair | 0:f269e3021894 | 1243 | def get_json_data_from_file(json_spec_filename, verbose=False): |
elessair | 0:f269e3021894 | 1244 | """ Loads a JSON formatted file into a data structure |
elessair | 0:f269e3021894 | 1245 | """ |
elessair | 0:f269e3021894 | 1246 | result = None |
elessair | 0:f269e3021894 | 1247 | try: |
elessair | 0:f269e3021894 | 1248 | with open(json_spec_filename) as data_file: |
elessair | 0:f269e3021894 | 1249 | try: |
elessair | 0:f269e3021894 | 1250 | result = json.load(data_file) |
elessair | 0:f269e3021894 | 1251 | except ValueError as json_error_msg: |
elessair | 0:f269e3021894 | 1252 | result = None |
elessair | 0:f269e3021894 | 1253 | print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg) |
elessair | 0:f269e3021894 | 1254 | # We can print where error occurred inside JSON file if we can parse exception msg |
elessair | 0:f269e3021894 | 1255 | json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg)) |
elessair | 0:f269e3021894 | 1256 | if json_format_defect_pos is not None: |
elessair | 0:f269e3021894 | 1257 | line = json_format_defect_pos[0] |
elessair | 0:f269e3021894 | 1258 | column = json_format_defect_pos[1] |
elessair | 0:f269e3021894 | 1259 | |
elessair | 0:f269e3021894 | 1260 | show_json_file_format_error(json_spec_filename, line, column) |
elessair | 0:f269e3021894 | 1261 | |
elessair | 0:f269e3021894 | 1262 | except IOError as fileopen_error_msg: |
elessair | 0:f269e3021894 | 1263 | print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg) |
elessair | 0:f269e3021894 | 1264 | |
elessair | 0:f269e3021894 | 1265 | if verbose and result: |
elessair | 0:f269e3021894 | 1266 | pp = pprint.PrettyPrinter(indent=4) |
elessair | 0:f269e3021894 | 1267 | pp.pprint(result) |
elessair | 0:f269e3021894 | 1268 | return result |
elessair | 0:f269e3021894 | 1269 | |
elessair | 0:f269e3021894 | 1270 | |
elessair | 0:f269e3021894 | 1271 | def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None): |
elessair | 0:f269e3021894 | 1272 | """ Prints the MUTs configuration passed to the test script, for verbose output |
elessair | 0:f269e3021894 | 1273 | """ |
elessair | 0:f269e3021894 | 1274 | muts_info_cols = [] |
elessair | 0:f269e3021894 | 1275 | # We need to check all unique properties for each defined MUT |
elessair | 0:f269e3021894 | 1276 | for k in json_data: |
elessair | 0:f269e3021894 | 1277 | mut_info = json_data[k] |
elessair | 0:f269e3021894 | 1278 | for mut_property in mut_info: |
elessair | 0:f269e3021894 | 1279 | if mut_property not in muts_info_cols: |
elessair | 0:f269e3021894 | 1280 | muts_info_cols.append(mut_property) |
elessair | 0:f269e3021894 | 1281 | |
elessair | 0:f269e3021894 | 1282 | # Prepare pretty table object to display all MUTs |
elessair | 0:f269e3021894 | 1283 | pt_cols = ["index"] + muts_info_cols |
elessair | 0:f269e3021894 | 1284 | pt = PrettyTable(pt_cols) |
elessair | 0:f269e3021894 | 1285 | for col in pt_cols: |
elessair | 0:f269e3021894 | 1286 | pt.align[col] = "l" |
elessair | 0:f269e3021894 | 1287 | |
elessair | 0:f269e3021894 | 1288 | # Add rows to pretty print object |
elessair | 0:f269e3021894 | 1289 | for k in json_data: |
elessair | 0:f269e3021894 | 1290 | row = [k] |
elessair | 0:f269e3021894 | 1291 | mut_info = json_data[k] |
elessair | 0:f269e3021894 | 1292 | |
elessair | 0:f269e3021894 | 1293 | add_row = True |
elessair | 0:f269e3021894 | 1294 | if platform_filter and 'mcu' in mut_info: |
elessair | 0:f269e3021894 | 1295 | add_row = re.search(platform_filter, mut_info['mcu']) is not None |
elessair | 0:f269e3021894 | 1296 | if add_row: |
elessair | 0:f269e3021894 | 1297 | for col in muts_info_cols: |
elessair | 0:f269e3021894 | 1298 | cell_val = mut_info[col] if col in mut_info else None |
elessair | 0:f269e3021894 | 1299 | if type(cell_val) == ListType: |
elessair | 0:f269e3021894 | 1300 | cell_val = join_delim.join(cell_val) |
elessair | 0:f269e3021894 | 1301 | row.append(cell_val) |
elessair | 0:f269e3021894 | 1302 | pt.add_row(row) |
elessair | 0:f269e3021894 | 1303 | return pt.get_string() |
elessair | 0:f269e3021894 | 1304 | |
elessair | 0:f269e3021894 | 1305 | |
elessair | 0:f269e3021894 | 1306 | def print_test_configuration_from_json(json_data, join_delim=", "): |
elessair | 0:f269e3021894 | 1307 | """ Prints the test specification configuration passed to the test script, for verbose output |
elessair | 0:f269e3021894 | 1308 | """ |
elessair | 0:f269e3021894 | 1309 | toolchains_info_cols = [] |
elessair | 0:f269e3021894 | 1310 | # We need to check all toolchains for each device |
elessair | 0:f269e3021894 | 1311 | for k in json_data: |
elessair | 0:f269e3021894 | 1312 | # k should be 'targets' |
elessair | 0:f269e3021894 | 1313 | targets = json_data[k] |
elessair | 0:f269e3021894 | 1314 | for target in targets: |
elessair | 0:f269e3021894 | 1315 | toolchains = targets[target] |
elessair | 0:f269e3021894 | 1316 | for toolchain in toolchains: |
elessair | 0:f269e3021894 | 1317 | if toolchain not in toolchains_info_cols: |
elessair | 0:f269e3021894 | 1318 | toolchains_info_cols.append(toolchain) |
elessair | 0:f269e3021894 | 1319 | |
elessair | 0:f269e3021894 | 1320 | # Prepare pretty table object to display test specification |
elessair | 0:f269e3021894 | 1321 | pt_cols = ["mcu"] + sorted(toolchains_info_cols) |
elessair | 0:f269e3021894 | 1322 | pt = PrettyTable(pt_cols) |
elessair | 0:f269e3021894 | 1323 | for col in pt_cols: |
elessair | 0:f269e3021894 | 1324 | pt.align[col] = "l" |
elessair | 0:f269e3021894 | 1325 | |
elessair | 0:f269e3021894 | 1326 | # { target : [conflicted toolchains] } |
elessair | 0:f269e3021894 | 1327 | toolchain_conflicts = {} |
elessair | 0:f269e3021894 | 1328 | toolchain_path_conflicts = [] |
elessair | 0:f269e3021894 | 1329 | for k in json_data: |
elessair | 0:f269e3021894 | 1330 | # k should be 'targets' |
elessair | 0:f269e3021894 | 1331 | targets = json_data[k] |
elessair | 0:f269e3021894 | 1332 | for target in targets: |
elessair | 0:f269e3021894 | 1333 | target_supported_toolchains = get_target_supported_toolchains(target) |
elessair | 0:f269e3021894 | 1334 | if not target_supported_toolchains: |
elessair | 0:f269e3021894 | 1335 | target_supported_toolchains = [] |
elessair | 0:f269e3021894 | 1336 | target_name = target if target in TARGET_MAP else "%s*"% target |
elessair | 0:f269e3021894 | 1337 | row = [target_name] |
elessair | 0:f269e3021894 | 1338 | toolchains = targets[target] |
elessair | 0:f269e3021894 | 1339 | |
elessair | 0:f269e3021894 | 1340 | for toolchain in sorted(toolchains_info_cols): |
elessair | 0:f269e3021894 | 1341 | # Check for conflicts: target vs toolchain |
elessair | 0:f269e3021894 | 1342 | conflict = False |
elessair | 0:f269e3021894 | 1343 | conflict_path = False |
elessair | 0:f269e3021894 | 1344 | if toolchain in toolchains: |
elessair | 0:f269e3021894 | 1345 | if toolchain not in target_supported_toolchains: |
elessair | 0:f269e3021894 | 1346 | conflict = True |
elessair | 0:f269e3021894 | 1347 | if target not in toolchain_conflicts: |
elessair | 0:f269e3021894 | 1348 | toolchain_conflicts[target] = [] |
elessair | 0:f269e3021894 | 1349 | toolchain_conflicts[target].append(toolchain) |
elessair | 0:f269e3021894 | 1350 | # Add marker inside table about target usage / conflict |
elessair | 0:f269e3021894 | 1351 | cell_val = 'Yes' if toolchain in toolchains else '-' |
elessair | 0:f269e3021894 | 1352 | if conflict: |
elessair | 0:f269e3021894 | 1353 | cell_val += '*' |
elessair | 0:f269e3021894 | 1354 | # Check for conflicts: toolchain vs toolchain path |
elessair | 0:f269e3021894 | 1355 | if toolchain in TOOLCHAIN_PATHS: |
elessair | 0:f269e3021894 | 1356 | toolchain_path = TOOLCHAIN_PATHS[toolchain] |
elessair | 0:f269e3021894 | 1357 | if not os.path.isdir(toolchain_path): |
elessair | 0:f269e3021894 | 1358 | conflict_path = True |
elessair | 0:f269e3021894 | 1359 | if toolchain not in toolchain_path_conflicts: |
elessair | 0:f269e3021894 | 1360 | toolchain_path_conflicts.append(toolchain) |
elessair | 0:f269e3021894 | 1361 | if conflict_path: |
elessair | 0:f269e3021894 | 1362 | cell_val += '#' |
elessair | 0:f269e3021894 | 1363 | row.append(cell_val) |
elessair | 0:f269e3021894 | 1364 | pt.add_row(row) |
elessair | 0:f269e3021894 | 1365 | |
elessair | 0:f269e3021894 | 1366 | # generate result string |
elessair | 0:f269e3021894 | 1367 | result = pt.get_string() # Test specification table |
elessair | 0:f269e3021894 | 1368 | if toolchain_conflicts or toolchain_path_conflicts: |
elessair | 0:f269e3021894 | 1369 | result += "\n" |
elessair | 0:f269e3021894 | 1370 | result += "Toolchain conflicts:\n" |
elessair | 0:f269e3021894 | 1371 | for target in toolchain_conflicts: |
elessair | 0:f269e3021894 | 1372 | if target not in TARGET_MAP: |
elessair | 0:f269e3021894 | 1373 | result += "\t* Target %s unknown\n"% (target) |
elessair | 0:f269e3021894 | 1374 | conflict_target_list = join_delim.join(toolchain_conflicts[target]) |
elessair | 0:f269e3021894 | 1375 | suffix = 's' if len(toolchain_conflicts[target]) > 1 else '' |
elessair | 0:f269e3021894 | 1376 | result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix) |
elessair | 0:f269e3021894 | 1377 | |
elessair | 0:f269e3021894 | 1378 | for toolchain in toolchain_path_conflicts: |
elessair | 0:f269e3021894 | 1379 | # Let's check toolchain configuration |
elessair | 0:f269e3021894 | 1380 | if toolchain in TOOLCHAIN_PATHS: |
elessair | 0:f269e3021894 | 1381 | toolchain_path = TOOLCHAIN_PATHS[toolchain] |
elessair | 0:f269e3021894 | 1382 | if not os.path.isdir(toolchain_path): |
elessair | 0:f269e3021894 | 1383 | result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path) |
elessair | 0:f269e3021894 | 1384 | return result |
elessair | 0:f269e3021894 | 1385 | |
elessair | 0:f269e3021894 | 1386 | |
elessair | 0:f269e3021894 | 1387 | def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None): |
elessair | 0:f269e3021894 | 1388 | """ Generates a summary table with all test cases and additional test case |
elessair | 0:f269e3021894 | 1389 | information using pretty print functionality. Allows the test suite user to |
elessair | 0:f269e3021894 | 1390 | see which test cases are available. |
elessair | 0:f269e3021894 | 1391 | """ |
elessair | 0:f269e3021894 | 1392 | # get all unique test ID prefixes |
elessair | 0:f269e3021894 | 1393 | unique_test_id = [] |
elessair | 0:f269e3021894 | 1394 | for test in TESTS: |
elessair | 0:f269e3021894 | 1395 | split = test['id'].split('_')[:-1] |
elessair | 0:f269e3021894 | 1396 | test_id_prefix = '_'.join(split) |
elessair | 0:f269e3021894 | 1397 | if test_id_prefix not in unique_test_id: |
elessair | 0:f269e3021894 | 1398 | unique_test_id.append(test_id_prefix) |
elessair | 0:f269e3021894 | 1399 | unique_test_id.sort() |
elessair | 0:f269e3021894 | 1400 | counter_dict_test_id_types = dict((t, 0) for t in unique_test_id) |
elessair | 0:f269e3021894 | 1401 | counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id) |
elessair | 0:f269e3021894 | 1402 | |
elessair | 0:f269e3021894 | 1403 | test_properties = ['id', |
elessair | 0:f269e3021894 | 1404 | 'automated', |
elessair | 0:f269e3021894 | 1405 | 'description', |
elessair | 0:f269e3021894 | 1406 | 'peripherals', |
elessair | 0:f269e3021894 | 1407 | 'host_test', |
elessair | 0:f269e3021894 | 1408 | 'duration'] if cols is None else cols |
elessair | 0:f269e3021894 | 1409 | |
elessair | 0:f269e3021894 | 1410 | # All tests status table print |
elessair | 0:f269e3021894 | 1411 | pt = PrettyTable(test_properties) |
elessair | 0:f269e3021894 | 1412 | for col in test_properties: |
elessair | 0:f269e3021894 | 1413 | pt.align[col] = "l" |
elessair | 0:f269e3021894 | 1414 | pt.align['duration'] = "r" |
elessair | 0:f269e3021894 | 1415 | |
elessair | 0:f269e3021894 | 1416 | counter_all = 0 |
elessair | 0:f269e3021894 | 1417 | counter_automated = 0 |
elessair | 0:f269e3021894 | 1418 | pt.padding_width = 1 # One space between column edges and contents (default) |
elessair | 0:f269e3021894 | 1419 | |
elessair | 0:f269e3021894 | 1420 | for test_id in sorted(TEST_MAP.keys()): |
elessair | 0:f269e3021894 | 1421 | if platform_filter is not None: |
elessair | 0:f269e3021894 | 1422 | # Filter out tests using the platform filter regex |
elessair | 0:f269e3021894 | 1423 | if re.search(platform_filter, test_id) is None: |
elessair | 0:f269e3021894 | 1424 | continue |
elessair | 0:f269e3021894 | 1425 | row = [] |
elessair | 0:f269e3021894 | 1426 | test = TEST_MAP[test_id] |
elessair | 0:f269e3021894 | 1427 | split = test_id.split('_')[:-1] |
elessair | 0:f269e3021894 | 1428 | test_id_prefix = '_'.join(split) |
elessair | 0:f269e3021894 | 1429 | |
elessair | 0:f269e3021894 | 1430 | for col in test_properties: |
elessair | 0:f269e3021894 | 1431 | col_value = test[col] |
elessair | 0:f269e3021894 | 1432 | if type(test[col]) == ListType: |
elessair | 0:f269e3021894 | 1433 | col_value = join_delim.join(test[col]) |
elessair | 0:f269e3021894 | 1434 | elif test[col] is None: |
elessair | 0:f269e3021894 | 1435 | col_value = "-" |
elessair | 0:f269e3021894 | 1436 | |
elessair | 0:f269e3021894 | 1437 | row.append(col_value) |
elessair | 0:f269e3021894 | 1438 | if test['automated'] == True: |
elessair | 0:f269e3021894 | 1439 | counter_dict_test_id_types[test_id_prefix] += 1 |
elessair | 0:f269e3021894 | 1440 | counter_automated += 1 |
elessair | 0:f269e3021894 | 1441 | pt.add_row(row) |
elessair | 0:f269e3021894 | 1442 | # Update counters |
elessair | 0:f269e3021894 | 1443 | counter_all += 1 |
elessair | 0:f269e3021894 | 1444 | counter_dict_test_id_types_all[test_id_prefix] += 1 |
elessair | 0:f269e3021894 | 1445 | result = pt.get_string() |
elessair | 0:f269e3021894 | 1446 | result += "\n\n" |
elessair | 0:f269e3021894 | 1447 | |
elessair | 0:f269e3021894 | 1448 | if result_summary and not platform_filter: |
elessair | 0:f269e3021894 | 1449 | # Automation result summary |
elessair | 0:f269e3021894 | 1450 | test_id_cols = ['automated', 'all', 'percent [%]', 'progress'] |
elessair | 0:f269e3021894 | 1451 | pt = PrettyTable(test_id_cols) |
elessair | 0:f269e3021894 | 1452 | pt.align['automated'] = "r" |
elessair | 0:f269e3021894 | 1453 | pt.align['all'] = "r" |
elessair | 0:f269e3021894 | 1454 | pt.align['percent [%]'] = "r" |
elessair | 0:f269e3021894 | 1455 | |
elessair | 0:f269e3021894 | 1456 | percent_progress = round(100.0 * counter_automated / float(counter_all), 1) |
elessair | 0:f269e3021894 | 1457 | str_progress = progress_bar(percent_progress, 75) |
elessair | 0:f269e3021894 | 1458 | pt.add_row([counter_automated, counter_all, percent_progress, str_progress]) |
elessair | 0:f269e3021894 | 1459 | result += "Automation coverage:\n" |
elessair | 0:f269e3021894 | 1460 | result += pt.get_string() |
elessair | 0:f269e3021894 | 1461 | result += "\n\n" |
elessair | 0:f269e3021894 | 1462 | |
elessair | 0:f269e3021894 | 1463 | # Test automation coverage table print |
elessair | 0:f269e3021894 | 1464 | test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress'] |
elessair | 0:f269e3021894 | 1465 | pt = PrettyTable(test_id_cols) |
elessair | 0:f269e3021894 | 1466 | pt.align['id'] = "l" |
elessair | 0:f269e3021894 | 1467 | pt.align['automated'] = "r" |
elessair | 0:f269e3021894 | 1468 | pt.align['all'] = "r" |
elessair | 0:f269e3021894 | 1469 | pt.align['percent [%]'] = "r" |
elessair | 0:f269e3021894 | 1470 | for unique_id in unique_test_id: |
elessair | 0:f269e3021894 | 1471 | # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id]) |
elessair | 0:f269e3021894 | 1472 | percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1) |
elessair | 0:f269e3021894 | 1473 | str_progress = progress_bar(percent_progress, 75) |
elessair | 0:f269e3021894 | 1474 | row = [unique_id, |
elessair | 0:f269e3021894 | 1475 | counter_dict_test_id_types[unique_id], |
elessair | 0:f269e3021894 | 1476 | counter_dict_test_id_types_all[unique_id], |
elessair | 0:f269e3021894 | 1477 | percent_progress, |
elessair | 0:f269e3021894 | 1478 | "[" + str_progress + "]"] |
elessair | 0:f269e3021894 | 1479 | pt.add_row(row) |
elessair | 0:f269e3021894 | 1480 | result += "Test automation coverage:\n" |
elessair | 0:f269e3021894 | 1481 | result += pt.get_string() |
elessair | 0:f269e3021894 | 1482 | result += "\n\n" |
elessair | 0:f269e3021894 | 1483 | return result |
elessair | 0:f269e3021894 | 1484 | |
elessair | 0:f269e3021894 | 1485 | |
elessair | 0:f269e3021894 | 1486 | def progress_bar(percent_progress, saturation=0): |
elessair | 0:f269e3021894 | 1487 | """ This function creates a progress bar with an optional simple saturation mark |
elessair | 0:f269e3021894 | 1488 | """ |
elessair | 0:f269e3021894 | 1489 | step = int(percent_progress / 2) # Scale percentage to bar width (0 - 50 characters) |
elessair | 0:f269e3021894 | 1490 | str_progress = '#' * step + '.' * int(50 - step) |
elessair | 0:f269e3021894 | 1491 | c = '!' if str_progress[38] == '.' else '|' |
elessair | 0:f269e3021894 | 1492 | if saturation > 0: |
elessair | 0:f269e3021894 | 1493 | saturation = saturation / 2 |
elessair | 0:f269e3021894 | 1494 | str_progress = str_progress[:saturation] + c + str_progress[saturation:] |
elessair | 0:f269e3021894 | 1495 | return str_progress |
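# Illustrative sketch (editor's addition): progress_bar() returns a 50-character
# bar of '#' and '.'; when saturation > 0 a single '!' (or '|') mark is spliced in
# at index saturation / 2. For example progress_bar(50.0, saturation=75) yields
# 25 '#' characters followed by dots, with the saturation mark inserted at index 37.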
elessair | 0:f269e3021894 | 1496 | |
elessair | 0:f269e3021894 | 1497 | |
elessair | 0:f269e3021894 | 1498 | def singletest_in_cli_mode(single_test): |
elessair | 0:f269e3021894 | 1499 | """ Runs SingleTestRunner object in CLI (Command line interface) mode |
elessair | 0:f269e3021894 | 1500 | |
elessair | 0:f269e3021894 | 1501 | @return returns success code (0 == success) for building and running tests |
elessair | 0:f269e3021894 | 1502 | """ |
elessair | 0:f269e3021894 | 1503 | start = time() |
elessair | 0:f269e3021894 | 1504 | # Execute tests depending on options and filter applied |
elessair | 0:f269e3021894 | 1505 | test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute() |
elessair | 0:f269e3021894 | 1506 | elapsed_time = time() - start |
elessair | 0:f269e3021894 | 1507 | |
elessair | 0:f269e3021894 | 1508 | # Human readable summary |
elessair | 0:f269e3021894 | 1509 | if not single_test.opts_suppress_summary: |
elessair | 0:f269e3021894 | 1510 | # prints well-formed summary with results (SQL table like) |
elessair | 0:f269e3021894 | 1511 | print single_test.generate_test_summary(test_summary, shuffle_seed) |
elessair | 0:f269e3021894 | 1512 | if single_test.opts_test_x_toolchain_summary: |
elessair | 0:f269e3021894 | 1513 | # prints well-formed summary with results (SQL table like) |
elessair | 0:f269e3021894 | 1514 | # table shows test x toolchain test result matrix |
elessair | 0:f269e3021894 | 1515 | print single_test.generate_test_summary_by_target(test_summary, shuffle_seed) |
elessair | 0:f269e3021894 | 1516 | |
elessair | 0:f269e3021894 | 1517 | print "Completed in %.2f sec"% (elapsed_time) |
elessair | 0:f269e3021894 | 1518 | |
elessair | 0:f269e3021894 | 1519 | # Write summary of the builds |
elessair | 0:f269e3021894 | 1520 | |
elessair | 0:f269e3021894 | 1521 | print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build") |
elessair | 0:f269e3021894 | 1522 | status = print_report_exporter.report(build_report) |
elessair | 0:f269e3021894 | 1523 | |
elessair | 0:f269e3021894 | 1524 | # Store extra reports in files |
elessair | 0:f269e3021894 | 1525 | if single_test.opts_report_html_file_name: |
elessair | 0:f269e3021894 | 1526 | # Export results in form of HTML report to separate file |
elessair | 0:f269e3021894 | 1527 | report_exporter = ReportExporter(ResultExporterType.HTML) |
elessair | 0:f269e3021894 | 1528 | report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext) |
elessair | 0:f269e3021894 | 1529 | if single_test.opts_report_junit_file_name: |
elessair | 0:f269e3021894 | 1530 | # Export results in form of JUnit XML report to separate file |
elessair | 0:f269e3021894 | 1531 | report_exporter = ReportExporter(ResultExporterType.JUNIT) |
elessair | 0:f269e3021894 | 1532 | report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext) |
elessair | 0:f269e3021894 | 1533 | if single_test.opts_report_text_file_name: |
elessair | 0:f269e3021894 | 1534 | # Export results in form of a text file |
elessair | 0:f269e3021894 | 1535 | report_exporter = ReportExporter(ResultExporterType.TEXT) |
elessair | 0:f269e3021894 | 1536 | report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext) |
elessair | 0:f269e3021894 | 1537 | if single_test.opts_report_build_file_name: |
elessair | 0:f269e3021894 | 1538 | # Export build results as a JUnit XML report to a separate file |
elessair | 0:f269e3021894 | 1539 | report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build") |
elessair | 0:f269e3021894 | 1540 | report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties) |
elessair | 0:f269e3021894 | 1541 | |
elessair | 0:f269e3021894 | 1542 | # Returns True if there were no build failures in the test projects or their dependencies |
elessair | 0:f269e3021894 | 1543 | return status |
elessair | 0:f269e3021894 | 1544 | |
elessair | 0:f269e3021894 | 1545 | class TestLogger(): |
elessair | 0:f269e3021894 | 1546 | """ Base class for logging and printing ongoing events during a test suite run |
elessair | 0:f269e3021894 | 1547 | """ |
elessair | 0:f269e3021894 | 1548 | def __init__(self, store_log=True): |
elessair | 0:f269e3021894 | 1549 | """ We can control whether the logger actually stores the log in memory |
elessair | 0:f269e3021894 | 1550 | or just handles all log entries immediately |
elessair | 0:f269e3021894 | 1551 | """ |
elessair | 0:f269e3021894 | 1552 | self.log = [] |
elessair | 0:f269e3021894 | 1553 | self.log_to_file = False |
elessair | 0:f269e3021894 | 1554 | self.log_file_name = None |
elessair | 0:f269e3021894 | 1555 | self.store_log = store_log |
elessair | 0:f269e3021894 | 1556 | |
elessair | 0:f269e3021894 | 1557 | self.LogType = construct_enum(INFO='Info', |
elessair | 0:f269e3021894 | 1558 | WARN='Warning', |
elessair | 0:f269e3021894 | 1559 | NOTIF='Notification', |
elessair | 0:f269e3021894 | 1560 | ERROR='Error', |
elessair | 0:f269e3021894 | 1561 | EXCEPT='Exception') |
elessair | 0:f269e3021894 | 1562 | |
elessair | 0:f269e3021894 | 1563 | self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file |
elessair | 0:f269e3021894 | 1564 | APPEND=2) # Append to existing log file |
elessair | 0:f269e3021894 | 1565 | |
elessair | 0:f269e3021894 | 1566 | def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'): |
elessair | 0:f269e3021894 | 1567 | """ Log one line of text |
elessair | 0:f269e3021894 | 1568 | """ |
elessair | 0:f269e3021894 | 1569 | log_timestamp = time() |
elessair | 0:f269e3021894 | 1570 | log_entry = {'log_type' : LogType, |
elessair | 0:f269e3021894 | 1571 | 'log_timestamp' : log_timestamp, |
elessair | 0:f269e3021894 | 1572 | 'log_line' : log_line, |
elessair | 0:f269e3021894 | 1573 | '_future' : None |
elessair | 0:f269e3021894 | 1574 | } |
elessair | 0:f269e3021894 | 1575 | # Store log in memory |
elessair | 0:f269e3021894 | 1576 | if self.store_log: |
elessair | 0:f269e3021894 | 1577 | self.log.append(log_entry) |
elessair | 0:f269e3021894 | 1578 | return log_entry |
elessair | 0:f269e3021894 | 1579 | |
elessair | 0:f269e3021894 | 1580 | |
elessair | 0:f269e3021894 | 1581 | class CLITestLogger(TestLogger): |
elessair | 0:f269e3021894 | 1582 | """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed |
elessair | 0:f269e3021894 | 1583 | """ |
elessair | 0:f269e3021894 | 1584 | def __init__(self, store_log=True, file_name=None): |
elessair | 0:f269e3021894 | 1585 | TestLogger.__init__(self) |
elessair | 0:f269e3021894 | 1586 | self.log_file_name = file_name |
elessair | 0:f269e3021894 | 1587 | #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time |
elessair | 0:f269e3021894 | 1588 | self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only |
elessair | 0:f269e3021894 | 1589 | |
elessair | 0:f269e3021894 | 1590 | def log_print(self, log_entry, timestamp=True): |
elessair | 0:f269e3021894 | 1591 | """ Formats a log entry for printing on screen |
elessair | 0:f269e3021894 | 1592 | """ |
elessair | 0:f269e3021894 | 1593 | ts = log_entry['log_timestamp'] |
elessair | 0:f269e3021894 | 1594 | timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else '' |
elessair | 0:f269e3021894 | 1595 | log_line_str = "%(log_type)s: %(log_line)s"% (log_entry) |
elessair | 0:f269e3021894 | 1596 | return timestamp_str + log_line_str |
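# Illustrative sketch (editor's addition): with the default '%H:%M:%S' timestamp
# format, log_print() yields strings of the (hypothetical) form:
#
#   [14:02:33] Error: Image file does not exist: ./build/test.bin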
elessair | 0:f269e3021894 | 1597 | |
elessair | 0:f269e3021894 | 1598 | def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'): |
elessair | 0:f269e3021894 | 1599 | """ Logs a line; if a log file was specified, the log line will also be appended |
elessair | 0:f269e3021894 | 1600 | at the end of that file |
elessair | 0:f269e3021894 | 1601 | """ |
elessair | 0:f269e3021894 | 1602 | log_entry = TestLogger.log_line(self, LogType, log_line) |
elessair | 0:f269e3021894 | 1603 | log_line_str = self.log_print(log_entry, timestamp) |
elessair | 0:f269e3021894 | 1604 | if self.log_file_name is not None: |
elessair | 0:f269e3021894 | 1605 | try: |
elessair | 0:f269e3021894 | 1606 | with open(self.log_file_name, 'a') as f: |
elessair | 0:f269e3021894 | 1607 | f.write(log_line_str + line_delim) |
elessair | 0:f269e3021894 | 1608 | except IOError: |
elessair | 0:f269e3021894 | 1609 | pass |
elessair | 0:f269e3021894 | 1610 | return log_line_str |
elessair | 0:f269e3021894 | 1611 | |
elessair | 0:f269e3021894 | 1612 | |
elessair | 0:f269e3021894 | 1613 | def factory_db_logger(db_url): |
elessair | 0:f269e3021894 | 1614 | """ Factory for the database driver, selected by the database type supplied in the connection string db_url |
elessair | 0:f269e3021894 | 1615 | """ |
elessair | 0:f269e3021894 | 1616 | if db_url is not None: |
elessair | 0:f269e3021894 | 1617 | from tools.test_mysql import MySQLDBAccess |
elessair | 0:f269e3021894 | 1618 | connection_info = BaseDBAccess().parse_db_connection_string(db_url) |
elessair | 0:f269e3021894 | 1619 | if connection_info is not None: |
elessair | 0:f269e3021894 | 1620 | (db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url) |
elessair | 0:f269e3021894 | 1621 | if db_type == 'mysql': |
elessair | 0:f269e3021894 | 1622 | return MySQLDBAccess() |
elessair | 0:f269e3021894 | 1623 | return None |
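# Editor's note (assumption): the exact connection string format is not defined in
# this file; based on how detect_database_verbose() below unpacks the parsed result
# (db_type, username, password, host, db_name), db_url is expected to look roughly
# like 'mysql://username:password@host/db_name'.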
elessair | 0:f269e3021894 | 1624 | |
elessair | 0:f269e3021894 | 1625 | |
elessair | 0:f269e3021894 | 1626 | def detect_database_verbose(db_url): |
elessair | 0:f269e3021894 | 1627 | """ Runs the database detection sequence in verbose mode (prints progress) to check if the database connection string is valid |
elessair | 0:f269e3021894 | 1628 | """ |
elessair | 0:f269e3021894 | 1629 | result = BaseDBAccess().parse_db_connection_string(db_url) |
elessair | 0:f269e3021894 | 1630 | if result is not None: |
elessair | 0:f269e3021894 | 1631 | # Parsing passed |
elessair | 0:f269e3021894 | 1632 | (db_type, username, password, host, db_name) = result |
elessair | 0:f269e3021894 | 1633 | #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result |
elessair | 0:f269e3021894 | 1634 | # Let's try to connect |
elessair | 0:f269e3021894 | 1635 | db_ = factory_db_logger(db_url) |
elessair | 0:f269e3021894 | 1636 | if db_ is not None: |
elessair | 0:f269e3021894 | 1637 | print "Connecting to database '%s'..."% db_url, |
elessair | 0:f269e3021894 | 1638 | db_.connect(host, username, password, db_name) |
elessair | 0:f269e3021894 | 1639 | if db_.is_connected(): |
elessair | 0:f269e3021894 | 1640 | print "ok" |
elessair | 0:f269e3021894 | 1641 | print "Detecting database..." |
elessair | 0:f269e3021894 | 1642 | print db_.detect_database(verbose=True) |
elessair | 0:f269e3021894 | 1643 | print "Disconnecting...", |
elessair | 0:f269e3021894 | 1644 | db_.disconnect() |
elessair | 0:f269e3021894 | 1645 | print "done" |
elessair | 0:f269e3021894 | 1646 | else: |
elessair | 0:f269e3021894 | 1647 | print "Database type '%s' unknown"% db_type |
elessair | 0:f269e3021894 | 1648 | else: |
elessair | 0:f269e3021894 | 1649 | print "Parse error: '%s' - DB Url error"% (db_url) |
elessair | 0:f269e3021894 | 1650 | |
elessair | 0:f269e3021894 | 1651 | |
elessair | 0:f269e3021894 | 1652 | def get_module_avail(module_name): |
elessair | 0:f269e3021894 | 1653 | """ This function returns True if module_name is an already imported module
elessair | 0:f269e3021894 | 1654 | """ |
elessair | 0:f269e3021894 | 1655 | return module_name in sys.modules.keys() |
elessair | 0:f269e3021894 | 1656 | |
elessair | 0:f269e3021894 | 1657 | |
elessair | 0:f269e3021894 | 1658 | def get_autodetected_MUTS_list(platform_name_filter=None): |
elessair | 0:f269e3021894 | 1659 | oldError = None |
elessair | 0:f269e3021894 | 1660 | if os.name == 'nt': |
elessair | 0:f269e3021894 | 1661 | # Disable Windows error box temporarily |
elessair | 0:f269e3021894 | 1662 | oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1 |
elessair | 0:f269e3021894 | 1663 | |
elessair | 0:f269e3021894 | 1664 | mbeds = mbed_lstools.create() |
elessair | 0:f269e3021894 | 1665 | detect_muts_list = mbeds.list_mbeds() |
elessair | 0:f269e3021894 | 1666 | |
elessair | 0:f269e3021894 | 1667 | if os.name == 'nt': |
elessair | 0:f269e3021894 | 1668 | ctypes.windll.kernel32.SetErrorMode(oldError) |
elessair | 0:f269e3021894 | 1669 | |
elessair | 0:f269e3021894 | 1670 | return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter) |
elessair | 0:f269e3021894 | 1671 | |
elessair | 0:f269e3021894 | 1672 | def get_autodetected_MUTS(mbeds_list, platform_name_filter=None): |
elessair | 0:f269e3021894 | 1673 | """ Generates an artificial MUTs structure from the list of mbed-enabled devices connected to the host.
elessair | 0:f269e3021894 | 1674 | If no devices were detected an empty dictionary is returned (a data-shape sketch follows this function).
elessair | 0:f269e3021894 | 1675 | |
elessair | 0:f269e3021894 | 1676 | if get_module_avail('mbed_lstools'): |
elessair | 0:f269e3021894 | 1677 | mbeds = mbed_lstools.create() |
elessair | 0:f269e3021894 | 1678 | mbeds_list = mbeds.list_mbeds() |
elessair | 0:f269e3021894 | 1679 | |
elessair | 0:f269e3021894 | 1680 | @param mbeds_list list of mbeds captured from mbed_lstools |
elessair | 0:f269e3021894 | 1681 | @param platform_name_filter optional list of platform names; only MUTs whose 'platform_name' is on the list are kept
elessair | 0:f269e3021894 | 1682 | """ |
elessair | 0:f269e3021894 | 1683 | result = {} # Should be in muts_all.json format |
elessair | 0:f269e3021894 | 1684 | # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts) |
elessair | 0:f269e3021894 | 1685 | # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}] |
elessair | 0:f269e3021894 | 1686 | index = 1 |
elessair | 0:f269e3021894 | 1687 | for mut in mbeds_list: |
elessair | 0:f269e3021894 | 1688 | # Filter the MUTS if a filter is specified |
elessair | 0:f269e3021894 | 1689 | |
elessair | 0:f269e3021894 | 1690 | if platform_name_filter and mut['platform_name'] not in platform_name_filter:
elessair | 0:f269e3021894 | 1691 | continue |
elessair | 0:f269e3021894 | 1692 | |
elessair | 0:f269e3021894 | 1693 | # For 'mcu_unique' we use the 'platform_name_unique' value from the mbed-ls output (if it exists);
elessair | 0:f269e3021894 | 1694 | # if not, we create our own unique value (platform name plus the last few chars of the target_id).
elessair | 0:f269e3021894 | 1695 | m = {'mcu': mut['platform_name'], |
elessair | 0:f269e3021894 | 1696 | 'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]), |
elessair | 0:f269e3021894 | 1697 | 'port': mut['serial_port'], |
elessair | 0:f269e3021894 | 1698 | 'disk': mut['mount_point'], |
elessair | 0:f269e3021894 | 1699 | 'peripherals': [] # No peripheral detection |
elessair | 0:f269e3021894 | 1700 | } |
elessair | 0:f269e3021894 | 1701 | if index not in result: |
elessair | 0:f269e3021894 | 1702 | result[index] = {} |
elessair | 0:f269e3021894 | 1703 | result[index] = m |
elessair | 0:f269e3021894 | 1704 | index += 1 |
elessair | 0:f269e3021894 | 1705 | return result |
elessair | 0:f269e3021894 | 1706 | |
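A data-shape sketch using the example mbed-ls entry from the comment above; because that entry has no 'platform_name_unique' key, the last four characters of the target_id are used for 'mcu_unique':

    mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:',
                   'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
    muts = get_autodetected_MUTS(mbeds_list)
    # muts == {1: {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]',
    #              'port': u'COM34', 'disk': 'E:', 'peripherals': []}}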
elessair | 0:f269e3021894 | 1707 | |
elessair | 0:f269e3021894 | 1708 | def get_autodetected_TEST_SPEC(mbeds_list, |
elessair | 0:f269e3021894 | 1709 | use_default_toolchain=True, |
elessair | 0:f269e3021894 | 1710 | use_supported_toolchains=False, |
elessair | 0:f269e3021894 | 1711 | toolchain_filter=None, |
elessair | 0:f269e3021894 | 1712 | platform_name_filter=None): |
elessair | 0:f269e3021894 | 1713 | """ Generates an artificial test_spec structure from the list of mbed-enabled devices connected to the host.
elessair | 0:f269e3021894 | 1714 | If no devices were detected an empty 'targets' test_spec description is returned (a sketch follows this function).
elessair | 0:f269e3021894 | 1715 | |
elessair | 0:f269e3021894 | 1716 | use_default_toolchain - if True add default toolchain to test_spec |
elessair | 0:f269e3021894 | 1717 | use_supported_toolchains - if True add all supported toolchains to test_spec |
elessair | 0:f269e3021894 | 1718 | toolchain_filter - if a list of toolchains is given, add only those from the list that the target supports to test_spec
elessair | 0:f269e3021894 | 1719 | """ |
elessair | 0:f269e3021894 | 1720 | result = {'targets': {} } |
elessair | 0:f269e3021894 | 1721 | |
elessair | 0:f269e3021894 | 1722 | for mut in mbeds_list: |
elessair | 0:f269e3021894 | 1723 | mcu = mut['mcu'] |
elessair | 0:f269e3021894 | 1724 | if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter): |
elessair | 0:f269e3021894 | 1725 | if mcu in TARGET_MAP: |
elessair | 0:f269e3021894 | 1726 | default_toolchain = TARGET_MAP[mcu].default_toolchain |
elessair | 0:f269e3021894 | 1727 | supported_toolchains = TARGET_MAP[mcu].supported_toolchains |
elessair | 0:f269e3021894 | 1728 | |
elessair | 0:f269e3021894 | 1729 | # Decide which toolchains should be added to test specification toolchain pool for each target |
elessair | 0:f269e3021894 | 1730 | toolchains = [] |
elessair | 0:f269e3021894 | 1731 | if use_default_toolchain: |
elessair | 0:f269e3021894 | 1732 | toolchains.append(default_toolchain) |
elessair | 0:f269e3021894 | 1733 | if use_supported_toolchains: |
elessair | 0:f269e3021894 | 1734 | toolchains += supported_toolchains |
elessair | 0:f269e3021894 | 1735 | if toolchain_filter is not None: |
elessair | 0:f269e3021894 | 1736 | all_toolchains = supported_toolchains + [default_toolchain] |
elessair | 0:f269e3021894 | 1737 | for toolchain in toolchain_filter: |
elessair | 0:f269e3021894 | 1738 | if toolchain in all_toolchains: |
elessair | 0:f269e3021894 | 1739 | toolchains.append(toolchain) |
elessair | 0:f269e3021894 | 1740 | |
elessair | 0:f269e3021894 | 1741 | result['targets'][mcu] = list(set(toolchains)) |
elessair | 0:f269e3021894 | 1742 | return result |
elessair | 0:f269e3021894 | 1743 | |
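A hedged sketch of the resulting shape; 'K64F' is only an illustrative platform name and the toolchains that actually end up in the list depend entirely on what TARGET_MAP reports for it:

    detected = [{'mcu': 'K64F'}]
    test_spec = get_autodetected_TEST_SPEC(detected,
                                           use_default_toolchain=True,
                                           use_supported_toolchains=False)
    # e.g. test_spec == {'targets': {'K64F': ['ARM']}}  (toolchain list comes from TARGET_MAP)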
elessair | 0:f269e3021894 | 1744 | |
elessair | 0:f269e3021894 | 1745 | def get_default_test_options_parser(): |
elessair | 0:f269e3021894 | 1746 | """ Get common test script options used by CLI, web services etc. |
elessair | 0:f269e3021894 | 1747 | """ |
elessair | 0:f269e3021894 | 1748 | parser = argparse.ArgumentParser() |
elessair | 0:f269e3021894 | 1749 | parser.add_argument('-i', '--tests', |
elessair | 0:f269e3021894 | 1750 | dest='test_spec_filename', |
elessair | 0:f269e3021894 | 1751 | metavar="FILE", |
elessair | 0:f269e3021894 | 1752 | type=argparse_filestring_type, |
elessair | 0:f269e3021894 | 1753 | help='Points to file with test specification') |
elessair | 0:f269e3021894 | 1754 | |
elessair | 0:f269e3021894 | 1755 | parser.add_argument('-M', '--MUTS', |
elessair | 0:f269e3021894 | 1756 | dest='muts_spec_filename', |
elessair | 0:f269e3021894 | 1757 | metavar="FILE", |
elessair | 0:f269e3021894 | 1758 | type=argparse_filestring_type, |
elessair | 0:f269e3021894 | 1759 | help='Points to file with MUTs specification (overrides settings.py and private_settings.py)')
elessair | 0:f269e3021894 | 1760 | |
elessair | 0:f269e3021894 | 1761 | parser.add_argument("-j", "--jobs", |
elessair | 0:f269e3021894 | 1762 | dest='jobs', |
elessair | 0:f269e3021894 | 1763 | metavar="NUMBER", |
elessair | 0:f269e3021894 | 1764 | type=int, |
elessair | 0:f269e3021894 | 1765 | help="Define number of compilation jobs. Default value is 1") |
elessair | 0:f269e3021894 | 1766 | |
elessair | 0:f269e3021894 | 1767 | if get_module_avail('mbed_lstools'): |
elessair | 0:f269e3021894 | 1768 | # Additional features available when mbed_lstools is installed on host and imported |
elessair | 0:f269e3021894 | 1769 | # mbed_lstools allows users to detect mbed-enabled devices connected to the host
elessair | 0:f269e3021894 | 1770 | parser.add_argument('--auto', |
elessair | 0:f269e3021894 | 1771 | dest='auto_detect', |
elessair | 0:f269e3021894 | 1772 | action="store_true", |
elessair | 0:f269e3021894 | 1773 | help='Use mbed-ls module to detect all connected mbed devices') |
elessair | 0:f269e3021894 | 1774 | |
elessair | 0:f269e3021894 | 1775 | toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"] |
elessair | 0:f269e3021894 | 1776 | parser.add_argument('--tc', |
elessair | 0:f269e3021894 | 1777 | dest='toolchains_filter', |
elessair | 0:f269e3021894 | 1778 | type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")), |
elessair | 0:f269e3021894 | 1779 | help="Toolchain filter for --auto argument. Use toolchain names separated by commas, or 'default' / 'all' to select toolchains")
elessair | 0:f269e3021894 | 1780 | |
elessair | 0:f269e3021894 | 1781 | test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()]) |
elessair | 0:f269e3021894 | 1782 | parser.add_argument('--oper', |
elessair | 0:f269e3021894 | 1783 | dest='operability_checks', |
elessair | 0:f269e3021894 | 1784 | type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"), |
elessair | 0:f269e3021894 | 1785 | help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes) |
elessair | 0:f269e3021894 | 1786 | |
elessair | 0:f269e3021894 | 1787 | parser.add_argument('--clean', |
elessair | 0:f269e3021894 | 1788 | dest='clean', |
elessair | 0:f269e3021894 | 1789 | action="store_true", |
elessair | 0:f269e3021894 | 1790 | help='Clean the build directory') |
elessair | 0:f269e3021894 | 1791 | |
elessair | 0:f269e3021894 | 1792 | parser.add_argument('-P', '--only-peripherals', |
elessair | 0:f269e3021894 | 1793 | dest='test_only_peripheral', |
elessair | 0:f269e3021894 | 1794 | default=False, |
elessair | 0:f269e3021894 | 1795 | action="store_true", |
elessair | 0:f269e3021894 | 1796 | help='Test only peripherals declared for the MUT and skip common tests')
elessair | 0:f269e3021894 | 1797 | |
elessair | 0:f269e3021894 | 1798 | parser.add_argument("--profile", dest="profile", action="append", |
elessair | 0:f269e3021894 | 1799 | type=argparse_filestring_type, |
elessair | 0:f269e3021894 | 1800 | default=[]) |
elessair | 0:f269e3021894 | 1801 | |
elessair | 0:f269e3021894 | 1802 | parser.add_argument('-C', '--only-commons', |
elessair | 0:f269e3021894 | 1803 | dest='test_only_common', |
elessair | 0:f269e3021894 | 1804 | default=False, |
elessair | 0:f269e3021894 | 1805 | action="store_true", |
elessair | 0:f269e3021894 | 1806 | help='Test only board internals. Skip peripheral tests and perform common tests')
elessair | 0:f269e3021894 | 1807 | |
elessair | 0:f269e3021894 | 1808 | parser.add_argument('-n', '--test-by-names', |
elessair | 0:f269e3021894 | 1809 | dest='test_by_names', |
elessair | 0:f269e3021894 | 1810 | type=argparse_many(str), |
elessair | 0:f269e3021894 | 1811 | help='Runs only tests enumerated in this switch. Use commas to separate test case names')
elessair | 0:f269e3021894 | 1812 | |
elessair | 0:f269e3021894 | 1813 | parser.add_argument('-p', '--peripheral-by-names', |
elessair | 0:f269e3021894 | 1814 | dest='peripheral_by_names', |
elessair | 0:f269e3021894 | 1815 | type=argparse_many(str), |
elessair | 0:f269e3021894 | 1816 | help='Forces discovery of particular peripherals. Use commas to separate peripheral names')
elessair | 0:f269e3021894 | 1817 | |
elessair | 0:f269e3021894 | 1818 | copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod') |
elessair | 0:f269e3021894 | 1819 | copy_methods_str = "Plugin support: " + ', '.join(copy_methods) |
elessair | 0:f269e3021894 | 1820 | |
elessair | 0:f269e3021894 | 1821 | parser.add_argument('-c', '--copy-method', |
elessair | 0:f269e3021894 | 1822 | dest='copy_method', |
elessair | 0:f269e3021894 | 1823 | type=argparse_uppercase_type(copy_methods, "flash method"), |
elessair | 0:f269e3021894 | 1824 | help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str) |
elessair | 0:f269e3021894 | 1825 | |
elessair | 0:f269e3021894 | 1826 | reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod') |
elessair | 0:f269e3021894 | 1827 | reset_methods_str = "Plugin support: " + ', '.join(reset_methods) |
elessair | 0:f269e3021894 | 1828 | |
elessair | 0:f269e3021894 | 1829 | parser.add_argument('-r', '--reset-type', |
elessair | 0:f269e3021894 | 1830 | dest='mut_reset_type', |
elessair | 0:f269e3021894 | 1831 | default=None, |
elessair | 0:f269e3021894 | 1832 | type=argparse_uppercase_type(reset_methods, "reset method"), |
elessair | 0:f269e3021894 | 1833 | help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str) |
elessair | 0:f269e3021894 | 1834 | |
elessair | 0:f269e3021894 | 1835 | parser.add_argument('-g', '--goanna-for-tests', |
elessair | 0:f269e3021894 | 1836 | dest='goanna_for_tests', |
elessair | 0:f269e3021894 | 1837 | action="store_true", |
elessair | 0:f269e3021894 | 1838 | help='Run the Goanna static analysis tool for tests (the project will be rebuilt)')
elessair | 0:f269e3021894 | 1839 | |
elessair | 0:f269e3021894 | 1840 | parser.add_argument('-G', '--goanna-for-sdk', |
elessair | 0:f269e3021894 | 1841 | dest='goanna_for_mbed_sdk', |
elessair | 0:f269e3021894 | 1842 | action="store_true", |
elessair | 0:f269e3021894 | 1843 | help='Run the Goanna static analysis tool for the mbed SDK (the project will be rebuilt)')
elessair | 0:f269e3021894 | 1844 | |
elessair | 0:f269e3021894 | 1845 | parser.add_argument('-s', '--suppress-summary', |
elessair | 0:f269e3021894 | 1846 | dest='suppress_summary', |
elessair | 0:f269e3021894 | 1847 | default=False, |
elessair | 0:f269e3021894 | 1848 | action="store_true", |
elessair | 0:f269e3021894 | 1849 | help='Suppresses display of the well-formatted table with test results')
elessair | 0:f269e3021894 | 1850 | |
elessair | 0:f269e3021894 | 1851 | parser.add_argument('-t', '--test-summary', |
elessair | 0:f269e3021894 | 1852 | dest='test_x_toolchain_summary', |
elessair | 0:f269e3021894 | 1853 | default=False, |
elessair | 0:f269e3021894 | 1854 | action="store_true", |
elessair | 0:f269e3021894 | 1855 | help='Displays a well-formatted table with test x toolchain results per target')
elessair | 0:f269e3021894 | 1856 | |
elessair | 0:f269e3021894 | 1857 | parser.add_argument('-A', '--test-automation-report', |
elessair | 0:f269e3021894 | 1858 | dest='test_automation_report', |
elessair | 0:f269e3021894 | 1859 | default=False, |
elessair | 0:f269e3021894 | 1860 | action="store_true", |
elessair | 0:f269e3021894 | 1861 | help='Prints information about all tests and exits') |
elessair | 0:f269e3021894 | 1862 | |
elessair | 0:f269e3021894 | 1863 | parser.add_argument('-R', '--test-case-report', |
elessair | 0:f269e3021894 | 1864 | dest='test_case_report', |
elessair | 0:f269e3021894 | 1865 | default=False, |
elessair | 0:f269e3021894 | 1866 | action="store_true", |
elessair | 0:f269e3021894 | 1867 | help='Prints information about all test cases and exits') |
elessair | 0:f269e3021894 | 1868 | |
elessair | 0:f269e3021894 | 1869 | parser.add_argument("-S", "--supported-toolchains", |
elessair | 0:f269e3021894 | 1870 | action="store_true", |
elessair | 0:f269e3021894 | 1871 | dest="supported_toolchains", |
elessair | 0:f269e3021894 | 1872 | default=False, |
elessair | 0:f269e3021894 | 1873 | help="Displays supported matrix of MCUs and toolchains") |
elessair | 0:f269e3021894 | 1874 | |
elessair | 0:f269e3021894 | 1875 | parser.add_argument("-O", "--only-build", |
elessair | 0:f269e3021894 | 1876 | action="store_true", |
elessair | 0:f269e3021894 | 1877 | dest="only_build_tests", |
elessair | 0:f269e3021894 | 1878 | default=False, |
elessair | 0:f269e3021894 | 1879 | help="Only build tests, skips actual test procedures (flashing etc.)") |
elessair | 0:f269e3021894 | 1880 | |
elessair | 0:f269e3021894 | 1881 | parser.add_argument('--parallel', |
elessair | 0:f269e3021894 | 1882 | dest='parallel_test_exec', |
elessair | 0:f269e3021894 | 1883 | default=False, |
elessair | 0:f269e3021894 | 1884 | action="store_true", |
elessair | 0:f269e3021894 | 1885 | help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')
elessair | 0:f269e3021894 | 1886 | |
elessair | 0:f269e3021894 | 1887 | parser.add_argument('--config', |
elessair | 0:f269e3021894 | 1888 | dest='verbose_test_configuration_only', |
elessair | 0:f269e3021894 | 1889 | default=False, |
elessair | 0:f269e3021894 | 1890 | action="store_true", |
elessair | 0:f269e3021894 | 1891 | help='Displays full test specification and MUTs configuration and exits')
elessair | 0:f269e3021894 | 1892 | |
elessair | 0:f269e3021894 | 1893 | parser.add_argument('--loops', |
elessair | 0:f269e3021894 | 1894 | dest='test_loops_list', |
elessair | 0:f269e3021894 | 1895 | type=argparse_many(str), |
elessair | 0:f269e3021894 | 1896 | help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3') |
elessair | 0:f269e3021894 | 1897 | |
elessair | 0:f269e3021894 | 1898 | parser.add_argument('--global-loops', |
elessair | 0:f269e3021894 | 1899 | dest='test_global_loops_value', |
elessair | 0:f269e3021894 | 1900 | type=int, |
elessair | 0:f269e3021894 | 1901 | help='Set global number of test loops per test. Default value is 1')
elessair | 0:f269e3021894 | 1902 | |
elessair | 0:f269e3021894 | 1903 | parser.add_argument('--consolidate-waterfall', |
elessair | 0:f269e3021894 | 1904 | dest='consolidate_waterfall_test', |
elessair | 0:f269e3021894 | 1905 | default=False, |
elessair | 0:f269e3021894 | 1906 | action="store_true", |
elessair | 0:f269e3021894 | 1907 | help='Used with --waterfall argument. Adds only one test to the report, reflecting the outcome of the waterfall test.')
elessair | 0:f269e3021894 | 1908 | |
elessair | 0:f269e3021894 | 1909 | parser.add_argument('-W', '--waterfall', |
elessair | 0:f269e3021894 | 1910 | dest='waterfall_test', |
elessair | 0:f269e3021894 | 1911 | default=False, |
elessair | 0:f269e3021894 | 1912 | action="store_true", |
elessair | 0:f269e3021894 | 1913 | help='Used with --loops or --global-loops arguments. Repeats a test until an OK result occurs and assumes the test passed')
elessair | 0:f269e3021894 | 1914 | |
elessair | 0:f269e3021894 | 1915 | parser.add_argument('-N', '--firmware-name', |
elessair | 0:f269e3021894 | 1916 | dest='firmware_global_name', |
elessair | 0:f269e3021894 | 1917 | help='Set global name for all produced projects. Note: the proper file extension will be added by build scripts')
elessair | 0:f269e3021894 | 1918 | |
elessair | 0:f269e3021894 | 1919 | parser.add_argument('-u', '--shuffle', |
elessair | 0:f269e3021894 | 1920 | dest='shuffle_test_order', |
elessair | 0:f269e3021894 | 1921 | default=False, |
elessair | 0:f269e3021894 | 1922 | action="store_true", |
elessair | 0:f269e3021894 | 1923 | help='Shuffles test execution order') |
elessair | 0:f269e3021894 | 1924 | |
elessair | 0:f269e3021894 | 1925 | parser.add_argument('--shuffle-seed', |
elessair | 0:f269e3021894 | 1926 | dest='shuffle_test_seed', |
elessair | 0:f269e3021894 | 1927 | default=None, |
elessair | 0:f269e3021894 | 1928 | help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)') |
elessair | 0:f269e3021894 | 1929 | |
elessair | 0:f269e3021894 | 1930 | parser.add_argument('-f', '--filter', |
elessair | 0:f269e3021894 | 1931 | dest='general_filter_regex', |
elessair | 0:f269e3021894 | 1932 | type=argparse_many(str), |
elessair | 0:f269e3021894 | 1933 | default=None, |
elessair | 0:f269e3021894 | 1934 | help='For some commands you can use a regular expression filter to narrow down results')
elessair | 0:f269e3021894 | 1935 | |
elessair | 0:f269e3021894 | 1936 | parser.add_argument('--inc-timeout', |
elessair | 0:f269e3021894 | 1937 | dest='extend_test_timeout', |
elessair | 0:f269e3021894 | 1938 | metavar="NUMBER", |
elessair | 0:f269e3021894 | 1939 | type=int, |
elessair | 0:f269e3021894 | 1940 | help='You can increase global timeout for each test by specifying additional test timeout in seconds') |
elessair | 0:f269e3021894 | 1941 | |
elessair | 0:f269e3021894 | 1942 | parser.add_argument('--db', |
elessair | 0:f269e3021894 | 1943 | dest='db_url', |
elessair | 0:f269e3021894 | 1944 | help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'') |
elessair | 0:f269e3021894 | 1945 | |
elessair | 0:f269e3021894 | 1946 | parser.add_argument('-l', '--log', |
elessair | 0:f269e3021894 | 1947 | dest='log_file_name', |
elessair | 0:f269e3021894 | 1948 | help='Log events to an external file (note that not all console entries may be visible in the log file)')
elessair | 0:f269e3021894 | 1949 | |
elessair | 0:f269e3021894 | 1950 | parser.add_argument('--report-html', |
elessair | 0:f269e3021894 | 1951 | dest='report_html_file_name', |
elessair | 0:f269e3021894 | 1952 | help='You can log test suite results in the form of an HTML report')
elessair | 0:f269e3021894 | 1953 | |
elessair | 0:f269e3021894 | 1954 | parser.add_argument('--report-junit', |
elessair | 0:f269e3021894 | 1955 | dest='report_junit_file_name', |
elessair | 0:f269e3021894 | 1956 | help='You can log test suite results in the form of a JUnit-compliant XML report')
elessair | 0:f269e3021894 | 1957 | |
elessair | 0:f269e3021894 | 1958 | parser.add_argument("--report-build", |
elessair | 0:f269e3021894 | 1959 | dest="report_build_file_name", |
elessair | 0:f269e3021894 | 1960 | help="Output the build results to a junit xml file") |
elessair | 0:f269e3021894 | 1961 | |
elessair | 0:f269e3021894 | 1962 | parser.add_argument("--report-text", |
elessair | 0:f269e3021894 | 1963 | dest="report_text_file_name", |
elessair | 0:f269e3021894 | 1964 | help="Output the build results to a text file") |
elessair | 0:f269e3021894 | 1965 | |
elessair | 0:f269e3021894 | 1966 | parser.add_argument('--verbose-skipped', |
elessair | 0:f269e3021894 | 1967 | dest='verbose_skipped_tests', |
elessair | 0:f269e3021894 | 1968 | default=False, |
elessair | 0:f269e3021894 | 1969 | action="store_true", |
elessair | 0:f269e3021894 | 1970 | help='Prints some extra information about skipped tests') |
elessair | 0:f269e3021894 | 1971 | |
elessair | 0:f269e3021894 | 1972 | parser.add_argument('-V', '--verbose-test-result', |
elessair | 0:f269e3021894 | 1973 | dest='verbose_test_result_only', |
elessair | 0:f269e3021894 | 1974 | default=False, |
elessair | 0:f269e3021894 | 1975 | action="store_true", |
elessair | 0:f269e3021894 | 1976 | help='Prints test serial output') |
elessair | 0:f269e3021894 | 1977 | |
elessair | 0:f269e3021894 | 1978 | parser.add_argument('-v', '--verbose', |
elessair | 0:f269e3021894 | 1979 | dest='verbose', |
elessair | 0:f269e3021894 | 1980 | default=False, |
elessair | 0:f269e3021894 | 1981 | action="store_true", |
elessair | 0:f269e3021894 | 1982 | help='Verbose mode (prints some extra information)') |
elessair | 0:f269e3021894 | 1983 | |
elessair | 0:f269e3021894 | 1984 | parser.add_argument('--version', |
elessair | 0:f269e3021894 | 1985 | dest='version', |
elessair | 0:f269e3021894 | 1986 | default=False, |
elessair | 0:f269e3021894 | 1987 | action="store_true", |
elessair | 0:f269e3021894 | 1988 | help='Prints script version and exits') |
elessair | 0:f269e3021894 | 1989 | return parser |
elessair | 0:f269e3021894 | 1990 | |
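A brief usage sketch for the parser factory above; 'test_spec.json' and 'muts_all.json' are hypothetical files that must exist, since the -i and -M options validate their arguments as file paths:

    parser = get_default_test_options_parser()
    opts = parser.parse_args(['-i', 'test_spec.json', '-M', 'muts_all.json', '-j', '4'])
    print opts.test_spec_filename, opts.muts_spec_filename, opts.jobs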
elessair | 0:f269e3021894 | 1991 | def test_path_to_name(path, base): |
elessair | 0:f269e3021894 | 1992 | """Change all slashes in a path into hyphens |
elessair | 0:f269e3021894 | 1993 | This creates a unique cross-platform test name based on the path |
elessair | 0:f269e3021894 | 1994 | This can eventually be overridden by a to-be-determined meta-data mechanism (see the example after this function)"""
elessair | 0:f269e3021894 | 1995 | name_parts = [] |
elessair | 0:f269e3021894 | 1996 | head, tail = os.path.split(relpath(path,base)) |
elessair | 0:f269e3021894 | 1997 | while (tail and tail != "."): |
elessair | 0:f269e3021894 | 1998 | name_parts.insert(0, tail) |
elessair | 0:f269e3021894 | 1999 | head, tail = os.path.split(head) |
elessair | 0:f269e3021894 | 2000 | |
elessair | 0:f269e3021894 | 2001 | return "-".join(name_parts).lower() |
elessair | 0:f269e3021894 | 2002 | |
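For example, with a test case that lives two levels below a TESTS folder (the path itself is illustrative):

    print test_path_to_name('TESTS/mbed_drivers/ticker', '.')   # -> 'tests-mbed_drivers-ticker'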
elessair | 0:f269e3021894 | 2003 | def find_tests(base_dir, target_name, toolchain_name, app_config=None): |
elessair | 0:f269e3021894 | 2004 | """ Finds all tests in a directory recursively |
elessair | 0:f269e3021894 | 2005 | base_dir: path to the directory to scan for tests (ex. 'path/to/project') |
elessair | 0:f269e3021894 | 2006 | target_name: name of the target to use for scanning (ex. 'K64F') |
elessair | 0:f269e3021894 | 2007 | toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM') |
elessair | 0:f269e3021894 | 2008 | app_config: location of a chosen mbed_app.json file (a usage sketch follows this function)
elessair | 0:f269e3021894 | 2010 | """ |
elessair | 0:f269e3021894 | 2011 | |
elessair | 0:f269e3021894 | 2012 | tests = {} |
elessair | 0:f269e3021894 | 2013 | |
elessair | 0:f269e3021894 | 2014 | # Prepare the toolchain |
elessair | 0:f269e3021894 | 2015 | toolchain = prepare_toolchain([base_dir], target_name, toolchain_name, |
elessair | 0:f269e3021894 | 2016 | silent=True, app_config=app_config) |
elessair | 0:f269e3021894 | 2017 | |
elessair | 0:f269e3021894 | 2018 | # Scan the directory for paths to probe for 'TESTS' folders |
elessair | 0:f269e3021894 | 2019 | base_resources = scan_resources([base_dir], toolchain) |
elessair | 0:f269e3021894 | 2020 | |
elessair | 0:f269e3021894 | 2021 | dirs = base_resources.inc_dirs |
elessair | 0:f269e3021894 | 2022 | for directory in dirs: |
elessair | 0:f269e3021894 | 2023 | subdirs = os.listdir(directory) |
elessair | 0:f269e3021894 | 2024 | |
elessair | 0:f269e3021894 | 2025 | # If the directory contains a subdirectory called 'TESTS', scan it for test cases |
elessair | 0:f269e3021894 | 2026 | if 'TESTS' in subdirs: |
elessair | 0:f269e3021894 | 2027 | walk_base_dir = join(directory, 'TESTS') |
elessair | 0:f269e3021894 | 2028 | test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir) |
elessair | 0:f269e3021894 | 2029 | |
elessair | 0:f269e3021894 | 2030 | # Loop through all subdirectories |
elessair | 0:f269e3021894 | 2031 | for d in test_resources.inc_dirs: |
elessair | 0:f269e3021894 | 2032 | |
elessair | 0:f269e3021894 | 2033 | # If the test case folder is not called 'host_tests' and it is |
elessair | 0:f269e3021894 | 2034 | # located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase) |
elessair | 0:f269e3021894 | 2035 | # then add it to the tests |
elessair | 0:f269e3021894 | 2036 | path_depth = get_path_depth(relpath(d, walk_base_dir)) |
elessair | 0:f269e3021894 | 2037 | if path_depth == 2: |
elessair | 0:f269e3021894 | 2038 | test_group_directory_path, test_case_directory = os.path.split(d) |
elessair | 0:f269e3021894 | 2039 | test_group_directory = os.path.basename(test_group_directory_path) |
elessair | 0:f269e3021894 | 2040 | |
elessair | 0:f269e3021894 | 2041 | # Check to make sure the discovered folder is not in a host test directory
elessair | 0:f269e3021894 | 2042 | if test_case_directory != 'host_tests' and test_group_directory != 'host_tests': |
elessair | 0:f269e3021894 | 2043 | test_name = test_path_to_name(d, base_dir) |
elessair | 0:f269e3021894 | 2044 | tests[test_name] = d |
elessair | 0:f269e3021894 | 2045 | |
elessair | 0:f269e3021894 | 2046 | return tests |
elessair | 0:f269e3021894 | 2047 | |
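A hedged usage sketch; the scan directory, target and toolchain names are illustrative, and the exact paths in the returned dictionary depend on how the resources were scanned:

    tests = find_tests('.', 'K64F', 'GCC_ARM')
    # maps generated test names to test case directories, e.g.
    # {'tests-mbed_drivers-ticker': <path to TESTS/mbed_drivers/ticker>, ...}
    for name in sorted(tests):
        print '%s -> %s' % (name, tests[name])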
elessair | 0:f269e3021894 | 2048 | def print_tests(tests, format="list", sort=True): |
elessair | 0:f269e3021894 | 2049 | """Given a dictionary of tests (as returned from "find_tests"), print them |
elessair | 0:f269e3021894 | 2050 | in the specified format""" |
elessair | 0:f269e3021894 | 2051 | if format == "list": |
elessair | 0:f269e3021894 | 2052 | for test_name in sorted(tests.keys()): |
elessair | 0:f269e3021894 | 2053 | test_path = tests[test_name] |
elessair | 0:f269e3021894 | 2054 | print "Test Case:" |
elessair | 0:f269e3021894 | 2055 | print " Name: %s" % test_name |
elessair | 0:f269e3021894 | 2056 | print " Path: %s" % test_path |
elessair | 0:f269e3021894 | 2057 | elif format == "json": |
elessair | 0:f269e3021894 | 2058 | print json.dumps(tests, indent=2) |
elessair | 0:f269e3021894 | 2059 | else: |
elessair | 0:f269e3021894 | 2060 | print "Unknown format '%s'" % format |
elessair | 0:f269e3021894 | 2061 | sys.exit(1) |
elessair | 0:f269e3021894 | 2062 | |
elessair | 0:f269e3021894 | 2063 | def norm_relative_path(path, start): |
elessair | 0:f269e3021894 | 2064 | """This function will create a normalized, relative path. It mimics the |
elessair | 0:f269e3021894 | 2065 | python os.path.relpath function, but also normalizes a Windows-style path
elessair | 0:f269e3021894 | 2066 | that uses backslashes to a Unix-style path that uses forward slashes (see the example after this function)."""
elessair | 0:f269e3021894 | 2067 | path = os.path.normpath(path) |
elessair | 0:f269e3021894 | 2068 | path = os.path.relpath(path, start) |
elessair | 0:f269e3021894 | 2069 | path = path.replace("\\", "/") |
elessair | 0:f269e3021894 | 2070 | return path |
elessair | 0:f269e3021894 | 2071 | |
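For example (the path is illustrative); the same forward-slash result is produced on Windows and Unix hosts:

    print norm_relative_path('./BUILD/tests/K64F/GCC_ARM', '.')   # -> 'BUILD/tests/K64F/GCC_ARM'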
elessair | 0:f269e3021894 | 2072 | |
elessair | 0:f269e3021894 | 2073 | def build_test_worker(*args, **kwargs): |
elessair | 0:f269e3021894 | 2074 | """This is a worker function for the parallel building of tests. The `args` |
elessair | 0:f269e3021894 | 2075 | and `kwargs` are passed directly to `build_project`. It returns a dictionary |
elessair | 0:f269e3021894 | 2076 | with the following structure: |
elessair | 0:f269e3021894 | 2077 | |
elessair | 0:f269e3021894 | 2078 | { |
elessair | 0:f269e3021894 | 2079 | 'result': `True` if no exceptions were thrown, `False` otherwise |
elessair | 0:f269e3021894 | 2080 | 'reason': Instance of exception that was thrown on failure |
elessair | 0:f269e3021894 | 2081 | 'bin_file': Path to the created binary if `build_project` was |
elessair | 0:f269e3021894 | 2082 | successful. Not present otherwise |
elessair | 0:f269e3021894 | 2083 | 'kwargs': The keyword arguments that were passed to `build_project`. |
elessair | 0:f269e3021894 | 2084 | This includes arguments that were modified (ex. report) |
elessair | 0:f269e3021894 | 2085 | } |
elessair | 0:f269e3021894 | 2086 | """ |
elessair | 0:f269e3021894 | 2087 | bin_file = None |
elessair | 0:f269e3021894 | 2088 | ret = { |
elessair | 0:f269e3021894 | 2089 | 'result': False, |
elessair | 0:f269e3021894 | 2090 | 'args': args, |
elessair | 0:f269e3021894 | 2091 | 'kwargs': kwargs |
elessair | 0:f269e3021894 | 2092 | } |
elessair | 0:f269e3021894 | 2093 | |
elessair | 0:f269e3021894 | 2094 | try: |
elessair | 0:f269e3021894 | 2095 | bin_file = build_project(*args, **kwargs) |
elessair | 0:f269e3021894 | 2096 | ret['result'] = True |
elessair | 0:f269e3021894 | 2097 | ret['bin_file'] = bin_file |
elessair | 0:f269e3021894 | 2098 | ret['kwargs'] = kwargs |
elessair | 0:f269e3021894 | 2099 | |
elessair | 0:f269e3021894 | 2100 | except NotSupportedException, e: |
elessair | 0:f269e3021894 | 2101 | ret['reason'] = e |
elessair | 0:f269e3021894 | 2102 | except ToolException, e: |
elessair | 0:f269e3021894 | 2103 | ret['reason'] = e |
elessair | 0:f269e3021894 | 2104 | except KeyboardInterrupt, e: |
elessair | 0:f269e3021894 | 2105 | ret['reason'] = e |
elessair | 0:f269e3021894 | 2106 | except: |
elessair | 0:f269e3021894 | 2107 | # Print unhandled exceptions here |
elessair | 0:f269e3021894 | 2108 | import traceback |
elessair | 0:f269e3021894 | 2109 | traceback.print_exc(file=sys.stdout) |
elessair | 0:f269e3021894 | 2110 | |
elessair | 0:f269e3021894 | 2111 | return ret |
elessair | 0:f269e3021894 | 2112 | |
elessair | 0:f269e3021894 | 2113 | |
elessair | 0:f269e3021894 | 2114 | def build_tests(tests, base_source_paths, build_path, target, toolchain_name, |
elessair | 0:f269e3021894 | 2115 | clean=False, notify=None, verbose=False, jobs=1, macros=None, |
elessair | 0:f269e3021894 | 2116 | silent=False, report=None, properties=None, |
elessair | 0:f269e3021894 | 2117 | continue_on_build_fail=False, app_config=None, |
elessair | 0:f269e3021894 | 2118 | build_profile=None): |
elessair | 0:f269e3021894 | 2119 | """Given the data structure from 'find_tests' and the typical build parameters, |
elessair | 0:f269e3021894 | 2120 | build all the tests |
elessair | 0:f269e3021894 | 2121 | |
elessair | 0:f269e3021894 | 2122 | Returns a tuple of the build result (True or False) followed by the test |
elessair | 0:f269e3021894 | 2123 | build data structure""" |
elessair | 0:f269e3021894 | 2124 | |
elessair | 0:f269e3021894 | 2125 | execution_directory = "." |
elessair | 0:f269e3021894 | 2126 | base_path = norm_relative_path(build_path, execution_directory) |
elessair | 0:f269e3021894 | 2127 | |
elessair | 0:f269e3021894 | 2128 | target_name = target if isinstance(target, str) else target.name |
elessair | 0:f269e3021894 | 2129 | |
elessair | 0:f269e3021894 | 2130 | test_build = { |
elessair | 0:f269e3021894 | 2131 | "platform": target_name, |
elessair | 0:f269e3021894 | 2132 | "toolchain": toolchain_name, |
elessair | 0:f269e3021894 | 2133 | "base_path": base_path, |
elessair | 0:f269e3021894 | 2134 | "baud_rate": 9600, |
elessair | 0:f269e3021894 | 2135 | "binary_type": "bootable", |
elessair | 0:f269e3021894 | 2136 | "tests": {} |
elessair | 0:f269e3021894 | 2137 | } |
elessair | 0:f269e3021894 | 2138 | |
elessair | 0:f269e3021894 | 2139 | result = True |
elessair | 0:f269e3021894 | 2140 | |
elessair | 0:f269e3021894 | 2141 | jobs_count = int(jobs if jobs else cpu_count()) |
elessair | 0:f269e3021894 | 2142 | p = Pool(processes=jobs_count) |
elessair | 0:f269e3021894 | 2143 | results = [] |
elessair | 0:f269e3021894 | 2144 | for test_name, test_path in tests.iteritems(): |
elessair | 0:f269e3021894 | 2145 | test_build_path = os.path.join(build_path, test_path) |
elessair | 0:f269e3021894 | 2146 | src_path = base_source_paths + [test_path] |
elessair | 0:f269e3021894 | 2147 | bin_file = None |
elessair | 0:f269e3021894 | 2148 | test_case_folder_name = os.path.basename(test_path) |
elessair | 0:f269e3021894 | 2149 | |
elessair | 0:f269e3021894 | 2150 | args = (src_path, test_build_path, target, toolchain_name) |
elessair | 0:f269e3021894 | 2151 | kwargs = { |
elessair | 0:f269e3021894 | 2152 | 'jobs': jobs, |
elessair | 0:f269e3021894 | 2153 | 'clean': clean, |
elessair | 0:f269e3021894 | 2154 | 'macros': macros, |
elessair | 0:f269e3021894 | 2155 | 'name': test_case_folder_name, |
elessair | 0:f269e3021894 | 2156 | 'project_id': test_name, |
elessair | 0:f269e3021894 | 2157 | 'report': report, |
elessair | 0:f269e3021894 | 2158 | 'properties': properties, |
elessair | 0:f269e3021894 | 2159 | 'verbose': verbose, |
elessair | 0:f269e3021894 | 2160 | 'app_config': app_config, |
elessair | 0:f269e3021894 | 2161 | 'build_profile': build_profile, |
elessair | 0:f269e3021894 | 2162 | 'silent': True |
elessair | 0:f269e3021894 | 2163 | } |
elessair | 0:f269e3021894 | 2164 | |
elessair | 0:f269e3021894 | 2165 | results.append(p.apply_async(build_test_worker, args, kwargs)) |
elessair | 0:f269e3021894 | 2166 | |
elessair | 0:f269e3021894 | 2167 | p.close() |
elessair | 0:f269e3021894 | 2168 | result = True |
elessair | 0:f269e3021894 | 2169 | itr = 0 |
elessair | 0:f269e3021894 | 2170 | while len(results): |
elessair | 0:f269e3021894 | 2171 | itr += 1 |
elessair | 0:f269e3021894 | 2172 | if itr > 360000: |
elessair | 0:f269e3021894 | 2173 | p.terminate() |
elessair | 0:f269e3021894 | 2174 | p.join() |
elessair | 0:f269e3021894 | 2175 | raise ToolException("Compile did not finish in 60 minutes")
elessair | 0:f269e3021894 | 2176 | else: |
elessair | 0:f269e3021894 | 2177 | sleep(0.01) |
elessair | 0:f269e3021894 | 2178 | pending = 0 |
elessair | 0:f269e3021894 | 2179 | for r in results: |
elessair | 0:f269e3021894 | 2180 | if r.ready() is True: |
elessair | 0:f269e3021894 | 2181 | try: |
elessair | 0:f269e3021894 | 2182 | worker_result = r.get() |
elessair | 0:f269e3021894 | 2183 | results.remove(r) |
elessair | 0:f269e3021894 | 2184 | |
elessair | 0:f269e3021894 | 2185 | # Take report from the kwargs and merge it into existing report |
elessair | 0:f269e3021894 | 2186 | report_entry = worker_result['kwargs']['report'][target_name][toolchain_name] |
elessair | 0:f269e3021894 | 2187 | for test_key in report_entry.keys(): |
elessair | 0:f269e3021894 | 2188 | report[target_name][toolchain_name][test_key] = report_entry[test_key] |
elessair | 0:f269e3021894 | 2189 | |
elessair | 0:f269e3021894 | 2190 | # Set the overall result to a failure if a build failure occurred |
elessair | 0:f269e3021894 | 2191 | if not worker_result['result'] and not isinstance(worker_result['reason'], NotSupportedException): |
elessair | 0:f269e3021894 | 2192 | result = False |
elessair | 0:f269e3021894 | 2193 | break |
elessair | 0:f269e3021894 | 2194 | |
elessair | 0:f269e3021894 | 2195 | # Adding binary path to test build result |
elessair | 0:f269e3021894 | 2196 | if worker_result['result'] and 'bin_file' in worker_result: |
elessair | 0:f269e3021894 | 2197 | bin_file = norm_relative_path(worker_result['bin_file'], execution_directory) |
elessair | 0:f269e3021894 | 2198 | |
elessair | 0:f269e3021894 | 2199 | test_build['tests'][worker_result['kwargs']['project_id']] = { |
elessair | 0:f269e3021894 | 2200 | "binaries": [ |
elessair | 0:f269e3021894 | 2201 | { |
elessair | 0:f269e3021894 | 2202 | "path": bin_file |
elessair | 0:f269e3021894 | 2203 | } |
elessair | 0:f269e3021894 | 2204 | ] |
elessair | 0:f269e3021894 | 2205 | } |
elessair | 0:f269e3021894 | 2206 | |
elessair | 0:f269e3021894 | 2207 | test_key = worker_result['kwargs']['project_id'].upper() |
elessair | 0:f269e3021894 | 2208 | print report[target_name][toolchain_name][test_key][0][0]['output'].rstrip() |
elessair | 0:f269e3021894 | 2209 | print 'Image: %s\n' % bin_file |
elessair | 0:f269e3021894 | 2210 | |
elessair | 0:f269e3021894 | 2211 | except: |
elessair | 0:f269e3021894 | 2212 | if p._taskqueue.queue: |
elessair | 0:f269e3021894 | 2213 | p._taskqueue.queue.clear() |
elessair | 0:f269e3021894 | 2214 | sleep(0.5) |
elessair | 0:f269e3021894 | 2215 | p.terminate() |
elessair | 0:f269e3021894 | 2216 | p.join() |
elessair | 0:f269e3021894 | 2217 | raise |
elessair | 0:f269e3021894 | 2218 | else: |
elessair | 0:f269e3021894 | 2219 | pending += 1 |
elessair | 0:f269e3021894 | 2220 | if pending >= jobs_count: |
elessair | 0:f269e3021894 | 2221 | break |
elessair | 0:f269e3021894 | 2222 | |
elessair | 0:f269e3021894 | 2223 | # Break as soon as possible if there is a failure and we are not |
elessair | 0:f269e3021894 | 2224 | # continuing on build failures |
elessair | 0:f269e3021894 | 2225 | if not result and not continue_on_build_fail: |
elessair | 0:f269e3021894 | 2226 | if p._taskqueue.queue: |
elessair | 0:f269e3021894 | 2227 | p._taskqueue.queue.clear() |
elessair | 0:f269e3021894 | 2228 | sleep(0.5) |
elessair | 0:f269e3021894 | 2229 | p.terminate() |
elessair | 0:f269e3021894 | 2230 | break |
elessair | 0:f269e3021894 | 2231 | |
elessair | 0:f269e3021894 | 2232 | p.join() |
elessair | 0:f269e3021894 | 2233 | |
elessair | 0:f269e3021894 | 2234 | test_builds = {} |
elessair | 0:f269e3021894 | 2235 | test_builds["%s-%s" % (target_name, toolchain_name)] = test_build |
elessair | 0:f269e3021894 | 2236 | |
elessair | 0:f269e3021894 | 2237 | return result, test_builds |
elessair | 0:f269e3021894 | 2238 | |
elessair | 0:f269e3021894 | 2239 | |
elessair | 0:f269e3021894 | 2240 | def test_spec_from_test_builds(test_builds): |
elessair | 0:f269e3021894 | 2241 | return { |
elessair | 0:f269e3021894 | 2242 | "builds": test_builds |
elessair | 0:f269e3021894 | 2243 | } |
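A hedged end-to-end sketch of how the helpers above fit together; the paths, target and toolchain are illustrative, the pre-seeded report dictionary mirrors how build_tests indexes it, and writing the spec to 'test_spec.json' for an external test runner is an assumption rather than something this module mandates:

    tests = find_tests('.', 'K64F', 'GCC_ARM')
    report = {'K64F': {'GCC_ARM': {}}}      # build_tests merges per-test entries into this
    ok, test_builds = build_tests(tests, ['.'], './BUILD/tests', 'K64F', 'GCC_ARM', report=report)
    if ok:
        with open('test_spec.json', 'w') as f:
            json.dump(test_spec_from_test_builds(test_builds), f, indent=2)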