Important changes to repositories hosted on mbed.com
Mbed-hosted Mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software download the repository Zip archive or clone locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
test.py
00001 #! /usr/bin/env python2 00002 """ 00003 mbed SDK 00004 Copyright (c) 2011-2013 ARM Limited 00005 00006 Licensed under the Apache License, Version 2.0 (the "License"); 00007 you may not use this file except in compliance with the License. 00008 You may obtain a copy of the License at 00009 00010 http://www.apache.org/licenses/LICENSE-2.0 00011 00012 Unless required by applicable law or agreed to in writing, software 00013 distributed under the License is distributed on an "AS IS" BASIS, 00014 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 00015 See the License for the specific language governing permissions and 00016 limitations under the License. 00017 00018 00019 TEST BUILD & RUN 00020 """ 00021 from __future__ import print_function, division, absolute_import 00022 import sys 00023 import os 00024 import json 00025 import fnmatch 00026 00027 ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) 00028 sys.path.insert(0, ROOT) 00029 00030 from tools.config import ConfigException, Config 00031 from tools.test_api import test_path_to_name, find_tests, get_test_config, print_tests, build_tests, test_spec_from_test_builds 00032 from tools.test_configs import get_default_config 00033 from tools.options import get_default_options_parser, extract_profile, extract_mcus 00034 from tools.build_api import build_project, build_library 00035 from tools.build_api import print_build_memory_usage 00036 from tools.build_api import merge_build_data 00037 from tools.targets import TARGET_MAP 00038 from tools.notifier.term import TerminalNotifier 00039 from tools.utils import mkdir, ToolException, NotSupportedException, args_error 00040 from tools.test_exporters import ReportExporter, ResultExporterType 00041 from tools.utils import argparse_filestring_type, argparse_lowercase_type, argparse_many 00042 from tools.utils import argparse_dir_not_parent 00043 from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS, TOOLCHAIN_CLASSES 00044 
from tools.settings import CLI_COLOR_MAP 00045 00046 if __name__ == '__main__': 00047 try: 00048 # Parse Options 00049 parser = get_default_options_parser(add_app_config=True) 00050 00051 parser.add_argument("-D", 00052 action="append", 00053 dest="macros", 00054 help="Add a macro definition") 00055 00056 parser.add_argument("-j", "--jobs", 00057 type=int, 00058 dest="jobs", 00059 default=0, 00060 help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)") 00061 00062 parser.add_argument("--source", dest="source_dir", 00063 type=argparse_filestring_type, 00064 default=None, help="The source (input) directory (for sources other than tests). Defaults to current directory.", action="append") 00065 00066 parser.add_argument("--build", dest="build_dir", type=argparse_dir_not_parent(ROOT), 00067 default=None, help="The build (output) directory") 00068 00069 parser.add_argument("-l", "--list", action="store_true", dest="list", 00070 default=False, help="List (recursively) available tests in order and exit") 00071 00072 parser.add_argument("-p", "--paths", dest="paths", 00073 type=argparse_many(argparse_filestring_type), 00074 default=None, help="Limit the tests to those within the specified comma separated list of paths") 00075 00076 format_choices = ["list", "json"] 00077 format_default_choice = "list" 00078 format_help = "Change the format in which tests are listed. Choices include: %s. 
Default: %s" % (", ".join(format_choices), format_default_choice) 00079 parser.add_argument("-f", "--format", dest="format", 00080 type=argparse_lowercase_type(format_choices, "format"), 00081 default=format_default_choice, help=format_help) 00082 00083 parser.add_argument("--continue-on-build-fail", action="store_true", dest="continue_on_build_fail", 00084 default=None, help="Continue trying to build all tests if a build failure occurs") 00085 00086 #TODO validate the names instead of just passing through str 00087 parser.add_argument("-n", "--names", dest="names", type=argparse_many(str), 00088 default=None, help="Limit the tests to a comma separated list of names") 00089 00090 parser.add_argument("--test-config", dest="test_config", type=str, 00091 default=None, help="Test config for a module") 00092 00093 parser.add_argument("--test-spec", dest="test_spec", 00094 default=None, help="Destination path for a test spec file that can be used by the Greentea automated test tool") 00095 00096 parser.add_argument("--build-report-junit", dest="build_report_junit", 00097 default=None, help="Destination path for a build report in the JUnit xml format") 00098 parser.add_argument("--build-data", 00099 dest="build_data", 00100 default=None, 00101 help="Dump build_data to this file") 00102 00103 parser.add_argument("-v", "--verbose", 00104 action="store_true", 00105 dest="verbose", 00106 default=False, 00107 help="Verbose diagnostic output") 00108 00109 parser.add_argument("--stats-depth", 00110 type=int, 00111 dest="stats_depth", 00112 default=2, 00113 help="Depth level for static memory report") 00114 00115 parser.add_argument("--ignore", dest="ignore", type=argparse_many(str), 00116 default=None, help="Comma separated list of patterns to add to mbedignore (eg. 
./main.cpp)") 00117 00118 options = parser.parse_args() 00119 00120 # Filter tests by path if specified 00121 if options.paths: 00122 all_paths = options.paths 00123 else: 00124 all_paths = ["."] 00125 00126 all_tests = {} 00127 tests = {} 00128 00129 # Target 00130 if options.mcu is None : 00131 args_error(parser, "argument -m/--mcu is required") 00132 mcu = extract_mcus(parser, options)[0] 00133 00134 # Toolchain 00135 if options.tool is None: 00136 args_error(parser, "argument -t/--tool is required") 00137 toolchain = options.tool[0] 00138 00139 if not TOOLCHAIN_CLASSES[toolchain].check_executable(): 00140 search_path = TOOLCHAIN_PATHS[toolchain] or "No path set" 00141 args_error(parser, "Could not find executable for %s.\n" 00142 "Currently set search path: %s" 00143 % (toolchain, search_path)) 00144 00145 # Assign config file. Precedence: test_config>app_config 00146 # TODO: merge configs if both given 00147 if options.test_config: 00148 config = get_test_config(options.test_config, mcu) 00149 if not config: 00150 args_error(parser, "argument --test-config contains invalid path or identifier") 00151 elif options.app_config: 00152 config = options.app_config 00153 else: 00154 config = Config.find_app_config(options.source_dir) 00155 00156 if not config: 00157 config = get_default_config(options.source_dir or ['.'], mcu) 00158 00159 00160 # Find all tests in the relevant paths 00161 for path in all_paths: 00162 all_tests.update(find_tests(path, mcu, toolchain, 00163 app_config=config)) 00164 00165 # Filter tests by name if specified 00166 if options.names: 00167 all_names = options.names 00168 all_names = [x.lower() for x in all_names] 00169 00170 for name in all_names: 00171 if any(fnmatch.fnmatch(testname, name) for testname in all_tests): 00172 for testname, test in all_tests.items(): 00173 if fnmatch.fnmatch(testname, name): 00174 tests[testname] = test 00175 else: 00176 print("[Warning] Test with name '%s' was not found in the " 00177 "available tests" % 
(name)) 00178 else: 00179 tests = all_tests 00180 00181 00182 if options.list: 00183 # Print available tests in order and exit 00184 print_tests(tests, options.format) 00185 sys.exit(0) 00186 else: 00187 # Build all tests 00188 if not options.build_dir: 00189 args_error(parser, "argument --build is required") 00190 00191 base_source_paths = options.source_dir 00192 00193 # Default base source path is the current directory 00194 if not base_source_paths: 00195 base_source_paths = ['.'] 00196 00197 build_report = {} 00198 build_properties = {} 00199 00200 library_build_success = False 00201 profile = extract_profile(parser, options, toolchain) 00202 try: 00203 # Build sources 00204 notify = TerminalNotifier(options.verbose) 00205 build_library(base_source_paths, options.build_dir, mcu, 00206 toolchain, jobs=options.jobs, 00207 clean=options.clean, report=build_report, 00208 properties=build_properties, name="mbed-build", 00209 macros=options.macros, 00210 notify=notify, archive=False, 00211 app_config=config, 00212 build_profile=profile, 00213 ignore=options.ignore) 00214 00215 library_build_success = True 00216 except ToolException as e: 00217 # ToolException output is handled by the build log 00218 pass 00219 except NotSupportedException as e: 00220 # NotSupportedException is handled by the build log 00221 pass 00222 except Exception as e: 00223 # Some other exception occurred, print the error message 00224 print(e) 00225 00226 if not library_build_success: 00227 print("Failed to build library") 00228 else: 00229 # Build all the tests 00230 notify = TerminalNotifier(options.verbose) 00231 test_build_success, test_build = build_tests( 00232 tests, 00233 [os.path.relpath(options.build_dir)], 00234 options.build_dir, 00235 mcu, 00236 toolchain, 00237 clean=options.clean, 00238 report=build_report, 00239 properties=build_properties, 00240 macros=options.macros, 00241 notify=notify, 00242 jobs=options.jobs, 00243 continue_on_build_fail=options.continue_on_build_fail, 
00244 app_config=config, 00245 build_profile=profile, 00246 stats_depth=options.stats_depth, 00247 ignore=options.ignore) 00248 00249 # If a path to a test spec is provided, write it to a file 00250 if options.test_spec: 00251 test_spec_data = test_spec_from_test_builds(test_build) 00252 00253 # Create the target dir for the test spec if necessary 00254 # mkdir will not create the dir if it already exists 00255 test_spec_dir = os.path.dirname(options.test_spec) 00256 if test_spec_dir: 00257 mkdir(test_spec_dir) 00258 00259 try: 00260 with open(options.test_spec, 'w') as f: 00261 f.write(json.dumps(test_spec_data, indent=2)) 00262 except IOError as e: 00263 print("[ERROR] Error writing test spec to file") 00264 print(e) 00265 00266 # If a path to a JUnit build report spec is provided, write it to a file 00267 if options.build_report_junit: 00268 report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build") 00269 report_exporter.report_to_file(build_report, options.build_report_junit, test_suite_properties=build_properties) 00270 00271 # Print memory map summary on screen 00272 if build_report: 00273 print 00274 print(print_build_memory_usage(build_report)) 00275 00276 print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build") 00277 status = print_report_exporter.report(build_report) 00278 if options.build_data: 00279 merge_build_data(options.build_data, build_report, "test") 00280 00281 if status: 00282 sys.exit(0) 00283 else: 00284 sys.exit(1) 00285 00286 except KeyboardInterrupt as e: 00287 print("\n[CTRL+c] exit") 00288 except ConfigException as e: 00289 # Catching ConfigException here to prevent a traceback 00290 print("[ERROR] %s" % str(e)) 00291 except Exception as e: 00292 import traceback 00293 traceback.print_exc(file=sys.stdout) 00294 print("[ERROR] %s" % str(e)) 00295 sys.exit(1)
Generated on Tue Jul 12 2022 12:45:51 by the documentation generator.
