Denislam Valeev / Mbed OS Nucleo_rtos_basic
Embed: (wiki syntax)

« Back to documentation index

Show/hide line numbers test.py Source File

test.py

00001 #! /usr/bin/env python2
00002 """
00003 mbed SDK
00004 Copyright (c) 2011-2013 ARM Limited
00005 
00006 Licensed under the Apache License, Version 2.0 (the "License");
00007 you may not use this file except in compliance with the License.
00008 You may obtain a copy of the License at
00009 
00010     http://www.apache.org/licenses/LICENSE-2.0
00011 
00012 Unless required by applicable law or agreed to in writing, software
00013 distributed under the License is distributed on an "AS IS" BASIS,
00014 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00015 See the License for the specific language governing permissions and
00016 limitations under the License.
00017 
00018 
00019 TEST BUILD & RUN
00020 """
00021 from __future__ import print_function, division, absolute_import
00022 import sys
00023 import os
00024 import json
00025 import fnmatch
00026 
00027 ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
00028 sys.path.insert(0, ROOT)
00029 
00030 from tools.config import ConfigException
00031 from tools.test_api import test_path_to_name, find_tests, get_test_config, print_tests, build_tests, test_spec_from_test_builds
00032 import tools.test_configs as TestConfig
00033 from tools.options import get_default_options_parser, extract_profile, extract_mcus
00034 from tools.build_api import build_project, build_library
00035 from tools.build_api import print_build_memory_usage
00036 from tools.build_api import merge_build_data
00037 from tools.targets import TARGET_MAP
00038 from tools.utils import mkdir, ToolException, NotSupportedException, args_error
00039 from tools.test_exporters import ReportExporter, ResultExporterType
00040 from tools.utils import argparse_filestring_type, argparse_lowercase_type, argparse_many
00041 from tools.utils import argparse_dir_not_parent
00042 from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS, TOOLCHAIN_CLASSES
00043 from tools.settings import CLI_COLOR_MAP
00044 
if __name__ == '__main__':
    try:
        # ---- Command-line interface ------------------------------------
        # Start from the shared mbed option parser (adds -m/--mcu,
        # -t/--tool, --profile, --app-config, --color, --clean, ...).
        parser = get_default_options_parser(add_app_config=True)

        parser.add_argument("-D",
                            action="append",
                            dest="macros",
                            help="Add a macro definition")

        parser.add_argument("-j", "--jobs",
                            type=int,
                            dest="jobs",
                            default=0,
                            help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")

        parser.add_argument("--source", dest="source_dir",
                            type=argparse_filestring_type,
                            default=None,
                            action="append",
                            help="The source (input) directory (for sources other than tests). Defaults to current directory.")

        parser.add_argument("--build", dest="build_dir",
                            type=argparse_dir_not_parent(ROOT),
                            default=None,
                            help="The build (output) directory")

        parser.add_argument("-l", "--list",
                            action="store_true",
                            dest="list",
                            default=False,
                            help="List (recursively) available tests in order and exit")

        parser.add_argument("-p", "--paths",
                            dest="paths",
                            type=argparse_many(argparse_filestring_type),
                            default=None,
                            help="Limit the tests to those within the specified comma separated list of paths")

        format_choices = ["list", "json"]
        format_default_choice = "list"
        format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (", ".join(format_choices), format_default_choice)
        parser.add_argument("-f", "--format",
                            dest="format",
                            type=argparse_lowercase_type(format_choices, "format"),
                            default=format_default_choice,
                            help=format_help)

        parser.add_argument("--continue-on-build-fail",
                            action="store_true",
                            dest="continue_on_build_fail",
                            default=None,
                            help="Continue trying to build all tests if a build failure occurs")

        # TODO validate the names instead of just passing through str
        parser.add_argument("-n", "--names",
                            dest="names",
                            type=argparse_many(str),
                            default=None,
                            help="Limit the tests to a comma separated list of names")

        parser.add_argument("--test-config",
                            dest="test_config",
                            type=str,
                            default=None,
                            help="Test config for a module")

        parser.add_argument("--test-spec",
                            dest="test_spec",
                            default=None,
                            help="Destination path for a test spec file that can be used by the Greentea automated test tool")

        parser.add_argument("--build-report-junit",
                            dest="build_report_junit",
                            default=None,
                            help="Destination path for a build report in the JUnit xml format")

        parser.add_argument("--build-data",
                            dest="build_data",
                            default=None,
                            help="Dump build_data to this file")

        parser.add_argument("-v", "--verbose",
                            action="store_true",
                            dest="verbose",
                            default=False,
                            help="Verbose diagnostic output")

        parser.add_argument("--stats-depth",
                            type=int,
                            dest="stats_depth",
                            default=2,
                            help="Depth level for static memory report")

        options = parser.parse_args()

        # Filter tests by path if specified; default to the current dir.
        if options.paths:
            all_paths = options.paths
        else:
            all_paths = ["."]

        all_tests = {}
        tests = {}

        # Target is mandatory; only the first MCU given is used.
        if options.mcu is None:
            args_error(parser, "argument -m/--mcu is required")
        mcu = extract_mcus(parser, options)[0]

        # Toolchain is mandatory; only the first toolchain given is used.
        if options.tool is None:
            args_error(parser, "argument -t/--tool is required")
        toolchain = options.tool[0]

        if not TOOLCHAIN_CLASSES[toolchain].check_executable():
            search_path = TOOLCHAIN_PATHS[toolchain] or "No path set"
            args_error(parser, "Could not find executable for %s.\n"
                               "Currently set search path: %s"
                       % (toolchain, search_path))

        # Assign config file. Precedence: test_config>app_config
        # TODO: merge configs if both given
        if options.test_config:
            config = get_test_config(options.test_config, mcu)
            if not config:
                args_error(parser, "argument --test-config contains invalid path or identifier")
        elif not options.app_config:
            config = TestConfig.get_default_config(options.source_dir or ['.'], mcu)
        else:
            config = options.app_config

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(find_tests(path, mcu, toolchain,
                                        app_config=config))

        # Filter tests by name if specified (case-insensitive glob match).
        # A pattern that matches nothing only warns; it does not abort.
        if options.names:
            all_names = [name.lower() for name in options.names]

            for name in all_names:
                if any(fnmatch.fnmatch(testname, name) for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print("[Warning] Test with name '%s' was not found in the "
                          "available tests" % (name))
        else:
            tests = all_tests

        if options.color:
            # This import happens late to prevent initializing colorization when we don't need it
            import colorize
            if options.verbose:
                notify = mbedToolchain.print_notify_verbose
            else:
                notify = mbedToolchain.print_notify
            notify = colorize.print_in_color_notifier(CLI_COLOR_MAP, notify)
        else:
            notify = None

        if options.list:
            # Print available tests in order and exit
            print_tests(tests, options.format)
            sys.exit(0)
        else:
            # Build all tests
            if not options.build_dir:
                args_error(parser, "argument --build is required")

            base_source_paths = options.source_dir

            # Default base source path is the current directory
            if not base_source_paths:
                base_source_paths = ['.']

            build_report = {}
            build_properties = {}

            library_build_success = False
            profile = extract_profile(parser, options, toolchain)
            try:
                # Build the non-test sources into a (non-archived) library
                # that the individual test builds will link against.
                build_library(base_source_paths, options.build_dir, mcu,
                              toolchain, jobs=options.jobs,
                              clean=options.clean, report=build_report,
                              properties=build_properties, name="mbed-build",
                              macros=options.macros, verbose=options.verbose,
                              notify=notify, archive=False,
                              app_config=config,
                              build_profile=profile)

                library_build_success = True
            except (ToolException, NotSupportedException):
                # Both exception types are already reported by the build log;
                # leave library_build_success False and fall through.
                pass
            except Exception as e:
                # Some other exception occurred, print the error message
                print(e)

            if not library_build_success:
                print("Failed to build library")
            else:
                # Build all the tests against the library we just built.
                test_build_success, test_build = build_tests(
                        tests, [options.build_dir], options.build_dir, mcu, toolchain,
                        clean=options.clean,
                        report=build_report,
                        properties=build_properties,
                        macros=options.macros,
                        verbose=options.verbose,
                        notify=notify,
                        jobs=options.jobs,
                        continue_on_build_fail=options.continue_on_build_fail,
                        app_config=config,
                        build_profile=profile,
                        stats_depth=options.stats_depth)

                # If a path to a test spec is provided, write it to a file
                if options.test_spec:
                    test_spec_data = test_spec_from_test_builds(test_build)

                    # Create the target dir for the test spec if necessary
                    # mkdir will not create the dir if it already exists
                    test_spec_dir = os.path.dirname(options.test_spec)
                    if test_spec_dir:
                        mkdir(test_spec_dir)

                    try:
                        with open(options.test_spec, 'w') as f:
                            f.write(json.dumps(test_spec_data, indent=2))
                    except IOError as e:
                        print("[ERROR] Error writing test spec to file")
                        print(e)

            # If a path to a JUnit build report spec is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
                report_exporter.report_to_file(build_report, options.build_report_junit, test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                # BUG FIX: this was a bare `print` expression, a silent no-op
                # under `from __future__ import print_function`; call it so
                # the intended blank separator line is actually emitted.
                print()
                print(print_build_memory_usage(build_report))

            print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
            status = print_report_exporter.report(build_report)
            if options.build_data:
                merge_build_data(options.build_data, build_report, "test")

            # Exit 0 only if the printed report considers the build a success.
            if status:
                sys.exit(0)
            else:
                sys.exit(1)

    except KeyboardInterrupt as e:
        print("\n[CTRL+c] exit")
    except ConfigException as e:
        # Catching ConfigException here to prevent a traceback
        print("[ERROR] %s" % str(e))
    except Exception as e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        print("[ERROR] %s" % str(e))
        sys.exit(1)