Nicolas Borla / Mbed OS BBR_1Ebene
Embed: (wiki syntax)

« Back to documentation index

Show/hide line numbers test.py Source File

test.py

00001 #! /usr/bin/env python2
00002 """
00003 mbed SDK
00004 Copyright (c) 2011-2013 ARM Limited
00005 
00006 Licensed under the Apache License, Version 2.0 (the "License");
00007 you may not use this file except in compliance with the License.
00008 You may obtain a copy of the License at
00009 
00010     http://www.apache.org/licenses/LICENSE-2.0
00011 
00012 Unless required by applicable law or agreed to in writing, software
00013 distributed under the License is distributed on an "AS IS" BASIS,
00014 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00015 See the License for the specific language governing permissions and
00016 limitations under the License.
00017 
00018 
00019 TEST BUILD & RUN
00020 """
00021 from __future__ import print_function, division, absolute_import
00022 import sys
00023 import os
00024 import json
00025 import fnmatch
00026 
00027 ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
00028 sys.path.insert(0, ROOT)
00029 
00030 from tools.config import ConfigException, Config
00031 from tools.test_api import test_path_to_name, find_tests, get_test_config, print_tests, build_tests, test_spec_from_test_builds
00032 from tools.test_configs import get_default_config
00033 from tools.options import get_default_options_parser, extract_profile, extract_mcus
00034 from tools.build_api import build_project, build_library
00035 from tools.build_api import print_build_memory_usage
00036 from tools.build_api import merge_build_data
00037 from tools.targets import TARGET_MAP
00038 from tools.notifier.term import TerminalNotifier
00039 from tools.utils import mkdir, ToolException, NotSupportedException, args_error
00040 from tools.test_exporters import ReportExporter, ResultExporterType
00041 from tools.utils import argparse_filestring_type, argparse_lowercase_type, argparse_many
00042 from tools.utils import argparse_dir_not_parent
00043 from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS, TOOLCHAIN_CLASSES
00044 from tools.settings import CLI_COLOR_MAP
00045 
if __name__ == '__main__':
    # CLI entry point: discover mbed Greentea tests under the given paths,
    # build the non-test sources as a library, then build every selected
    # test against it, optionally emitting a test spec / JUnit report /
    # build-data dump.  Exits 0 on success, 1 on build failure.
    try:
        # Parse options
        parser = get_default_options_parser(add_app_config=True)

        parser.add_argument("-D",
                            action="append",
                            dest="macros",
                            help="Add a macro definition")

        parser.add_argument("-j", "--jobs",
                            type=int,
                            dest="jobs",
                            default=0,
                            help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")

        parser.add_argument("--source", dest="source_dir",
                            type=argparse_filestring_type,
                            default=None, help="The source (input) directory (for sources other than tests). Defaults to current directory.", action="append")

        parser.add_argument("--build", dest="build_dir", type=argparse_dir_not_parent(ROOT),
                            default=None, help="The build (output) directory")

        parser.add_argument("-l", "--list", action="store_true", dest="list",
                            default=False, help="List (recursively) available tests in order and exit")

        parser.add_argument("-p", "--paths", dest="paths",
                            type=argparse_many(argparse_filestring_type),
                            default=None, help="Limit the tests to those within the specified comma separated list of paths")

        format_choices = ["list", "json"]
        format_default_choice = "list"
        format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (", ".join(format_choices), format_default_choice)
        parser.add_argument("-f", "--format", dest="format",
                            type=argparse_lowercase_type(format_choices, "format"),
                            default=format_default_choice, help=format_help)

        parser.add_argument("--continue-on-build-fail", action="store_true", dest="continue_on_build_fail",
                            default=None, help="Continue trying to build all tests if a build failure occurs")

        # TODO validate the names instead of just passing through str
        parser.add_argument("-n", "--names", dest="names", type=argparse_many(str),
                            default=None, help="Limit the tests to a comma separated list of names")

        parser.add_argument("--test-config", dest="test_config", type=str,
                            default=None, help="Test config for a module")

        parser.add_argument("--test-spec", dest="test_spec",
                            default=None, help="Destination path for a test spec file that can be used by the Greentea automated test tool")

        parser.add_argument("--build-report-junit", dest="build_report_junit",
                            default=None, help="Destination path for a build report in the JUnit xml format")
        parser.add_argument("--build-data",
                            dest="build_data",
                            default=None,
                            help="Dump build_data to this file")

        parser.add_argument("-v", "--verbose",
                            action="store_true",
                            dest="verbose",
                            default=False,
                            help="Verbose diagnostic output")

        parser.add_argument("--stats-depth",
                            type=int,
                            dest="stats_depth",
                            default=2,
                            help="Depth level for static memory report")

        options = parser.parse_args()

        # Filter tests by path if specified; default to the current directory
        if options.paths:
            all_paths = options.paths
        else:
            all_paths = ["."]

        all_tests = {}  # every discovered test, keyed by test name
        tests = {}      # subset selected by -n/--names (or all, if unset)

        # Target: -m/--mcu is mandatory; only the first MCU listed is used
        if options.mcu is None:
            args_error(parser, "argument -m/--mcu is required")
        mcu = extract_mcus(parser, options)[0]

        # Toolchain: -t/--tool is mandatory; only the first tool listed is used
        if options.tool is None:
            args_error(parser, "argument -t/--tool is required")
        toolchain = options.tool[0]

        # Fail early if the toolchain binaries are not reachable
        if not TOOLCHAIN_CLASSES[toolchain].check_executable():
            search_path = TOOLCHAIN_PATHS[toolchain] or "No path set"
            args_error(parser, "Could not find executable for %s.\n"
                               "Currently set search path: %s"
                       % (toolchain, search_path))

        # Assign config file. Precedence: test_config > app_config
        # TODO: merge configs if both given
        if options.test_config:
            config = get_test_config(options.test_config, mcu)
            if not config:
                args_error(parser, "argument --test-config contains invalid path or identifier")
        elif options.app_config:
            config = options.app_config
        else:
            config = Config.find_app_config(options.source_dir)

        if not config:
            config = get_default_config(options.source_dir or ['.'], mcu)

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(find_tests(path, mcu, toolchain,
                                        app_config=config))

        # Filter tests by name if specified (case-insensitive fnmatch globs)
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(fnmatch.fnmatch(testname, name) for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    # A non-matching pattern is only a warning, not an error
                    print("[Warning] Test with name '%s' was not found in the "
                          "available tests" % (name))
        else:
            tests = all_tests


        if options.list:
            # Print available tests in order and exit
            print_tests(tests, options.format)
            sys.exit(0)
        else:
            # Build all tests
            if not options.build_dir:
                args_error(parser, "argument --build is required")

            base_source_paths = options.source_dir

            # Default base source path is the current directory
            if not base_source_paths:
                base_source_paths = ['.']

            build_report = {}
            build_properties = {}

            library_build_success = False
            profile = extract_profile(parser, options, toolchain)
            try:
                # Build the non-test sources into a (non-archived) library
                notify = TerminalNotifier(options.verbose)
                build_library(base_source_paths, options.build_dir, mcu,
                              toolchain, jobs=options.jobs,
                              clean=options.clean, report=build_report,
                              properties=build_properties, name="mbed-build",
                              macros=options.macros,
                              notify=notify, archive=False,
                              app_config=config,
                              build_profile=profile)

                library_build_success = True
            except ToolException:
                # ToolException output is handled by the build log
                pass
            except NotSupportedException:
                # NotSupportedException is handled by the build log
                pass
            except Exception as e:
                # Some other exception occurred, print the error message
                print(e)

            if not library_build_success:
                print("Failed to build library")
            else:
                # Build all the tests against the freshly built library
                notify = TerminalNotifier(options.verbose)
                test_build_success, test_build = build_tests(
                    tests, [options.build_dir], options.build_dir, mcu, toolchain,
                    clean=options.clean,
                    report=build_report,
                    properties=build_properties,
                    macros=options.macros,
                    notify=notify,
                    jobs=options.jobs,
                    continue_on_build_fail=options.continue_on_build_fail,
                    app_config=config,
                    build_profile=profile,
                    stats_depth=options.stats_depth)

                # If a path to a test spec is provided, write it to a file
                if options.test_spec:
                    test_spec_data = test_spec_from_test_builds(test_build)

                    # Create the target dir for the test spec if necessary
                    # mkdir will not create the dir if it already exists
                    test_spec_dir = os.path.dirname(options.test_spec)
                    if test_spec_dir:
                        mkdir(test_spec_dir)

                    try:
                        with open(options.test_spec, 'w') as f:
                            f.write(json.dumps(test_spec_data, indent=2))
                    except IOError as e:
                        print("[ERROR] Error writing test spec to file")
                        print(e)

            # If a path to a JUnit build report spec is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
                report_exporter.report_to_file(build_report, options.build_report_junit, test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                # BUG FIX: with `from __future__ import print_function` a bare
                # `print` is a no-op expression — call it to emit the blank line.
                print()
                print(print_build_memory_usage(build_report))

            print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
            status = print_report_exporter.report(build_report)
            if options.build_data:
                merge_build_data(options.build_data, build_report, "test")

            # Exit code mirrors the overall build status
            if status:
                sys.exit(0)
            else:
                sys.exit(1)

    except KeyboardInterrupt:
        print("\n[CTRL+c] exit")
    except ConfigException as e:
        # Catching ConfigException here to prevent a traceback
        print("[ERROR] %s" % str(e))
    except Exception as e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        print("[ERROR] %s" % str(e))
        sys.exit(1)