RTOS API example

test.py
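test.py is the mbed SDK test build script. It discovers the tests reachable from the given source paths, builds the non-test sources as a library for the selected target (-m) and toolchain (-t), and then builds each test against that library. It can optionally write a Greentea test spec (--test-spec), a JUnit-format build report (--build-report-junit), and print a static memory usage summary (depth controlled by --stats-depth).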

#! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


TEST BUILD & RUN
"""
import sys
import os
import json
import fnmatch

ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, ROOT)

from tools.config import ConfigException
from tools.test_api import test_path_to_name, find_tests, get_test_config, print_tests, build_tests, test_spec_from_test_builds
import tools.test_configs as TestConfig
from tools.options import get_default_options_parser, extract_profile, extract_mcus
from tools.build_api import build_project, build_library
from tools.build_api import print_build_memory_usage
from tools.build_api import merge_build_data
from tools.targets import TARGET_MAP
from tools.utils import mkdir, ToolException, NotSupportedException, args_error
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type, argparse_lowercase_type, argparse_many
from tools.utils import argparse_dir_not_parent
from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS, TOOLCHAIN_CLASSES
from tools.settings import CLI_COLOR_MAP

if __name__ == '__main__':
    try:
        # Parse Options
        parser = get_default_options_parser(add_app_config=True)

        parser.add_argument("-D",
                            action="append",
                            dest="macros",
                            help="Add a macro definition")

        parser.add_argument("-j", "--jobs",
                            type=int,
                            dest="jobs",
                            default=0,
                            help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")

        parser.add_argument("--source", dest="source_dir",
                            type=argparse_filestring_type,
                            default=None,
                            action="append",
                            help="The source (input) directory (for sources other than tests). Defaults to current directory.")

        parser.add_argument("--build", dest="build_dir",
                            type=argparse_dir_not_parent(ROOT),
                            default=None,
                            help="The build (output) directory")

        parser.add_argument("-l", "--list", action="store_true", dest="list",
                            default=False,
                            help="List (recursively) available tests in order and exit")

        parser.add_argument("-p", "--paths", dest="paths",
                            type=argparse_many(argparse_filestring_type),
                            default=None,
                            help="Limit the tests to those within the specified comma separated list of paths")

        format_choices = ["list", "json"]
        format_default_choice = "list"
        format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (", ".join(format_choices), format_default_choice)
        parser.add_argument("-f", "--format", dest="format",
                            type=argparse_lowercase_type(format_choices, "format"),
                            default=format_default_choice,
                            help=format_help)

        parser.add_argument("--continue-on-build-fail", action="store_true", dest="continue_on_build_fail",
                            default=None,
                            help="Continue trying to build all tests if a build failure occurs")

        # TODO: validate the names instead of just passing through str
        parser.add_argument("-n", "--names", dest="names", type=argparse_many(str),
                            default=None,
                            help="Limit the tests to a comma separated list of names")

        parser.add_argument("--test-config", dest="test_config", type=str,
                            default=None,
                            help="Test config for a module")

        parser.add_argument("--test-spec", dest="test_spec",
                            default=None,
                            help="Destination path for a test spec file that can be used by the Greentea automated test tool")

        parser.add_argument("--build-report-junit", dest="build_report_junit",
                            default=None,
                            help="Destination path for a build report in the JUnit xml format")

        parser.add_argument("--build-data",
                            dest="build_data",
                            default=None,
                            help="Dump build_data to this file")

        parser.add_argument("-v", "--verbose",
                            action="store_true",
                            dest="verbose",
                            default=False,
                            help="Verbose diagnostic output")

        parser.add_argument("--stats-depth",
                            type=int,
                            dest="stats_depth",
                            default=2,
                            help="Depth level for static memory report")

        options = parser.parse_args()

        # Filter tests by path if specified
        if options.paths:
            all_paths = options.paths
        else:
            all_paths = ["."]

        all_tests = {}
        tests = {}
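        # all_tests will hold every test discovered under the given paths;
        # tests, the subset selected by the -n/--names filter below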

        # Target
        if options.mcu is None:
            args_error(parser, "argument -m/--mcu is required")
        mcu = extract_mcus(parser, options)[0]

        # Toolchain
        if options.tool is None:
            args_error(parser, "argument -t/--tool is required")
        toolchain = options.tool[0]
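        # Note: only the first requested target and toolchain are built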

        if not TOOLCHAIN_CLASSES[toolchain].check_executable():
            search_path = TOOLCHAIN_PATHS[toolchain] or "No path set"
            args_error(parser, "Could not find executable for %s.\n"
                               "Currently set search path: %s"
                       % (toolchain, search_path))

        # Assign config file. Precedence: test_config > app_config
        # TODO: merge configs if both given
        if options.test_config:
            config = get_test_config(options.test_config, mcu)
            if not config:
                args_error(parser, "argument --test-config contains invalid path or identifier")
        elif not options.app_config:
            config = TestConfig.get_default_config(mcu)
        else:
            config = options.app_config

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(find_tests(path, mcu, toolchain,
                                        app_config=config))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(fnmatch.fnmatch(testname, name) for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print "[Warning] Test with name '%s' was not found in the available tests" % (name)
        else:
            tests = all_tests

        if options.color:
            # This import happens late to prevent initializing colorization when we don't need it
            import tools.colorize as colorize
            if options.verbose:
                notify = mbedToolchain.print_notify_verbose
            else:
                notify = mbedToolchain.print_notify
            notify = colorize.print_in_color_notifier(CLI_COLOR_MAP, notify)
        else:
            notify = None

        if options.list:
            # Print available tests in order and exit
            print_tests(tests, options.format)
            sys.exit(0)
        else:
            # Build all tests
            if not options.build_dir:
                args_error(parser, "argument --build is required")

            base_source_paths = options.source_dir

            # Default base source path is the current directory
            if not base_source_paths:
                base_source_paths = ['.']

            build_report = {}
            build_properties = {}
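            # build_report and build_properties collect per-build results for
            # the report exporters and the memory usage summary below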

            library_build_success = False
            profile = extract_profile(parser, options, toolchain)
            try:
                # Build sources
                build_library(base_source_paths, options.build_dir, mcu,
                              toolchain, jobs=options.jobs,
                              clean=options.clean, report=build_report,
                              properties=build_properties, name="mbed-build",
                              macros=options.macros, verbose=options.verbose,
                              notify=notify, archive=False,
                              app_config=config,
                              build_profile=profile)

                library_build_success = True
            except ToolException, e:
                # ToolException output is handled by the build log
                pass
            except NotSupportedException, e:
                # NotSupportedException is handled by the build log
                pass
            except Exception, e:
                # Some other exception occurred, print the error message
                print e

            if not library_build_success:
                print "Failed to build library"
            else:
                # Build all the tests
                test_build_success, test_build = build_tests(
                    tests, [options.build_dir], options.build_dir, mcu, toolchain,
                    clean=options.clean,
                    report=build_report,
                    properties=build_properties,
                    macros=options.macros,
                    verbose=options.verbose,
                    notify=notify,
                    jobs=options.jobs,
                    continue_on_build_fail=options.continue_on_build_fail,
                    app_config=config,
                    build_profile=profile,
                    stats_depth=options.stats_depth)

                # If a path to a test spec is provided, write it to a file
                if options.test_spec:
                    test_spec_data = test_spec_from_test_builds(test_build)

                    # Create the target dir for the test spec if necessary
                    # mkdir will not create the dir if it already exists
                    test_spec_dir = os.path.dirname(options.test_spec)
                    if test_spec_dir:
                        mkdir(test_spec_dir)

                    try:
                        with open(options.test_spec, 'w') as f:
                            f.write(json.dumps(test_spec_data, indent=2))
                    except IOError, e:
                        print "[ERROR] Error writing test spec to file"
                        print e

            # If a path for a JUnit-format build report is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
                report_exporter.report_to_file(build_report, options.build_report_junit, test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                print
                print print_build_memory_usage(build_report)

            print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
            status = print_report_exporter.report(build_report)
            if options.build_data:
                merge_build_data(options.build_data, build_report, "test")

            if status:
                sys.exit(0)
            else:
                sys.exit(1)

    except KeyboardInterrupt, e:
        print "\n[CTRL+c] exit"
    except ConfigException, e:
        # Catching ConfigException here to prevent a traceback
        print "[ERROR] %s" % str(e)
    except Exception, e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        print "[ERROR] %s" % str(e)
        sys.exit(1)
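
Example usage (illustrative only; the target, toolchain, and paths below are placeholders, not values taken from this file):

    # List the tests discoverable from the current directory
    python tools/test.py -m K64F -t GCC_ARM --list

    # Build every discovered test into BUILD/tests and write a Greentea test spec
    python tools/test.py -m K64F -t GCC_ARM --build BUILD/tests --test-spec test_spec.json

The generated test spec can then be consumed by the Greentea test runner (for example, mbedgt --test-spec test_spec.json). The script exits with status 0 when the requested builds succeed and 1 otherwise.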