Rtos API example

upload_results.py Source File

00001 """
00002 mbed SDK
00003 Copyright (c) 2011-2013 ARM Limited
00004 
00005 Licensed under the Apache License, Version 2.0 (the "License");
00006 you may not use this file except in compliance with the License.
00007 You may obtain a copy of the License at
00008 
00009     http://www.apache.org/licenses/LICENSE-2.0
00010 
00011 Unless required by applicable law or agreed to in writing, software
00012 distributed under the License is distributed on an "AS IS" BASIS,
00013 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014 See the License for the specific language governing permissions and
00015 limitations under the License.
00016 """
00017 import sys
00018 import argparse
00019 import xml.etree.ElementTree as ET
00020 import requests
00021 import urlparse
00022 
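# All requests authenticate with the CI API key, sent in the X-Api-Key header.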
def create_headers(args):
    return { 'X-Api-Key': args.api_key }

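# Report the server response for a finished subcommand and exit:
# status 0 for HTTP codes below 400, 2 otherwise.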
def finish_command(command, response):
    print(command, response.status_code, response.reason)
    print(response.text)

    if response.status_code < 400:
        sys.exit(0)
    else:
        sys.exit(2)

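# Register a new build in the 'running' state and print the id returned by the
# server, optionally as a MBED_BUILD_ID=<id> property line.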
def create_build(args):
    build = {}
    build['buildType'] = args.build_type
    build['number'] = args.build_number
    build['source'] = args.build_source
    build['status'] = 'running'

    r = requests.post(urlparse.urljoin(args.url, "api/builds"), headers=create_headers(args), json=build)

    if r.status_code < 400:
        if args.property_file_format:
            print("MBED_BUILD_ID=" + r.text)
        else:
            print(r.text)

        sys.exit(0)
    else:
        sys.exit(2)

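# Mark an existing build as completed.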
def finish_build(args):
    data = {}
    data['status'] = 'completed'

    r = requests.put(urlparse.urljoin(args.url, "api/builds/" + args.build_id), headers=create_headers(args), json=data)
    finish_command('finish-build', r)

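# Promote an existing build by changing its type to 'Release'.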
def promote_build(args):
    data = {}
    data['buildType'] = 'Release'

    r = requests.put(urlparse.urljoin(args.url, "api/builds/" + args.build_id), headers=create_headers(args), json=data)
    finish_command('promote-build', r)

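# Mark an existing build as aborted.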
def abort_build(args):
    data = {}
    data['status'] = 'aborted'

    r = requests.put(urlparse.urljoin(args.url, "api/builds/" + args.build_id), headers=create_headers(args), json=data)
    finish_command('abort-build', r)

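# Parse the given JUnit XML build/test reports and upload the resulting project
# runs to the CI site in batches.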
def add_project_runs(args):
    '''
    --------------------------------------
    Notes on 'project_run_data' structure:
    --------------------------------------
        'projectRuns' - Tree structure used to keep track of what projects have
            been logged in different report files. The tree is organized as follows:

            'projectRuns': {                - Root element of tree

                'hostOs': {                 - Host OS on which project was built/tested
                                                - ex. windows, linux, or mac

                    'platform': {           - Platform for which project was built/tested
                                              (Corresponds to platform names in targets.py)
                                                - ex. K64F, LPC1768, NRF51822, etc.

                        'toolchain': {      - Toolchain with which project was built/tested
                                              (Corresponds to TOOLCHAIN_CLASSES names in toolchains/__init__.py)
                                                - ex. ARM, uARM, GCC_ARM, etc.

                            'project': {    - Project that was built/tested
                                              (Corresponds to test id in tests.py or library id in libraries.py)
                                                - For tests, ex. MBED_A1, MBED_11, DTCT_1, etc.
                                                - For libraries, ex. MBED, RTX, RTOS, etc.

                            },
                            ...
                        },
                        ...
                    },
                    ...
                }
            }

        'platforms_set' - Set of all the platform names mentioned in the given report files

        'vendors_set' - Set of all the vendor names mentioned in the given report files

        'toolchains_set' - Set of all the toolchain names mentioned in the given report files

        'names_set' - Set of all the project names mentioned in the given report files

        'hostOses_set' - Set of all the host OS names given (only supplied via the command line arguments)
    '''
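    # Illustrative shape only (hypothetical values): one project tested on Linux
    # for the K64F target with the GCC_ARM toolchain ends up stored as
    # {'linux': {'K64F': {'GCC_ARM': {'MBED_A1': { ...projectRun fields... }}}}}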

    project_run_data = {}
    project_run_data['projectRuns'] = {}
    project_run_data['platforms_set'] = set()
    project_run_data['vendors_set'] = set()
    project_run_data['toolchains_set'] = set()
    project_run_data['names_set'] = set()
    project_run_data['hostOses_set'] = set()
    project_run_data['hostOses_set'].add(args.host_os)

    if args.build_report:
        add_report(project_run_data, args.build_report, True, args.build_id, args.host_os)

    if args.test_report:
        add_report(project_run_data, args.test_report, False, args.build_id, args.host_os)

    ts_data = format_project_run_data(project_run_data, args.limit)
    total_result = True

    total_parts = len(ts_data)
    print("Uploading project runs in %d parts" % total_parts)

    for index, data in enumerate(ts_data):
        r = requests.post(urlparse.urljoin(args.url, "api/projectRuns"), headers=create_headers(args), json=data)
        print("add-project-runs part %d/%d" % (index + 1, total_parts), r.status_code, r.reason)
        print(r.text)

        if r.status_code >= 400:
            total_result = False

    if total_result:
        print("'add-project-runs' completed successfully")
        sys.exit(0)
    else:
        print("'add-project-runs' failed")
        sys.exit(2)

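# Start an empty submission chunk; finish_ts_data() converts its sets to lists
# before the chunk is posted.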
def prep_ts_data():
    ts_data = {}
    ts_data['projectRuns'] = []
    ts_data['platforms'] = set()
    ts_data['vendors'] = set()
    ts_data['toolchains'] = set()
    ts_data['names'] = set()
    ts_data['hostOses'] = set()
    return ts_data

def finish_ts_data(ts_data, project_run_data):
    ts_data['platforms'] = list(ts_data['platforms'])
    ts_data['vendors'] = list(ts_data['vendors'])
    ts_data['toolchains'] = list(ts_data['toolchains'])
    ts_data['names'] = list(ts_data['names'])
    ts_data['hostOses'] = list(ts_data['hostOses'])

    # Add all vendors to every projectRun submission
    # TODO Either add "vendor" to the "project_run_data"
    #      or remove "vendor" entirely from the viewer
    ts_data['vendors'] = list(project_run_data['vendors_set'])

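# Flatten the projectRuns tree into chunks of at most 'limit' project runs per
# POST request.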
def format_project_run_data(project_run_data, limit):
    all_ts_data = []
    current_limit_count = 0

    ts_data = prep_ts_data()
    ts_data['projectRuns'] = []

    for hostOs_name, hostOs in project_run_data['projectRuns'].iteritems():
        for platform_name, platform in hostOs.iteritems():
            for toolchain_name, toolchain in platform.iteritems():
                for project_name, project in toolchain.iteritems():
                    if current_limit_count >= limit:
                        finish_ts_data(ts_data, project_run_data)
                        all_ts_data.append(ts_data)
                        ts_data = prep_ts_data()
                        current_limit_count = 0

                    ts_data['projectRuns'].append(project)
                    ts_data['platforms'].add(platform_name)
                    ts_data['toolchains'].add(toolchain_name)
                    ts_data['names'].add(project_name)
                    ts_data['hostOses'].add(hostOs_name)
                    current_limit_count += 1

    if current_limit_count > 0:
        finish_ts_data(ts_data, project_run_data)
        all_ts_data.append(ts_data)

    return all_ts_data

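# Walk the hostOs/platform/toolchain/project levels of the tree; return the
# stored project run, or None if any level is missing.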
def find_project_run(projectRuns, project):
    keys = ['hostOs', 'platform', 'toolchain', 'project']

    elem = projectRuns

    for key in keys:
        if project[key] not in elem:
            return None

        elem = elem[project[key]]

    return elem

def add_project_run(projectRuns, project):
    keys = ['hostOs', 'platform', 'toolchain']

    elem = projectRuns

    for key in keys:
        if project[key] not in elem:
            elem[project[key]] = {}

        elem = elem[project[key]]

    elem[project['project']] = project

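# Copy either the build or the test results from a newly parsed project run onto
# the entry already stored in the tree.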
def update_project_run_results(project_to_update, project, is_build):
    if is_build:
        project_to_update['buildPass'] = project['buildPass']
        project_to_update['buildResult'] = project['buildResult']
        project_to_update['buildOutput'] = project['buildOutput']
    else:
        project_to_update['testPass'] = project['testPass']
        project_to_update['testResult'] = project['testResult']
        project_to_update['testOutput'] = project['testOutput']

def update_project_run(projectRuns, project, is_build):
    found_project = find_project_run(projectRuns, project)
    if found_project:
        update_project_run_results(found_project, project, is_build)
    else:
        add_project_run(projectRuns, project)

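# Parse one JUnit XML report and merge each test case (except those skipped with
# message 'SKIP') into the projectRuns tree, recording build or test results
# depending on is_build.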
def add_report(project_run_data, report_file, is_build, build_id, host_os):
    tree = None

    try:
        tree = ET.parse(report_file)
    except Exception:
        print(sys.exc_info()[0])
        print('Invalid path to report: %s' % report_file)
        sys.exit(1)

    test_suites = tree.getroot()

    for test_suite in test_suites:
        platform = ""
        toolchain = ""
        vendor = ""
        for properties in test_suite.findall('properties'):
            for property in properties.findall('property'):
                if property.attrib['name'] == 'target':
                    platform = property.attrib['value']
                    project_run_data['platforms_set'].add(platform)
                elif property.attrib['name'] == 'toolchain':
                    toolchain = property.attrib['value']
                    project_run_data['toolchains_set'].add(toolchain)
                elif property.attrib['name'] == 'vendor':
                    vendor = property.attrib['value']
                    project_run_data['vendors_set'].add(vendor)

        for test_case in test_suite.findall('testcase'):
            projectRun = {}
            projectRun['build'] = build_id
            projectRun['hostOs'] = host_os
            projectRun['platform'] = platform
            projectRun['toolchain'] = toolchain
            projectRun['project'] = test_case.attrib['classname'].split('.')[-1]
            projectRun['vendor'] = vendor

            project_run_data['names_set'].add(projectRun['project'])

            should_skip = False
            skips = test_case.findall('skipped')

            if skips:
                should_skip = skips[0].attrib['message'] == 'SKIP'

            if not should_skip:
                system_outs = test_case.findall('system-out')

                output = ""
                if system_outs:
                    output = system_outs[0].text

                if is_build:
                    projectRun['buildOutput'] = output
                else:
                    projectRun['testOutput'] = output

                errors = test_case.findall('error')
                failures = test_case.findall('failure')
                projectRunPass = None
                result = None

                if errors:
                    projectRunPass = False
                    result = errors[0].attrib['message']
                elif failures:
                    projectRunPass = False
                    result = failures[0].attrib['message']
                elif skips:
                    projectRunPass = True
                    result = skips[0].attrib['message']
                else:
                    projectRunPass = True
                    result = 'OK'

                if is_build:
                    projectRun['buildPass'] = projectRunPass
                    projectRun['buildResult'] = result
                else:
                    projectRun['testPass'] = projectRunPass
                    projectRun['testResult'] = result

                update_project_run(project_run_data['projectRuns'], projectRun, is_build)

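# Command line front end: one subcommand per API operation; each subcommand
# handler exits the process itself.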
def main(arguments):
    # Register and parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--url', required=True, help='url to ci site')
    parser.add_argument('-k', '--api-key', required=True, help='api-key for posting data')

    subparsers = parser.add_subparsers(help='subcommand help')

    create_build_parser = subparsers.add_parser('create-build', help='create a new build')
    create_build_parser.add_argument('-b', '--build-number', required=True, help='build number')
    create_build_parser.add_argument('-T', '--build-type', choices=['Nightly', 'Limited', 'Pull_Request', 'Release_Candidate'], required=True, help='type of build')
    create_build_parser.add_argument('-s', '--build-source', required=True, help='url to source of build')
    create_build_parser.add_argument('-p', '--property-file-format', action='store_true', help='print result in the property file format')
    create_build_parser.set_defaults(func=create_build)

    finish_build_parser = subparsers.add_parser('finish-build', help='finish a running build')
    finish_build_parser.add_argument('-b', '--build-id', required=True, help='build id')
    finish_build_parser.set_defaults(func=finish_build)

    promote_build_parser = subparsers.add_parser('promote-build', help='promote a build to a release')
    promote_build_parser.add_argument('-b', '--build-id', required=True, help='build id')
    promote_build_parser.set_defaults(func=promote_build)

    abort_build_parser = subparsers.add_parser('abort-build', help='abort a running build')
    abort_build_parser.add_argument('-b', '--build-id', required=True, help='build id')
    abort_build_parser.set_defaults(func=abort_build)

    add_project_runs_parser = subparsers.add_parser('add-project-runs', help='add project runs to a build')
    add_project_runs_parser.add_argument('-b', '--build-id', required=True, help='build id')
    add_project_runs_parser.add_argument('-r', '--build-report', required=False, help='path to junit xml build report')
    add_project_runs_parser.add_argument('-t', '--test-report', required=False, help='path to junit xml test report')
    add_project_runs_parser.add_argument('-o', '--host-os', required=True, help='host os on which test was run')
    add_project_runs_parser.add_argument('-l', '--limit', required=False, type=int, default=1000, help='Limit the number of project runs sent at a time to avoid HTTP errors (default is 1000)')
    add_project_runs_parser.set_defaults(func=add_project_runs)

    args = parser.parse_args(arguments)
    args.func(args)

if __name__ == '__main__':
    main(sys.argv[1:])
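
The script is driven entirely through main(arguments), so the subcommands can also be exercised directly from Python rather than from a shell. The sketch below is illustrative only: the CI URL, API key, build ids, and report paths are placeholders, not values defined by this file, and it assumes upload_results.py is importable from the current path.

# Hypothetical driver for upload_results.py; every value below is a placeholder.
import upload_results

URL = 'https://ci.example.com/'   # hypothetical CI endpoint
KEY = 'SECRET_API_KEY'            # hypothetical API key

def run(argv):
    # Each subcommand finishes with sys.exit(), so catch SystemExit when the
    # script is driven from another Python process instead of a shell.
    try:
        upload_results.main(argv)
    except SystemExit as e:
        print('exit code: %s' % e.code)

# Create a build entry; with -p the build id is printed as MBED_BUILD_ID=<id>.
run(['-u', URL, '-k', KEY, 'create-build', '-b', '1234', '-T', 'Nightly',
     '-s', 'https://github.com/ARMmbed/mbed-os', '-p'])

# Attach JUnit build and test reports to build 42, 500 project runs per request.
run(['-u', URL, '-k', KEY, 'add-project-runs', '-b', '42', '-o', 'linux',
     '-r', 'build_report.xml', '-t', 'test_report.xml', '-l', '500'])

# Mark build 42 as completed.
run(['-u', URL, '-k', KEY, 'finish-build', '-b', '42'])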