Commit 1063d5e1 authored by dpranke@chromium.org

Modify telemetry tests to write test-results-compatible json files.

This will allow us to intelligently de-apply patches, re-run the
tests, and compare the results on the bot. A follow-on patch will
also enable uploading the results to test-results.appspot.com.

R=dtu@chromium.org
BUG=323212

Review URL: https://codereview.chromium.org/407243004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@284871 0039d316-1c4b-4281-b951-d872f2087c98
parent 5428e443
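
For reference, the new module below (imported by the test runner as
telemetry.unittest.json_results) serializes results in the Chromium JSON
test results format. An editor's sketch of the emitted file, with
hypothetical test names and values, derived from the _FullResults code
below:

  {
    "version": 3,
    "interrupted": false,
    "path_delimiter": ".",
    "seconds_since_epoch": 1405641600.0,
    "revision": "284871",
    "num_failures_by_type": {"Failure": 1, "Pass": 1},
    "tests": {
      "foo": {
        "FooTest": {
          "test_a": {"expected": "PASS", "actual": "PASS"},
          "test_b": {"expected": "PASS", "actual": "FAIL"}
        }
      }
    }
  }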
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import time
import unittest


# TODO(dpranke): This code is largely cloned from, and redundant with,
# src/mojo/tools/run_mojo_python_tests.py, and also duplicates logic
# in test-webkitpy and run-webkit-tests. We should consolidate the
# python TestResult parsing/converting/uploading code as much as possible.


def AddOptions(parser):
  parser.add_option('--metadata', action='append', default=[],
                    help=('optional key=value metadata that will be stored '
                          'in the results files (can be used for revision '
                          'numbers, etc.)'))
  parser.add_option('--write-full-results-to', metavar='FILENAME',
                    action='store',
                    help='path to write the list of full results to.')

def ValidateArgs(parser, args):
  for val in args.metadata:
    if '=' not in val:
      parser.error('Error: malformed metadata "%s"' % val)
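
# Editor's sketch (not part of the original commit): a harness would wire
# the two helpers above into its optparse parser roughly like so:
#
#   import optparse
#   parser = optparse.OptionParser()
#   AddOptions(parser)
#   args, _ = parser.parse_args(['--metadata', 'revision=284871',
#                                '--write-full-results-to', 'results.json'])
#   ValidateArgs(parser, args)  # calls parser.error() on malformed key=value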

def WriteandUploadResultsIfNecessary(args, test_suite, result):
  if not args.write_full_results_to:
    return

  full_results = _FullResults(test_suite, result, args.metadata)
  with open(args.write_full_results_to, 'w') as fp:
    json.dump(full_results, fp, indent=2)
    fp.write("\n")

  # TODO(dpranke): upload to test-results.appspot.com if requested as well.

TEST_SEPARATOR = '.'


def _FullResults(suite, result, metadata):
  """Convert the unittest results to the Chromium JSON test result format.

  This matches run-webkit-tests (the layout tests) and the flakiness
  dashboard.
  """
  full_results = {}
  full_results['interrupted'] = False
  full_results['path_delimiter'] = TEST_SEPARATOR
  full_results['version'] = 3
  full_results['seconds_since_epoch'] = time.time()
  for md in metadata:
    key, val = md.split('=', 1)
    full_results[key] = val

  all_test_names = _AllTestNames(suite)
  failed_test_names = _FailedTestNames(result)

  full_results['num_failures_by_type'] = {
      'Failure': len(failed_test_names),
      'Pass': len(all_test_names) - len(failed_test_names),
  }

  full_results['tests'] = {}
  for test_name in all_test_names:
    value = {
        'expected': 'PASS',
        'actual': 'FAIL' if (test_name in failed_test_names) else 'PASS',
    }
    _AddPathToTrie(full_results['tests'], test_name, value)

  return full_results
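
# Editor's note (illustrative): for a hypothetical suite containing
# foo.FooTest.test_a (passing) and foo.FooTest.test_b (failing), the
# 'tests' trie built above comes out as:
#
#   {'foo': {'FooTest': {
#       'test_a': {'expected': 'PASS', 'actual': 'PASS'},
#       'test_b': {'expected': 'PASS', 'actual': 'FAIL'}}}}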

def _AllTestNames(suite):
  test_names = []
  # Recurse into nested suites; _tests is protected.
  # pylint: disable=W0212
  for test in suite._tests:
    if isinstance(test, unittest.suite.TestSuite):
      test_names.extend(_AllTestNames(test))
    else:
      test_names.append(test.id())
  return test_names


def _FailedTestNames(result):
  return set(test.id() for test, _ in result.failures + result.errors)

def _AddPathToTrie(trie, path, value):
  if TEST_SEPARATOR not in path:
    trie[path] = value
    return
  directory, rest = path.split(TEST_SEPARATOR, 1)
  if directory not in trie:
    trie[directory] = {}
  _AddPathToTrie(trie[directory], rest, value)
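
# Editor's sketch (not part of the original commit): running this module
# directly exercises the conversion end-to-end on a tiny suite and prints
# the resulting JSON; _DemoTest and the metadata value are hypothetical.
if __name__ == '__main__':
  class _DemoTest(unittest.TestCase):
    def test_passes(self):
      pass

    def test_fails(self):
      self.fail('intentional failure')

  demo_suite = unittest.TestLoader().loadTestsFromTestCase(_DemoTest)
  demo_result = unittest.TestResult()
  demo_suite.run(demo_result)
  print(json.dumps(
      _FullResults(demo_suite, demo_result, ['revision=284871']), indent=2))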

@@ -10,6 +10,7 @@ from telemetry.core import browser_finder
 from telemetry.core import browser_options
 from telemetry.core import command_line
 from telemetry.core import discover
+from telemetry.unittest import json_results
 from telemetry.unittest import output_formatter
@@ -125,6 +126,7 @@ class RunTestsCommand(command_line.OptparseCommand):
                       dest='run_disabled_tests',
                       action='store_true', default=False,
                       help='Ignore @Disabled and @Enabled restrictions.')
+    json_results.AddOptions(parser)

   @classmethod
   def ProcessCommandLineArgs(cls, parser, args):
@@ -141,6 +143,8 @@ class RunTestsCommand(command_line.OptparseCommand):
                    'Re-run with --browser=list to see '
                    'available browser types.' % args.browser_type)
+    json_results.ValidateArgs(parser, args)
+
   def Run(self, args):
     possible_browser = browser_finder.FindBrowser(args)
     test_suite = DiscoverTests(
@@ -149,6 +153,9 @@ class RunTestsCommand(command_line.OptparseCommand):
     runner = output_formatter.TestRunner()
     result = runner.run(
         test_suite, config.output_formatters, args.repeat_count, args)
+
+    json_results.WriteandUploadResultsIfNecessary(args, test_suite, result)
+
     return len(result.failures_and_errors)

   @classmethod