Commit 4d6dc631 authored by Jamie Madill's avatar Jamie Madill Committed by Commit Bot

Support JSON test results format in Android runner.

The standalone ANGLE GTest-based tests run as isolated scripts.
This change adds support to the Android test_runner.py to store the
result in the "base_test_result" format then convert these base test
results back to the JSON test result format.

Bug: 931731
Bug: angleproject:3162
Change-Id: I4d05902db0ce60c259d7127776eafc5d7864b43e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2407181
Commit-Queue: Jamie Madill <jmadill@chromium.org>
Reviewed-by: Dirk Pranke <dpranke@google.com>
Reviewed-by: Andrew Grieve <agrieve@chromium.org>
Cr-Commit-Position: refs/heads/master@{#806735}
parent 6cc12ccb
......@@ -3,6 +3,7 @@
# found in the LICENSE file.
import HTMLParser
import json
import logging
import os
import re
......@@ -254,6 +255,29 @@ def ParseGTestXML(xml_content):
return results
def ParseGTestJSON(json_content):
  """Parse results in the JSON Test Results format.

  Args:
    json_content: JSON Test Results Format text, or None/empty string.

  Returns:
    A list of base_test_result.BaseTestResult, one per leaf test entry
    (a node carrying both 'expected' and 'actual' keys).
  """
  results = []
  if not json_content:
    return results

  json_data = json.loads(json_content)

  # The 'tests' value is a trie: interior dicts nest test-name components,
  # leaves carry 'expected'/'actual'. Walk it with an explicit stack.
  # Materialize with list() so pop() and += work on Python 3, where
  # dict.items() returns a view; .items() (not .iteritems()) keeps this
  # working on both Python 2 and Python 3.
  openstack = list(json_data['tests'].items())
  while openstack:
    name, value = openstack.pop()
    if 'expected' in value and 'actual' in value:
      # Any non-PASS actual status (FAIL, CRASH, TIMEOUT, ...) maps to FAIL.
      result_type = (base_test_result.ResultType.PASS
                     if value['actual'] == 'PASS' else
                     base_test_result.ResultType.FAIL)
      results.append(base_test_result.BaseTestResult(name, result_type))
    else:
      # Interior node: descend, joining name components with '.'.
      openstack += [("%s.%s" % (name, k), v) for k, v in value.items()]

  return results
def TestNameWithoutDisabledPrefix(test_name):
"""Modify the test name without disabled prefix if prefix 'DISABLED_' or
'FLAKY_' presents.
......@@ -281,6 +305,7 @@ class GtestTestInstance(test_instance.TestInstance):
self._extract_test_list_from_filter = args.extract_test_list_from_filter
self._filter_tests_lock = threading.Lock()
self._gs_test_artifacts_bucket = args.gs_test_artifacts_bucket
self._isolated_script_test_output = args.isolated_script_test_output
self._isolated_script_test_perf_output = (
args.isolated_script_test_perf_output)
self._shard_timeout = args.shard_timeout
......@@ -427,6 +452,10 @@ class GtestTestInstance(test_instance.TestInstance):
def gtest_filter(self):
  """Returns the gtest filter value stored on this test instance."""
  return self._gtest_filter
@property
def isolated_script_test_output(self):
  """Path for JSON Test Results Format output, as given on the command line.

  Presumably None when --isolated-script-test-output was not passed — the
  value comes straight from args; confirm against the arg parser default.
  """
  return self._isolated_script_test_output
@property
def isolated_script_test_perf_output(self):
  """Path for perf (chartjson) results output, as given on the command line."""
  return self._isolated_script_test_perf_output
......
......@@ -216,6 +216,48 @@ class GtestTestInstanceTests(unittest.TestCase):
actual = gtest_test_instance.ParseGTestXML(None)
self.assertEquals([], actual)
def testParseGTestJSON_none(self):
  # No content at all should parse to an empty result list.
  self.assertEquals([], gtest_test_instance.ParseGTestJSON(None))
def testParseGTestJSON_example(self):
  # A representative JSON Test Results Format payload with one nested,
  # passing test (plus artifact metadata, which the parser ignores).
  json_input = """
      {
        "tests": {
          "mojom_tests": {
            "parse": {
              "ast_unittest": {
                "ASTTest": {
                  "testNodeBase": {
                    "expected": "PASS",
                    "actual": "PASS",
                    "artifacts": {
                      "screenshot": ["screenshots/page.png"]
                    }
                  }
                }
              }
            }
          }
        },
        "interrupted": false,
        "path_delimiter": ".",
        "version": 3,
        "seconds_since_epoch": 1406662283.764424,
        "num_failures_by_type": {
          "FAIL": 0,
          "PASS": 1
        },
        "artifact_types": {
          "screenshot": "image/png"
        }
      }"""
  parsed = gtest_test_instance.ParseGTestJSON(json_input)
  self.assertEquals(1, len(parsed))
  result = parsed[0]
  # The leaf's full dotted path becomes the test name.
  self.assertEquals('mojom_tests.parse.ast_unittest.ASTTest.testNodeBase',
                    result.GetName())
  self.assertEquals(base_test_result.ResultType.PASS, result.GetType())
def testTestNameWithoutDisabledPrefix_disabled(self):
test_name_list = [
'A.DISABLED_B',
......
......@@ -621,10 +621,15 @@ class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
tombstones.ClearAllTombstones(device)
test_perf_output_filename = next(self._test_perf_output_filenames)
if self._test_instance.isolated_script_test_output:
suffix = '.json'
else:
suffix = '.xml'
with device_temp_file.DeviceTempFile(
adb=device.adb,
dir=self._delegate.ResultsDirectory(device),
suffix='.xml') as device_tmp_results_file:
suffix=suffix) as device_tmp_results_file:
with contextlib_ext.Optional(
device_temp_file.NamedDeviceTemporaryDirectory(
adb=device.adb, dir='/sdcard/'),
......@@ -641,9 +646,13 @@ class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
if self._test_instance.gs_test_artifacts_bucket:
flags.append('--test_artifacts_dir=%s' % test_artifacts_dir.name)
if self._test_instance.isolated_script_test_output:
flags.append('--isolated-script-test-output=%s' %
device_tmp_results_file.name)
if test_perf_output_filename:
flags.append('--isolated_script_test_perf_output=%s'
% isolated_script_test_perf_output.name)
flags.append('--isolated-script-test-perf-output=%s' %
isolated_script_test_perf_output.name)
logging.info('flags:')
for f in flags:
......@@ -658,24 +667,27 @@ class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
if self._test_instance.enable_xml_result_parsing:
try:
gtest_xml = device.ReadFile(
device_tmp_results_file.name,
as_root=True)
except device_errors.CommandFailedError as e:
logging.warning(
'Failed to pull gtest results XML file %s: %s',
device_tmp_results_file.name,
str(e))
gtest_xml = device.ReadFile(device_tmp_results_file.name)
except device_errors.CommandFailedError:
logging.exception('Failed to pull gtest results XML file %s',
device_tmp_results_file.name)
gtest_xml = None
if self._test_instance.isolated_script_test_output:
try:
gtest_json = device.ReadFile(device_tmp_results_file.name)
except device_errors.CommandFailedError:
logging.exception('Failed to pull gtest results JSON file %s',
device_tmp_results_file.name)
gtest_json = None
if test_perf_output_filename:
try:
device.PullFile(isolated_script_test_perf_output.name,
test_perf_output_filename)
except device_errors.CommandFailedError as e:
logging.warning(
'Failed to pull chartjson results %s: %s',
isolated_script_test_perf_output.name, str(e))
except device_errors.CommandFailedError:
logging.exception('Failed to pull chartjson results %s',
isolated_script_test_perf_output.name)
test_artifacts_url = self._UploadTestArtifacts(device,
test_artifacts_dir)
......@@ -695,6 +707,8 @@ class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
# TODO(jbudorick): Transition test scripts away from parsing stdout.
if self._test_instance.enable_xml_result_parsing:
results = gtest_test_instance.ParseGTestXML(gtest_xml)
elif self._test_instance.isolated_script_test_output:
results = gtest_test_instance.ParseGTestJSON(gtest_json)
else:
results = gtest_test_instance.ParseGTestOutput(
output, self._test_instance.symbolizer, device.product_cpu_abi)
......
......@@ -6,6 +6,7 @@ import collections
import itertools
import json
import logging
import time
from pylib.base import base_test_result
......@@ -111,6 +112,58 @@ def GenerateResultsDict(test_run_results, global_tags=None):
}
def GenerateJsonTestResultFormatDict(test_run_results):
  """Create a results dict from |test_run_results| suitable for writing to JSON.

  Args:
    test_run_results: a list of base_test_result.TestRunResults objects
        (entries may themselves be lists of TestRunResults).

  Returns:
    A results dict that mirrors the standard JSON Test Results Format.
  """
  tests = {}
  # Literal order (FAIL before PASS) preserved for stable JSON key order.
  status_counts = {'FAIL': 0, 'PASS': 0}

  for entry in test_run_results:
    if isinstance(entry, list):
      run_iter = itertools.chain.from_iterable(t.GetAll() for t in entry)
    else:
      run_iter = entry.GetAll()

    for result in run_iter:
      # Build the nested trie keyed by dotted name components.
      node = tests
      for component in result.GetName().split('.'):
        node = node.setdefault(component, {})

      node['expected'] = 'PASS'
      status = ('PASS' if result.GetType() == base_test_result.ResultType.PASS
                else 'FAIL')
      node['actual'] = status
      status_counts[status] += 1

      duration = result.GetDuration()
      if duration != 0:
        node['time'] = duration

  # Fill in required top-level fields of the format.
  return {
      'interrupted': False,
      'num_failures_by_type': status_counts,
      'path_delimiter': '.',
      'seconds_since_epoch': time.time(),
      'tests': tests,
      'version': 3,
  }
def GenerateJsonResultsFile(test_run_result, file_path, global_tags=None,
**kwargs):
"""Write |test_run_result| to JSON.
......@@ -129,6 +182,21 @@ def GenerateJsonResultsFile(test_run_result, file_path, global_tags=None,
logging.info('Generated json results file at %s', file_path)
def GenerateJsonTestResultFormatFile(test_run_result, file_path, **kwargs):
  """Write |test_run_result| to JSON.

  This uses the official Chromium Test Results Format.

  Args:
    test_run_result: a base_test_result.TestRunResults object.
    file_path: The path to the JSON file to write.
  """
  results_dict = GenerateJsonTestResultFormatDict(test_run_result)
  with open(file_path, 'w') as json_result_file:
    # json.dump serializes straight into the file handle; extra kwargs
    # (e.g. indent) are forwarded to the encoder.
    json.dump(results_dict, json_result_file, **kwargs)
  logging.info('Generated json results file at %s', file_path)
def ParseResultsFromJson(json_results):
"""Creates a list of BaseTestResult objects from JSON.
......
......@@ -202,6 +202,40 @@ class JsonResultsTest(unittest.TestCase):
self.assertTrue('output_snippet_base64' in test_iteration_result)
self.assertEquals('', test_iteration_result['output_snippet_base64'])
def testGenerateJsonTestResultFormatDict_passedResult(self):
  # A passing result should produce a nested trie with expected == actual.
  run_results = base_test_result.TestRunResults()
  run_results.AddResult(
      base_test_result.BaseTestResult('test.package.TestName',
                                      base_test_result.ResultType.PASS))

  results_dict = json_results.GenerateJsonTestResultFormatDict([run_results])
  tests = results_dict['tests']
  self.assertEquals(1, len(tests))
  self.assertEquals(1, len(tests['test']))
  self.assertEquals(1, len(tests['test']['package']))
  leaf = tests['test']['package']['TestName']
  self.assertEquals('PASS', leaf['expected'])
  self.assertEquals('PASS', leaf['actual'])
def testGenerateJsonTestResultFormatDict_failedResult(self):
  # A failing result still records expected 'PASS' but actual 'FAIL'.
  run_results = base_test_result.TestRunResults()
  run_results.AddResult(
      base_test_result.BaseTestResult('test.package.TestName',
                                      base_test_result.ResultType.FAIL))

  results_dict = json_results.GenerateJsonTestResultFormatDict([run_results])
  tests = results_dict['tests']
  self.assertEquals(1, len(tests))
  self.assertEquals(1, len(tests['test']))
  self.assertEquals(1, len(tests['test']['package']))
  leaf = tests['test']['package']['TestName']
  self.assertEquals('PASS', leaf['expected'])
  self.assertEquals('FAIL', leaf['actual'])
# Allow running this test module directly; verbosity=2 prints each test name.
if __name__ == '__main__':
  unittest.main(verbosity=2)
......@@ -227,6 +227,12 @@ def AddCommonOptions(parser):
dest='run_disabled', action='store_true',
help='Also run disabled tests if applicable.')
# This is currently only implemented for gtests.
parser.add_argument('--isolated-script-test-output',
help='If present, store test results on this path.')
parser.add_argument('--isolated-script-test-perf-output',
help='If present, store chartjson results on this path.')
AddTestLauncherOptions(parser)
......@@ -348,9 +354,6 @@ def AddGTestOptions(parser):
'--app-data-file-dir',
help='Host directory to which app data files will be'
' saved. Used with --app-data-file.')
parser.add_argument(
'--isolated-script-test-perf-output',
help='If present, store chartjson results on this path.')
parser.add_argument(
'--delete-stale-data',
dest='delete_stale_data', action='store_true',
......@@ -835,6 +838,8 @@ def RunTestsInPlatformMode(args, result_sink_client=None):
finally:
if args.json_results_file and os.path.exists(json_file.name):
shutil.move(json_file.name, args.json_results_file)
elif args.isolated_script_test_output and os.path.exists(json_file.name):
shutil.move(json_file.name, args.isolated_script_test_output)
else:
os.remove(json_file.name)
......@@ -846,10 +851,16 @@ def RunTestsInPlatformMode(args, result_sink_client=None):
global_results_tags.add('UNRELIABLE_RESULTS')
raise
finally:
json_results.GenerateJsonResultsFile(
all_raw_results, json_file.name,
global_tags=list(global_results_tags),
indent=2)
if args.isolated_script_test_output:
json_results.GenerateJsonTestResultFormatFile(all_raw_results,
json_file.name,
indent=2)
else:
json_results.GenerateJsonResultsFile(
all_raw_results,
json_file.name,
global_tags=list(global_results_tags),
indent=2)
@contextlib.contextmanager
def upload_logcats_file():
......@@ -953,7 +964,8 @@ def RunTestsInPlatformMode(args, result_sink_client=None):
str(tot_tests),
str(iteration_count))
if args.local_output or not local_utils.IsOnSwarming():
if (args.local_output or not local_utils.IsOnSwarming()
) and not args.isolated_script_test_output:
with out_manager.ArchivedTempfile(
'test_results_presentation.html',
'test_results_presentation',
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment