Commit ed401edb authored by bsheedy, committed by Commit Bot

Update VR latency summary format

Updates the VR motion-to-photon latency test's output.json format to
match the newer version used by Telemetry and other script tests. This
is necessary because the chromium_tests recipe was recently updated to
assume that a test is disabled if its results don't include the "tests"
key, which was causing these results not to be uploaded to the dashboard.
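
For reference, after this change the summary written to the isolated
script output looks roughly like the sketch below (a hand-written
illustration of the version-3 JSON test results format, not actual bot
output; the URL and timestamp are made up):

{
  "interrupted": false,
  "path_delimiter": "/",
  "seconds_since_epoch": 1505757600.0,
  "version": 3,
  "tests": {
    "vr_perf.motopho_latency": {
      "http://example.com/webvr": {
        "actual": "PASS",
        "expected": "PASS",
        "is_unexpected": false
      }
    }
  },
  "num_failures_by_type": {
    "PASS": 1
  }
}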

Bug: 
Change-Id: I15102724d3a688bd80b87e583c85dd159cc9feba
Reviewed-on: https://chromium-review.googlesource.com/662879
Reviewed-by: Christopher Grant <cjgrant@chromium.org>
Commit-Queue: Brian Sheedy <bsheedy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#501345}
parent af1b4342
@@ -102,6 +102,7 @@ class WebVrLatencyTest(object):
     self._test_urls = args.urls or DEFAULT_URLS
     assert (self._num_samples > 0),'Number of samples must be greater than 0'
     self._test_results = {}
+    self._test_name = 'vr_perf.motopho_latency'

     # Connect to the Arduino that drives the servos.
     devices = GetTtyDevices(r'ttyACM\d+', [0x2a03, 0x2341])
@@ -144,6 +145,7 @@ class WebVrLatencyTest(object):
       if motopho_thread.failed_iteration:
         num_retries += 1
         if num_retries > MOTOPHO_THREAD_RETRIES:
+          self._ReportSummaryResult(False, url)
           raise RuntimeError(
               'Motopho thread failed more than %d times, aborting' % (
                   MOTOPHO_THREAD_RETRIES))
@@ -151,6 +153,7 @@ class WebVrLatencyTest(object):
       else:
         samples_obtained += 1
       time.sleep(1)
+    self._ReportSummaryResult(True, url)
     self._StoreResults(motopho_thread.latencies, motopho_thread.correlations,
                        url)
     # Leaving old threads around shouldn't cause issues, but clean up just in
@@ -160,6 +163,19 @@ class WebVrLatencyTest(object):
     if motopho_thread.isAlive():
       logging.warning('Motopho thread failed to terminate.')

+  def _ReportSummaryResult(self, passed, url):
+    """Stores pass/fail results for the summary output JSON file.
+
+    Args:
+      passed: Boolean, whether the test passed or not
+      url: The URL that was being tested
+    """
+    self._results_summary[url] = {
+        'actual': 'PASS' if passed else 'FAIL',
+        'expected': 'PASS',
+        'is_unexpected': not passed,
+    }
+
   def _StoreResults(self, latencies, correlations, url):
     """Temporarily stores the results of a test.
@@ -246,7 +262,7 @@ class WebVrLatencyTest(object):
     results = {
       'format_version': '1.0',
-      'benchmark_name': 'vr_perf.motopho_latency',
+      'benchmark_name': self._test_name,
       'benchmark_description': 'Measures the motion-to-photon latency of WebVR',
       'charts': charts,
     }
@@ -5,6 +5,7 @@
 import json
 import logging
 import subprocess
+import time


 class VrPerfTest(object):
   """Base class for all non-Telemetry VR perf tests.
@@ -16,6 +17,8 @@ class VrPerfTest(object):
     super(VrPerfTest, self).__init__()
     self._args = args
     self._test_urls = []
+    self._results_summary = {}
+    self._test_name = ''

   def RunTests(self):
     """Runs some test on all the URLs provided to the test on creation.
@@ -92,6 +95,24 @@ class VrPerfTest(object):
         self._args.isolated_script_test_output):
       logging.warning('Isolated script output file not specified, not saving')
       return
+    results_summary = {
+      'interrupted': False,
+      'path_delimiter': '/',
+      'seconds_since_epoch': time.time(),
+      'version': 3,
+      'tests': {
+        self._test_name: self._results_summary
+      }
+    }
+
+    failure_counts = {}
+    for _, results in self._results_summary.iteritems():
+      if results['actual'] in failure_counts:
+        failure_counts[results['actual']] += 1
+      else:
+        failure_counts[results['actual']] = 1
+    results_summary['num_failures_by_type'] = failure_counts
+
     with file(self._args.isolated_script_test_output, 'w') as outfile:
-      # TODO(bsheedy): Actually check failures/validity
-      json.dump({'failures': [], 'valid': True}, outfile)
+      json.dump(results_summary, outfile)
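
To make the new flow concrete, here is a minimal, self-contained sketch
(Python 2, matching the iteritems usage above; the URLs and results are
invented, not from a real run) that assembles the same summary structure
and shows what lands in num_failures_by_type:

import json
import time

# Hypothetical per-URL entries, shaped like _ReportSummaryResult's output.
results_by_url = {
    'http://test.com/a.html':
        {'actual': 'PASS', 'expected': 'PASS', 'is_unexpected': False},
    'http://test.com/b.html':
        {'actual': 'FAIL', 'expected': 'PASS', 'is_unexpected': True},
}

summary = {
    'interrupted': False,
    'path_delimiter': '/',
    'seconds_since_epoch': time.time(),
    'version': 3,
    # Nesting under the test name is what gives the recipe the top-level
    # "tests" key it now requires.
    'tests': {'vr_perf.motopho_latency': results_by_url},
}

# Tally every result type, not just failures: despite its name,
# num_failures_by_type also carries the PASS count.
failure_counts = {}
for results in results_by_url.itervalues():
    failure_counts[results['actual']] = (
        failure_counts.get(results['actual'], 0) + 1)
summary['num_failures_by_type'] = failure_counts  # {'PASS': 1, 'FAIL': 1}

print json.dumps(summary, indent=2)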