Commit 71b865f4 authored by Stephen Martinis, committed by Commit Bot

gtest perf tests: Output well formed test results json

This CL changes the run_gtest_perf_test.py script to output well formed
test results json.

Bug: 855234
Change-Id: Ia5ce6e258865e34a2aa37635c014cd54efa62796
Reviewed-on: https://chromium-review.googlesource.com/1112635
Commit-Queue: Stephen Martinis <martiniss@chromium.org>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Cr-Commit-Position: refs/heads/master@{#570943}
parent 62cc354d
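
For reference, the "well formed test results json" mentioned above is the JSON Test Results Format (version 3) that the new output_json dict in the diff below follows. As a hedged illustration only (not taken from this CL; the test names and timestamp are invented), a run with one passing and one failing test would serialize roughly like this:

import json

# Hypothetical example of the JSON Test Results Format (version 3) emitted
# after this change. Test names and the timestamp are invented; only the key
# layout mirrors the output_json built in the diff below.
example_output = {
    'version': 3,
    'interrupted': False,
    'path_delimiter': '/',
    'seconds_since_epoch': 1529900000.0,
    'num_failures_by_type': {
        'PASS': 1,
        'FAIL': 1,
    },
    'tests': {
        'PerfSuite.FastCase': {'expected': 'PASS', 'actual': 'PASS'},
        'PerfSuite.SlowCase': {
            'expected': 'PASS',
            'actual': 'FAIL',
            'unexpected': True,
        },
    },
}

print(json.dumps(example_output, indent=2))
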
@@ -32,6 +32,7 @@ import json
 import os
 import shutil
 import sys
+import time
 import tempfile
 import traceback
@@ -108,12 +109,16 @@ def execute_perf_test(args, rest_args):
   env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

   rc = 0
+  start_time = time.time()
+
+  with common.temporary_file() as results_path:
     try:
       executable = rest_args[0]
       extra_flags = []
       if len(rest_args) > 1:
         extra_flags = rest_args[1:]

+      extra_flags.append('--test-launcher-summary-output=%s' % results_path)
       # These flags are to make sure that test output perf metrics in the log.
       if not '--verbose' in extra_flags:
         extra_flags.append('--verbose')
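
The added --test-launcher-summary-output flag asks the gtest test launcher to write its machine-readable run summary to results_path as JSON. As a rough, hedged sketch (based on general knowledge of the launcher's summary format rather than anything in this CL, so treat field names as an assumption), that file looks approximately like:

# Hedged sketch (an assumption, not part of this CL): approximate shape of the
# gtest launcher summary written to results_path. Field names and values are
# illustrative only.
example_launcher_summary = {
    'per_iteration_data': [{
        'PerfSuite.FastCase': [{'status': 'SUCCESS', 'elapsed_time_ms': 12}],
        'PerfSuite.SlowCase': [{'status': 'FAILURE', 'elapsed_time_ms': 873}],
    }],
}
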
@@ -147,14 +152,36 @@ def execute_perf_test(args, rest_args):
       traceback.print_exc()
       rc = 1

-  valid = (rc == 0)
-  failures = [] if valid else ['(entire test suite)']
+    with open(results_path) as f:
+      test_results = common.get_gtest_summary_passes(json.load(f))
+
   output_json = {
-      'valid': valid,
-      'failures': failures,
+      'version': 3,
+      'interrupted': False,
+      'path_delimiter': '/',
+      'seconds_since_epoch': start_time,
+      'num_failures_by_type': {
+          'PASS': sum(1 for success in test_results.values() if success),
+          'FAIL': sum(1 for success in test_results.values() if not success),
+      },
+      'tests': {
+          test: test_result_entry(success) for (
+              test, success) in test_results.items()
+      }
   }

   return rc, charts, output_json


+def test_result_entry(success):
+  test = {
+      'expected': 'PASS',
+      'actual': 'PASS' if success else 'FAIL',
+  }
+  if not success:
+    test['unexpected'] = True
+  return test
+
+
 # This is not really a "script test" so does not need to manually add
 # any additional compile targets.
 def main_compile_targets(args):
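
common.get_gtest_summary_passes() is used in the hunk above purely as a black box that turns the loaded launcher summary into a dict mapping each test name to a pass/fail boolean. The helper below is a minimal sketch of that contract under the per_iteration_data assumption sketched earlier; it is not Chromium's actual implementation in testing/scripts/common.py.

def sketch_get_gtest_summary_passes(summary_json):
  """Minimal sketch (not Chromium's code) of the contract the diff relies on:
  map each test name to True iff it had at least one successful run."""
  passes = {}
  for iteration in summary_json.get('per_iteration_data', []):
    for test_name, runs in iteration.items():
      succeeded = any(run.get('status') == 'SUCCESS' for run in runs)
      # Keep the most optimistic result seen across iterations.
      passes[test_name] = passes.get(test_name, False) or succeeded
  return passes

Given such a mapping, the num_failures_by_type counts and the per-test entries produced by test_result_entry() in the hunk above follow directly.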