Commit 71b865f4 authored by Stephen Martinis, committed by Commit Bot

gtest perf tests: Output well-formed test results JSON

This CL changes the run_gtest_perf_test.py script to output well-formed
test results JSON (the JSON Test Results format, version 3).

Bug: 855234
Change-Id: Ia5ce6e258865e34a2aa37635c014cd54efa62796
Reviewed-on: https://chromium-review.googlesource.com/1112635
Commit-Queue: Stephen Martinis <martiniss@chromium.org>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Cr-Commit-Position: refs/heads/master@{#570943}
parent 62cc354d
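
For context, here is a minimal sketch of the well-formed output this CL produces. The field names match the diff below; the test names, counts, and timestamp are purely illustrative:

# Illustrative only: the shape of the JSON Test Results (version 3) dict
# that execute_perf_test() returns after this CL. Test names and values
# are made up, not from a real run.
example_output_json = {
    'version': 3,
    'interrupted': False,
    'path_delimiter': '/',
    'seconds_since_epoch': 1529540000.0,  # start_time from time.time()
    'num_failures_by_type': {'PASS': 1, 'FAIL': 1},
    'tests': {
        'SuiteName.PassingTest': {'expected': 'PASS', 'actual': 'PASS'},
        'SuiteName.FailingTest': {
            'expected': 'PASS', 'actual': 'FAIL', 'unexpected': True,
        },
    },
}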
@@ -32,6 +32,7 @@ import json
 import os
 import shutil
 import sys
+import time
 import tempfile
 import traceback
@@ -108,53 +109,79 @@ def execute_perf_test(args, rest_args):
   env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
 
   rc = 0
-  try:
-    executable = rest_args[0]
-    extra_flags = []
-    if len(rest_args) > 1:
-      extra_flags = rest_args[1:]
-
-    # These flags are to make sure that test output perf metrics in the log.
-    if not '--verbose' in extra_flags:
-      extra_flags.append('--verbose')
-    if not '--test-launcher-print-test-stdio=always' in extra_flags:
-      extra_flags.append('--test-launcher-print-test-stdio=always')
-    if args.isolated_script_test_filter:
-      filter_list = common.extract_filter_list(
-          args.isolated_script_test_filter)
-      extra_flags.append('--gtest_filter=' + ':'.join(filter_list))
-
-    if IsWindows():
-      executable = '.\%s.exe' % executable
-    else:
-      executable = './%s' % executable
-    with common.temporary_file() as tempfile_path:
-      env['CHROME_HEADLESS'] = '1'
-      cmd = [executable] + extra_flags
-
-      if args.xvfb:
-        rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
-      else:
-        rc = test_env.run_command_with_output(cmd, env=env,
-                                              stdoutfile=tempfile_path)
-
-      # Now get the correct json format from the stdout to write to the perf
-      # results file
-      results_processor = (
-          generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
-      charts = results_processor.GenerateJsonResults(tempfile_path)
-  except Exception:
-    traceback.print_exc()
-    rc = 1
-
-  valid = (rc == 0)
-  failures = [] if valid else ['(entire test suite)']
+  start_time = time.time()
+  with common.temporary_file() as results_path:
+    try:
+      executable = rest_args[0]
+      extra_flags = []
+      if len(rest_args) > 1:
+        extra_flags = rest_args[1:]
+
+      extra_flags.append('--test-launcher-summary-output=%s' % results_path)
+
+      # These flags are to make sure that test output perf metrics in the log.
+      if not '--verbose' in extra_flags:
+        extra_flags.append('--verbose')
+      if not '--test-launcher-print-test-stdio=always' in extra_flags:
+        extra_flags.append('--test-launcher-print-test-stdio=always')
+      if args.isolated_script_test_filter:
+        filter_list = common.extract_filter_list(
+            args.isolated_script_test_filter)
+        extra_flags.append('--gtest_filter=' + ':'.join(filter_list))
+
+      if IsWindows():
+        executable = '.\%s.exe' % executable
+      else:
+        executable = './%s' % executable
+      with common.temporary_file() as tempfile_path:
+        env['CHROME_HEADLESS'] = '1'
+        cmd = [executable] + extra_flags
+
+        if args.xvfb:
+          rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
+        else:
+          rc = test_env.run_command_with_output(cmd, env=env,
+                                                stdoutfile=tempfile_path)
+
+        # Now get the correct json format from the stdout to write to the perf
+        # results file
+        results_processor = (
+            generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
+        charts = results_processor.GenerateJsonResults(tempfile_path)
+    except Exception:
+      traceback.print_exc()
+      rc = 1
+
+    with open(results_path) as f:
+      test_results = common.get_gtest_summary_passes(json.load(f))
 
   output_json = {
-      'valid': valid,
-      'failures': failures,
+      'version': 3,
+      'interrupted': False,
+      'path_delimiter': '/',
+      'seconds_since_epoch': start_time,
+      'num_failures_by_type': {
+        'PASS': sum(1 for success in test_results.values() if success),
+        'FAIL': sum(1 for success in test_results.values() if not success),
+      },
+      'tests': {
+        test: test_result_entry(success) for (
+            test, success) in test_results.items()
+      }
   }
 
   return rc, charts, output_json
 
 
+def test_result_entry(success):
+  test = {
+      'expected': 'PASS',
+      'actual': 'PASS' if success else 'FAIL',
+  }
+  if not success:
+    test['unexpected'] = True
+  return test
+
+
 # This is not really a "script test" so does not need to manually add
 # any additional compile targets.
 def main_compile_targets(args):
......
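
The per-test pass/fail data comes from the JSON summary that the test launcher writes to --test-launcher-summary-output, which the script now parses via common.get_gtest_summary_passes(). To make the aggregation concrete, here is a self-contained sketch; the test_results mapping is a hypothetical stand-in for that helper's return value (test name -> passed bool):

# Sketch of the aggregation added in this CL, runnable on its own.
def test_result_entry(success):
  # As in the diff above: every test is expected to PASS, and a failure
  # is flagged as unexpected.
  test = {
      'expected': 'PASS',
      'actual': 'PASS' if success else 'FAIL',
  }
  if not success:
    test['unexpected'] = True
  return test

# Hypothetical per-test results (test name -> passed bool).
test_results = {'Suite.Passes': True, 'Suite.Fails': False}

num_failures_by_type = {
    'PASS': sum(1 for success in test_results.values() if success),
    'FAIL': sum(1 for success in test_results.values() if not success),
}
tests = {name: test_result_entry(ok) for name, ok in test_results.items()}

assert num_failures_by_type == {'PASS': 1, 'FAIL': 1}
assert tests['Suite.Fails'] == {
    'expected': 'PASS', 'actual': 'FAIL', 'unexpected': True}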