Commit 9eeaba4e authored by Ned Nguyen, committed by Commit Bot

Revert "gtest perf tests: Output well formed test results json"

This reverts commit 71b865f4.

Reason for revert: breaks many perf tests (see https://ci.chromium.org/buildbot/chromium.perf/linux-perf/307)

Original change's description:
> gtest perf tests: Output well formed test results json
> 
> This CL changes the run_gtest_perf_test.py script to output well formed
> test results json.
> 
> Bug: 855234
> Change-Id: Ia5ce6e258865e34a2aa37635c014cd54efa62796
> Reviewed-on: https://chromium-review.googlesource.com/1112635
> Commit-Queue: Stephen Martinis <martiniss@chromium.org>
> Reviewed-by: Dirk Pranke <dpranke@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#570943}

TBR=dpranke@chromium.org,nednguyen@google.com,martiniss@chromium.org,eyaich@chromium.org

Change-Id: Ia0aaff923014438005ebc0a8677c3bbbebb808ac
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 855234
Reviewed-on: https://chromium-review.googlesource.com/1118238
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Cr-Commit-Position: refs/heads/master@{#571083}
parent 3fb17baa
@@ -32,7 +32,6 @@ import json
 import os
 import shutil
 import sys
-import time
 import tempfile
 import traceback
@@ -109,79 +108,53 @@ def execute_perf_test(args, rest_args):
   env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
   rc = 0
-  start_time = time.time()
-  with common.temporary_file() as results_path:
-    try:
-      executable = rest_args[0]
-      extra_flags = []
-      if len(rest_args) > 1:
-        extra_flags = rest_args[1:]
-
-      extra_flags.append('--test-launcher-summary-output=%s' % results_path)
-
-      # These flags are to make sure that test output perf metrics in the log.
-      if not '--verbose' in extra_flags:
-        extra_flags.append('--verbose')
-      if not '--test-launcher-print-test-stdio=always' in extra_flags:
-        extra_flags.append('--test-launcher-print-test-stdio=always')
-      if args.isolated_script_test_filter:
-        filter_list = common.extract_filter_list(
-            args.isolated_script_test_filter)
-        extra_flags.append('--gtest_filter=' + ':'.join(filter_list))
-
-      if IsWindows():
-        executable = '.\%s.exe' % executable
-      else:
-        executable = './%s' % executable
-      with common.temporary_file() as tempfile_path:
-        env['CHROME_HEADLESS'] = '1'
-        cmd = [executable] + extra_flags
-
-        if args.xvfb:
-          rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
-        else:
-          rc = test_env.run_command_with_output(cmd, env=env,
-                                                stdoutfile=tempfile_path)
-
-        # Now get the correct json format from the stdout to write to the perf
-        # results file
-        results_processor = (
-            generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
-        charts = results_processor.GenerateJsonResults(tempfile_path)
-    except Exception:
-      traceback.print_exc()
-      rc = 1
-
-    with open(results_path) as f:
-      test_results = common.get_gtest_summary_passes(json.load(f))
-
-    output_json = {
-        'version': 3,
-        'interrupted': False,
-        'path_delimiter': '/',
-        'seconds_since_epoch': start_time,
-        'num_failures_by_type': {
-          'PASS': sum(1 for success in test_results.values() if success),
-          'FAIL': sum(1 for success in test_results.values() if not success),
-        },
-        'tests': {
-          test: test_result_entry(success) for (
-              test, success) in test_results.items()
-        }
-    }
+  try:
+    executable = rest_args[0]
+    extra_flags = []
+    if len(rest_args) > 1:
+      extra_flags = rest_args[1:]
+
+    # These flags are to make sure that test output perf metrics in the log.
+    if not '--verbose' in extra_flags:
+      extra_flags.append('--verbose')
+    if not '--test-launcher-print-test-stdio=always' in extra_flags:
+      extra_flags.append('--test-launcher-print-test-stdio=always')
+    if args.isolated_script_test_filter:
+      filter_list = common.extract_filter_list(
+          args.isolated_script_test_filter)
+      extra_flags.append('--gtest_filter=' + ':'.join(filter_list))
+
+    if IsWindows():
+      executable = '.\%s.exe' % executable
+    else:
+      executable = './%s' % executable
+    with common.temporary_file() as tempfile_path:
+      env['CHROME_HEADLESS'] = '1'
+      cmd = [executable] + extra_flags
+
+      if args.xvfb:
+        rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
+      else:
+        rc = test_env.run_command_with_output(cmd, env=env,
+                                              stdoutfile=tempfile_path)
+
+      # Now get the correct json format from the stdout to write to the perf
+      # results file
+      results_processor = (
+          generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
+      charts = results_processor.GenerateJsonResults(tempfile_path)
+  except Exception:
+    traceback.print_exc()
+    rc = 1
+
+  valid = (rc == 0)
+  failures = [] if valid else ['(entire test suite)']
+  output_json = {
+      'valid': valid,
+      'failures': failures,
+  }
 
   return rc, charts, output_json
 
-
-def test_result_entry(success):
-  test = {
-    'expected': 'PASS',
-    'actual': 'PASS' if success else 'FAIL',
-  }
-  if not success:
-    test['unexpected'] = True
-  return test
-
 
 # This is not really a "script test" so does not need to manually add
 # any additional compile targets.
 def main_compile_targets(args):
...
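For readers skimming the diff: below is a minimal, illustrative sketch of the two output_json shapes involved. The field names come from the diff above; the concrete test names, counts, and timestamp are made up for illustration and do not appear in the CL.

# Illustrative only: the two result dictionaries produced by execute_perf_test.

# Shape produced by the reverted change, built from the gtest summary output
# (the "well formed test results json", version 3 format):
well_formed_results = {
    'version': 3,
    'interrupted': False,
    'path_delimiter': '/',
    'seconds_since_epoch': 1529971200.0,  # example start timestamp
    'num_failures_by_type': {'PASS': 2, 'FAIL': 1},
    'tests': {
        'SuiteA.TestPasses': {'expected': 'PASS', 'actual': 'PASS'},
        'SuiteA.TestFails': {'expected': 'PASS', 'actual': 'FAIL',
                             'unexpected': True},
    },
}

# Shape restored by this revert: whole-suite pass/fail only.
legacy_results = {
    'valid': True,
    'failures': [],  # or ['(entire test suite)'] when the run failed
}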