Commit 67e9e613 authored by behdad, committed by Commit Bot

Define output paths separately for the rerun case

The output paths to test_results should be separate for the rerun and
non-rerun cases.

Bug: chromium:1088104
Change-Id: I54b6ba26254b37af3c9df0f44ec9d43ea140e52f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2224819
Reviewed-by: Sadrul Chowdhury <sadrul@chromium.org>
Reviewed-by: John Chen <johnchen@chromium.org>
Commit-Queue: Behdad Bakhshinategh <behdadb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#773741}
parent 776a75bf
@@ -124,12 +124,19 @@ class RenderingRepresentativePerfTest(object):
     self.benchmark = self.options.benchmarks
     out_dir_path = os.path.dirname(self.options.isolated_script_test_output)
-    self.output_path = os.path.join(
-        out_dir_path, self.benchmark, 'test_results.json')
-    self.results_path = os.path.join(
-        out_dir_path, self.benchmark, 'perf_results.csv')
     re_run_output_dir = os.path.join(out_dir_path, 're_run_failures')
+    self.output_path = {
+        True: os.path.join(
+            re_run_output_dir, self.benchmark, 'test_results.json'),
+        False: os.path.join(out_dir_path, self.benchmark, 'test_results.json')
+    }
+    self.results_path = {
+        True: os.path.join(
+            re_run_output_dir, self.benchmark, 'perf_results.csv'),
+        False: os.path.join(out_dir_path, self.benchmark, 'perf_results.csv')
+    }
     re_run_test_output = os.path.join(re_run_output_dir,
         os.path.basename(self.options.isolated_script_test_output))
     re_run_test_perf_output = os.path.join(re_run_output_dir,
@@ -236,11 +243,11 @@ class RenderingRepresentativePerfTest(object):
           METRIC_NAME, measured_avg, upper_limit_avg))

   def interpret_run_benchmark_results(self, rerun=False):
-    with open(self.output_path, 'r+') as resultsFile:
+    with open(self.output_path[rerun], 'r+') as resultsFile:
       initialOut = json.load(resultsFile)
       self.result_recorder[rerun].set_tests(initialOut)

-      with open(self.results_path) as csv_file:
+      with open(self.results_path[rerun]) as csv_file:
         csv_obj = csv.DictReader(csv_file)
         values_per_story = self.parse_csv_results(csv_obj)
@@ -283,7 +290,7 @@ class RenderingRepresentativePerfTest(object):
         self.return_code
       ) = self.result_recorder[False].get_output(self.return_code)

-      with open(self.output_path, 'r+') as resultsFile:
+      with open(self.output_path[rerun], 'r+') as resultsFile:
         json.dump(finalOut, resultsFile, indent=4)

     with open(self.options.isolated_script_test_output, 'w') as outputFile:
       json.dump(finalOut, outputFile, indent=4)
...
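For reference, a minimal standalone sketch of the pattern this change introduces: one dict per output artifact, keyed by the boolean rerun flag, so callers select the right file with output_path[rerun]. The directory and benchmark names below are illustrative placeholders, not the actual bot configuration (the real script derives them from --isolated-script-test-output and --benchmarks).

    import os

    # Placeholder values standing in for the script's command-line options.
    out_dir_path = '/tmp/out'
    benchmark = 'rendering.desktop'
    re_run_output_dir = os.path.join(out_dir_path, 're_run_failures')

    # Boolean-keyed dicts: True -> rerun paths, False -> first-run paths.
    output_path = {
        True: os.path.join(re_run_output_dir, benchmark, 'test_results.json'),
        False: os.path.join(out_dir_path, benchmark, 'test_results.json'),
    }
    results_path = {
        True: os.path.join(re_run_output_dir, benchmark, 'perf_results.csv'),
        False: os.path.join(out_dir_path, benchmark, 'perf_results.csv'),
    }

    for rerun in (False, True):
        # Each pass reads and writes its own files, so a rerun of failed
        # stories no longer collides with the first run's results.
        print(rerun, '->', output_path[rerun], results_path[rerun])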