Commit f1240efe authored by Emily Hanley, committed by Commit Bot

Adding some timing info to process perf results.

We need to analyze the bottlenecks in the upload to the dashboard.
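
To make those bottlenecks visible, the change wraps each phase of the merge script (merging JSON test results, analyzing per-benchmark results, generating log streams, uploading to the dashboard) in wall-clock timing calls and reports the elapsed time through a new print_duration helper. A minimal sketch of that pattern follows; do_work and the 'example step' label are placeholders rather than code from this CL, and print() with parentheses is used so the snippet runs on both Python 2 and 3, whereas the script itself uses Python 2 print statements.

import time

def print_duration(step, start, end):
  # Same signature as the helper added in this CL; %d truncates to whole seconds.
  print('Duration of %s: %d seconds' % (step, end - start))

def do_work():
  # Placeholder for one of the instrumented phases (e.g. merging JSON results
  # or uploading to the perf dashboard).
  time.sleep(1.5)

begin_time = time.time()
do_work()
end_time = time.time()
print_duration('example step', begin_time, end_time)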

Bug: 713357
Change-Id: If763ac8689798158c7e7bcc6587646737894fb81
Reviewed-on: https://chromium-review.googlesource.com/1104302
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Commit-Queue: Emily Hanley <eyaich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#568032}
parent f7a90b89
@@ -9,6 +9,7 @@ import os
import shutil
import sys
import tempfile
+import time
import uuid

from core import oauth_api

@@ -104,6 +105,7 @@ def _merge_json_output(output_json, jsons_to_merge, extra_links):
    extra_links: a (key, value) map in which keys are the human-readable strings
      which describe the data, and value is logdog url that contain the data.
  """
+  begin_time = time.time()
  merged_results = results_merger.merge_test_results(jsons_to_merge)
  # Only append the perf results links if present

@@ -113,11 +115,14 @@ def _merge_json_output(output_json, jsons_to_merge, extra_links):
  with open(output_json, 'w') as f:
    json.dump(merged_results, f)
+  end_time = time.time()
+  print_duration('Merging json test results', begin_time, end_time)
  return 0


def _handle_perf_json_test_results(
    benchmark_directory_map, test_results_list):
+  begin_time = time.time()
  benchmark_enabled_map = {}
  for benchmark_name, directories in benchmark_directory_map.iteritems():
    for directory in directories:

@@ -148,6 +153,8 @@ def _handle_perf_json_test_results(
      print 'Benchmark %s disabled' % benchmark_name
    benchmark_enabled_map[benchmark_name] = enabled
+  end_time = time.time()
+  print_duration('Analyzing perf json test results', begin_time, end_time)
  return benchmark_enabled_map

@@ -157,7 +164,7 @@ def _generate_unique_logdog_filename(name_prefix):

def _handle_perf_logs(benchmark_directory_map, extra_links):
  """ Upload benchmark logs to logdog and add a page entry for them. """
+  begin_time = time.time()
  benchmark_logs_links = {}
  for benchmark_name, directories in benchmark_directory_map.iteritems():

@@ -177,9 +184,12 @@ def _handle_perf_logs(benchmark_directory_map, extra_links):
                             indent=4, separators=(',', ': ')),
      content_type=JSON_CONTENT_TYPE)
  extra_links['Benchmarks logs'] = logdog_stream
+  end_time = time.time()
+  print_duration('Generating perf log streams', begin_time, end_time)


def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
+  begin_time = time.time()
  with open(benchmarks_shard_map_file) as f:
    benchmarks_shard_data = json.load(f)
  logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')

@@ -188,12 +198,13 @@ def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
                             indent=4, separators=(',', ': ')),
      content_type=JSON_CONTENT_TYPE)
  extra_links['Benchmarks shard map'] = logdog_stream
+  end_time = time.time()
+  print_duration('Generating benchmark shard map stream', begin_time, end_time)


def _get_benchmark_name(directory):
  return basename(directory).replace(" benchmark", "")


def _process_perf_results(output_json, configuration_name,
                          service_account_file,
                          build_properties, task_output_dir,

@@ -210,6 +221,7 @@ def _process_perf_results(output_json, configuration_name,
  or dashboard json format and an output.json file containing the json test
  results for the benchmark.
  """
+  begin_time = time.time()
  return_code = 0
  directory_list = [
      f for f in listdir(task_output_dir)

@@ -266,6 +278,8 @@ def _process_perf_results(output_json, configuration_name,
  # Finally, merge all test results json, add the extra links and write out to
  # output location
  _merge_json_output(output_json, test_results_list, extra_links)
+  end_time = time.time()
+  print_duration('Total process_perf_results', begin_time, end_time)
  return return_code


def _merge_chartjson_results(chartjson_dicts):

@@ -315,6 +329,7 @@ def _handle_perf_results(
  Returns:
    0 if this upload to perf dashboard succesfully, 1 otherwise.
  """
+  begin_time = time.time()
  tmpfile_dir = tempfile.mkdtemp('resultscache')
  try:
    # Upload all eligible benchmarks to the perf dashboard

@@ -360,6 +375,8 @@ def _handle_perf_results(
      return 0
  finally:
    shutil.rmtree(tmpfile_dir)
+    end_time = time.time()
+    print_duration('Uploading results to perf dashboard', begin_time, end_time)


def _upload_and_write_perf_data_to_logfile(benchmark_name, results_file,

@@ -403,6 +420,8 @@ def _upload_and_write_perf_data_to_logfile(benchmark_name, results_file,
  return upload_failure


+def print_duration(step, start, end):
+  print 'Duration of %s: %d seconds' % (step, end-start)


def main():
  """ See collect_task.collect_task for more on the merge script API. """
...