Commit 72e94f50 authored by Ned Nguyen, committed by Commit Bot

Add ability to analyze benchmark runtimes for each perf builder.

Sample usage: ./tools/perf/core/retrieve_story_timing.py fetch-benchmark-runtime -c 'Android One Perf' -o benchmark_runtime_android_one.json

Cq-Include-Trybots: master.tryserver.chromium.perf:obbs_fyi
Change-Id: I9b7e42a1c8a74093c5dcb311f5ee11d5875a8946
Reviewed-on: https://chromium-review.googlesource.com/1160762
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#580768}
parent 195ff3d9
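For a rough sense of the result: the output JSON maps each benchmark to its story count and total runtime in seconds, with an 'All benchmarks' summary entry appended (see FetchBenchmarkRuntime in the diff below). A hypothetical example, with a made-up benchmark name and made-up numbers:

$ ./tools/perf/core/retrieve_story_timing.py fetch-benchmark-runtime \
      -c 'Android One Perf' -o benchmark_runtime_android_one.json
$ cat benchmark_runtime_android_one.json
{
    "memory.top_10_mobile": {
        "num_stories": 2,
        "total_runtime_in_seconds": 95.0
    },
    "All benchmarks": {
        "total_runtime_in_seconds": 95.0,
        "num_stories": 2
    }
}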
@@ -4,6 +4,7 @@
 # found in the LICENSE file.
 import argparse
+import collections
 import json
 import subprocess
 import sys
@@ -25,7 +26,7 @@ ORDER BY
 """
-QUERY_LAST_RUNS = """
+QUERY_STORY_AVG_RUNTIME = """
 SELECT
   name,
   ROUND(AVG(time)) AS duration,
@@ -53,6 +54,34 @@ ORDER BY
   name
 """
+
+QUERY_STORY_TOTAL_RUNTIME = """
+SELECT
+  name,
+  ROUND(AVG(time)) AS duration,
+FROM (
+  SELECT
+    run.name AS name,
+    start_time,
+    SUM(run.times) AS time
+  FROM
+    [test-results-hrd:events.test_results]
+  WHERE
+    buildbot_info.builder_name IN ({configuration_names})
+    AND run.time IS NOT NULL
+    AND run.time != 0
+    AND run.is_unexpected IS FALSE
+    AND DATEDIFF(CURRENT_DATE(), DATE(start_time)) < {num_last_days}
+  GROUP BY
+    name,
+    start_time
+  ORDER BY
+    start_time DESC)
+GROUP BY
+  name
+ORDER BY
+  name
+"""


 def _run_query(query):
   args = ["bq", "query", "--format=json", "--max_rows=100000", query]
@@ -73,10 +102,40 @@ def FetchStoryTimingDataForSingleBuild(configurations, build_number):
 def FetchAverageStortyTimingData(configurations, num_last_days):
-  return _run_query(QUERY_LAST_RUNS.format(
+  return _run_query(QUERY_STORY_AVG_RUNTIME.format(
       configuration_names=configurations, num_last_days=num_last_days))
+
+
+def FetchBenchmarkRuntime(configurations, num_last_days):
+  test_total_runtime = _run_query(QUERY_STORY_TOTAL_RUNTIME.format(
+      configuration_names=configurations, num_last_days=num_last_days))
+  benchmarks_data = collections.OrderedDict()
+  total_runtime = 0
+  total_num_stories = 0
+  for item in test_total_runtime:
+    duration = item['duration']
+    test_name = item['name']
+    benchmark_name, _ = test_name.split('/', 1)
+    if not benchmark_name in benchmarks_data:
+      benchmarks_data[benchmark_name] = {
+          'num_stories': 0,
+          'total_runtime_in_seconds': 0,
+      }
+    benchmarks_data[benchmark_name]['num_stories'] += 1
+    total_num_stories += 1
+    benchmarks_data[benchmark_name]['total_runtime_in_seconds'] += (
+        float(duration))
+    total_runtime += float(duration)
+  benchmarks_data['All benchmarks'] = {
+      'total_runtime_in_seconds': total_runtime,
+      'num_stories': total_num_stories
+  }
+  return benchmarks_data
+
+
+_FETCH_BENCHMARK_RUNTIME = 'fetch-benchmark-runtime'
+_FETCH_STORY_RUNTIME = 'fetch-story-runtime'
+
+
 def main(args):
   """
   To run this script, you need to be able to run bigquery in your terminal.
@@ -92,8 +151,10 @@ def main(args):
   """
   parser = argparse.ArgumentParser(
       description='Retrieve story timing from bigquery.')
+  parser.add_argument('action',
+      choices=[_FETCH_BENCHMARK_RUNTIME, _FETCH_STORY_RUNTIME])
   parser.add_argument(
-      '--output-file', action='store', required=True,
+      '--output-file', '-o', action='store', required=True,
       help='The filename to send the bigquery results to.')
   parser.add_argument(
       '--configurations', '-c', action='append', required=True,
@@ -104,11 +165,15 @@ def main(args):
   opts = parser.parse_args(args)
   configurations = str(opts.configurations).strip('[]')

-  if opts.build_number:
-    data = FetchStoryTimingDataForSingleBuild(configurations,
-                                              opts.build_number)
-  else:
-    data = FetchAverageStortyTimingData(configurations, num_last_days=5)
+  if opts.action == _FETCH_BENCHMARK_RUNTIME:
+    data = FetchBenchmarkRuntime(configurations, num_last_days=5)
+  else:
+    if opts.build_number:
+      data = FetchStoryTimingDataForSingleBuild(configurations,
+                                                opts.build_number)
+    else:
+      data = FetchAverageStortyTimingData(configurations, num_last_days=5)

   with open(opts.output_file, 'w') as output_file:
     json.dump(data, output_file, indent = 4, separators=(',', ': '))
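To make the aggregation in FetchBenchmarkRuntime concrete, here is a minimal, self-contained Python sketch of the same grouping applied to hypothetical query rows. In the real script these rows come from bq running QUERY_STORY_TOTAL_RUNTIME, where each name has the form '<benchmark>/<story>' and duration is the per-story runtime in seconds; the rows below are made up for illustration:

import collections

# Hypothetical rows standing in for _run_query(QUERY_STORY_TOTAL_RUNTIME...).
rows = [
    {'name': 'memory.top_10_mobile/story_a', 'duration': '40.0'},
    {'name': 'memory.top_10_mobile/story_b', 'duration': '55.0'},
    {'name': 'speedometer2/Speedometer2', 'duration': '120.0'},
]

benchmarks = collections.OrderedDict()
for row in rows:
  # Everything before the first '/' is the benchmark name; the rest is the story.
  benchmark, _ = row['name'].split('/', 1)
  entry = benchmarks.setdefault(
      benchmark, {'num_stories': 0, 'total_runtime_in_seconds': 0.0})
  entry['num_stories'] += 1
  entry['total_runtime_in_seconds'] += float(row['duration'])

for benchmark, entry in benchmarks.items():
  print('%s -> %s' % (benchmark, entry))
# memory.top_10_mobile -> {'num_stories': 2, 'total_runtime_in_seconds': 95.0}
# speedometer2 -> {'num_stories': 1, 'total_runtime_in_seconds': 120.0}

The actual function additionally tracks grand totals across all rows and appends them under an 'All benchmarks' key before main() dumps the result to the output file.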