Commit 64909359 authored by Ned Nguyen, committed by Commit Bot

Update perf_data_generator to also generate benchmark tags into tools/perf/benchmark.csv

This CL also refactors the method to generate the story set from benchmark into
tools/perf/core/benchmark_utils.py for better code sharing

Bug:874940
Cq-Include-Trybots: master.tryserver.chromium.perf:obbs_fyi

NOTRY=true  # all tests passed in patch set 3

Change-Id: I2a551840fd852162a4743eac769d92c6ee499bc4
Reviewed-on: https://chromium-review.googlesource.com/1179603
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#584081}
parent 57366e25
tools/perf/core/benchmark_utils.py (new file)
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import optparse

from telemetry import benchmark as b_module
from telemetry.internal.browser import browser_options


def GetBenchmarkStorySet(benchmark):
  """Returns the story set of |benchmark|, with benchmark-specific
  command line defaults applied."""
  if not isinstance(benchmark, b_module.Benchmark):
    raise ValueError(
        '|benchmark| must be an instance of telemetry.benchmark.Benchmark class. '
        'Instead found object of type: %s' % type(benchmark))
  options = browser_options.BrowserFinderOptions()
  # Add default values for any extra commandline options
  # provided by the benchmark.
  parser = optparse.OptionParser()
  before, _ = parser.parse_args([])
  benchmark.AddBenchmarkCommandLineArgs(parser)
  after, _ = parser.parse_args([])
  for extra_option in dir(after):
    if extra_option not in dir(before):
      setattr(options, extra_option, getattr(after, extra_option))
  return benchmark.CreateStorySet(options)


def GetBenchmarkStoryNames(benchmark):
  """Returns a sorted list of the unique story names in |benchmark|."""
  story_list = []
  for story in GetBenchmarkStorySet(benchmark):
    if story.name not in story_list:
      story_list.append(story.name)
  return sorted(story_list)


def GetStoryTags(benchmark):
  """Returns a sorted list of all story tags used by |benchmark|."""
  tags = set()
  for story in GetBenchmarkStorySet(benchmark):
    tags.update(story.tags)
  return sorted(list(tags))
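
A minimal usage sketch of the new helpers (illustrative only, not part of this CL): it assumes the caller lives under tools/perf so that 'from core import benchmark_utils' resolves, that Telemetry is already on sys.path, and that benchmark_class is some telemetry.benchmark.Benchmark subclass obtained elsewhere (for example via core.benchmark_finders).

# Illustrative sketch only; assumes Telemetry is on sys.path and that
# |benchmark_class| is a telemetry.benchmark.Benchmark subclass.
from core import benchmark_utils

def print_benchmark_summary(benchmark_class):
  benchmark = benchmark_class()  # the helpers expect a benchmark instance
  story_set = benchmark_utils.GetBenchmarkStorySet(benchmark)
  print '%s has %d stories' % (benchmark_class.Name(), len(story_set.stories))
  print 'story names:', ', '.join(
      benchmark_utils.GetBenchmarkStoryNames(benchmark))
  print 'story tags:', ','.join(benchmark_utils.GetStoryTags(benchmark))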
tools/perf/core/perf_data_generator.py

@@ -22,7 +22,7 @@ import sys
 import sets
 import tempfile

+from core import benchmark_utils
 from core import bot_platforms
 from core import path_util
 from core import undocumented_benchmarks as ub_module
@@ -627,53 +627,57 @@ def update_all_tests(waterfall, file_path):
       get_all_waterfall_benchmarks_metadata())


-# not_scheduled means this test is not scheduled on any of the chromium.perf
-# waterfalls. Right now, all the below benchmarks are scheduled, but some other
-# benchmarks are not scheduled, because they're disabled on all platforms.
-BenchmarkMetadata = collections.namedtuple(
-    'BenchmarkMetadata', 'emails component documentation_url not_scheduled')
+class BenchmarkMetadata(object):
+  def __init__(self, emails, component='', documentation_url='', tags='',
+               not_scheduled=False):
+    self.emails = emails
+    self.component = component
+    self.documentation_url = documentation_url
+    self.tags = tags
+    # not_scheduled means this test is not scheduled on any of the chromium.perf
+    # waterfalls. Right now, all the below benchmarks are scheduled, but some
+    # other benchmarks are not scheduled, because they're disabled on all
+    # platforms.
+    # TODO(crbug.com/875232): remove this field
+    self.not_scheduled = not_scheduled


 NON_TELEMETRY_BENCHMARKS = {
     'angle_perftests': BenchmarkMetadata(
         'jmadill@chromium.org, chrome-gpu-perf-owners@chromium.org',
-        'Internals>GPU>ANGLE', None, False),
+        'Internals>GPU>ANGLE'),
     'validating_command_buffer_perftests': BenchmarkMetadata(
         'piman@chromium.org, chrome-gpu-perf-owners@chromium.org',
-        'Internals>GPU', None, False),
+        'Internals>GPU'),
     'passthrough_command_buffer_perftests': BenchmarkMetadata(
         'piman@chromium.org, chrome-gpu-perf-owners@chromium.org',
-        'Internals>GPU>ANGLE', None, False),
+        'Internals>GPU>ANGLE'),
     'net_perftests': BenchmarkMetadata(
-        'xunjieli@chromium.org', None, None, False),
+        'xunjieli@chromium.org'),
     'gpu_perftests': BenchmarkMetadata(
         'reveman@chromium.org, chrome-gpu-perf-owners@chromium.org',
-        'Internals>GPU', None, False),
+        'Internals>GPU'),
     'tracing_perftests': BenchmarkMetadata(
-        'kkraynov@chromium.org, primiano@chromium.org', None, None, False),
+        'kkraynov@chromium.org, primiano@chromium.org'),
     'load_library_perf_tests': BenchmarkMetadata(
         'xhwang@chromium.org, crouleau@chromium.org',
-        'Internals>Media>Encrypted', None, False),
-    'media_perftests': BenchmarkMetadata(
-        'crouleau@chromium.org', None, None, False),
-    'performance_browser_tests': BenchmarkMetadata(
-        'miu@chromium.org', None, None, False),
+        'Internals>Media>Encrypted'),
+    'media_perftests': BenchmarkMetadata('crouleau@chromium.org'),
+    'performance_browser_tests': BenchmarkMetadata('miu@chromium.org'),
     'views_perftests': BenchmarkMetadata(
-        'tapted@chromium.org', 'Internals>Views', None, False),
-    'components_perftests': BenchmarkMetadata(
-        'csharrison@chromium.org', None, None, False)
+        'tapted@chromium.org', 'Internals>Views'),
+    'components_perftests': BenchmarkMetadata('csharrison@chromium.org')
 }

 # If you change this dictionary, run tools/perf/generate_perf_data
 NON_WATERFALL_BENCHMARKS = {
-    'sizes (mac)': BenchmarkMetadata('tapted@chromium.org', None, None, False),
-    'sizes (win)': BenchmarkMetadata('grt@chromium.org', None, None, False),
-    'sizes (linux)': BenchmarkMetadata(
-        'thestig@chromium.org', None, None, False),
+    'sizes (mac)':
+        BenchmarkMetadata('tapted@chromium.org'),
+    'sizes (win)': BenchmarkMetadata('grt@chromium.org'),
+    'sizes (linux)': BenchmarkMetadata('thestig@chromium.org'),
     'resource_sizes': BenchmarkMetadata(
-        'agrieve@chromium.org, rnephew@chromium.org, perezju@chromium.org',
-        None, None, False),
-    'supersize_archive': BenchmarkMetadata(
-        'agrieve@chromium.org', None, None, False),
+        'agrieve@chromium.org, rnephew@chromium.org, perezju@chromium.org'),
+    'supersize_archive': BenchmarkMetadata('agrieve@chromium.org'),
 }
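
The move from a namedtuple to a plain class is what makes the shortened constructor calls in this hunk possible: every field except emails now has a default. A small equivalence check, illustrative only and reusing the BenchmarkMetadata class defined above:

# Illustrative only, not part of the diff. Both forms produce identical
# metadata because every field except |emails| defaults to '' or False.
short_form = BenchmarkMetadata('xunjieli@chromium.org')
long_form = BenchmarkMetadata(emails='xunjieli@chromium.org', component='',
                              documentation_url='', tags='',
                              not_scheduled=False)
assert short_form.__dict__ == long_form.__dict__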
@@ -690,9 +694,11 @@ def get_all_benchmarks_metadata(metadata):
     emails = decorators.GetEmails(benchmark)
     if emails:
       emails = ', '.join(emails)
+    tags_set = benchmark_utils.GetStoryTags(benchmark())
     metadata[benchmark.Name()] = BenchmarkMetadata(
         emails, decorators.GetComponent(benchmark),
-        decorators.GetDocumentationLink(benchmark), False)
+        decorators.GetDocumentationLink(benchmark),
+        ','.join(tags_set), False)
   return metadata

 # With migration to new recipe tests are now listed in the shard maps
@@ -780,7 +786,8 @@ def update_benchmark_csv(file_path):
   """
   header_data = [['AUTOGENERATED FILE DO NOT EDIT'],
       ['See //tools/perf/generate_perf_data.py to make changes'],
-      ['Benchmark name', 'Individual owners', 'Component', 'Documentation']
+      ['Benchmark name', 'Individual owners', 'Component', 'Documentation',
+       'Tags']
   ]

   csv_data = []
@@ -798,6 +805,7 @@ def update_benchmark_csv(file_path):
         benchmark_metadata[benchmark_name].emails,
         benchmark_metadata[benchmark_name].component,
         benchmark_metadata[benchmark_name].documentation_url,
+        benchmark_metadata[benchmark_name].tags,
     ])

   if undocumented_benchmarks != ub_module.UNDOCUMENTED_BENCHMARKS:
     error_message = (
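
For concreteness, each row the generator appends now has five columns matching the new header. The values below are hypothetical placeholders; only the five-column layout mirrors what the code writes:

# Hypothetical example row; the benchmark name, owners, component, URL and
# tags are made up, only the shape matches what update_benchmark_csv() emits.
example_row = [
    'some_benchmark',                          # Benchmark name
    'owner@chromium.org',                      # Individual owners
    'Some>Component',                          # Component
    'https://example.com/some_benchmark.md',   # Documentation
    'tag_a,tag_b',                             # Tags (comma-joined story tags)
]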
tools/perf/core/perf_data_generator_unittest.py

@@ -33,11 +33,11 @@ class PerfDataGeneratorTest(unittest.TestCase):
         }
     }
     benchmarks = {
-        'benchmark_name_1': BenchmarkMetadata('foo@bar.com', None, None, False),
+        'benchmark_name_1': BenchmarkMetadata('foo@bar.com'),
         'benchmark_name_2':
-            BenchmarkMetadata('darth@deathstar', None, None, False),
+            BenchmarkMetadata('darth@deathstar'),
         'benchmark_name_3':
-            BenchmarkMetadata('neo@matrix.org', None, None, False)
+            BenchmarkMetadata('neo@matrix.org')
     }

     # Mock out content of unowned_benchmarks.txt and sharding map
@@ -63,8 +63,8 @@ class PerfDataGeneratorTest(unittest.TestCase):
         }
     }
     benchmarks = {
-        'benchmark_name_2': BenchmarkMetadata(None, None, None, False),
-        'benchmark_name_3': BenchmarkMetadata(None, None, None, False),
+        'benchmark_name_2': BenchmarkMetadata('baz@foo.com'),
+        'benchmark_name_3': BenchmarkMetadata('darth@vader.com'),
     }

     with self.assertRaises(AssertionError) as context:
@@ -77,7 +77,7 @@ class PerfDataGeneratorTest(unittest.TestCase):
   def testVerifyAllTestsInBenchmarkCsvFindsFakeTest(self):
     tests = {'Random fake test': {}}
     benchmarks = {
-        'benchmark_name_1': BenchmarkMetadata(None, None, None, False)
+        'benchmark_name_1': BenchmarkMetadata('deathstar@empire.com')
     }

     with self.assertRaises(AssertionError) as context:
@@ -4,20 +4,17 @@
 # found in the LICENSE file.
 """Script to check validity of StoryExpectations."""

-import optparse
 import argparse
 import json
 import os

+from core import benchmark_utils
 from core import benchmark_finders
 from core import path_util

 path_util.AddTelemetryToPath()
 path_util.AddAndroidPylibToPath()

-from telemetry.internal.browser import browser_options

 CLUSTER_TELEMETRY_DIR = os.path.join(
     path_util.GetChromiumSrcDir(), 'tools', 'perf', 'contrib',
     'cluster_telemetry')
@@ -33,19 +30,7 @@ def validate_story_names(benchmarks, raw_expectations_data):
       continue
     b = benchmark()
     b.AugmentExpectationsWithParser(raw_expectations_data)
-    options = browser_options.BrowserFinderOptions()
-    # Add default values for any extra commandline options
-    # provided by the benchmark.
-    parser = optparse.OptionParser()
-    before, _ = parser.parse_args([])
-    benchmark.AddBenchmarkCommandLineArgs(parser)
-    after, _ = parser.parse_args([])
-    for extra_option in dir(after):
-      if extra_option not in dir(before):
-        setattr(options, extra_option, getattr(after, extra_option))
-    story_set = b.CreateStorySet(options)
+    story_set = benchmark_utils.GetBenchmarkStorySet(b)
     failed_stories = b.GetBrokenExpectations(story_set)
     assert not failed_stories, 'Incorrect story names: %s' % str(failed_stories)
@@ -12,32 +12,12 @@ devices we shard to.
 Run -h to see available commands.
 """

-import optparse
 import sys

+from core import benchmark_utils
 from core import sharding_map_generator
 from core import perf_data_generator
-from telemetry.internal.browser import browser_options
-
-
-def _get_stories_for_benchmark(b):
-  story_list = []
-  benchmark = b()
-  options = browser_options.BrowserFinderOptions()
-  # Add default values for any extra commandline options
-  # provided by the benchmark.
-  parser = optparse.OptionParser()
-  before, _ = parser.parse_args([])
-  benchmark.AddBenchmarkCommandLineArgs(parser)
-  after, _ = parser.parse_args([])
-  for extra_option in dir(after):
-    if extra_option not in dir(before):
-      setattr(options, extra_option, getattr(after, extra_option))
-  for story in benchmark.CreateStorySet(options).stories:
-    if story.name not in story_list:
-      story_list.append(story.name)
-  return story_list


 def _include_benchmark(name, shorlist):
@@ -64,7 +44,7 @@ if __name__ == '__main__':
       continue
     benchmarks_data[b.Name()] = {
         'repeat': b().options.get('pageset_repeat', 1),
-        'stories': _get_stories_for_benchmark(b)
+        'stories': benchmark_utils.GetBenchmarkStoryNames(b)
     }
   sys.exit(sharding_map_generator.main(options, benchmarks_data))