Commit 64803265 authored by Rakib M. Hasan, committed by Commit Bot

[tools/perf] Translate old expectations file into a file that uses the new expectations format

We are standardizing the expectations format: a new format has been created,
and every test/benchmark runner now needs to adopt it. This CL moves the
src/perf benchmarks to the new expectations format.

The script used to translate the old expectations format into the new format
can be found at https://paste.googleplex.com/5236354608791552.

The design document for the expectations format change can be found at
go/change-telemetry-benchmarks-expectations-format.
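
For illustration, here is the shape of the change, using the example
expectation from this CL's unit tests (the tag and result values are just
examples). Old format:

    # tags: Mac
    crbug.com/123 [ Mac ] b1/s1 [ Skip ]

New format, which declares all legal tags and results up front before they
may be used in an expectation line:

    # tags: [ Mac ]
    # results: [ Skip ]
    crbug.com/123 [ Mac ] b1/s1 [ Skip ]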

Bug: chromium:973936
Bug: chromium:992199
Change-Id: I83b0090958fe50517be6540d241be3e29c0925b1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1737991
Reviewed-by: Caleb Rouleau <crouleau@chromium.org>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Commit-Queue: Rakib Hasan <rmhasan@google.com>
Cr-Commit-Position: refs/heads/master@{#685405}
parent eaa6a968
@@ -40,6 +40,8 @@ def _GetPathsToPrepend(input_api):
   chromium_src_dir = input_api.os_path.join(perf_dir, '..', '..')
   telemetry_dir = input_api.os_path.join(
       chromium_src_dir, 'third_party', 'catapult', 'telemetry')
+  typ_dir = input_api.os_path.join(
+      chromium_src_dir, 'third_party', 'catapult', 'third_party', 'typ')
   experimental_dir = input_api.os_path.join(
       chromium_src_dir, 'third_party', 'catapult', 'experimental')
   tracing_dir = input_api.os_path.join(
@@ -50,6 +52,7 @@ def _GetPathsToPrepend(input_api):
       chromium_src_dir, 'build', 'android')
   return [
       telemetry_dir,
+      typ_dir,
       input_api.os_path.join(telemetry_dir, 'third_party', 'mock'),
       experimental_dir,
       tracing_dir,
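Note: the presubmit turns these directories into a sys.path prefix, which is
what lets the validator's new typ import resolve. A rough standalone sketch of
the effect follows; the chromium_src_dir value is made up for illustration,
and this is not the actual presubmit API:

    import os
    import sys

    chromium_src_dir = '/path/to/chromium/src'  # assumption for illustration
    typ_dir = os.path.join(
        chromium_src_dir, 'third_party', 'catapult', 'third_party', 'typ')
    sys.path.insert(0, typ_dir)

    from typ import expectations_parser  # resolvable once typ_dir is on sys.path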
@@ -6,6 +6,7 @@
 import argparse
 import json
 import logging
+import os

 from core import benchmark_utils
@@ -14,6 +15,8 @@ from core import path_util
 path_util.AddTelemetryToPath()
 path_util.AddAndroidPylibToPath()
+
+from typ import expectations_parser as typ_expectations_parser

 CLUSTER_TELEMETRY_DIR = os.path.join(
     path_util.GetChromiumSrcDir(), 'tools', 'perf', 'contrib',
@@ -23,16 +26,19 @@ CLUSTER_TELEMETRY_BENCHMARKS = [
     benchmark_finders.GetBenchmarksInSubDirectory(CLUSTER_TELEMETRY_DIR)
 ]

-def validate_story_names(benchmarks, raw_expectations_data):
+def validate_story_names(benchmarks, test_expectations):
+  stories = []
   for benchmark in benchmarks:
     if benchmark.Name() in CLUSTER_TELEMETRY_BENCHMARKS:
       continue
-    b = benchmark()
-    b.AugmentExpectationsWithParser(raw_expectations_data)
-    story_set = benchmark_utils.GetBenchmarkStorySet(b)
-    failed_stories = b.GetBrokenExpectations(story_set)
-    assert not failed_stories, 'Incorrect story names: %s' % str(failed_stories)
+    story_set = benchmark_utils.GetBenchmarkStorySet(benchmark())
+    stories.extend([benchmark.Name() + '/' + s.name for s in story_set.stories])
+  broken_expectations = test_expectations.check_for_broken_expectations(stories)
+  unused_patterns = ''
+  for pattern in set([e.test for e in broken_expectations]):
+    unused_patterns += ("Expectations with pattern '%s'"
+                        " do not apply to any stories\n" % pattern)
+  assert not unused_patterns, unused_patterns

 def GetDisabledStories(benchmarks, raw_expectations_data):
@@ -73,9 +79,14 @@ def main(args):
   benchmarks = benchmark_finders.GetAllBenchmarks()
   with open(path_util.GetExpectationsPath()) as fp:
     raw_expectations_data = fp.read()
+  test_expectations = typ_expectations_parser.TestExpectations()
+  ret, msg = test_expectations.parse_tagged_list(raw_expectations_data)
+  if ret:
+    logging.error(msg)
+    return ret
   if options.list:
     stories = GetDisabledStories(benchmarks, raw_expectations_data)
     print json.dumps(stories, sort_keys=True, indent=4, separators=(',', ': '))
   else:
-    validate_story_names(benchmarks, raw_expectations_data)
+    validate_story_names(benchmarks, test_expectations)
   return 0
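
Note: for reviewers unfamiliar with typ, a minimal sketch of the
parse-then-validate flow the validator now follows. The API calls are the
ones used in the diff above; the expectations text and story list are made up:

    from typ import expectations_parser as typ_expectations_parser

    raw_expectations_data = ('# tags: [ Mac ]\n'
                             '# results: [ Skip ]\n'
                             'crbug.com/123 [ Mac ] b1/s1 [ Skip ]\n')

    test_expectations = typ_expectations_parser.TestExpectations()
    # parse_tagged_list returns (ret, msg); a non-zero ret signals a parse error.
    ret, msg = test_expectations.parse_tagged_list(raw_expectations_data)
    assert not ret, msg

    # Story names are '<benchmark>/<story>'. Patterns matching no story come
    # back as broken expectations, each exposing its pattern as .test.
    broken = test_expectations.check_for_broken_expectations(['b1/One'])
    for expectation in broken:
      print("Pattern '%s' does not apply to any stories" % expectation.test)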
@@ -8,6 +8,8 @@ from core import story_expectation_validator
 from telemetry import benchmark
 from telemetry import story
+
+from typ import expectations_parser as typ_expectations_parser

 class FakePage(object):
   def __init__(self, name):
     self._name = name
@@ -40,32 +42,26 @@ class FakeBenchmark(benchmark.Benchmark):
 class StoryExpectationValidatorTest(unittest.TestCase):
   def testValidateStoryInValidName(self):
-    raw_expectations = '# tags: Mac\ncrbug.com/123 [ Mac ] b1/s1 [ Skip ]'
+    raw_expectations = ('# tags: [ Mac ]\n'
+                        '# results: [ Skip ]\n'
+                        'crbug.com/123 [ Mac ] b1/s1 [ Skip ]\n')
+    test_expectations = typ_expectations_parser.TestExpectations()
+    ret, _ = test_expectations.parse_tagged_list(raw_expectations)
+    self.assertFalse(ret)
     benchmarks = [FakeBenchmark]
     with self.assertRaises(AssertionError):
       story_expectation_validator.validate_story_names(
-          benchmarks, raw_expectations)
+          benchmarks, test_expectations)

   def testValidateStoryValidName(self):
-    raw_expectations = '# tags: Mac\ncrbug.com/123 [ Mac ] b1/One [ Skip ]'
+    raw_expectations = ('# tags: [ Mac ]\n'
+                        '# results: [ Skip ]\n'
+                        'crbug.com/123 [ Mac ] b1/One [ Skip ]\n')
+    test_expectations = typ_expectations_parser.TestExpectations()
+    ret, _ = test_expectations.parse_tagged_list(raw_expectations)
+    self.assertFalse(ret)
     benchmarks = [FakeBenchmark]
     # If a name is invalid, an exception is thrown. If no exception is thrown
     # all story names are valid. That is why there is no assert here.
     story_expectation_validator.validate_story_names(
-        benchmarks, raw_expectations)
-
-  def testGetDisabledStoriesWithExpectationsData(self):
-    raw_expectations = '# tags: Mac\ncrbug.com/123 [ Mac ] b1/One [ Skip ]'
-    benchmarks = [FakeBenchmark]
-    results = story_expectation_validator.GetDisabledStories(
-        benchmarks, raw_expectations)
-    expected = {'b1': {'One': [(['Mac'], 'crbug.com/123')]}}
-    self.assertEqual(expected, results)
-
-  def testGetDisabledStoriesWithoutMatchingExpectationsData(self):
-    raw_expectations = '# tags: Mac\ncrbug.com/123 [ Mac ] b2/One [ Skip ]'
-    benchmarks = [FakeBenchmark]
-    results = story_expectation_validator.GetDisabledStories(
-        benchmarks, raw_expectations)
-    expected = { 'b1': {}}
-    self.assertEqual(expected, results)
+        benchmarks, test_expectations)
[One file's diff is collapsed and not shown.]
@@ -7,14 +7,13 @@ import os
 import sys

 from telemetry import story
-from telemetry.story import expectations
 from py_utils import discover

 # Import all submodules' PageSet classes.
 start_dir = os.path.dirname(os.path.abspath(__file__))
 top_level_dir = os.path.dirname(start_dir)
-base_classes = [story.StorySet, expectations.StoryExpectations]
+base_classes = [story.StorySet]
 for base_class in base_classes:
   for cls in discover.DiscoverClasses(
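
Note: the discovery loop above is truncated in this view. With
StoryExpectations dropped from base_classes, only StorySet subclasses are
discovered now. A hedged sketch of the pattern, assuming (as in catapult's
py_utils) that DiscoverClasses returns a name-to-class mapping; the setattr
registration is illustrative, not necessarily the exact original code:

    import os
    import sys

    from py_utils import discover
    from telemetry import story

    start_dir = os.path.dirname(os.path.abspath(__file__))
    top_level_dir = os.path.dirname(start_dir)

    # Make every discovered StorySet subclass importable from this module.
    for cls in discover.DiscoverClasses(
        start_dir, top_level_dir, story.StorySet).values():
      setattr(sys.modules[__name__], cls.__name__, cls)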