Commit e6b675d6 authored by Kuo-Hsin Yang, committed by Commit Bot

[tools/perf] Add option --output-deps to output deps to stdout

The output is used by CrOS autotest telemetry_runner to upload
dependencies to the DUT.
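
For example (reusing the octane example from the comment added in this
change), running:

  fetch_benchmark_deps.py --output-deps octane

prints lines such as:

  Fetch dependencies for benchmark octane
  Dependency: tools/perf/page_sets/data/octane_002.wprgo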

Bug: 834659
Change-Id: I5845fa293e07b35bdcb27030da7fc87ca5b61505
Reviewed-on: https://chromium-review.googlesource.com/1025612
Commit-Queue: Achuith Bhandarkar <achuith@chromium.org>
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Achuith Bhandarkar <achuith@chromium.org>
Cr-Commit-Position: refs/heads/master@{#553559}
parent fc9d9f7b
@@ -64,7 +64,14 @@ def _EnumerateDependencies(story_set):
   return [dep[prefix_len:] for dep in deps if dep]
 
 
-def FetchDepsForBenchmark(benchmark):
+def _PrintDeps(mesg, output_deps):
+  if output_deps:
+    print mesg
+  else:
+    logging.info(mesg)
+
+
+def FetchDepsForBenchmark(benchmark, output_deps):
   # Create a dummy options object which holds default values that are expected
   # by the Benchmark.CreateStorySet(options) method.
   parser = optparse.OptionParser()
@@ -76,9 +83,11 @@ def FetchDepsForBenchmark(benchmark):
   _FetchDependenciesIfNeeded(story_set)
 
   # Log files downloaded.
+  _PrintDeps('Fetch dependencies for benchmark %s' % benchmark.Name(),
+             output_deps)
   deps = _EnumerateDependencies(story_set)
   for dep in deps:
-    logging.info(dep)
+    _PrintDeps("Dependency: " + dep, output_deps)
 
 
 def main(args):
@@ -89,6 +98,14 @@ def main(args):
                       help=('Force fetching all the benchmarks when '
                             'benchmark_name is not specified'),
                       action='store_true', default=False)
+  # Flag --output-deps: print the dependencies to stdout. CrOS autotest
+  # telemetry_runner parses the output to upload the dependencies to the DUT.
+  # Example output of fetch_benchmark_deps.py --output-deps octane:
+  #   Fetch dependencies for benchmark octane
+  #   Dependency: tools/perf/page_sets/data/octane_002.wprgo
+  parser.add_argument('--output-deps',
+                      help='Print dependencies to stdout',
+                      action='store_true', default=False)
   parser.add_argument(
       '-v', '--verbose', action='count', dest='verbosity',
       help='Increase verbosity level (repeat as needed)')
@@ -112,15 +129,15 @@ def main(args):
         options.benchmark_name, config)
     if not benchmark:
       raise ValueError('No such benchmark: %s' % options.benchmark_name)
-    FetchDepsForBenchmark(benchmark)
+    FetchDepsForBenchmark(benchmark, options.output_deps)
   else:
     if not options.force:
       raw_input(
           'No benchmark name is specified. Fetching all benchmark deps. '
           'Press enter to continue...')
     for b in benchmark_finders.GetAllPerfBenchmarks():
-      logging.info('Fetch dependencies for benchmark %s', b.Name())
-      FetchDepsForBenchmark(b)
+      FetchDepsForBenchmark(b, options.output_deps)
 
 
 if __name__ == '__main__':
   main(sys.argv[1:])
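
The fixed 'Dependency: ' prefix is what makes this output machine-parseable.
As an illustrative sketch only (this is not telemetry_runner's actual code;
the script path and helper name are assumptions), a consumer could collect
the dependency paths with the same regex the unit test below uses:

  import re
  import subprocess

  def GetBenchmarkDeps(benchmark_name):
    """Returns dependency paths printed by fetch_benchmark_deps.py."""
    # Hypothetical invocation; the script path is assumed for this sketch.
    output = subprocess.check_output(
        ['python', 'tools/perf/fetch_benchmark_deps.py',
         '--output-deps', benchmark_name])
    # Keep only the 'Dependency: <path>' lines, mirroring the unit test.
    dep_pattern = re.compile('Dependency: (.+)')
    deps = []
    for line in output.splitlines():
      match = dep_pattern.match(line)
      if match:
        deps.append(match.group(1))
    return deps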
@@ -3,7 +3,10 @@
 # found in the LICENSE file.
 
 import os
+import re
+import sys
 import unittest
+import StringIO
 
 import mock  # pylint: disable=import-error
@@ -26,7 +29,9 @@ class FetchBenchmarkDepsUnittest(unittest.TestCase):
   """
 
   def testFetchWPRs(self):
-    args = ['smoothness.top_25_smooth']
+    old_out = sys.stdout
+    sys.stdout = StringIO.StringIO()
+    args = ['smoothness.top_25_smooth', '--output-deps']
     with mock.patch.object(archive_info.WprArchiveInfo,
         'DownloadArchivesIfNeeded', autospec=True) as mock_download:
       with mock.patch('py_utils.cloud_storage'
@@ -42,6 +47,21 @@ class FetchBenchmarkDepsUnittest(unittest.TestCase):
     # This benchmark doesn't use any static local files.
     self.assertFalse(mock_get.called)
 
+    # Checks fetch_benchmark_deps.py output.
+    output_count = 0
+    dep_pattern = re.compile('Dependency: (.+)')
+    for line in sys.stdout.getvalue().splitlines():
+      dep_match = dep_pattern.match(line)
+      if not dep_match:
+        continue
+      filename = dep_match.group(1)
+      fullpath = os.path.join(path_util.GetChromiumSrcDir(), filename)
+      sha1path = fullpath + '.sha1'
+      self.assertTrue(os.path.isfile(sha1path))
+      output_count += 1
+    self.assertTrue(output_count > 0)
+    sys.stdout = old_out
+
   def testFetchServingDirs(self):
     args = ['media.desktop']
     with mock.patch.object(archive_info.WprArchiveInfo,