Commit 85daaeb1 authored by Ned Nguyen, committed by Commit Bot

Fix PRESUBMIT errors in tools/perf

Bug: 866261
Cq-Include-Trybots: master.tryserver.chromium.perf:obbs_fyi

NOTRY=true  # layout test flakiness

Change-Id: I9958cd37309a866ccedf668a7471aa1e2ec3d2af
Reviewed-on: https://chromium-review.googlesource.com/1146330
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#577182}
parent 2d0fd0cb
@@ -4,6 +4,7 @@
 import os
+# pylint: disable=wrong-import-position
 from core import path_util
 CLIENT_CONFIG_PATH = os.path.join(
...
@@ -4,9 +4,10 @@
 import logging
 import os
-import py_utils
 import time
+
+import py_utils
 from telemetry.page import legacy_page_test
 from telemetry.util import image_util
...
@@ -772,7 +772,7 @@ def verify_all_tests_in_benchmark_csv(tests, benchmark_metadata):
 def _verify_benchmark_owners(benchmark_metadata):
   unowned_benchmarks = set()
   for benchmark_name in benchmark_metadata:
-    if benchmark_metadata[benchmark_name].emails == None:
+    if benchmark_metadata[benchmark_name].emails is None:
       unowned_benchmarks.add(benchmark_name)
   assert not unowned_benchmarks, (
...
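Note on the `is None` change above: pylint's singleton-comparison check flags `== None` because `==` dispatches to a class's `__eq__` method, which can be overridden, while `is` always compares object identity. A minimal sketch of the difference (the `AlwaysEqual` class is hypothetical, not part of this CL):

```python
class AlwaysEqual(object):
  """Hypothetical class whose __eq__ claims equality with everything."""
  def __eq__(self, other):
    return True

obj = AlwaysEqual()
print(obj == None)  # True: '==' ran the overridden __eq__, a misleading answer.
print(obj is None)  # False: 'is' compares identity and cannot be fooled.
```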
 # Copyright 2017 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+import json
 import unittest
 from core import perf_data_generator
 from core.perf_data_generator import BenchmarkMetadata
 import mock
-import json
 class PerfDataGeneratorTest(unittest.TestCase):
...
@@ -11,7 +11,6 @@
 import calendar
 import datetime
 import httplib
-import httplib2
 import json
 import os
 import subprocess
@@ -24,12 +23,14 @@ import urllib2
 import uuid
 import zlib
+import httplib2
+
 from telemetry.internal.util import external_modules
-psutil = external_modules.ImportOptionalModule('psutil')
 from core import path_util
+psutil = external_modules.ImportOptionalModule('psutil')
 # The paths in the results dashboard URLs for sending results.
 SEND_RESULTS_PATH = '/add_point'
...
@@ -62,9 +62,11 @@ rm win_10_test_data.json
 """
 import argparse
-from collections import OrderedDict
-import core.path_util
+import collections
 import json
+import sys
+
+import core.path_util
 core.path_util.AddTelemetryToPath()
@@ -153,16 +155,16 @@ def generate_sharding_map(
       story_timing_ordered_dict, num_shards)
   total_time = 0
-  sharding_map = OrderedDict()
-  debug_map = OrderedDict()
-  min_shard_time = float('inf')
+  sharding_map = collections.OrderedDict()
+  debug_map = collections.OrderedDict()
+  min_shard_time = sys.maxint
   min_shard_index = None
   max_shard_time = 0
   max_shard_index = None
   num_stories = len(story_timing_ordered_dict)
   for i in range(num_shards):
-    sharding_map[str(i)] = {'benchmarks': OrderedDict()}
-    debug_map[str(i)] = OrderedDict()
+    sharding_map[str(i)] = {'benchmarks': collections.OrderedDict()}
+    debug_map[str(i)] = collections.OrderedDict()
     time_per_shard = 0
     stories_in_shard = []
     expected_total_time = expected_time_per_shard * (i + 1)
@@ -194,7 +196,7 @@ def generate_sharding_map(
     json.dump(debug_map, output_file, indent = 4, separators=(',', ': '))
-  sharding_map['extra_infos'] = OrderedDict([
+  sharding_map['extra_infos'] = collections.OrderedDict([
       ('num_stories', num_stories),
       # Double all the time stats by 2 to account for reference build.
       ('predicted_min_shard_time', min_shard_time * 2),
@@ -214,7 +216,7 @@ def _get_expected_time_per_shard(timing_data, num_shards):
 def _add_benchmarks_to_shard(sharding_map, shard_index, stories_in_shard,
                              all_stories):
-  benchmarks = OrderedDict()
+  benchmarks = collections.OrderedDict()
   for story in stories_in_shard:
     (b, story) = story.split('/', 1)
     if b not in benchmarks:
@@ -222,7 +224,7 @@ def _add_benchmarks_to_shard(sharding_map, shard_index, stories_in_shard,
     benchmarks[b].append(story)
   # Format the benchmark's stories by indices
-  benchmarks_in_shard = OrderedDict()
+  benchmarks_in_shard = collections.OrderedDict()
   for b in benchmarks:
     benchmarks_in_shard[b] = {}
     first_story = all_stories[b].index(benchmarks[b][0])
@@ -251,7 +253,7 @@ def _load_timing_data_from_file(benchmarks_data, timing_data_file, repeat):
 def _init_timing_dict_for_benchmarks(benchmarks_data):
-  timing_data = OrderedDict()
+  timing_data = collections.OrderedDict()
   for b in benchmarks_data:
     story_list = benchmarks_data[b]['stories']
     for story in story_list:
@@ -260,20 +262,20 @@ def _init_timing_dict_for_benchmarks(benchmarks_data):
 def _generate_empty_sharding_map(num_shards):
-  sharding_map = OrderedDict()
+  sharding_map = collections.OrderedDict()
   for i in range(0, num_shards):
-    sharding_map[str(i)] = {'benchmarks': OrderedDict()}
+    sharding_map[str(i)] = {'benchmarks': collections.OrderedDict()}
   return sharding_map
 def test_sharding_map(sharding_map_file, timing_data, all_stories):
-  results = OrderedDict()
+  results = collections.OrderedDict()
   with open(sharding_map_file) as f:
-    sharding_map = json.load(f, object_pairs_hook=OrderedDict)
+    sharding_map = json.load(f, object_pairs_hook=collections.OrderedDict)
   sharding_map.pop('extra_infos', None)
   for shard in sharding_map:
-    results[shard] = OrderedDict()
+    results[shard] = collections.OrderedDict()
     shard_total_time = 0
     for benchmark_name in sharding_map[shard]['benchmarks']:
       benchmark = sharding_map[shard]['benchmarks'][benchmark_name]
...
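The bulk of this file's changes swap `from collections import OrderedDict` for `import collections`: the Google/Chromium Python style guide asks that imports bring in packages and modules rather than individual names, which keeps the origin of every symbol visible at the call site. A minimal sketch of the preferred form (the sample data is illustrative):

```python
import collections

# Each use names the owning module, so readers can tell at a glance
# where OrderedDict comes from.
timing_data = collections.OrderedDict()
timing_data['benchmark_0/story_0'] = 12.5
print(timing_data)
```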
@@ -2,26 +2,27 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-from collections import OrderedDict
+import collections
 import copy
-from core import sharding_map_generator
 import json
 import os
 import tempfile
 import unittest
+from core import sharding_map_generator
 from py_utils import tempfile_ext
 class TestShardingMapGenerator(unittest.TestCase):
   def _init_sample_timing_data(self, times):
-    timing_data = OrderedDict()
+    timing_data = collections.OrderedDict()
     all_stories = {}
-    for i in range(len(times)):
+    for i, _ in enumerate(times):
       all_stories['benchmark_' + str(i)] = []
       story_times = times[i]
-      for j in range(len(story_times)):
+      for j, _ in enumerate(story_times):
         all_stories['benchmark_' + str(i)].append('story_' + str(j))
         timing_data['benchmark_' + str(i) + '/' + 'story_' + str(j)] = (
             story_times[j])
...
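The test change above replaces `range(len(...))` index loops with `enumerate`, which pylint suggests via its consider-using-enumerate check. A small sketch of the two forms (the `times` data is illustrative):

```python
times = [[10, 20], [30]]

# Flagged form: manual index arithmetic over the sequence length.
for i in range(len(times)):
  print(i, times[i])

# Preferred form: enumerate yields the index directly; '_' discards the
# paired value when, as here, only the index is needed.
for i, _ in enumerate(times):
  print(i, times[i])
```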
@@ -42,8 +42,9 @@ def _GetDashboardJson(options):
   with open(options.results_file) as f:
     results = json.load(f)
   dashboard_json = {}
-  if not 'charts' in results:
+  if 'charts' not in results:
     # These are legacy results.
+    # pylint: disable=redefined-variable-type
     dashboard_json = results_dashboard.MakeListOfPoints(
         results, options.configuration_name, stripped_test_name,
         options.buildername, options.buildnumber, {},
...
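The `if not 'charts' in results` fix addresses a pylint membership-test style check: Python has a dedicated `not in` operator that reads as a single comparison instead of a negated expression. A minimal illustration:

```python
results = {'charts': {}}

# Discouraged: 'not' negates the whole membership test.
if not 'charts' in results:
  print('legacy results')

# Preferred: 'not in' is one clear operator.
if 'charts' not in results:
  print('legacy results')
```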
@@ -15,6 +15,7 @@ from telemetry.wpr import archive_info
 from core import path_util
 import fetch_benchmark_deps
+
 def NormPaths(paths):
   return sorted([os.path.normcase(p) for p in paths.splitlines()])
...
@@ -252,6 +252,6 @@ def _FormatHumanReadable(number):
     exponent += 1
   if digits >= 100:
     # Don't append a meaningless '.0' to an integer number.
-    digits = int(digits)
+    digits = int(digits)  # pylint: disable=redefined-variable-type
   # Exponent is now divisible by 3, between -3 and 6 inclusive: (-3, 0, 3, 6).
   return '%s%s' % (digits, metric_prefixes[exponent])
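The inline disable above silences pylint's redefined-variable-type warning, which fires when one name is rebound to a value of a different type within a function. A compact illustration of what triggers it:

```python
digits = 123.4        # 'digits' starts out as a float.
digits = int(digits)  # Rebinding it as an int trips redefined-variable-type,
                      # hence the inline '# pylint: disable=...' in this CL.
```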
@@ -2,12 +2,13 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+import os
+
 from page_sets.login_helpers import google_login
 from telemetry.page import page as page_module
 from telemetry.page import shared_page_state
-import os
 def _DeterministicPerformanceCounters():
   with open(os.path.join(os.path.dirname(__file__),
...
@@ -30,7 +30,7 @@ class RenderingStory(page.Page):
   URL = NotImplemented
   ABSTRACT_STORY = True
   SUPPORTED_PLATFORMS = platforms.ALL_PLATFORMS
-  TAGS = None
+  TAGS = []
   PLATFORM_SPECIFIC = False
   YEAR = None
@@ -42,7 +42,6 @@ class RenderingStory(page.Page):
                make_javascript_deterministic=True,
                base_dir=None):
     tags = []
-    if self.TAGS:
-      for t in self.TAGS:
-        assert t in story_tags.ALL_TAGS
-        tags.append(t.name)
+    for t in self.TAGS:
+      assert t in story_tags.ALL_TAGS
+      tags.append(t.name)
...
@@ -2,9 +2,10 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import py_utils
 import logging
+
+import py_utils
 from page_sets.system_health import system_health_story
 from page_sets.system_health import story_tags
 from page_sets.system_health import platforms
...
@@ -54,15 +54,14 @@ class SystemHealthStory(page.Page):
   # flow during replay. Switch this to False when recording.
   SKIP_LOGIN = True
   SUPPORTED_PLATFORMS = platforms.ALL_PLATFORMS
-  TAGS = None
+  TAGS = []
   PLATFORM_SPECIFIC = False
   def __init__(self, story_set, take_memory_measurement,
                extra_browser_args=None):
     case, group, _ = self.NAME.split(':')
     tags = []
-    if self.TAGS:
-      for t in self.TAGS:
-        assert t in story_tags.ALL_TAGS
-        tags.append(t.name)
+    for t in self.TAGS:  # pylint: disable=not-an-iterable
+      assert t in story_tags.ALL_TAGS
+      tags.append(t.name)
     super(SystemHealthStory, self).__init__(
...
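Changing the class default from `TAGS = None` to `TAGS = []` in both story base classes lets subclasses iterate unconditionally, which removes the `if self.TAGS:` guard; the inline disable covers pylint still inferring a possibly non-iterable value. A hypothetical sketch of the pattern (class names are illustrative, not from this CL):

```python
class BaseStory(object):
  # An empty list is safe as a shared class attribute here because
  # stories only read TAGS; they never mutate it in place.
  TAGS = []

class TaggedStory(BaseStory):
  TAGS = ['health_check', 'emerging_market']

def tag_names(story_cls):
  # No 'if story_cls.TAGS:' guard needed once the default is iterable.
  return list(story_cls.TAGS)

print(tag_names(BaseStory))    # []
print(tag_names(TaggedStory))  # ['health_check', 'emerging_market']
```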
@@ -8,6 +8,8 @@ import json
 import logging
 import multiprocessing as mp
 import os
+from os import listdir
+from os.path import isfile, join, basename
 import shutil
 import sys
 import tempfile
@@ -18,8 +20,6 @@ from core import oauth_api
 from core import path_util
 from core import upload_results_to_perf_dashboard
 from core import results_merger
-from os import listdir
-from os.path import isfile, join, basename
 path_util.AddAndroidPylibToPath()
...