Commit 85daaeb1 authored by Ned Nguyen, committed by Commit Bot

Fix PRESUBMIT errors of tools/perf

Bug:866261
Cq-Include-Trybots: master.tryserver.chromium.perf:obbs_fyi

NOTRY=true  # layout test flakiness

Change-Id: I9958cd37309a866ccedf668a7471aa1e2ec3d2af
Reviewed-on: https://chromium-review.googlesource.com/1146330
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#577182}
parent 2d0fd0cb
......
@@ -4,6 +4,7 @@
 import os
+# pylint: disable=wrong-import-position
 from core import path_util
 CLIENT_CONFIG_PATH = os.path.join(
......
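The disable comment added above is the standard escape hatch for pylint's wrong-import-position (C0413), which fires whenever an import appears below executable module-level code. A minimal runnable sketch of the pattern, with `json` standing in for the CL's local `core` import:

    import os
    import sys

    # Module-level setup that must run before a dependent import, for example
    # making a sibling directory importable:
    sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    os.pardir))

    # pylint: disable=wrong-import-position
    import json  # any import below executable code triggers C0413 otherwise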
......
@@ -4,9 +4,10 @@
 import logging
 import os
-import py_utils
 import time
+import py_utils
 from telemetry.page import legacy_page_test
 from telemetry.util import image_util
......
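This hunk is a wrong-import-order (C0411) fix: pylint wants standard-library imports grouped before third-party ones, and `py_utils` is a third-party (catapult) package while `time` is stdlib. The grouping the checker expects, sketched with this file's own modules (the third-party ones resolve only with catapult on sys.path):

    # Group 1: standard library
    import logging
    import os
    import time

    # Group 2: third-party
    import py_utils
    from telemetry.page import legacy_page_test
    from telemetry.util import image_util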
......
@@ -772,7 +772,7 @@ def verify_all_tests_in_benchmark_csv(tests, benchmark_metadata):
 def _verify_benchmark_owners(benchmark_metadata):
   unowned_benchmarks = set()
   for benchmark_name in benchmark_metadata:
-    if benchmark_metadata[benchmark_name].emails == None:
+    if benchmark_metadata[benchmark_name].emails is None:
       unowned_benchmarks.add(benchmark_name)
   assert not unowned_benchmarks, (
......
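The `== None` comparison is what pylint's singleton-comparison (C0121) flags: `==` dispatches to a type's `__eq__`, which classes may override, whereas `is` always tests object identity. A small self-contained illustration:

    class AlwaysEqual(object):
      """Pathological but legal: claims equality with everything."""
      def __eq__(self, other):
        return True

    obj = AlwaysEqual()
    print(obj == None)  # True -- misleading answer routed through __eq__
    print(obj is None)  # False -- identity cannot be overridden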
 # Copyright 2017 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+import json
 import unittest
 from core import perf_data_generator
 from core.perf_data_generator import BenchmarkMetadata
 import mock
-import json
 class PerfDataGeneratorTest(unittest.TestCase):
......
......
@@ -11,7 +11,6 @@
 import calendar
 import datetime
 import httplib
-import httplib2
 import json
 import os
 import subprocess
......
@@ -24,12 +23,14 @@ import urllib2
 import uuid
 import zlib
-from telemetry.internal.util import external_modules
+import httplib2
-psutil = external_modules.ImportOptionalModule('psutil')
+from telemetry.internal.util import external_modules
+from core import path_util
+psutil = external_modules.ImportOptionalModule('psutil')
 # The paths in the results dashboard URLs for sending results.
 SEND_RESULTS_PATH = '/add_point'
......
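`ImportOptionalModule` is telemetry's helper for dependencies that may be absent on some bots, which is why the `psutil` assignment has to stay below the `external_modules` import when the block is reordered. The same idea sketched with the stdlib only (a hypothetical standalone version, not the CL's code):

    try:
      import psutil  # optional third-party dependency
    except ImportError:
      psutil = None

    if psutil is not None:
      print(psutil.cpu_percent())  # use it only when actually available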
......
@@ -62,9 +62,11 @@ rm win_10_test_data.json
 """
 import argparse
-from collections import OrderedDict
-import core.path_util
+import collections
 import json
+import sys
+import core.path_util
 core.path_util.AddTelemetryToPath()
......
@@ -153,16 +155,16 @@ def generate_sharding_map(
       story_timing_ordered_dict, num_shards)
   total_time = 0
-  sharding_map = OrderedDict()
-  debug_map = OrderedDict()
-  min_shard_time = float('inf')
+  sharding_map = collections.OrderedDict()
+  debug_map = collections.OrderedDict()
+  min_shard_time = sys.maxint
   min_shard_index = None
   max_shard_time = 0
   max_shard_index = None
   num_stories = len(story_timing_ordered_dict)
   for i in range(num_shards):
-    sharding_map[str(i)] = {'benchmarks': OrderedDict()}
-    debug_map[str(i)] = OrderedDict()
+    sharding_map[str(i)] = {'benchmarks': collections.OrderedDict()}
+    debug_map[str(i)] = collections.OrderedDict()
     time_per_shard = 0
     stories_in_shard = []
     expected_total_time = expected_time_per_shard * (i + 1)
......
@@ -194,7 +196,7 @@ def generate_sharding_map(
     json.dump(debug_map, output_file, indent = 4, separators=(',', ': '))
-  sharding_map['extra_infos'] = OrderedDict([
+  sharding_map['extra_infos'] = collections.OrderedDict([
       ('num_stories', num_stories),
       # Double all the time stats by 2 to account for reference build.
       ('predicted_min_shard_time', min_shard_time * 2),
......
@@ -214,7 +216,7 @@ def _get_expected_time_per_shard(timing_data, num_shards):
 def _add_benchmarks_to_shard(sharding_map, shard_index, stories_in_shard,
                              all_stories):
-  benchmarks = OrderedDict()
+  benchmarks = collections.OrderedDict()
   for story in stories_in_shard:
     (b, story) = story.split('/', 1)
     if b not in benchmarks:
......
@@ -222,7 +224,7 @@ def _add_benchmarks_to_shard(sharding_map, shard_index, stories_in_shard,
       benchmarks[b].append(story)
   # Format the benchmark's stories by indices
-  benchmarks_in_shard = OrderedDict()
+  benchmarks_in_shard = collections.OrderedDict()
   for b in benchmarks:
     benchmarks_in_shard[b] = {}
     first_story = all_stories[b].index(benchmarks[b][0])
......
@@ -251,7 +253,7 @@ def _load_timing_data_from_file(benchmarks_data, timing_data_file, repeat):
 def _init_timing_dict_for_benchmarks(benchmarks_data):
-  timing_data = OrderedDict()
+  timing_data = collections.OrderedDict()
   for b in benchmarks_data:
     story_list = benchmarks_data[b]['stories']
     for story in story_list:
......
@@ -260,20 +262,20 @@ def _init_timing_dict_for_benchmarks(benchmarks_data):
 def _generate_empty_sharding_map(num_shards):
-  sharding_map = OrderedDict()
+  sharding_map = collections.OrderedDict()
   for i in range(0, num_shards):
-    sharding_map[str(i)] = {'benchmarks': OrderedDict()}
+    sharding_map[str(i)] = {'benchmarks': collections.OrderedDict()}
   return sharding_map
 def test_sharding_map(sharding_map_file, timing_data, all_stories):
-  results = OrderedDict()
+  results = collections.OrderedDict()
   with open(sharding_map_file) as f:
-    sharding_map = json.load(f, object_pairs_hook=OrderedDict)
+    sharding_map = json.load(f, object_pairs_hook=collections.OrderedDict)
   sharding_map.pop('extra_infos', None)
   for shard in sharding_map:
-    results[shard] = OrderedDict()
+    results[shard] = collections.OrderedDict()
     shard_total_time = 0
     for benchmark_name in sharding_map[shard]['benchmarks']:
       benchmark = sharding_map[shard]['benchmarks'][benchmark_name]
......
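The mechanical change through this file replaces `from collections import OrderedDict` with a plain `import collections` and module-qualified references. Note also that the initial `min_shard_time` becomes `sys.maxint`, which exists only in Python 2 (these scripts predate Chromium's Python 3 migration); `sys.maxsize` or `float('inf')` would be the Python 3 spellings. A quick sketch of the qualified usage on made-up timing data:

    import collections

    timing = collections.OrderedDict()
    timing['benchmark_a/story_0'] = 12.5
    timing['benchmark_a/story_1'] = 3.0
    # Iteration order matches insertion order, which the sharding map relies on.
    print(list(timing))  # ['benchmark_a/story_0', 'benchmark_a/story_1']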
......
@@ -2,26 +2,27 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-from collections import OrderedDict
+import collections
 import copy
-from core import sharding_map_generator
 import json
 import os
 import tempfile
 import unittest
+from core import sharding_map_generator
 from py_utils import tempfile_ext
 class TestShardingMapGenerator(unittest.TestCase):
   def _init_sample_timing_data(self, times):
-    timing_data = OrderedDict()
+    timing_data = collections.OrderedDict()
     all_stories = {}
-    for i in range(len(times)):
+    for i, _ in enumerate(times):
       all_stories['benchmark_' + str(i)] = []
       story_times = times[i]
-      for j in range(len(story_times)):
+      for j, _ in enumerate(story_times):
         all_stories['benchmark_' + str(i)].append('story_' + str(j))
         timing_data['benchmark_' + str(i) + '/' + 'story_' + str(j)] = (
             story_times[j])
......
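`for i in range(len(seq))` is what pylint's consider-using-enumerate (C0200) flags when the index is only used to subscript the same sequence. The CL binds the value to `_` and keeps indexing, the smallest change that satisfies the checker; the fully idiomatic form uses both results, as in this sketch with made-up data:

    times = [[1.0, 2.0], [3.0]]
    for i, story_times in enumerate(times):        # index and value together
      for j, story_time in enumerate(story_times):
        print('benchmark_%d/story_%d: %ss' % (i, j, story_time))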
......
@@ -42,8 +42,9 @@ def _GetDashboardJson(options):
   with open(options.results_file) as f:
     results = json.load(f)
   dashboard_json = {}
-  if not 'charts' in results:
+  if 'charts' not in results:
     # These are legacy results.
+    # pylint: disable=redefined-variable-type
     dashboard_json = results_dashboard.MakeListOfPoints(
         results, options.configuration_name, stripped_test_name,
         options.buildername, options.buildnumber, {},
......
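`not 'charts' in results` and `'charts' not in results` evaluate identically, but the operator form reads as a single membership test and is what the style checkers expect (pycodestyle calls this E713). For example, with a hypothetical legacy payload:

    results = {'format_version': '1.0'}  # no top-level 'charts' key
    if 'charts' not in results:
      print('legacy results; fall back to MakeListOfPoints')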
......
@@ -15,6 +15,7 @@ from telemetry.wpr import archive_info
 from core import path_util
 import fetch_benchmark_deps
+
 def NormPaths(paths):
   return sorted([os.path.normcase(p) for p in paths.splitlines()])
......
......
@@ -252,6 +252,6 @@ def _FormatHumanReadable(number):
     exponent += 1
   if digits >= 100:
     # Don't append a meaningless '.0' to an integer number.
-    digits = int(digits)
+    digits = int(digits)  # pylint: disable=redefined-variable-type
   # Exponent is now divisible by 3, between -3 and 6 inclusive: (-3, 0, 3, 6).
   return '%s%s' % (digits, metric_prefixes[exponent])
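The inline disable is needed because `digits` starts out as a float and is deliberately rebound to an int, which pylint's redefined-variable-type check flagged even when intentional. The shape it objects to, in isolation:

    digits = 123.0           # float while the value is being scaled
    if digits >= 100:
      digits = int(digits)   # deliberate rebind to int to drop the '.0'
    print('%s' % digits)     # prints '123', not '123.0'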
......
@@ -2,12 +2,13 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+import os
 from page_sets.login_helpers import google_login
 from telemetry.page import page as page_module
 from telemetry.page import shared_page_state
-import os
 def _DeterministicPerformanceCounters():
   with open(os.path.join(os.path.dirname(__file__),
......
......
@@ -30,7 +30,7 @@ class RenderingStory(page.Page):
   URL = NotImplemented
   ABSTRACT_STORY = True
   SUPPORTED_PLATFORMS = platforms.ALL_PLATFORMS
-  TAGS = None
+  TAGS = []
   PLATFORM_SPECIFIC = False
   YEAR = None
......
@@ -42,10 +42,9 @@ class RenderingStory(page.Page):
                make_javascript_deterministic=True,
                base_dir=None):
     tags = []
-    if self.TAGS:
-      for t in self.TAGS:
-        assert t in story_tags.ALL_TAGS
-        tags.append(t.name)
+    for t in self.TAGS:
+      assert t in story_tags.ALL_TAGS
+      tags.append(t.name)
     name = self.BASE_NAME + name_suffix
     if self.YEAR:
       name += ('_' + self.YEAR)
......
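Switching the default from `TAGS = None` to an empty list is what lets the constructor drop the `if self.TAGS:` guard. One caveat with the pattern: a class-level list is shared, so subclasses must replace it rather than mutate it in place. Illustrative classes, not the real story hierarchy:

    class Story(object):
      TAGS = []  # shared default; subclasses override it, never .append() to it

    class FooStory(Story):
      TAGS = ['2018', 'top_sites']

    print(FooStory.TAGS)  # ['2018', 'top_sites']
    print(Story.TAGS)     # [] -- the base default is untouched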
......
@@ -2,9 +2,10 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import py_utils
 import logging
+import py_utils
 from page_sets.system_health import system_health_story
 from page_sets.system_health import story_tags
 from page_sets.system_health import platforms
......
......
@@ -54,17 +54,16 @@ class SystemHealthStory(page.Page):
   # flow during replay. Switch this to False when recording.
   SKIP_LOGIN = True
   SUPPORTED_PLATFORMS = platforms.ALL_PLATFORMS
-  TAGS = None
+  TAGS = []
   PLATFORM_SPECIFIC = False
   def __init__(self, story_set, take_memory_measurement,
                extra_browser_args=None):
     case, group, _ = self.NAME.split(':')
     tags = []
-    if self.TAGS:
-      for t in self.TAGS:
-        assert t in story_tags.ALL_TAGS
-        tags.append(t.name)
+    for t in self.TAGS:  # pylint: disable=not-an-iterable
+      assert t in story_tags.ALL_TAGS
+      tags.append(t.name)
     super(SystemHealthStory, self).__init__(
         shared_page_state_class=_SystemHealthSharedState,
         page_set=story_set, name=self.NAME, url=self.URL, tags=tags,
......
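Unlike the rendering story, this loop keeps an inline `# pylint: disable=not-an-iterable`, presumably because pylint still infers a possible None for TAGS somewhere in the class hierarchy. An inline disable silences the named check for that statement only. A sketch of the situation with hypothetical class names:

    class Base(object):
      TAGS = []

    class Odd(Base):
      TAGS = None  # a subclass like this makes pylint infer "not an iterable"

    def collect(story):
      tags = []
      for t in story.TAGS:  # pylint: disable=not-an-iterable
        tags.append(t)
      return tags

    print(collect(Base()))  # [] -- the empty default just yields no tags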
......
@@ -8,6 +8,8 @@ import json
 import logging
 import multiprocessing as mp
 import os
+from os import listdir
+from os.path import isfile, join, basename
 import shutil
 import sys
 import tempfile
......
@@ -18,8 +20,6 @@ from core import oauth_api
 from core import path_util
 from core import upload_results_to_perf_dashboard
 from core import results_merger
-from os import listdir
-from os.path import isfile, join, basename
 path_util.AddAndroidPylibToPath()
......