Commit 94a5492a authored by Brian Sheedy, committed by Commit Bot

Fix GPU PyLint Usage

Fixes how //content/test/gpu runs PyLint during presubmit. Previously, it
relied on lint_unittest.py being picked up alongside the other tests in
//content/test/gpu/gpu_tests, which had the downside that PyLint did not
cover the other directories under //content/test/gpu. Instead, we now use
the canned PyLint check that nearly all of Chromium uses, wired up as shown
in the sketch below.
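
A minimal sketch of that pattern (the names mirror the PRESUBMIT.py change in
the diff below; this is not the exact file contents):

  def CommonChecks(input_api, output_api):
    results = []
    # Canned check shared across Chromium; it returns a list of commands
    # that RunTests executes.
    pylint_checks = input_api.canned_checks.GetPylint(input_api, output_api)
    results.extend(input_api.RunTests(pylint_checks))
    return results

  def CheckChangeOnUpload(input_api, output_api):
    return CommonChecks(input_api, output_api)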

Also fixes all the PyLint issues that were found as a result of this
switch.
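
Most of those fixes fall into a few recurring patterns: isinstance() instead
of type() comparisons, package-qualified imports, narrowed bare except
clauses, and no mutable default arguments. As an illustration of the
mutable-default fix, the usual idiom looks like the sketch below (the
function and argument names here are made up for the example, not taken
from the change):

  def SummarizeBuilds(builds, bot_allowlist=None):
    # A fresh list is created per call; a mutable default ([]) would be
    # shared across every call to the function, which is what PyLint's
    # dangerous-default-value warning guards against.
    bot_allowlist = bot_allowlist or []
    return [b for b in builds
            if not bot_allowlist or b.get('bot') in bot_allowlist]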

Bug: 1133587
Change-Id: Idd7ce9f5aad3e1d02c8d7dbcbd7eeb131fdff725
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2508952
Commit-Queue: Brian Sheedy <bsheedy@chromium.org>
Commit-Queue: Yuly Novikov <ynovikov@chromium.org>
Auto-Submit: Brian Sheedy <bsheedy@chromium.org>
Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
Cr-Commit-Position: refs/heads/master@{#822486}
parent 3b682c00
@@ -9,7 +9,9 @@ for more details about the presubmit API built into depot_tools.
 """


 def CommonChecks(input_api, output_api):
-  commands = [
+  results = []
+  gpu_tests = [
       input_api.Command(
           name='run_content_test_gpu_unittests',
           cmd=[input_api.python_executable, 'run_unittests.py', 'gpu_tests'],
@@ -24,7 +26,13 @@ def CommonChecks(input_api, output_api):
           kwargs={},
           message=output_api.PresubmitError)
   ]
-  return input_api.RunTests(commands)
+  results.extend(input_api.RunTests(gpu_tests))
+
+  pylint_checks = input_api.canned_checks.GetPylint(input_api, output_api)
+  results.extend(input_api.RunTests(pylint_checks))
+
+  return results


 def CheckChangeOnUpload(input_api, output_api):
   return CommonChecks(input_api, output_api)
...@@ -171,17 +171,16 @@ def GetTestSuiteFromVariant(variant): ...@@ -171,17 +171,16 @@ def GetTestSuiteFromVariant(variant):
""" """
suite_name = variant.get('test_suite', 'default_suite') suite_name = variant.get('test_suite', 'default_suite')
gpu = variant.get('gpu') gpu = variant.get('gpu')
os = variant.get('os') os_dimension = variant.get('os')
gpu = ConvertGpuToVendorName(gpu) gpu = ConvertGpuToVendorName(gpu)
return '%s on %s on %s' % (suite_name, gpu, os) return '%s on %s on %s' % (suite_name, gpu, os_dimension)
def GetUnexpectedPasses(builds, args): def GetUnexpectedPasses(builds):
"""Gets the unexpected test passes from the given builds. """Gets the unexpected test passes from the given builds.
Args: Args:
builds: The output of GetBuildbucketIds(). builds: The output of GetBuildbucketIds().
args: The parsed arguments from an argparse.ArgumentParser.
Returns: Returns:
A dict in the following form: A dict in the following form:
...@@ -284,7 +283,7 @@ def ParseArgs(): ...@@ -284,7 +283,7 @@ def ParseArgs():
def main(): def main():
args = ParseArgs() args = ParseArgs()
builds = GetBuildbucketIds(args) builds = GetBuildbucketIds(args)
unexpected_passes = GetUnexpectedPasses(builds, args) unexpected_passes = GetUnexpectedPasses(builds)
PrintUnexpectedPasses(unexpected_passes, args) PrintUnexpectedPasses(unexpected_passes, args)
......
@@ -15,7 +15,7 @@ import subprocess
 import sys
 import tempfile

-import parameter_set
+from gold_inexact_matching import parameter_set

 CHROMIUM_SRC_DIR = os.path.realpath(
     os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
@@ -4,8 +4,8 @@
 import logging

-import base_parameter_optimizer as base_optimizer
-import parameter_set
+import gold_inexact_matching.base_parameter_optimizer as base_optimizer
+from gold_inexact_matching import parameter_set


 class BinarySearchParameterOptimizer(base_optimizer.BaseParameterOptimizer):
@@ -50,8 +50,7 @@ class BinarySearchParameterOptimizer(base_optimizer.BaseParameterOptimizer):
     while (abs(known_good - known_bad)) > 1:
       midpoint = (known_good + known_bad) / 2
       parameters = self._CreateParameterSet(midpoint)
-      success, num_pixels, max_diff = self._RunComparisonForParameters(
-          parameters)
+      success, _, _ = self._RunComparisonForParameters(parameters)
       if success:
         logging.info('Found good parameters %s', parameters)
         known_good = midpoint
@@ -4,8 +4,9 @@
 import logging

-import iterative_parameter_optimizer as iterative_optimizer
-import parameter_set
+import gold_inexact_matching.iterative_parameter_optimizer\
+    as iterative_optimizer
+from gold_inexact_matching import parameter_set


 class BruteForceParameterOptimizer(
@@ -7,34 +7,36 @@ import argparse
 import logging
 import sys

-import base_parameter_optimizer as base_optimizer
-import binary_search_parameter_optimizer as binary_optimizer
-import brute_force_parameter_optimizer as brute_optimizer
-import local_minima_parameter_optimizer as local_optimizer
-import optimizer_set
-
-"""Script to find suitable values for Skia Gold inexact matching.
-
-Inexact matching in Skia Gold has three tunable parameters:
-1. The max number of differing pixels.
-2. The max delta for any single pixel.
-3. The threshold for a Sobel filter.
-
-Ideally, we use the following hierarchy of comparison approaches:
-1. Exact matching.
-2. Exact matching after a Sobel filter is applied.
-3. Fuzzy matching after a Sobel filter is applied.
-
-However, there may be cases where only using a Sobel filter requires masking a
-very large amount of the image compared to Sobel + very conservative fuzzy
-matching.
-
-Even if such cases are not hit, the process of determining good values for the
-parameters is quite tedious since it requires downloading images from Gold and
-manually running multiple calls to `goldctl match`.
-
-This script attempts to remedy both issues by handling all of the trial and
-error and suggesting potential parameter values for the user to choose from.
-"""
+import gold_inexact_matching.base_parameter_optimizer as base_optimizer
+import gold_inexact_matching.binary_search_parameter_optimizer\
+    as binary_optimizer
+import gold_inexact_matching.brute_force_parameter_optimizer as brute_optimizer
+import gold_inexact_matching.local_minima_parameter_optimizer\
+    as local_optimizer
+from gold_inexact_matching import optimizer_set
+
+# Script to find suitable values for Skia Gold inexact matching.
+#
+# Inexact matching in Skia Gold has three tunable parameters:
+# 1. The max number of differing pixels.
+# 2. The max delta for any single pixel.
+# 3. The threshold for a Sobel filter.
+#
+# Ideally, we use the following hierarchy of comparison approaches:
+# 1. Exact matching.
+# 2. Exact matching after a Sobel filter is applied.
+# 3. Fuzzy matching after a Sobel filter is applied.
+#
+# However, there may be cases where only using a Sobel filter requires masking a
+# very large amount of the image compared to Sobel + very conservative fuzzy
+# matching.
+#
+# Even if such cases are not hit, the process of determining good values for the
+# parameters is quite tedious since it requires downloading images from Gold and
+# manually running multiple calls to `goldctl match`.
+#
+# This script attempts to remedy both issues by handling all of the trial and
+# error and suggesting potential parameter values for the user to choose from.


 def CreateArgumentParser():
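
The per-parameter search these optimizers perform conceptually reduces to the
standalone sketch below (simplified, assumed code, not taken from the script;
FindMinimalPassingThreshold and comparison_passes are hypothetical names). The
comparison_passes callback stands in for running `goldctl match` with a
candidate value and is assumed to be monotonic in the threshold.

  def FindMinimalPassingThreshold(comparison_passes, known_bad, known_good):
    # known_bad is a value that fails the comparison, known_good one that
    # passes it; the loop narrows the gap between them to a single step.
    while abs(known_good - known_bad) > 1:
      midpoint = (known_good + known_bad) // 2
      if comparison_passes(midpoint):
        known_good = midpoint
      else:
        known_bad = midpoint
    return known_good

For example, FindMinimalPassingThreshold(lambda t: t >= 37, 0, 255) returns 37.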
@@ -2,9 +2,12 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-import base_parameter_optimizer as base_optimizer
+import gold_inexact_matching.base_parameter_optimizer as base_optimizer


+# This is an abstract class itself, so it's fine that it doesn't implement
+# all of BaseParameterOptimizer's abstract methods.
+# pylint: disable=abstract-method
 class IterativeParameterOptimizer(base_optimizer.BaseParameterOptimizer):
   """Abstract ParameterOptimizer class for running an iterative algorithm."""
@@ -49,3 +52,4 @@ class IterativeParameterOptimizer(base_optimizer.BaseParameterOptimizer):
     assert self._args.max_diff_step >= self.MIN_MAX_DIFF_STEP
     assert self._args.delta_threshold_step >= self.MIN_DELTA_THRESHOLD_STEP
+#pylint: enable=abstract-method
@@ -7,8 +7,9 @@ import itertools
 import logging
 import sys

-import iterative_parameter_optimizer as iterative_optimizer
-import parameter_set
+import gold_inexact_matching.iterative_parameter_optimizer\
+    as iterative_optimizer
+from gold_inexact_matching import parameter_set


 class LocalMinimaParameterOptimizer(
@@ -138,11 +138,11 @@ class InfoCollectionTest(gpu_integration_test.GpuIntegrationTest):
   @staticmethod
   def _ValueToStr(value):
-    if type(value) is str:
+    if isinstance(value, str):
       return value
-    if type(value) is unicode:
+    if isinstance(value, unicode):
       return str(value)
-    if type(value) is bool:
+    if isinstance(value, bool):
       return 'supported' if value else 'unsupported'
     assert False
-# Copyright 2016 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import unittest
-import os
-
-from gpu_tests import path_util
-
-path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'third_party',
-                               'logilab')
-path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'third_party',
-                               'logilab', 'logilab')
-path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'third_party',
-                               'pylint')
-
-try:
-  from pylint import lint
-except ImportError:
-  lint = None
-
-_RC_FILE = os.path.join(path_util.GetGpuTestDir(), 'pylintrc')
-
-
-def LintCheckPassed(directory):
-  args = [directory, '--rcfile=%s' % _RC_FILE]
-  try:
-    assert lint, 'pylint module cannot be found'
-    lint.Run(args)
-    assert False, (
-        'This should not be reached as lint.Run always raise SystemExit')
-  except SystemExit as err:
-    if err.code == 0:
-      return True
-    return False
-
-
-class LintTest(unittest.TestCase):
-
-  def testPassingPylintCheckForGpuTestsDir(self):
-    self.assertTrue(LintCheckPassed(os.path.abspath(os.path.dirname(__file__))))
@@ -396,7 +396,7 @@ class PowerMeasurementIntegrationTest(gpu_integration_test.GpuIntegrationTest):
   @staticmethod
   def _AppendResults(results_sum, results):
-    assert type(results_sum) is dict and type(results) is dict
+    assert isinstance(results_sum, dict) and isinstance(results, dict)
     assert results
     first_append = not results_sum
     for key, value in results.items():
@@ -404,7 +404,7 @@ class PowerMeasurementIntegrationTest(gpu_integration_test.GpuIntegrationTest):
         results_sum[key] = [value]
       else:
         assert key in results_sum
-        assert type(results_sum[key]) is list
+        assert isinstance(results_sum[key], list)
         results_sum[key].append(value)
     return results_sum
@@ -10,10 +10,8 @@ conformance_relcomps = ('third_party', 'webgl', 'src', 'sdk', 'tests')
 extensions_relcomps = ('content', 'test', 'data', 'gpu')

-# pylint: disable=star-args
 conformance_relpath = os.path.join(*conformance_relcomps)
 extensions_relpath = os.path.join(*extensions_relcomps)
-# pylint: enable=star-args

 conformance_path = os.path.join(path_util.GetChromiumSrcDir(),
                                 conformance_relpath)
@@ -52,6 +52,7 @@ import time
 try:
   from selenium import webdriver
+  from selenium.common import exceptions
 except ImportError as error:
   logging.error(
       "This script needs selenium and appropriate web drivers to be installed.")
@@ -169,7 +170,7 @@ def CreateWebDriver(browser, user_data_dir, url, fullscreen,
     actions.move_to_element(video_el)
     actions.double_click(video_el)
     actions.perform()
-  except:
+  except exceptions.InvalidSelectorException:
     logging.warning('Could not locate video element to make fullscreen')
   return driver
@@ -282,9 +283,9 @@ def main(argv):
     logging.error("Can't locate file at %s",
                   options.extra_browser_args_filename)
   else:
-    with open(options.extra_browser_args_filename, 'r') as file:
-      extra_browser_args.extend(file.read().split())
-      file.close()
+    with open(options.extra_browser_args_filename, 'r') as f:
+      extra_browser_args.extend(f.read().split())
+      f.close()

   for run in range(1, options.repeat + 1):
     logfile = ipg_utils.GenerateIPGLogFilename(log_prefix, options.logdir, run,
@@ -88,17 +88,20 @@ def DetermineResultsFromMultipleRuns(measurements, repeat_strategy):
 def ProcessJsonData(jsons,
-                    measurement_names=_MEASUREMENTS,
+                    measurement_names=None,
                     per_bot=False,
                     repeat_strategy=RepeatStrategy.COUNT_MINIMUM,
-                    bot_allowlist=[],
-                    bot_blocklist=[]):
+                    bot_allowlist=None,
+                    bot_blocklist=None):
+  measurement_names = measurement_names or _MEASUREMENTS
+  bot_allowlist = bot_allowlist or []
+  bot_blocklist = bot_blocklist or []
   min_build = None
   max_build = None
   results = {}
   bots = []
-  for json in jsons:
-    builds = json.get('builds', [])
+  for j in jsons:
+    builds = j.get('builds', [])
     for build in builds:
       build_number = build.get('number', -1)
       if build_number > 0:
@@ -192,8 +195,8 @@ def MarkExperiment(description):
 def GetBotBuilds(jsons, bot_name):
   build_numbers = []
-  for json in jsons:
-    builds = json.get('builds', [])
+  for j in jsons:
+    builds = j.get('builds', [])
     for build in builds:
       build_number = build.get('number', -1)
       if build_number > 0:
@@ -215,8 +218,8 @@ def GetOutliers(data, variation_threshold):
 def FindBuild(jsons, selected_bots, test_name, result):
-  for json in jsons:
-    builds = json.get('builds', [])
+  for j in jsons:
+    builds = j.get('builds', [])
     for build in builds:
       build_number = build.get('number', -1)
       if build_number < 0:
@@ -299,8 +302,9 @@ def RunExperiment_BadBots(jsons,
 def RunExperiment_GoodBots(jsons,
-                           bad_bots=[],
+                           bad_bots=None,
                            repeat_strategy=RepeatStrategy.COUNT_MINIMUM):
+  bad_bots = bad_bots or []
   MIN_RUNS_PER_BOT = 8
   STDEV_GOOD_BOT_THRESHOLD = 0.2
   GOOD_BOT_RANGE_PERC = 0.08
@@ -73,8 +73,8 @@ def main():
         lambda package_name: os.path.join(
             web_engine_dir, package_name, 'ids.txt'),
         package_names)
-    symbolizer = RunSymbolizer(listener.stdout, open(args.system_log_file,
-                                                     'w'), build_ids_paths)
+    RunSymbolizer(listener.stdout, open(args.system_log_file, 'w'),
+                  build_ids_paths)

     # Keep the Amber repository live while the test runs.
     with target.GetAmberRepo():
@@ -38,12 +38,14 @@ import json
 import re
 import subprocess

+# pylint: disable=line-too-long
 # Schemas:
 # - go/buildbucket-bq and go/buildbucket-proto/build.proto
 # - go/luci/cq/bq and
 #   https://source.chromium.org/chromium/infra/infra/+/master:go/src/go.chromium.org/luci/cv/api/bigquery/v1/attempt.proto
 #
 # Original author: maruel@
+# pylint: enable=line-too-long

 QUERY_TEMPLATE = """\
 WITH cq_builds AS (
   SELECT