Commit 0b068f91 authored by Raul Tambre, committed by Commit Bot

tools: Use Python 3 style print statements [7/9]

Initial conversion performed using '2to3 -f print .'.
Imports added and duplicate parentheses removed manually.
Manually converted files, comments and inline code that 2to3 missed.
Afterwards ran "git cl format --python" and cherry-picked the formatting changes.

There are no intended behavioural changes.

Bug: 941669
Change-Id: I3944165366e10cfdc3017b540f8563ee26751682
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1818479
Reviewed-by: Nico Weber <thakis@chromium.org>
Commit-Queue: Nico Weber <thakis@chromium.org>
Auto-Submit: Raul Tambre <raul@tambre.ee>
Cr-Commit-Position: refs/heads/master@{#699766}
parent c979302d
......@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import os
import collections
......@@ -298,18 +300,18 @@ class _BlinkPerfMeasurement(legacy_page_test.LegacyPageTest):
def PrintAndCollectTraceEventMetrics(self, trace_cpu_time_metrics, results):
unit = 'ms'
print
print()
for trace_event_name, cpu_times in trace_cpu_time_metrics.iteritems():
print 'CPU times of trace event "%s":' % trace_event_name
print('CPU times of trace event "%s":' % trace_event_name)
cpu_times_string = ', '.join(['{0:.10f}'.format(t) for t in cpu_times])
print 'values %s %s' % (cpu_times_string, unit)
print('values %s %s' % (cpu_times_string, unit))
avg = 0.0
if cpu_times:
avg = sum(cpu_times)/len(cpu_times)
print 'avg', '{0:.10f}'.format(avg), unit
print('avg', '{0:.10f}'.format(avg), unit)
results.AddMeasurement(trace_event_name, unit, cpu_times)
print
print '\n'
print()
print('\n')
def ValidateAndMeasurePage(self, page, tab, results):
trace_cpu_time_metrics = {}
......@@ -329,7 +331,7 @@ class _BlinkPerfMeasurement(legacy_page_test.LegacyPageTest):
for line in log.splitlines():
if line.startswith("FATAL: "):
print line
print(line)
continue
if not line.startswith('values '):
continue
......@@ -344,7 +346,7 @@ class _BlinkPerfMeasurement(legacy_page_test.LegacyPageTest):
break
print log
print(log)
self.PrintAndCollectTraceEventMetrics(trace_cpu_time_metrics, results)
......
......@@ -4,6 +4,8 @@
"""This tool provides a command line interface for the flakiness dashboard."""
from __future__ import print_function
import argparse
from cli_tools.flakiness_cli import analysis
......@@ -54,4 +56,4 @@ def Main():
df = analysis.pandas.concat(dfs)
df = df.sort_values('flakiness', ascending=False)
print df
print(df)
......@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import csv
import json
import logging
......@@ -25,13 +27,13 @@ def StartJobFromConfig(config_path):
raise ValueError('Invalid job config')
response = pinpoint_service.NewJob(**config)
print 'Started:', response['jobUrl']
print('Started:', response['jobUrl'])
def CheckJobStatus(job_ids):
for job_id in job_ids:
job = pinpoint_service.Job(job_id)
print '%s: %s' % (job_id, job['status'].lower())
print('%s: %s' % (job_id, job['status'].lower()))
def DownloadJobResultsAsCsv(job_ids, only_differences, output_file):
......@@ -45,10 +47,10 @@ def DownloadJobResultsAsCsv(job_ids, only_differences, output_file):
os_path = _OsPathFromJob(job)
results_file = os_path.join(
job['arguments']['benchmark'], 'perf_results.json')
print 'Fetching results for %s job %s:' % (job['status'].lower(), job_id)
print('Fetching results for %s job %s:' % (job['status'].lower(), job_id))
for change_id, isolate_hash in job_results.IterTestOutputIsolates(
job, only_differences):
print '- isolate: %s ...' % isolate_hash
print('- isolate: %s ...' % isolate_hash)
try:
histograms = isolate_service.RetrieveFile(isolate_hash, results_file)
except KeyError:
......@@ -57,7 +59,7 @@ def DownloadJobResultsAsCsv(job_ids, only_differences, output_file):
for row in histograms_df.IterRows(json.loads(histograms)):
writer.writerow((job_id, change_id, isolate_hash) + row)
num_rows += 1
print 'Wrote data from %d histograms in %s.' % (num_rows, output_file)
print('Wrote data from %d histograms in %s.' % (num_rows, output_file))
def _OsPathFromJob(job):
......
......@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import logging
try:
......@@ -47,25 +49,25 @@ def FetchAlertsData(args):
pandas_sqlite.InsertOrReplaceRecords(con, 'alerts', alerts)
num_alerts += len(alerts)
bug_ids.update(alerts['bug_id'].unique())
print '%d alerts found!' % num_alerts
print('%d alerts found!' % num_alerts)
# Get set of bugs associated with those alerts.
bug_ids.discard(0) # A bug_id of 0 means untriaged.
print '%d bugs found!' % len(bug_ids)
print('%d bugs found!' % len(bug_ids))
# Filter out bugs already in cache.
if args.use_cache:
known_bugs = set(
b for b in bug_ids if tables.bugs.Get(con, b) is not None)
if known_bugs:
print '(skipping %d bugs already in the database)' % len(known_bugs)
print('(skipping %d bugs already in the database)' % len(known_bugs))
bug_ids.difference_update(known_bugs)
# Use worker pool to fetch bug data.
total_seconds = worker_pool.Run(
'Fetching data of %d bugs: ' % len(bug_ids),
_FetchBugsWorker, args, bug_ids)
print '[%.1f bugs per second]' % (len(bug_ids) / total_seconds)
print('[%.1f bugs per second]' % (len(bug_ids) / total_seconds))
def _IterStaleTestPaths(con, test_paths):
......@@ -131,24 +133,24 @@ def FetchTimeseriesData(args):
if args.filters:
test_paths = filter(_MatchesAllFilters, test_paths)
num_found = len(test_paths)
print '%d test paths found!' % num_found
print('%d test paths found!' % num_found)
# Filter out test_paths already in cache.
if args.use_cache:
test_paths = list(_IterStaleTestPaths(con, test_paths))
num_skipped = num_found - len(test_paths)
if num_skipped:
print '(skipping %d test paths already in the database)' % num_skipped
print('(skipping %d test paths already in the database)' % num_skipped)
# Use worker pool to fetch test path data.
total_seconds = worker_pool.Run(
'Fetching data of %d timeseries: ' % len(test_paths),
_FetchTimeseriesWorker, args, test_paths)
print '[%.1f test paths per second]' % (len(test_paths) / total_seconds)
print('[%.1f test paths per second]' % (len(test_paths) / total_seconds))
if args.output_csv is not None:
print
print 'Post-processing data for study ...'
print()
print('Post-processing data for study ...')
dfs = []
with tables.DbSession(args.database_file) as con:
for test_path in test_paths:
......@@ -157,4 +159,4 @@ def FetchTimeseriesData(args):
df = studies.PostProcess(pandas.concat(dfs, ignore_index=True))
with cli_utils.OpenWrite(args.output_csv) as f:
df.to_csv(f, index=False)
print 'Wrote timeseries data to:', args.output_csv
print('Wrote timeseries data to:', args.output_csv)
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import os
from benchmarks import blink_perf
......@@ -27,6 +30,6 @@ class BlinkPerfAll(blink_perf._BlinkPerfBenchmark):
path = os.path.abspath(options.test_path)
else:
path = os.path.join(blink_perf.BLINK_PERF_BASE_DIR, options.test_path)
print
print 'Running all tests in %s' % path
print()
print('Running all tests in %s' % path)
return blink_perf.CreateStorySetFromPath(path, blink_perf.SKIPPED_FILE)
......@@ -3,8 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import division
from __future__ import print_function
import argparse
import json
......@@ -46,7 +46,7 @@ def main():
trace_size_in_mib = os.path.getsize(args.local_trace_path) / (2 ** 20)
# Bails out on trace that are too big. See crbug.com/812631 for more details.
if trace_size_in_mib > 400:
print 'Trace size is too big: %s MiB' % trace_size_in_mib
print('Trace size is too big: %s MiB' % trace_size_in_mib)
return 1
logging.warning('Starting to compute metrics on trace')
......@@ -59,8 +59,8 @@ def main():
time.time() - start))
for f in mre_result.failures:
print 'Running metric failed:'
print f.stack
print('Running metric failed:')
print(f.stack)
return 1
with tempfile.NamedTemporaryFile() as temp:
......@@ -70,12 +70,12 @@ def main():
result = histograms_to_csv.HistogramsToCsv(temp.name)
if result.returncode != 0:
print 'histograms_to_csv.HistogramsToCsv returned %d' % result.returncode
print('histograms_to_csv.HistogramsToCsv returned %d' % result.returncode)
return result.returncode
else:
with open(args.output_csv, 'w') as f:
f.write(result.stdout.rstrip())
print 'Output CSV created in file://' + args.output_csv
print('Output CSV created in file://' + args.output_csv)
if __name__ == '__main__':
......
......@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import logging
import time
......@@ -52,7 +54,7 @@ class CrosMultiTabStory(page_module.Page):
except exceptions.DevtoolsTargetCrashException:
logging.info('Navigate: devtools context lost')
if i % 10 == 0:
print 'opening tab:', i
print('opening tab:', i)
# Waiting for every tabs to be stable.
for i, url in enumerate(url_list):
......@@ -82,7 +84,7 @@ class CrosMultiTabStory(page_module.Page):
time.sleep(self._pause_after_switch)
if i % 10 == 0:
print 'switching tab:', i
print('switching tab:', i)
class CrosMultiTabTypical24Story(CrosMultiTabStory):
......
......@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import fnmatch
import imp
import logging
......@@ -217,7 +219,7 @@ class FindDependenciesCommand(command_line.OptparseCommand):
dependencies = FindDependencies(target_paths, args)
if args.zip:
ZipDependencies(target_paths, dependencies, args)
print 'Zip archive written to %s.' % args.zip
print('Zip archive written to %s.' % args.zip)
else:
print '\n'.join(sorted(dependencies))
print('\n'.join(sorted(dependencies)))
return 0
......@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import logging
import os
import time
......@@ -83,7 +85,7 @@ class BrowserMinidumpTest(tab_test_case.TabTestCase):
self.assertTrue(crash_function in sections[3])
# If we actually have a valid Crashpad stack, make sure it contains the
# crash function as well.
print sections[4][:80]
print(sections[4][:80])
if '**EMPTY**' not in sections[4]:
self.assertTrue(crash_function in sections[4])
......
......@@ -12,6 +12,8 @@ logic to inflate those into the full (unwieldy) configurations in
//testing/buildbot that are consumed by the chromium recipe code.
"""
from __future__ import print_function
import argparse
import collections
import csv
......@@ -821,10 +823,10 @@ def update_all_tests(builders_dict, file_path):
def merge_dicts(*dict_args):
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
class BenchmarkMetadata(object):
......@@ -981,7 +983,7 @@ def is_perf_benchmarks_scheduling_valid(
'//tools/perf/core/perf_data_generator.py.' % test_name)
for message in error_messages:
print >> outstream, '*', textwrap.fill(message, 70), '\n'
print('*', textwrap.fill(message, 70), '\n', file=outstream)
return not error_messages
......@@ -1289,11 +1291,11 @@ def main(args):
and validate_docs(labs_docs_file)
and is_perf_benchmarks_scheduling_valid(
perf_waterfall_file, outstream=sys.stderr)):
print 'All the perf config files are up-to-date. \\o/'
print('All the perf config files are up-to-date. \\o/')
return 0
else:
print ('Not all perf config files are up-to-date. Please run %s '
'to update them.') % sys.argv[0]
print('Not all perf config files are up-to-date. Please run %s '
'to update them.' % sys.argv[0])
return 1
else:
update_all_tests(FYI_BUILDERS, fyi_waterfall_file)
......
......@@ -10,6 +10,8 @@
# That file is now deprecated and this one is
# the new source of truth.
from __future__ import print_function
import calendar
import datetime
import httplib
......@@ -92,8 +94,8 @@ def SendResults(data, data_label, url, send_as_histograms=False,
for i in xrange(1, num_retries + 1):
try:
print 'Sending %s result of %s to dashboard (attempt %i out of %i).' % (
data_type, data_label, i, num_retries)
print('Sending %s result of %s to dashboard (attempt %i out of %i).' %
(data_type, data_label, i, num_retries))
if send_as_histograms:
_SendHistogramJson(url, dashboard_data_str, token_generator_callback)
else:
......@@ -117,9 +119,9 @@ def SendResults(data, data_label, url, send_as_histograms=False,
break
for err in errors:
print err
print(err)
print 'Time spent sending results to %s: %s' % (url, time.time() - start)
print('Time spent sending results to %s: %s' % (url, time.time() - start))
return all_data_uploaded
......@@ -267,8 +269,8 @@ def MakeDashboardJsonV1(chart_json, revision_dict, test_name, bot, buildername,
A dictionary in the format accepted by the perf dashboard.
"""
if not chart_json:
print 'Error: No json output from telemetry.'
print '@@@STEP_FAILURE@@@'
print('Error: No json output from telemetry.')
print('@@@STEP_FAILURE@@@')
point_id, versions = _RevisionNumberColumns(revision_dict, prefix='')
......
......@@ -8,6 +8,8 @@
# This file is responsbile for merging JSON test results in both the simplified
# JSON format and the Chromium JSON test results format version 3.
from __future__ import print_function
import copy
import json
import sys
......@@ -278,7 +280,7 @@ def main(files):
for f in files[1:]:
sys.stderr.write('Merging %s\n' % f)
result = merge_test_results([result, json.load(open(f))])
print json.dumps(result)
print(json.dumps(result))
return 0
......
......@@ -8,6 +8,8 @@
# with sections copied from:
# //build/scripts/slave/slave_utils.py
from __future__ import print_function
import json
import optparse
import os
......@@ -41,7 +43,7 @@ def _GetDashboardJson(options):
reference_build = 'reference' in options.name
stripped_test_name = options.name.replace('.reference', '')
results = {}
print 'Opening results file %s' % options.results_file
print('Opening results file %s' % options.results_file)
with open(options.results_file) as f:
results = json.load(f)
dashboard_json = {}
......@@ -93,8 +95,8 @@ def _GetDashboardHistogramData(options):
output_dir=output_dir,
max_bytes=max_bytes)
end_time = time.time()
print 'Duration of adding diagnostics for %s: %d seconds' % (
stripped_test_name, end_time - begin_time)
print('Duration of adding diagnostics for %s: %d seconds' %
(stripped_test_name, end_time - begin_time))
# Read all batch files from output_dir.
dashboard_jsons = []
......@@ -140,7 +142,7 @@ def main(args):
parser.error('configuration_name and results_url are required.')
if not options.perf_dashboard_machine_group:
print 'Error: Invalid perf dashboard machine group'
print('Error: Invalid perf dashboard machine group')
return 1
if not options.send_as_histograms:
......@@ -180,7 +182,7 @@ def main(args):
return 1
else:
# The upload didn't fail since there was no data to upload.
print 'Warning: No perf dashboard JSON was produced.'
print('Warning: No perf dashboard JSON was produced.')
return 0
if __name__ == '__main__':
......
......@@ -3,6 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import re
import sys
......@@ -25,7 +27,7 @@ class _Color(object):
def _PrintWithColor(text, *colors):
print ''.join(colors) + text + _Color.ENDC
print(''.join(colors) + text + _Color.ENDC)
def _ExtractBuildRevisionRange(build_page_content, build_url, test_name):
......@@ -47,10 +49,10 @@ def _ShouldSkipBuild(build_page_content, build_url):
]
for failure in _GLOBAL_FAILURE:
if failure in build_page_content:
print
print(
_PrintWithColor(
"Warning: %s has '%s'."
' Skipping this build' % (build_url, failure), _Color.WARNING)
' Skipping this build' % (build_url, failure), _Color.WARNING))
return True
return False
......@@ -65,7 +67,7 @@ def FindFirstFailureRange(build_url, test_name):
while True:
current_build_url = initial_build_url + str(build_number)
build_number -= 1
print '\rProcess %s' % current_build_url,
print('\rProcess %s' % current_build_url,)
sys.stdout.flush()
build_page_content = urllib2.urlopen(current_build_url).read()
if _ShouldSkipBuild(build_page_content, build_url):
......@@ -89,8 +91,8 @@ def Main(args):
options = parser.parse_args(args)
first_failure_revisions, first_failed_build = FindFirstFailureRange(
options.build_url, options.test_name)
print
print
print()
print()
_PrintWithColor(
'First failure range: %s - %s CLs' % (
(min(first_failure_revisions), max(first_failure_revisions)),
......
......@@ -3,6 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import collections
import json
......@@ -145,7 +147,7 @@ def _LoadTimingData(args):
data = retrieve_story_timing.FetchAverageStortyTimingData(
configurations=[builder_name], num_last_days=5)
_DumpJson(data, timing_file_path)
print 'Finish retrieve story timing data for %s' % repr(builder_name)
print('Finish retrieve story timing data for %s' % repr(builder_name))
def _GenerateShardMap(
......@@ -176,10 +178,10 @@ def _PromptWarning():
'perf-sheriffs@chromium.org and put a warning about expected '
'false regressions in your CL '
'description')
print textwrap.fill(message, 70), '\n'
print(textwrap.fill(message, 70), '\n')
answer = raw_input("Enter 'y' to continue: ")
if answer != 'y':
print 'Abort updating shard maps for benchmarks on perf waterfall'
print('Abort updating shard maps for benchmarks on perf waterfall')
sys.exit(0)
......@@ -197,7 +199,7 @@ def _UpdateShardsForBuilders(args):
_PromptWarning()
if not args.use_old_timing_data:
print 'Update shards timing data. May take a while...'
print('Update shards timing data. May take a while...')
load_timing_args = []
for b in builders:
load_timing_args.append((b.name, b.timing_file_path))
......@@ -207,7 +209,7 @@ def _UpdateShardsForBuilders(args):
for b in builders:
_GenerateShardMap(
b, b.num_shards, b.shards_map_file_path, args.debug, benchmark=None)
print 'Updated sharding map for %s' % repr(b.name)
print('Updated sharding map for %s' % repr(b.name))
def _CreateShardMapForBenchmark(args):
......@@ -250,7 +252,7 @@ def _DescheduleBenchmark(args):
del benchmarks[benchmark]
os.remove(b.shards_map_file_path)
_DumpJson(shards_map, b.shards_map_file_path)
print 'done.'
print('done.')
def _ParseBenchmarks(shard_map_path):
......@@ -313,7 +315,7 @@ def _ValidateShardMaps(args):
'UNSCHEDULED_{benchmark}'.format(benchmark=benchmark))
for error in errors:
print >> sys.stderr, '*', textwrap.fill(error, 70), '\n'
print('*', textwrap.fill(error, 70), '\n', file=sys.stderr)
if errors:
return 1
return 0
......
......@@ -2,6 +2,9 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import optparse
import sys
......@@ -31,7 +34,7 @@ def main(args):
benchmarks = benchmark_finders.GetOfficialBenchmarks()
for b in benchmarks:
print '{:<60}'.format(b.Name())
print('{:<60}'.format(b.Name()))
if __name__ == '__main__':
......
......@@ -3,6 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import collections
import json
......@@ -195,7 +197,7 @@ def _handle_perf_json_test_results(
# Output is null meaning the test didn't produce any results.
# Want to output an error and continue loading the rest of the
# test results.
print 'No results produced for %s, skipping upload' % directory
print('No results produced for %s, skipping upload' % directory)
continue
if json_results.get('version') == 3:
# Non-telemetry tests don't have written json results but
......@@ -216,7 +218,8 @@ def _handle_perf_json_test_results(
if not enabled:
# We don't upload disabled benchmarks or tests that are run
# as a smoke test
print 'Benchmark %s ran no tests on at least one shard' % benchmark_name
print(
'Benchmark %s ran no tests on at least one shard' % benchmark_name)
continue
benchmark_enabled_map[benchmark_name] = True
......@@ -432,8 +435,8 @@ def _upload_individual(
results_filename = os.path.join(directories[0], 'perf_results.json')
results_size_in_mib = os.path.getsize(results_filename) / (2 ** 20)
print 'Uploading perf results from %s benchmark (size %s Mib)' % (
benchmark_name, results_size_in_mib)
print('Uploading perf results from %s benchmark (size %s Mib)' %
(benchmark_name, results_size_in_mib))
with open(output_json_file, 'w') as oj:
upload_return_code = _upload_perf_results(
results_filename,
......@@ -566,14 +569,14 @@ def _write_perf_data_to_logfile(benchmark_name, output_file,
json.dump(results, output_json_file,
indent=4, separators=(',', ': '))
except ValueError:
print ('Error parsing perf results JSON for benchmark %s' %
benchmark_name)
print('Error parsing perf results JSON for benchmark %s' %
benchmark_name)
output_json_file.close()
viewer_url = output_json_file.get_viewer_url()
else:
print ("Perf results JSON file doesn't exist for benchmark %s" %
benchmark_name)
print("Perf results JSON file doesn't exist for benchmark %s" %
benchmark_name)
base_benchmark_name = benchmark_name.replace('.reference', '')
......@@ -599,11 +602,12 @@ def _write_perf_data_to_logfile(benchmark_name, output_file,
def print_duration(step, start, end):
print 'Duration of %s: %d seconds' % (step, end-start)
print('Duration of %s: %d seconds' % (step, end - start))
def main():
""" See collect_task.collect_task for more on the merge script API. """
print sys.argv
print(sys.argv)
parser = argparse.ArgumentParser()
# configuration-name (previously perf-id) is the name of bot the tests run on
# For example, buildbot-test is the name of the android-go-perf bot
......
......@@ -15,6 +15,8 @@ Example:
tools/resources/find_unused_resouces.py chrome/browser/browser_resources.grd
"""
from __future__ import print_function
__author__ = 'jamescook@chromium.org (James Cook)'
......@@ -87,7 +89,7 @@ def GetUnusedResources(grd_filepath):
unused_resources = []
grd_file = open(grd_filepath, 'r')
grd_data = grd_file.read()
print 'Checking:'
print('Checking:')
# Match the resource id and file path out of substrings like:
# ...name="IDR_FOO_123" file="common/foo.png"...
# by matching between the quotation marks.
......@@ -113,7 +115,7 @@ def GetUnusedResources(grd_filepath):
searched.add(key)
# Print progress as we go along.
print resource_id
print(resource_id)
# Ensure the resource isn't used anywhere by checking both for the resource
# id (which should appear in C++ code) and the raw filename (in case the
......@@ -124,7 +126,7 @@ def GetUnusedResources(grd_filepath):
# other matching files, it is unused.
if len(matching_files) == 1:
# Give the user some happy news.
print 'Unused!'
print('Unused!')
unused_resources.append([resource_id, filepath])
return unused_resources
......@@ -157,47 +159,47 @@ def GetScaleDirectories(resources_path):
def main():
# The script requires exactly one parameter, the .grd file path.
if len(sys.argv) != 2:
print 'Usage: tools/resources/find_unused_resources.py <path/to/grd>'
print('Usage: tools/resources/find_unused_resources.py <path/to/grd>')
sys.exit(1)
grd_filepath = sys.argv[1]
# Try to ensure we are in a source checkout.
current_dir = os.getcwd()
if os.path.basename(current_dir) != 'src':
print 'Script must be run in your "src" directory.'
print('Script must be run in your "src" directory.')
sys.exit(1)
# We require a git checkout to use git grep.
if not os.path.exists(current_dir + '/.git'):
print 'You must use a git checkout for this script to run.'
print current_dir + '/.git', 'not found.'
print('You must use a git checkout for this script to run.')
print(current_dir + '/.git', 'not found.')
sys.exit(1)
# Look up the scale-factor directories.
resources_path = os.path.dirname(grd_filepath)
scale_directories = GetScaleDirectories(resources_path)
if not scale_directories:
print 'No scale directories (like "default_100_percent") found.'
print('No scale directories (like "default_100_percent") found.')
sys.exit(1)
# |unused_resources| stores pairs of [resource_id, filepath] for resource ids
# that are not referenced in the code.
unused_resources = GetUnusedResources(grd_filepath)
if not unused_resources:
print 'All resources are used.'
print('All resources are used.')
sys.exit(0)
# Dump our output for the user.
print
print 'Unused resource ids:'
print()
print('Unused resource ids:')
for resource_id, filepath in unused_resources:
print resource_id
print(resource_id)
# Print a list of 'git rm' command lines to remove unused assets.
print
print 'Unused files:'
print()
print('Unused files:')
for resource_id, filepath in unused_resources:
for directory in scale_directories:
print 'git rm ' + os.path.join(directory, filepath)
print('git rm ' + os.path.join(directory, filepath))
if __name__ == '__main__':
......
......@@ -3,6 +3,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import os
import re
......@@ -78,16 +79,16 @@ def Main():
resource_id = int(match.group('resource_id'))
resource_name = match.group('resource_name')
if resource_id in resource_id_to_name_file_map:
print 'Duplicate:', resource_id
print (resource_name, f)
print resource_id_to_name_file_map[resource_id]
print('Duplicate:', resource_id)
print(resource_name, f)
print(resource_id_to_name_file_map[resource_id])
raise
resource_id_to_name_file_map[resource_id] = (resource_name, f)
unused_resources = GetResourceIdsFromRepackMessage(sys.stdin)
for resource_id in unused_resources:
if resource_id not in resource_id_to_name_file_map:
print 'WARNING: Unknown resource id', resource_id
print('WARNING: Unknown resource id', resource_id)
continue
(resource_name, filename) = resource_id_to_name_file_map[resource_id]
sys.stdout.write('%d: %s in %s\n' % (resource_id, resource_name, filename))
......
......@@ -10,6 +10,8 @@ Example:
tools/resources/list_unused_grit_header.py ui/strings/ui_strings.grd chrome ui
"""
from __future__ import print_function
import os
import sys
import xml.etree.ElementTree
......@@ -20,7 +22,7 @@ IF_ELSE_THEN_TAGS = ('if', 'else', 'then')
def Usage(prog_name):
print prog_name, 'GRD_FILE PATHS_TO_SCAN'
print(prog_name, 'GRD_FILE PATHS_TO_SCAN')
def FilterResourceIds(resource_id):
......@@ -197,13 +199,13 @@ def main(argv):
paths_to_scan = argv[2:]
for f in paths_to_scan:
if not os.path.exists(f):
print 'Error: %s does not exist' % f
print('Error: %s does not exist' % f)
return 1
tree = xml.etree.ElementTree.parse(grd_file)
grit_header = GetOutputHeaderFile(tree)
if not grit_header:
print 'Error: %s does not generate any output headers.' % grd_file
print('Error: %s does not generate any output headers.' % grd_file)
return 1
resources = GetResourcesForGrdFile(tree, grd_file)
......@@ -222,10 +224,10 @@ def main(argv):
if not NeedsGritInclude(grit_header, resources, path_to_scan):
files_with_unneeded_grit_includes.append(path_to_scan)
else:
print 'Warning: Skipping %s' % path_to_scan
print('Warning: Skipping %s' % path_to_scan)
if files_with_unneeded_grit_includes:
print '\n'.join(files_with_unneeded_grit_includes)
print('\n'.join(files_with_unneeded_grit_includes))
return 2
return 0
......
......@@ -3,6 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
def Run(os_path=None, args=None):
_HERE_PATH = os_path.dirname(os_path.realpath(__file__))
......@@ -23,4 +25,4 @@ def Run(os_path=None, args=None):
if __name__ == '__main__':
import os
import sys
print Run(os_path=os.path, args=sys.argv[1:])
print(Run(os_path=os.path, args=sys.argv[1:]))
......@@ -8,6 +8,8 @@ it for review so that the CL will land automatically if it passes the
commit-queue's checks.
"""
from __future__ import print_function
import logging
import optparse
import os
......@@ -23,7 +25,7 @@ import subprocess2
def die_with_error(msg):
print >> sys.stderr, msg
print(msg, file=sys.stderr)
sys.exit(1)
......@@ -52,7 +54,7 @@ class PrintSubprocess(object):
"""Wrapper for subprocess2 which prints out every command."""
def __getattr__(self, attr):
def _run_subprocess2(cmd, *args, **kwargs):
print cmd
print(cmd)
sys.stdout.flush()
return getattr(subprocess2, attr)(cmd, *args, **kwargs)
return _run_subprocess2
......@@ -130,7 +132,7 @@ def main():
try:
old_rev = process_deps(os.path.join(root_dir, 'DEPS'), project, new_rev,
options.dry_run)
print '%s roll %s:%s' % (project.title(), old_rev, new_rev)
print('%s roll %s:%s' % (project.title(), old_rev, new_rev))
review_field = 'TBR' if options.commit else 'R'
commit_msg = options.message or '%s roll %s:%s\n' % (project.title(),
......@@ -138,7 +140,7 @@ def main():
commit_msg += '\n%s=%s\n' % (review_field, options.reviewers)
if options.dry_run:
print 'Commit message: ' + commit_msg
print('Commit message: ' + commit_msg)
return 0
prnt_subprocess.check_output(['git', 'commit', '-m', commit_msg, 'DEPS'])
......
......@@ -5,6 +5,8 @@
"""Make sure all of the per-file *_messages.h OWNERS are consistent"""
from __future__ import print_function
import os
import re
import sys
......@@ -47,7 +49,8 @@ def print_missing_owners(owner_dict, owner_set):
for key in owner_dict:
for owner in owner_set:
if not owner in owner_dict[key]:
print key + " is missing " + owner
print(key + " is missing " + owner)
if '__main__' == __name__:
sys.exit(main())
......@@ -9,6 +9,8 @@ Either use the command-line interface (see --help) or directly call make_case
from Python shell (see make_case documentation).
"""
from __future__ import print_function
import argparse
import codecs
import doctest
......
......@@ -7,6 +7,8 @@
HeapProfilerDumpLiveObjects.
"""
from __future__ import print_function
import os
import re
import subprocess
......@@ -14,10 +16,11 @@ import sys
import tempfile
def usage():
print """\
print("""\
Usage:
tools/tcmalloc/print-live-objects.py out/Debug/chrome leaks.dmp
"""
""")
def LoadDump(dump_file):
result = []
......@@ -29,7 +32,7 @@ def LoadDump(dump_file):
line_no = line_no + 1
matches = leakfmt.match(line)
if not matches:
print "%s: could not parse line %d, skipping" % (dump_file, line_no)
print("%s: could not parse line %d, skipping" % (dump_file, line_no))
else:
trace = { "size": int(matches.group(1)),
"address": matches.group(2),
......@@ -67,7 +70,7 @@ def Symbolize(binary, traces):
def Main(argv):
if sys.platform != 'linux2':
print 'print-live-objects.py requires addr2line only present on Linux.'
print('print-live-objects.py requires addr2line only present on Linux.')
sys.exit(1)
if len(argv) != 3:
......@@ -78,13 +81,13 @@ def Main(argv):
Symbolize(argv[1], traces)
if not traces:
print "No leaks found!"
print("No leaks found!")
for trace in sorted(traces, key=lambda x: -x["size"]):
print "Leak of %d bytes at address %s" % (trace["size"], trace["address"])
print("Leak of %d bytes at address %s" % (trace["size"], trace["address"]))
for frame in trace["frames"]:
print " %s (%s)" % (frame["name"], frame["location"])
print ""
print(" %s (%s)" % (frame["name"], frame["location"]))
print("")
if __name__ == '__main__':
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment