Commit 7880dbfb authored by Wenbin Zhang, committed by Commit Bot

[benchmarking] add timestamps for logging in process_perf_results

The logs printed by process_perf_results.py do not have timestamps. This
CL adds timestamps by updating the Python logging configuration (a minimal
sketch of the pattern follows the commit metadata below).

Bug: chromium:1035935
Change-Id: I824dcc4c594904ff7c2437248679df9e503052ec
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1985219
Commit-Queue: Wenbin Zhang <wenbinzhang@google.com>
Reviewed-by: Caleb Rouleau <crouleau@chromium.org>
Cr-Commit-Position: refs/heads/master@{#728788}
parent 47ce2c38
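
For context, the whole change reduces to one pattern: configure the root logger once, at import time, with a format that carries a timestamp, then route every message through logging instead of print. Below is a minimal, self-contained sketch of that pattern, reusing the format string the CL adds; the function name, benchmark name, and the output line in the trailing comment are illustrative, not taken from the script or a real run.

import logging

# Same format the CL configures: level, timestamp, pid, module.function:line.
logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
           ' %(module)s.%(funcName)s:%(lineno)d %(message)s')


def upload_step():
  # Where the script previously used print(), records now carry a timestamp.
  logging.info('Uploading perf results from %s benchmark', 'dummy_benchmark')


if __name__ == '__main__':
  upload_step()
  # Illustrative output (timestamp, pid, module and line depend on the run):
  # (INFO) 2020-01-07 12:34:56,789 pid=1234 sketch.upload_step:12 Uploading
  # perf results from dummy_benchmark benchmark
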
@@ -17,6 +17,11 @@ import tempfile
 import time
 import uuid
 
+logging.basicConfig(
+    level=logging.INFO,
+    format='(%(levelname)s) %(asctime)s pid=%(process)d'
+    ' %(module)s.%(funcName)s:%(lineno)d %(message)s')
+
 from core import path_util
 from core import upload_results_to_perf_dashboard
 from core import results_merger
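
One standard-library detail worth noting about where this call sits (documented logging behavior, not something the CL states): basicConfig only installs a handler if the root logger has none, so the first call wins and later calls are ignored unless force=True is passed (Python 3.8+). Placing it at import time, before the core modules are imported, therefore pins this format for the whole script. A tiny sketch of that behavior:

import logging

# The first basicConfig call installs a handler and fixes the format; the
# second call is a no-op because the root logger already has a handler.
logging.basicConfig(format='first: %(message)s')
logging.basicConfig(format='second: %(message)s')  # ignored

logging.warning('hello')  # prints "first: hello"
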
@@ -172,7 +177,8 @@ def _handle_perf_json_test_results(
       # Output is null meaning the test didn't produce any results.
       # Want to output an error and continue loading the rest of the
       # test results.
-      print('No results produced for %s, skipping upload' % directory)
+      logging.warning(
+          'No results produced for %s, skipping upload' % directory)
       continue
     if json_results.get('version') == 3:
       # Non-telemetry tests don't have written json results but
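
An aside on the API used above (the CL keeps the existing % expressions; this is just the alternative that the logging functions support): the message arguments can be passed separately, so interpolation is deferred until the record is actually emitted. The directory value below is hypothetical.

import logging

directory = '/tmp/perf_results'  # hypothetical value for illustration

# Equivalent to logging.warning('No results produced for %s, ...' % directory),
# but the string is only built if the record passes the level filter.
logging.warning('No results produced for %s, skipping upload', directory)
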
@@ -193,7 +199,7 @@ def _handle_perf_json_test_results(
     if not enabled:
       # We don't upload disabled benchmarks or tests that are run
       # as a smoke test
-      print(
+      logging.info(
           'Benchmark %s ran no tests on at least one shard' % benchmark_name)
       continue
     benchmark_enabled_map[benchmark_name] = True
@@ -410,7 +416,7 @@ def _upload_individual(
   results_filename = os.path.join(directories[0], 'perf_results.json')
   results_size_in_mib = os.path.getsize(results_filename) / (2 ** 20)
-  print('Uploading perf results from %s benchmark (size %s Mib)' %
-        (benchmark_name, results_size_in_mib))
+  logging.info('Uploading perf results from %s benchmark (size %s Mib)' %
+               (benchmark_name, results_size_in_mib))
   with open(output_json_file, 'w') as oj:
     upload_return_code = _upload_perf_results(
@@ -548,7 +554,7 @@ def _write_perf_data_to_logfile(benchmark_name, output_file,
     try:
       results = json.load(f)
     except ValueError:
-      print('Error parsing perf results JSON for benchmark %s' %
-            benchmark_name)
+      logging.error('Error parsing perf results JSON for benchmark %s' %
+                    benchmark_name)
   if results:
     try:
@@ -556,12 +562,12 @@ def _write_perf_data_to_logfile(benchmark_name, output_file,
         json.dump(results, output_json_file,
                   indent=4, separators=(',', ': '))
       except ValueError as e:
-        print('ValueError: "%s" while dumping output to logdog' % e)
+        logging.error('ValueError: "%s" while dumping output to logdog' % e)
       finally:
         output_json_file.close()
       viewer_url = output_json_file.get_viewer_url()
   else:
-    print("Perf results JSON file doesn't exist for benchmark %s" %
-          benchmark_name)
+    logging.warning("Perf results JSON file doesn't exist for benchmark %s" %
+                    benchmark_name)
 
   base_benchmark_name = benchmark_name.replace('.reference', '')
@@ -590,12 +596,12 @@ def _write_perf_data_to_logfile(benchmark_name, output_file,
 def print_duration(step, start, end):
-  print('Duration of %s: %d seconds' % (step, end - start))
+  logging.info('Duration of %s: %d seconds' % (step, end - start))
 
 
 def main():
   """ See collect_task.collect_task for more on the merge script API. """
-  print(sys.argv)
+  logging.info(sys.argv)
   parser = argparse.ArgumentParser()
   # configuration-name (previously perf-id) is the name of bot the tests run on
   # For example, buildbot-test is the name of the android-go-perf bot
...
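
Finally, a hedged usage sketch of the print_duration helper from the last hunk; the step name is made up and a sleep stands in for the real work, but the helper body matches the diff.

import logging
import time

logging.basicConfig(level=logging.INFO)


def print_duration(step, start, end):
  # Same helper as in the diff: despite the name, it now logs the duration.
  logging.info('Duration of %s: %d seconds' % (step, end - start))


start = time.time()
time.sleep(0.5)  # stand-in for the actual step, e.g. uploading results
print_duration('example step', start, time.time())
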