Commit cb890354 authored by Ned Nguyen, committed by Commit Bot

Set service_account_file of results dashboard upload to None for perf LUCI builders

Bug: 878389, 860677
Cq-Include-Trybots: master.tryserver.chromium.perf:obbs_fyi
Change-Id: I5cb85e8a22b1fe3c9bf64a993f3aa3f765a0734e
Reviewed-on: https://chromium-review.googlesource.com/1194825
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Simon Hatch <simonhatch@chromium.org>
Cr-Commit-Position: refs/heads/master@{#587012}
parent ddeffe99
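For context, a minimal sketch of the token-minting behavior this change enables (not part of the CL; the helper name, the credentials path, and the three-minute lifetime are illustrative): when no service account file is supplied, luci-auth is invoked without -service-account-json and mints a token for the LUCI swarming task service account instead.

import subprocess

def mint_dashboard_token(service_account_file=None, lifetime_minutes=3):
  # Mirrors LuciAuthTokenGeneratorCallback in the diff below: the
  # -service-account-json flag is added only when an explicit credentials
  # file is supplied.
  args = ['luci-auth', 'token', '-lifetime', '%im' % lifetime_minutes]
  if service_account_file:
    args += ['-service-account-json', service_account_file]
  return subprocess.check_output(args)

# Buildbot-style builder: mint_dashboard_token('/creds/dashboard.json')
# LUCI builder (the new code path): mint_dashboard_token(None)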
@@ -52,8 +52,12 @@ class SendResultsFatalException(SendResultException):
def LuciAuthTokenGeneratorCallback(
service_account_file, token_expiration_in_minutes):
args = ['luci-auth', 'token',
'-service-account-json', service_account_file,
'-lifetime', '%im' % token_expiration_in_minutes]
if service_account_file:
args += ['-service-account-json', service_account_file]
else:
print ('service_account_file is not set. '
'Use LUCI swarming task service account')
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.wait() == 0:
return p.stdout.read()
@@ -78,7 +82,9 @@ def SendResults(data, data_label, url, send_as_histograms=False,
url: Performance Dashboard URL (including schema).
send_as_histograms: True if result is to be sent to /add_histograms.
service_account_file: string; path to service account file which is used
for authenticating when upload data to perf dashboard.
for authenticating when uploading data to the perf dashboard. This can be
None in the case of a LUCI builder, in which case the task service account
of the builder will be used.
token_generator_callback: a callback for generating the authentication token
to upload to perf dashboard.
This callback takes two parameters
@@ -89,11 +95,6 @@ def SendResults(data, data_label, url, send_as_histograms=False,
num_retries: Number of times to retry uploading to the perf dashboard upon
recoverable error.
"""
if send_as_histograms and not service_account_file:
raise ValueError(
'Must set a valid service_account_file for uploading histogram set '
'data')
start = time.time()
errors = []
all_data_uploaded = False
@@ -101,7 +101,7 @@ def _CreateParser():
parser.add_option('--git-revision')
parser.add_option('--output-json-dashboard-url')
parser.add_option('--send-as-histograms', action='store_true')
parser.add_option('--service-account-file')
parser.add_option('--service-account-file', default=None)
return parser
@@ -115,7 +115,7 @@ def main(args):
if not options.configuration_name or not options.results_url:
parser.error('configuration_name and results_url are required.')
service_account_file = options.service_account_file or None
service_account_file = options.service_account_file
if not options.perf_dashboard_machine_group:
print 'Error: Invalid perf dashboard machine group'
@@ -75,10 +75,18 @@ def _upload_perf_results(json_to_upload, name, configuration_name,
'--got-revision-cp', build_properties['got_revision_cp'],
'--got-v8-revision', build_properties['got_v8_revision'],
'--got-webrtc-revision', build_properties['got_webrtc_revision'],
'--service-account-file', service_account_file,
'--output-json-file', output_json_file,
'--perf-dashboard-machine-group', _GetMachineGroup(build_properties)
]
is_luci = False
buildbucket = json.loads(build_properties.get('buildbucket', "{}"))
if ('build' in buildbucket and
buildbucket['build'].get('bucket') == 'luci.chrome.ci'):
is_luci = True
if service_account_file and not is_luci:
args += ['--service-account-file', service_account_file]
if build_properties.get('git_revision'):
args.append('--git-revision')
args.append(build_properties['git_revision'])
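A minimal, self-contained sketch of the LUCI detection added above (the build property payload is made up apart from the bucket name, which comes from the diff): the 'buildbucket' build property is a JSON string, and a bucket of 'luci.chrome.ci' identifies a LUCI CI builder, for which --service-account-file is no longer passed.

import json

build_properties = {
    'buildbucket': '{"build": {"bucket": "luci.chrome.ci"}}',  # illustrative
}
buildbucket = json.loads(build_properties.get('buildbucket', '{}'))
is_luci = ('build' in buildbucket and
           buildbucket['build'].get('bucket') == 'luci.chrome.ci')
print is_luci  # True: omit the flag and rely on the task service account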
@@ -220,9 +228,16 @@ def process_perf_results(output_json, configuration_name,
perftest-output.json file containing the performance results in histogram
or dashboard json format and an output.json file containing the json test
results for the benchmark.
Returns:
(return_code, benchmark_upload_result_map):
return_code is 0 if the whole operation succeeded, nonzero otherwise.
benchmark_upload_result_map: a dictionary describing which benchmarks
were successfully uploaded.
"""
begin_time = time.time()
return_code = 0
benchmark_upload_result_map = {}
directory_list = [
f for f in listdir(task_output_dir)
if not isfile(join(task_output_dir, f))
@@ -272,7 +287,7 @@ def process_perf_results(output_json, configuration_name,
if not smoke_test_mode:
try:
return_code = _handle_perf_results(
return_code, benchmark_upload_result_map = _handle_perf_results(
benchmark_enabled_map, benchmark_directory_map,
configuration_name, build_properties, service_account_file,
extra_links)
@@ -285,7 +300,7 @@ def process_perf_results(output_json, configuration_name,
_merge_json_output(output_json, test_results_list, extra_links)
end_time = time.time()
print_duration('Total process_perf_results', begin_time, end_time)
return return_code
return return_code, benchmark_upload_result_map
def _merge_chartjson_results(chartjson_dicts):
merged_results = chartjson_dicts[0]
@@ -352,14 +367,14 @@ def _upload_individual(
print 'Uploading perf results from %s benchmark (size %s Mib)' % (
benchmark_name, results_size_in_mib)
with open(output_json_file, 'w') as oj:
upload_fail = _upload_perf_results(
upload_return_code = _upload_perf_results(
results_filename,
benchmark_name, configuration_name, build_properties,
service_account_file, oj)
upload_end_time = time.time()
print_duration(('%s upload time' % (benchmark_name)),
upload_begin_time, upload_end_time)
return (benchmark_name, upload_fail)
return (benchmark_name, upload_return_code == 0)
finally:
shutil.rmtree(tmpfile_dir)
@@ -369,9 +384,9 @@ def _upload_individual_benchmark(params):
return _upload_individual(*params)
except Exception:
benchmark_name = params[0]
upload_fail = True
upload_succeed = False
logging.exception('Error uploading perf result of %s' % benchmark_name)
return benchmark_name, upload_fail
return benchmark_name, upload_succeed
def _handle_perf_results(
@@ -384,7 +399,11 @@
|extra_links|.
Returns:
0 if this upload to perf dashboard succesfully, 1 otherwise.
(return_code, benchmark_upload_result_map)
return_code is 0 if the upload to the perf dashboard succeeded, 1
otherwise.
benchmark_upload_result_map is a dictionary describing which benchmarks
were successfully uploaded.
"""
begin_time = time.time()
tmpfile_dir = tempfile.mkdtemp('outputresults')
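To illustrate the return contract described in the docstring above, a small self-contained sketch (the map contents are made up): callers now receive a per-benchmark status map alongside the exit code, so failed uploads can be reported by name.

return_code = 1
benchmark_upload_result_map = {'speedometer': True, 'octane': False}
failed = sorted(name for name, ok in benchmark_upload_result_map.iteritems()
                if not ok)
if return_code:
  print 'Dashboard upload failed for: %s' % ', '.join(failed)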
@@ -421,21 +440,21 @@
# Keep a mapping of benchmarks to their upload results
benchmark_upload_result_map = {}
for r in results:
benchmark_upload_result_map[r[0]] = bool(r[1])
benchmark_upload_result_map[r[0]] = r[1]
logdog_dict = {}
upload_failures_counter = 0
logdog_stream = None
logdog_label = 'Results Dashboard'
for benchmark_name, output_file in results_dict.iteritems():
failure = benchmark_upload_result_map[benchmark_name]
if failure:
upload_succeed = benchmark_upload_result_map[benchmark_name]
if not upload_succeed:
upload_failures_counter += 1
is_reference = '.reference' in benchmark_name
_write_perf_data_to_logfile(
benchmark_name, output_file,
configuration_name, build_properties, logdog_dict,
is_reference, failure)
is_reference, upload_failure=not upload_succeed)
logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
logdog_stream = logdog_helper.text(logdog_file_name,
@@ -449,8 +468,8 @@
end_time = time.time()
print_duration('Uploading results to perf dashboard', begin_time, end_time)
if upload_failures_counter > 0:
return 1
return 0
return 1, benchmark_upload_result_map
return 0, benchmark_upload_result_map
finally:
shutil.rmtree(tmpfile_dir)
@@ -511,7 +530,8 @@ def main():
# configuration-name and results-url are set in the json file which is going
# away tools/perf/core/chromium.perf.fyi.extras.json
parser.add_argument('--configuration-name', help=argparse.SUPPRESS)
parser.add_argument('--service-account-file', help=argparse.SUPPRESS)
parser.add_argument('--service-account-file', help=argparse.SUPPRESS,
default=None)
parser.add_argument('--build-properties', help=argparse.SUPPRESS)
parser.add_argument('--summary-json', help=argparse.SUPPRESS)
@@ -525,15 +545,12 @@
args = parser.parse_args()
if not args.service_account_file and not args.smoke_test_mode:
raise Exception(
'Service account file must be specificed for dashboard upload')
return process_perf_results(
return_code, _ = process_perf_results(
args.output_json, args.configuration_name,
args.service_account_file,
args.build_properties, args.task_output_dir,
args.smoke_test_mode)
return return_code
if __name__ == '__main__':
@@ -25,7 +25,7 @@ import process_perf_results as ppr_module
class _FakeLogdogStream(object):
def write(self, data):
pass
del data # unused
def close(self):
pass
@@ -66,6 +66,8 @@ class ProcessPerfResultsIntegrationTest(unittest.TestCase):
shutil.rmtree(self.test_dir)
@decorators.Disabled('chromeos') # crbug.com/865800
@decorators.Disabled('win') # crbug.com/860677, mock doesn't integrate well
# with multiprocessing on Windows.
def testIntegration(self):
build_properties = json.dumps({
'perf_dashboard_machine_group': 'test-builder',
@@ -74,14 +76,61 @@
'got_v8_revision': 'beef1234',
'got_revision_cp': 'refs/heads/master@{#1234}',
'got_webrtc_revision': 'fee123',
'git_revision': 'deadbeef'
})
ppr_module.process_perf_results(
'git_revision': 'deadbeef',
'buildbucket': r"""{"build":
{"bucket": "master.tryserver.chromium.perf",
"created_by": "user:foo",
"created_ts": "1535490272757820",
"id": "8936915467712010816",
"lease_key": "461228535",
"tags": ["builder:obbs_fyi", "buildset:patch/1194825/3",
"cq_experimental:False",
"master:master.tryserver.chromium.perf",
"user_agent:cq"]}}"""
})
return_code, benchmark_upload_result_map = ppr_module.process_perf_results(
self.output_json, configuration_name='test-builder',
service_account_file = self.service_account_file,
build_properties=build_properties,
task_output_dir=self.task_output_dir,
smoke_test_mode=False)
self.assertEquals(return_code, 1)
self.assertEquals(benchmark_upload_result_map,
{
"power.desktop.reference": True,
"blink_perf.image_decoder": True,
"octane.reference": True,
"power.desktop": True,
"speedometer-future": True,
"blink_perf.owp_storage": True,
"memory.desktop": True,
"wasm": True,
"dummy_benchmark.histogram_benchmark_1": True,
"dummy_benchmark.histogram_benchmark_1.reference": True,
"wasm.reference": True,
"speedometer": True,
"memory.long_running_idle_gmail_tbmv2": True,
"v8.runtime_stats.top_25": True,
"dummy_benchmark.noisy_benchmark_1": True,
"blink_perf.svg": True,
"v8.runtime_stats.top_25.reference": True,
"jetstream.reference": True,
"jetstream": True,
"speedometer2-future.reference": True,
"speedometer2-future": False, # Only this fails due to malformed data
"blink_perf.svg.reference": True,
"blink_perf.image_decoder.reference": True,
"power.idle_platform.reference": True,
"power.idle_platform": True,
"dummy_benchmark.noisy_benchmark_1.reference": True,
"speedometer-future.reference": True,
"memory.long_running_idle_gmail_tbmv2.reference": True,
"memory.desktop.reference": True,
"blink_perf.owp_storage.reference": True,
"octane": True,
"speedometer.reference": True
})
if __name__ == '__main__':
unittest.main()