Commit c000352c authored by prasadv@chromium.org's avatar prasadv@chromium.org

Reformat bisect results output.

Reformat the bisect results output to give detailed information regarding the bisect job.
Includes clear instructions for reproducing the regression locally and on the perf trybots.

BUG=383669,383935
NOTRY=true

Review URL: https://codereview.chromium.org/388623002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@282735 0039d316-1c4b-4281-b951-d872f2087c98
parent 165c5952
...@@ -183,6 +183,71 @@ BISECT_MODE_MEAN = 'mean' ...@@ -183,6 +183,71 @@ BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev' BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code' BISECT_MODE_RETURN_CODE = 'return_code'
# The perf dashboard specifically looks for the string
# "Estimated Confidence: 95%" to decide whether or not
# to cc the author(s). If you change this, please update the perf
# dashboard as well.
# NOTE: '%%' is a %-interpolation escape and renders as a literal '%'.
RESULTS_BANNER = """
===== BISECT JOB RESULTS =====
Status: %(status)s
Test Command: %(command)s
Test Metric: %(metrics)s
Relative Change: %(change)s
Estimated Confidence: %(confidence)d%%"""
# The perf dashboard specifically looks for the string
# "Author  : " to parse out who to cc on a bug. If you change the
# formatting here, please update the perf dashboard as well.
RESULTS_REVISION_INFO = """
===== SUSPECTED CL(s) =====
Subject : %(subject)s
Author : %(author)s%(email_info)s%(commit_info)s
Date : %(cl_date)s"""
# Local-reproduction instructions; %(command)s is the exact test command
# (built in _PrintReproSteps, with an extra profiler hint for Telemetry).
REPRO_STEPS_LOCAL = """
==== INSTRUCTIONS TO REPRODUCE ====
To run locally:
$%(command)s"""
# Trybot-reproduction instructions, printed right after REPRO_STEPS_LOCAL.
# The trailing backslashes are source-line continuations; the '\n' before
# the URL is a newline escape inside the string — it emits a line break
# mid-sentence (NOTE(review): confirm that formatting is intended).
REPRO_STEPS_TRYJOB = """
To reproduce on Performance trybot:
1. Create new git branch or check out existing branch.
2. Edit tools/run-perf-test.cfg (instructions in file) or \
third_party/WebKit/Tools/run-perf-test.cfg.
a) Take care to strip any src/ directories from the head of \
relative path names.
b) On desktop, only --browser=release is supported, on android \
--browser=android-chromium-testshell.
c) Test command to use: %(command)s
3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
committed locally to run-perf-test.cfg.
Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
$ git cl upload --bypass-hooks
4. Send your try job to the tryserver. \
[Please make sure to use appropriate bot to reproduce]
$ git cl try -m tryserver.chromium.perf -b <bot>
For more details please visit \nhttps://sites.google.com/a/chromium.org/dev/\
developers/performance-try-bots"""
# Closing banner (ASCII-art plane) printed by _PrintThankYou at the end of
# the results output.
RESULTS_THANKYOU = """
===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
Visit http://www.chromium.org/developers/core-principles for Chrome's policy
on perf regressions.
Contact chrome-perf-dashboard-team with any questions or suggestions about
bisecting.
.------.
.---. \ \==)
|PERF\ \ \\
| ---------'-------'-----------.
. 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
\______________.-------._______________)
/ /
/ /
/ /==)
._______."""
def _AddAdditionalDepotInfo(depot_info): def _AddAdditionalDepotInfo(depot_info):
"""Adds additional depot info to the global depot variables.""" """Adds additional depot info to the global depot variables."""
...@@ -3208,19 +3273,45 @@ class BisectPerformanceMetrics(object): ...@@ -3208,19 +3273,45 @@ class BisectPerformanceMetrics(object):
# dashboard as well. # dashboard as well.
print 'Confidence in Bisection Results: %d%%' % results_dict['confidence'] print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
def _ConfidenceLevelStatus(self, results_dict):
if not results_dict['confidence']:
return None
confidence_status = 'Successful with %(level)s confidence%(warning)s.'
if results_dict['confidence'] >= 95:
level = 'high'
else:
level = 'low'
warning = ' and warnings'
if not self.warnings:
warning = ''
return confidence_status % {'level': level, 'warning': warning}
def _PrintThankYou(self):
  """Prints the closing RESULTS_THANKYOU banner at the end of the results."""
  print RESULTS_THANKYOU
def _PrintBanner(self, results_dict): def _PrintBanner(self, results_dict):
print
print " __o_\___ Aw Snap! We hit a speed bump!"
print "=-O----O-'__.~.___________________________________"
print
if self._IsBisectModeReturnCode(): if self._IsBisectModeReturnCode():
print ('Bisect reproduced a change in return codes while running the ' metrics = 'N/A'
'performance test.') change = 'Yes'
else: else:
print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the ' metrics = '/'.join(self.opts.metric)
'%s metric.' % (results_dict['regression_size'], change = '%.02f%% (+/-%.02f%%)' % (
results_dict['regression_std_err'], '/'.join(self.opts.metric))) results_dict['regression_size'], results_dict['regression_std_err'])
self._PrintConfidence(results_dict)
if results_dict['culprit_revisions'] and results_dict['confidence']:
status = self._ConfidenceLevelStatus(results_dict)
else:
status = 'Failure, could not reproduce.'
change = 'Bisect could not reproduce a change.'
print RESULTS_BANNER % {
'status': status,
'command': self.opts.command,
'metrics': metrics,
'change': change,
'confidence': results_dict['confidence'],
}
def _PrintFailedBanner(self, results_dict): def _PrintFailedBanner(self, results_dict):
print print
...@@ -3246,25 +3337,22 @@ class BisectPerformanceMetrics(object): ...@@ -3246,25 +3337,22 @@ class BisectPerformanceMetrics(object):
return '' return ''
def _PrintRevisionInfo(self, cl, info, depot=None): def _PrintRevisionInfo(self, cl, info, depot=None):
# The perf dashboard specifically looks for the string email_info = ''
# "Author : " to parse out who to cc on a bug. If you change the
# formatting here, please update the perf dashboard as well.
print
print 'Subject : %s' % info['subject']
print 'Author : %s' % info['author']
if not info['email'].startswith(info['author']): if not info['email'].startswith(info['author']):
print 'Email : %s' % info['email'] email_info = '\nEmail : %s' % info['email']
commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot) commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
if commit_link: if commit_link:
print 'Link : %s' % commit_link commit_info = '\nLink : %s' % commit_link
else: else:
print commit_info = ('\nFailed to parse svn revision from body:\n%s' %
print 'Failed to parse svn revision from body:' info['body'])
print print RESULTS_REVISION_INFO % {
print info['body'] 'subject': info['subject'],
print 'author': info['author'],
print 'Commit : %s' % cl 'email_info': email_info,
print 'Date : %s' % info['date'] 'commit_info': commit_info,
'cl_date': info['date']
}
def _PrintTableRow(self, column_widths, row_data): def _PrintTableRow(self, column_widths, row_data):
assert len(column_widths) == len(row_data) assert len(column_widths) == len(row_data)
...@@ -3318,9 +3406,9 @@ class BisectPerformanceMetrics(object): ...@@ -3318,9 +3406,9 @@ class BisectPerformanceMetrics(object):
final_step=True): final_step=True):
print print
if final_step: if final_step:
print 'Tested commits:' print '===== TESTED COMMITS ====='
else: else:
print 'Partial results:' print '===== PARTIAL RESULTS ====='
self._PrintTestedCommitsHeader() self._PrintTestedCommitsHeader()
state = 0 state = 0
for current_id, current_data in revision_data_sorted: for current_id, current_data in revision_data_sorted:
...@@ -3354,12 +3442,12 @@ class BisectPerformanceMetrics(object): ...@@ -3354,12 +3442,12 @@ class BisectPerformanceMetrics(object):
self._PrintTestedCommitsEntry(current_data, cl_link, state_str) self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
def _PrintReproSteps(self): def _PrintReproSteps(self):
print command = '$ ' + self.opts.command
print 'To reproduce locally:'
print '$ ' + self.opts.command
if bisect_utils.IsTelemetryCommand(self.opts.command): if bisect_utils.IsTelemetryCommand(self.opts.command):
print command += ('\nAlso consider passing --profiler=list to see available '
print 'Also consider passing --profiler=list to see available profilers.' 'profilers.')
print REPRO_STEPS_LOCAL % {'command': command}
print REPRO_STEPS_TRYJOB % {'command': command}
def _PrintOtherRegressions(self, other_regressions, revision_data): def _PrintOtherRegressions(self, other_regressions, revision_data):
print print
...@@ -3413,7 +3501,7 @@ class BisectPerformanceMetrics(object): ...@@ -3413,7 +3501,7 @@ class BisectPerformanceMetrics(object):
print print
print 'WARNINGS:' print 'WARNINGS:'
for w in set(self.warnings): for w in set(self.warnings):
print ' !!! %s' % w print ' ! %s' % w
def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good): def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
other_regressions = [] other_regressions = []
...@@ -3614,26 +3702,23 @@ class BisectPerformanceMetrics(object): ...@@ -3614,26 +3702,23 @@ class BisectPerformanceMetrics(object):
# bugs. If you change this, please update the perf dashboard as well. # bugs. If you change this, please update the perf dashboard as well.
bisect_utils.OutputAnnotationStepStart('Results') bisect_utils.OutputAnnotationStepStart('Results')
self._PrintBanner(results_dict)
self._PrintWarnings()
if results_dict['culprit_revisions'] and results_dict['confidence']: if results_dict['culprit_revisions'] and results_dict['confidence']:
self._PrintBanner(results_dict)
for culprit in results_dict['culprit_revisions']: for culprit in results_dict['culprit_revisions']:
cl, info, depot = culprit cl, info, depot = culprit
self._PrintRevisionInfo(cl, info, depot) self._PrintRevisionInfo(cl, info, depot)
self._PrintReproSteps()
if results_dict['other_regressions']: if results_dict['other_regressions']:
self._PrintOtherRegressions(results_dict['other_regressions'], self._PrintOtherRegressions(results_dict['other_regressions'],
revision_data) revision_data)
else:
self._PrintFailedBanner(results_dict)
self._PrintReproSteps()
self._PrintTestedCommitsTable(revision_data_sorted, self._PrintTestedCommitsTable(revision_data_sorted,
results_dict['first_working_revision'], results_dict['first_working_revision'],
results_dict['last_broken_revision'], results_dict['last_broken_revision'],
results_dict['confidence']) results_dict['confidence'])
self._PrintStepTime(revision_data_sorted) self._PrintStepTime(revision_data_sorted)
self._PrintWarnings() self._PrintReproSteps()
self._PrintThankYou()
if self.opts.output_buildbot_annotations: if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed() bisect_utils.OutputAnnotationStepClosed()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment