Commit 5eb683cf authored by behdad, committed by Commit Bot

Added multiplier error margin

Instead of a fixed 2ms margin of error for frame_times, we now use a
percentage (10%) of the upper limit.
So for a story with an upper limit of 16ms the margin will be 1.6ms,
and for a story with an upper limit of 60ms it will be 6ms.

TBR=crouleau@chromium.org,sadrul@chromium.org

Bug: chromium:1031609
Change-Id: I67ceb4f84af5d07ce5606856afd868265060e6a2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1956042
Reviewed-by: Behdad Bakhshinategh <behdadb@chromium.org>
Commit-Queue: Behdad Bakhshinategh <behdadb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#722750}
parent a73d601a
...@@ -29,12 +29,12 @@ import common ...@@ -29,12 +29,12 @@ import common
import run_performance_tests import run_performance_tests
# AVG_ERROR_MARGIN determines how much more the value of frame times can be # AVG_ERROR_MARGIN determines how much more the value of frame times can be
# compared to the recorded value # compared to the recorded value (multiplier of upper limit).
AVG_ERROR_MARGIN = 2.0 AVG_ERROR_MARGIN = 1.1
# CI stands for confidence intervals. "ci_095"s recorded in the data is the # CI stands for confidence intervals. "ci_095"s recorded in the data is the
# recorded range between upper and lower CIs. CI_ERROR_MARGIN is the maximum # recorded range between upper and lower CIs. CI_ERROR_MARGIN is the maximum
# acceptable ratio of calculated ci_095 to the recorded ones. # acceptable ratio of calculated ci_095 to the recorded ones.
CI_ERROR_MARGIN = 2.0 CI_ERROR_MARGIN = 1.5
class ResultRecorder(object): class ResultRecorder(object):
def __init__(self): def __init__(self):
...@@ -146,7 +146,7 @@ def interpret_run_benchmark_results(upper_limit_data, ...@@ -146,7 +146,7 @@ def interpret_run_benchmark_results(upper_limit_data,
'compared to upper limit ({:.3f})').format( 'compared to upper limit ({:.3f})').format(
benchmark, story_name, measured_ci,upper_limit_ci)) benchmark, story_name, measured_ci,upper_limit_ci))
result_recorder.add_failure(story_name, benchmark) result_recorder.add_failure(story_name, benchmark)
elif (measured_avg > upper_limit_avg + AVG_ERROR_MARGIN): elif (measured_avg > upper_limit_avg * AVG_ERROR_MARGIN):
print(('[ FAILED ] {}/{} higher average frame_times({:.3f}) compared' + print(('[ FAILED ] {}/{} higher average frame_times({:.3f}) compared' +
' to upper limit ({:.3f})').format( ' to upper limit ({:.3f})').format(
benchmark, story_name, measured_avg, upper_limit_avg)) benchmark, story_name, measured_avg, upper_limit_avg))
...@@ -201,8 +201,8 @@ def main(): ...@@ -201,8 +201,8 @@ def main():
# The values used as the upper limit are the 99th percentile of the # The values used as the upper limit are the 99th percentile of the
# avg and ci_095 frame_times recorded by dashboard in the past 200 revisions. # avg and ci_095 frame_times recorded by dashboard in the past 200 revisions.
# If the value measured here would be higher than this value at least by # If the value measured here would be higher than this value at least by
# 2ms [AVG_ERROR_MARGIN], that would be considered a failure. # 10 [AVG_ERROR_MARGIN] percent of upper limit, that would be considered a
# crbug.com/953895 # failure. crbug.com/953895
with open( with open(
os.path.join(os.path.dirname(__file__), os.path.join(os.path.dirname(__file__),
'representative_perf_test_data', 'representative_perf_test_data',
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment