Commit 6e4de3d4 authored by behdad, committed by Commit Bot

Added unittest for adjust_upper_limit script

This change adds unittests for the adjust_upper_limit script used by the
rendering representative perf tests.

Bug: chromium:1052361
Change-Id: I5850b5ca888d6dca7318277ca8bd6eb4bdf40a56
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2283660
Reviewed-by: John Chen <johnchen@chromium.org>
Commit-Queue: Behdad Bakhshinategh <behdadb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#788776}
parent 56f7b697
@@ -86,21 +86,21 @@ def FetchItemData(task_id, benchmark, index, temp_dir):
     print("CSV results were not produced!")
 
 
-def GetPercentileValues(benchmark, tags, limit, percentile):
-  """Get the percentile value of recent runs described by given tags.
+def CreateDataframe(benchmark, tags, limit):
+  """Creates the dataframe of values recorded in recent runs.
 
   Given the tags, benchmark this function fetches the data of last {limit}
-  runs, and find the percentile value for each story.
+  runs, and returns a dataframe of values for focused metrics such as
+  frame_times and CPU_wall_time_ratio.
 
   Args:
     benchmark: The benchmark these task are on (desktop/mobile).
     tags: The tags which describe the tasks such as OS and buildername.
     limit: The number of runs to look at.
-    percentile: the percentile to return.
 
   Returns:
-    A dictionary with averages and confidence interval ranges calculated
-    from the percentile of recent runs.
+    A dataframe with averages and confidence interval of frame_times, and
+    average value of CPU_wall_time_ratio of each story of each run.
   """
   items = []
   for tag_set in tags:
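For orientation, the dataframe CreateDataframe returns holds one row per story per run. The column names below are taken from the new unittest further down ('stories', 'avg', 'ci_095', 'cpu_wall_time_ratio', 'index'); the values are made up, so treat this only as a sketch of the shape, not output of the real script.

import pandas as pd

# Illustrative only: one row per (story, run index).
sample = pd.DataFrame([
    {'stories': 'story_a', 'avg': 16.0, 'ci_095': 0.20,
     'cpu_wall_time_ratio': 0.35, 'index': 0},
    {'stories': 'story_a', 'avg': 16.5, 'ci_095': 0.21,
     'cpu_wall_time_ratio': 0.36, 'index': 1},
])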
@@ -114,7 +114,21 @@ def GetPercentileValues(benchmark, tags, limit, percentile):
       idx += 1
   finally:
     shutil.rmtree(temp_dir)
 
-  data_frame = pandas.concat(dfs, ignore_index=True)
+  return pandas.concat(dfs, ignore_index=True)
+
+
+def GetPercentileValues(data_frame, percentile):
+  """Get the percentile value of each metric for recorded values in dataframe.
+
+  Args:
+    data_frame: The dataframe with averages and confidence intervals of each
+      story of each run.
+    percentile: the percentile to use for determining the upper limits.
+
+  Returns:
+    A dictionary with averages and confidence interval ranges calculated
+    from the percentile of recent runs.
+  """
   if not data_frame.empty:
     avg_df = data_frame.pivot(index='stories', columns='index', values='avg')
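Only the pivot on 'avg' is visible above; the rest of the new GetPercentileValues body is collapsed in this diff. As a rough sketch of the kind of computation its docstring describes, and not the actual Chromium implementation, the per-story values could be derived with pandas quantiles as shown below; using the lower-tail quantile for cpu_wall_time_ratio is an inference from the expected values in the unittest further down.

import pandas as pd


def get_percentile_values_sketch(data_frame, percentile):
  # One row per story, one column per run, for each metric.
  avg_df = data_frame.pivot(index='stories', columns='index', values='avg')
  ci_df = data_frame.pivot(index='stories', columns='index', values='ci_095')
  cpu_df = data_frame.pivot(
      index='stories', columns='index', values='cpu_wall_time_ratio')
  results = {}
  for story in avg_df.index:
    results[story] = {
        # Upper-tail percentile of the frame_times average and CI width.
        'avg': avg_df.loc[story].quantile(percentile),
        'ci_095': ci_df.loc[story].quantile(percentile),
        # The unittest expects the lower-tail percentile for this metric.
        'cpu_wall_time_ratio': cpu_df.loc[story].quantile(1 - percentile),
    }
  return results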
@@ -177,9 +191,10 @@ def RecalculateUpperLimits(data_point_count):
   for platform in platform_specific_tags:
     platform_data = platform_specific_tags[platform]
     print('\n- Processing data ({})'.format(platform))
-    results[platform] = GetPercentileValues(
-        platform_data['benchmark'], platform_data['tags'],
-        data_point_count, 0.95)
+    dataframe = CreateDataframe(platform_data['benchmark'],
+                                platform_data['tags'], data_point_count)
+    results[platform] = GetPercentileValues(dataframe, 0.95)
 
     # Loop over results and adjust base on current values.
     for story in results[platform]:
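Because the data fetching now lives entirely in CreateDataframe, GetPercentileValues can be exercised on any in-memory dataframe with the column layout sketched after the first hunk, with no task results involved; that is what the unittest added by this change relies on. An illustrative call, reusing the hypothetical `sample` frame from that earlier sketch:

import adjust_upper_limits

# 'sample' is the illustrative dataframe defined after the first hunk above.
limits = adjust_upper_limits.GetPercentileValues(sample, 0.95)
# 'limits' maps each story to its 'avg', 'ci_095' and 'cpu_wall_time_ratio'
# values, as exercised by the test assertions below.

The new unittest file added by this change follows.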
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import adjust_upper_limits
import pandas as pd
import os
import sys
import unittest


def create_sample_dataframe(story_name, count, avg_start, avg_step, ci_start,
                            ci_step, cpu_wall_start, cpu_wall_step):
  """Builds a dataframe of |count| runs of a single story.

  Each metric starts at its *_start value and grows by its *_step value per
  run, which keeps the expected percentiles easy to compute by hand.
  """
  cols = ['stories', 'avg', 'ci_095', 'cpu_wall_time_ratio', 'index']
  rows = []
  for idx in range(count):
    rows.append({
        'stories': story_name,
        'avg': avg_start + idx * avg_step,
        'ci_095': ci_start + idx * ci_step,
        'cpu_wall_time_ratio': cpu_wall_start + idx * cpu_wall_step,
        'index': idx
    })
  return pd.DataFrame(rows, columns=cols)


class TestAdjustUpperLimits(unittest.TestCase):
  def test_get_percentile_values(self):
    dataframe = create_sample_dataframe('story_name', 21, 16.0, 0.5, 0.2, 0.01,
                                        0.2, 0.01)
    limits = adjust_upper_limits.GetPercentileValues(dataframe, 0.95)
    # Given values for avg: [16, 16.5, 17, ..., 25.5, 26],
    # the 95th percentile is 25.5.
    self.assertEqual(limits['story_name']['avg'], 25.5)
    # Given values for ci_095: [0.2, 0.21, 0.22, ..., 0.39, 0.4],
    # the 95th percentile is 0.39.
    self.assertEqual(limits['story_name']['ci_095'], 0.39)
    # Given values for cpu_wall_time_ratio: [0.2, 0.21, 0.22, ..., 0.39, 0.4],
    # the expected 0.21 corresponds to the lower (5th) percentile.
    self.assertEqual(limits['story_name']['cpu_wall_time_ratio'], 0.21)


if __name__ == '__main__':
  unittest.main()
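The expected numbers in the assertions can be checked by hand: with 21 evenly spaced values and pandas' default linear interpolation, the 0.95 quantile lands exactly on the 20th value and the 0.05 quantile on the 2nd. A quick standalone check of that arithmetic, independent of adjust_upper_limits (it assumes plain pandas quantiles, which matches the expected values but is not taken from the script itself):

import pandas as pd

avgs = pd.Series([16.0 + 0.5 * i for i in range(21)])
cis = pd.Series([round(0.2 + 0.01 * i, 2) for i in range(21)])

print(avgs.quantile(0.95))  # 25.5, the value asserted for 'avg'
print(cis.quantile(0.95))   # 0.39, the value asserted for 'ci_095'
print(cis.quantile(0.05))   # 0.21, the value asserted for 'cpu_wall_time_ratio'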