Commit 51706546 authored by Roberto Carrillo, committed by Commit Bot

[Reland] Check for counter overflows when merging profraw files.

This is a reland of http://crrev.com/c/1894619

The issue was that the signature for _validate_and_convert_profraw did
not accept the newly added output list `counter_overflows`.

I added a test to validate the behavior of this function.
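
As background, a minimal sketch (with a hypothetical `worker` function and argument names, not the actual merge code) of why such a signature mismatch can go unnoticed: `multiprocessing.Pool.apply_async` stores a worker's exception on the returned `AsyncResult` rather than raising it, so the `TypeError` caused by passing an extra positional argument never surfaces when only `close()` and `join()` are called. A direct unit test of `_validate_and_convert_profraw`, as added in this reland, makes that kind of mismatch fail loudly.

import multiprocessing


def worker(profraw_file, results):
  # Hypothetical stand-in for a worker with the old two-argument signature.
  results.append(profraw_file)


if __name__ == '__main__':
  manager = multiprocessing.Manager()
  results = manager.list()
  overflows = manager.list()

  pool = multiprocessing.Pool(2)
  # Three arguments are passed to a two-argument worker. The resulting
  # TypeError is captured on the AsyncResult instead of being raised here.
  async_result = pool.apply_async(worker, ('a.profraw', results, overflows))
  pool.close()
  pool.join()  # Finishes without reporting any error.

  print(list(results))  # [] -- the worker never ran successfully.
  # Only async_result.get() would re-raise the hidden TypeError.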

R=sajjadm,liaoyuke,nodir

Bug: 1020404
Change-Id: I25ab734e2d4c5b2237b00c9b8e7d103d4f8de122
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1900427
Commit-Queue: Roberto Carrillo <robertocn@chromium.org>
Reviewed-by: Yuke Liao <liaoyuke@chromium.org>
Cr-Commit-Position: refs/heads/master@{#712932}
parent 36d95d9a
@@ -113,6 +113,7 @@ def _validate_and_convert_profraws(profraw_files, profdata_tool_path):
     A tuple:
       A list of converted .profdata files of *valid* profraw files.
       A list of *invalid* profraw files.
+      A list of profraw files that have counter overflows.
   """
 
   logging.info('Validating and converting .profraw files.')
@@ -125,11 +126,13 @@ def _validate_and_convert_profraws(profraw_files, profdata_tool_path):
   pool = multiprocessing.Pool(counts)
   output_profdata_files = multiprocessing.Manager().list()
   invalid_profraw_files = multiprocessing.Manager().list()
+  counter_overflows = multiprocessing.Manager().list()
 
   for profraw_file in profraw_files:
-    pool.apply_async(_validate_and_convert_profraw,
-                     (profraw_file, output_profdata_files,
-                      invalid_profraw_files, profdata_tool_path))
+    pool.apply_async(
+        _validate_and_convert_profraw,
+        (profraw_file, output_profdata_files, invalid_profraw_files,
+         counter_overflows, profdata_tool_path))
 
   pool.close()
   pool.join()
@@ -138,29 +141,52 @@ def _validate_and_convert_profraws(profraw_files, profdata_tool_path):
   for input_file in profraw_files:
     os.remove(input_file)
 
-  return list(output_profdata_files), list(invalid_profraw_files)
+  return list(output_profdata_files), list(invalid_profraw_files), list(
+      counter_overflows)
 
 
 def _validate_and_convert_profraw(profraw_file, output_profdata_files,
-                                  invalid_profraw_files, profdata_tool_path):
+                                  invalid_profraw_files, counter_overflows,
+                                  profdata_tool_path):
   output_profdata_file = profraw_file.replace('.profraw', '.profdata')
   subprocess_cmd = [
       profdata_tool_path, 'merge', '-o', output_profdata_file, '-sparse=true',
       profraw_file
   ]
+
+  profile_valid = False
+  counter_overflow = False
+  validation_output = None
+
+  # 1. Determine if the profile is valid.
   try:
     # Redirecting stderr is required because when error happens, llvm-profdata
     # writes the error output to stderr and our error handling logic relies on
     # that output.
-    output = subprocess.check_output(subprocess_cmd, stderr=subprocess.STDOUT)
-    logging.info('Validating and converting %r to %r succeeded with output: %r',
-                 profraw_file, output_profdata_file, output)
-    output_profdata_files.append(output_profdata_file)
+    validation_output = subprocess.check_output(
+        subprocess_cmd, stderr=subprocess.STDOUT)
+    if 'Counter overflow' in validation_output:
+      counter_overflow = True
+    else:
+      profile_valid = True
   except subprocess.CalledProcessError as error:
-    logging.warning('Validating and converting %r to %r failed with output: %r',
-                    profraw_file, output_profdata_file, error.output)
+    validation_output = error.output
+
+  # 2. Add the profile to the appropriate list(s).
+  if profile_valid:
+    output_profdata_files.append(output_profdata_file)
+  else:
     invalid_profraw_files.append(profraw_file)
+    if counter_overflow:
+      counter_overflows.append(profraw_file)
+
+  # 3. Log appropriate message.
+  if not profile_valid:
+    template = 'Bad profile: %r, output: %r'
+    if counter_overflow:
+      template = 'Counter overflow: %r, output: %r'
+    logging.warning(template, profraw_file, validation_output)
+
+    # 4. Delete profdata for invalid profiles if present.
     if os.path.exists(output_profdata_file):
       # The output file may be created before llvm-profdata determines the
       # input is invalid. Delete it so that it does not leak and affect other
@@ -201,12 +227,13 @@ def merge_profiles(input_dir, output_file, input_extension, profdata_tool_path):
     profdata_tool_path: The path to the llvm-profdata executable.
 
   Returns:
     The list of profiles that had to be excluded to get the merge to
-    succeed.
+    succeed and a list of profiles that had a counter overflow.
   """
   profile_input_file_paths = _get_profile_paths(input_dir, input_extension)
   invalid_profraw_files = []
+  counter_overflows = []
   if input_extension == '.profraw':
-    profile_input_file_paths, invalid_profraw_files = (
+    profile_input_file_paths, invalid_profraw_files, counter_overflows = (
         _validate_and_convert_profraws(profile_input_file_paths,
                                        profdata_tool_path))
     logging.info('List of converted .profdata files: %r',
@@ -215,6 +242,10 @@ def merge_profiles(input_dir, output_file, input_extension, profdata_tool_path):
         'List of invalid .profraw files that failed to validate and convert: %r'
     ), invalid_profraw_files)
 
+    if counter_overflows:
+      logging.warning('There were %d profiles with counter overflows',
+                      len(counter_overflows))
+
   # The list of input files could be empty in the following scenarios:
   # 1. The test target is pure Python scripts test which doesn't execute any
   #    C/C++ binaries, such as devtools_closure_compile.
@@ -223,7 +254,7 @@ def merge_profiles(input_dir, output_file, input_extension, profdata_tool_path):
   if not profile_input_file_paths:
     logging.info('There is no valid profraw/profdata files to merge, skip '
                  'invoking profdata tools.')
-    return invalid_profraw_files
+    return invalid_profraw_files, counter_overflows
 
   invalid_profdata_files = _call_profdata_tool(
       profile_input_file_paths=profile_input_file_paths,
@@ -234,7 +265,7 @@ def merge_profiles(input_dir, output_file, input_extension, profdata_tool_path):
   for input_file in profile_input_file_paths:
     os.remove(input_file)
 
-  return invalid_profraw_files + invalid_profdata_files
+  return invalid_profraw_files + invalid_profdata_files, counter_overflows
 
 # We want to retry shards that contain one or more profiles that cannot be
 # merged (typically due to corruption described in crbug.com/937521).
...
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import subprocess
import sys
import unittest

THIS_DIR = os.path.dirname(os.path.abspath(__file__))

sys.path.insert(
    0,
    os.path.abspath(
        os.path.join(THIS_DIR, os.pardir, os.pardir, os.pardir, 'third_party',
                     'pymock')))

import mock

import merge_lib as merger


class MergeLibTest(unittest.TestCase):

  def __init__(self, *args, **kwargs):
    super(MergeLibTest, self).__init__(*args, **kwargs)
    self.maxDiff = None

  @mock.patch.object(subprocess, 'check_output')
  def test_validate_and_convert_profraw(self, mock_cmd):
    test_cases = [
        ([''], [['mock.profdata'], [], []]),
        (['Counter overflow'], [[], ['mock.profraw'], ['mock.profraw']]),
        (subprocess.CalledProcessError(
            255,
            'llvm-cov merge -o mock.profdata -sparse=true mock.profraw',
            output='Malformed profile'), [[], ['mock.profraw'], []]),
    ]
    for side_effect, expected_results in test_cases:
      mock_cmd.side_effect = side_effect
      output_profdata_files = []
      invalid_profraw_files = []
      counter_overflows = []
      merger._validate_and_convert_profraw(
          'mock.profraw', output_profdata_files, invalid_profraw_files,
          counter_overflows, '/usr/bin/llvm-cov')
      self.assertEqual(
          expected_results,
          [output_profdata_files, invalid_profraw_files, counter_overflows])


if __name__ == '__main__':
  unittest.main()
@@ -76,10 +76,20 @@ def main():
   # test results merge script such as layout tests will treat them as json test
   # results files and result in errors.
   logging.info('Merging code coverage profraw data')
-  invalid_profiles = coverage_merger.merge_profiles(
+  invalid_profiles, counter_overflows = coverage_merger.merge_profiles(
       params.task_output_dir,
       os.path.join(params.profdata_dir, 'default.profdata'), '.profraw',
       params.llvm_profdata)
 
+  # At the moment counter overflows overlap with invalid profiles, but this is
+  # not guaranteed to remain the case indefinitely. To avoid future conflicts
+  # treat these separately.
+  if counter_overflows:
+    with open(
+        os.path.join(params.profdata_dir, 'profiles_with_overflows.json'),
+        'w') as f:
+      json.dump(counter_overflows, f)
+
   if invalid_profiles:
     with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
               'w') as f:
...
@@ -47,7 +47,7 @@ class MergeProfilesTest(unittest.TestCase):
           'llvm-profdata', 'a.json', 'b.json', 'c.json'
       ]
       with mock.patch.object(merger, 'merge_profiles') as mock_merge:
-        mock_merge.return_value = None
+        mock_merge.return_value = None, None
         with mock.patch.object(sys, 'argv', args):
           merge_results.main()
         self.assertEqual(

@@ -92,6 +92,8 @@ class MergeProfilesTest(unittest.TestCase):
       ], [
           '/b/some/path/0/default-2.profraw',
           '/b/some/path/1/default-1.profraw',
+      ], [
+          '/b/some/path/1/default-1.profraw',
       ]
       with mock.patch.object(os, 'walk') as mock_walk:
...