Commit 9461cf2b authored by Ramin Halavati, committed by Commit Bot

Update network traffic annotation auditor tests.

Tests are added to compare outputs of traffic_annotation_auditor using
different parameters.

Bug: 690323
Change-Id: I69bc0234310020e2e92ecdd25c90f543f9b99459
Reviewed-on: https://chromium-review.googlesource.com/960143
Commit-Queue: Ramin Halavati <rhalavati@chromium.org>
Reviewed-by: Martin Šrámek <msramek@chromium.org>
Cr-Commit-Position: refs/heads/master@{#551298}
parent c69ef0f7
@@ -9,13 +9,14 @@
import os
import argparse
import sys
import tempfile
from annotation_tools import NetworkTrafficAnnotationTools
# If this test starts failing, please set TEST_IS_ENABLED to "False" and file a
# bug to get this reenabled, and cc the people listed in
# //tools/traffic_annotation/OWNERS.
TEST_IS_ENABLED = False
TEST_IS_ENABLED = True
class TrafficAnnotationTestsChecker():
@@ -28,20 +29,78 @@ class TrafficAnnotationTestsChecker():
"""
self.tools = NetworkTrafficAnnotationTools(build_path)
def RunAllTests(self):
result = self.RunOnAllFiles()
#TODO(rhalavati): Add more tests, and create a pipeline for them.
return result
"""Runs all tests and returns the result."""
return self.CheckAuditorResults() and self.CheckOutputExpectations()
def CheckAuditorResults(self):
"""Runs auditor using different configurations, expecting to run error free,
and having equal results in the exported TSV file in all cases. The TSV file
provides a summary of all annotations and their content.
Returns:
bool True if all results are as expected.
"""
configs = [
["--test-only", "--error-resilient"], # Similar to trybot.
["--test-only"], # Failing on any runtime error.
# TODO(rhalavati): The --no-filtering mode requires a full Chrome build
# which is now not available on the FYI bot.
# ["--test-only", "--no-filtering"] # Not using heuristic filtering.
]
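    # All configurations should extract the same annotations; compare each
    # run's TSV output against the previous run's.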
last_result = None
for config in configs:
result = self._RunTest(config)
if not result:
print("No output for config: %s" % config)
return False
if last_result and last_result != result:
print("Unexpected different results for config: %s" % config)
return False
last_result = result
return True
def CheckOutputExpectations(self):
# TODO(rhalavati): Add tests to check for an expected minimum number of
# items for each type of pattern that auditor extracts. E.g., we should have
# many annotations of each type (complete, partial, ...), functions that
# need annotations, direct assignment to mutable annotations, etc.
return True
def _RunTest(self, args):
"""Runs the auditor test with given |args|, and returns the extracted
annotations.
def RunOnAllFiles(self):
args = ["--test-only"]
_, stderr_text, return_code = self.tools.RunAuditor(args)
Args:
args: list of str Arguments to be passed to auditor.
Returns:
str Content of annotations.tsv file if successful, otherwise None.
"""
print("Running auditor using config: %s" % args)
temp_file = tempfile.NamedTemporaryFile()
temp_filename = temp_file.name
temp_file.close()
_, stderr_text, return_code = self.tools.RunAuditor(
args + ["--annotations-file=%s" % temp_filename])
    annotations = (None if (return_code or stderr_text)
                   else open(temp_filename).read())
os.remove(temp_filename)
if not return_code:
print("RunOnAlFiles Passed.")
elif stderr_text:
print(stderr_text)
return return_code
print("Test PASSED.")
else:
print("Test FAILED.\n%s" % stderr_text)
return annotations
def main():
......
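For reference, a minimal sketch of driving the new checker directly (an
illustration only, not part of this change; the "out/Default" build path is
an assumption, and the real entry point is this script's main()):

  # Hypothetical direct use of TrafficAnnotationTestsChecker defined above.
  checker = TrafficAnnotationTestsChecker("out/Default")  # assumed build path
  if TEST_IS_ENABLED:
    sys.exit(0 if checker.RunAllTests() else 1)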