Commit 8dbd29d5 authored by behara.ms@samsung.com's avatar behara.ms@samsung.com

Enabling archiving of test results by default in run-webkit-tests.

Added unit_tests for archiving of layout test results.

Review URL: https://codereview.chromium.org/489093002

git-svn-id: svn://svn.chromium.org/blink/trunk@181991 bbb929c8-8fbe-4397-9dbb-9b2b20218538
parent 5c31073f
...@@ -155,30 +155,33 @@ class Manager(object): ...@@ -155,30 +155,33 @@ class Manager(object):
def _rename_results_folder(self): def _rename_results_folder(self):
try: try:
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html")))) timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
except OSError, e: except (IOError, OSError), e:
# It might be possible that results.html was not generated in previous run, because the test # It might be possible that results.html was not generated in previous run, because the test
# run was interrupted even before testing started. In those cases, don't archive the folder. # run was interrupted even before testing started. In those cases, don't archive the folder.
# Simply override the current folder contents with new results. # Simply override the current folder contents with new results.
import errno import errno
if e.errno == errno.EEXIST: if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
_log.warning("No results.html file found in previous run, skipping it.") _log.warning("No results.html file found in previous run, skipping it.")
return None return None
archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp)) archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name) archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
self._filesystem.move(self._results_directory, archived_path) self._filesystem.move(self._results_directory, archived_path)
def _clobber_old_archived_results(self): def _delete_dirs(self, dir_list):
for dir in dir_list:
self._filesystem.rmtree(dir)
def _limit_archived_results_count(self):
results_directory_path = self._filesystem.dirname(self._results_directory) results_directory_path = self._filesystem.dirname(self._results_directory)
file_list = self._filesystem.listdir(results_directory_path) file_list = self._filesystem.listdir(results_directory_path)
results_directories = [] results_directories = []
for dir in file_list: for dir in file_list:
file_path = self._filesystem.join(results_directory_path, dir) file_path = self._filesystem.join(results_directory_path, dir)
if self._filesystem.isdir(file_path): if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path) results_directories.append(file_path)
results_directories.sort(key=lambda x: self._filesystem.mtime(x)) results_directories.sort(key=lambda x: self._filesystem.mtime(x))
self._printer.write_update("Clobbering old archived results in %s" % results_directory_path) self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
for dir in results_directories[:-self.ARCHIVED_RESULTS_LIMIT]: self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])
self._filesystem.rmtree(dir)
def _set_up_run(self, test_names): def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...") self._printer.write_update("Checking build ...")
...@@ -202,14 +205,12 @@ class Manager(object): ...@@ -202,14 +205,12 @@ class Manager(object):
self._port.stop_helper() self._port.stop_helper()
return exit_code return exit_code
if self._options.enable_versioned_results and self._filesystem.exists(self._results_directory): if self._options.clobber_old_results:
if self._options.clobber_old_results: self._clobber_old_results()
_log.warning("Flag --enable_versioned_results overrides --clobber-old-results.") elif self._filesystem.exists(self._results_directory):
self._clobber_old_archived_results() self._limit_archived_results_count()
# Rename the existing results folder for archiving. # Rename the existing results folder for archiving.
self._rename_results_folder() self._rename_results_folder()
elif self._options.clobber_old_results:
self._clobber_old_results()
# Create the output directory if it doesn't already exist. # Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(self._results_directory) self._port.host.filesystem.maybe_make_directory(self._results_directory)
...@@ -406,16 +407,17 @@ class Manager(object): ...@@ -406,16 +407,17 @@ class Manager(object):
writer.write_crash_log(crash_log) writer.write_crash_log(crash_log)
def _clobber_old_results(self): def _clobber_old_results(self):
# Just clobber the actual test results directories since the other dir_above_results_path = self._filesystem.dirname(self._results_directory)
# files in the results directory are explicitly used for cross-run self._printer.write_update("Clobbering old results in %s" % dir_above_results_path)
# tracking. if not self._filesystem.exists(dir_above_results_path):
self._printer.write_update("Clobbering old results in %s" % return
self._results_directory) file_list = self._filesystem.listdir(dir_above_results_path)
layout_tests_dir = self._port.layout_tests_dir() results_directories = []
possible_dirs = self._port.test_dirs() for dir in file_list:
for dirname in possible_dirs: file_path = self._filesystem.join(dir_above_results_path, dir)
if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)): if self._filesystem.isdir(file_path) and self._results_directory in file_path:
self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname)) results_directories.append(file_path)
self._delete_dirs(results_directories)
# Port specific clean-up. # Port specific clean-up.
self._port.clobber_old_port_specific_results() self._port.clobber_old_port_specific_results()
......
...@@ -122,3 +122,55 @@ class ManagerTest(unittest.TestCase): ...@@ -122,3 +122,55 @@ class ManagerTest(unittest.TestCase):
run_results = TestRunResults(expectations, len(tests)) run_results = TestRunResults(expectations, len(tests))
manager = get_manager() manager = get_manager()
manager._look_for_new_crash_logs(run_results, time.time()) manager._look_for_new_crash_logs(run_results, time.time())
def _make_fake_test_result(self, host, results_directory):
    """Create a fake results directory holding a minimal results.html file."""
    fs = host.filesystem
    fs.maybe_make_directory(results_directory)
    fs.write_binary_file(results_directory + '/results.html', 'This is a test results file')
def test_rename_results_folder(self):
    """Archiving should rename the results folder with its results.html mtime timestamp."""
    host = MockHost()
    port = host.port_factory.get('test-mac-leopard')
    results_dir = '/tmp/layout-test-results'
    self._make_fake_test_result(port.host, results_dir)
    fs = port.host.filesystem
    self.assertTrue(fs.exists(results_dir))
    # The archive name is derived from the mtime of results.html.
    mtime = fs.mtime(results_dir + '/results.html')
    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(mtime))
    expected_archive = results_dir + '_' + timestamp
    manager = Manager(port, options=MockOptions(max_locked_shards=1), printer=FakePrinter())
    manager._rename_results_folder()
    self.assertFalse(fs.exists(results_dir))
    self.assertTrue(fs.exists(expected_archive))
def test_clobber_old_results(self):
    """_clobber_old_results should remove the existing results directory outright."""
    host = MockHost()
    port = host.port_factory.get('test-mac-leopard')
    results_dir = '/tmp/layout-test-results'
    self._make_fake_test_result(port.host, results_dir)
    fs = port.host.filesystem
    self.assertTrue(fs.exists(results_dir))
    manager = Manager(port, options=MockOptions(max_locked_shards=1), printer=FakePrinter())
    manager._clobber_old_results()
    self.assertFalse(fs.exists(results_dir))
def test_limit_archived_results_count(self):
    """With 30 archived result dirs, the 5 oldest should be clobbered.

    NOTE(review): the expected count of 5 implies Manager.ARCHIVED_RESULTS_LIMIT
    is 25 — confirm against the Manager class.
    """
    host = MockHost()
    port = host.port_factory.get('test-mac-leopard')
    dir_names = ['/tmp/layout-test-results' + '_' + str(i) for i in range(1, 31)]
    for name in dir_names:
        self._make_fake_test_result(port.host, name)
    manager = Manager(port, options=MockOptions(max_locked_shards=1), printer=FakePrinter())
    manager._limit_archived_results_count()
    fs = port.host.filesystem
    deleted_dir_count = sum(1 for name in dir_names if not fs.exists(name))
    self.assertEqual(deleted_dir_count, 5)
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json import json
import logging
class ProcessJsonData(object): class ProcessJsonData(object):
...@@ -113,10 +114,13 @@ class GenerateDashBoard(object): ...@@ -113,10 +114,13 @@ class GenerateDashBoard(object):
file_list = self._filesystem.listdir(self._results_directory_path) file_list = self._filesystem.listdir(self._results_directory_path)
results_directories = [] results_directories = []
for dir in file_list: for dir in file_list:
if self._filesystem.isdir(self._filesystem.join(self._results_directory_path, dir)): full_dir_path = self._filesystem.join(self._results_directory_path, dir)
results_directories.append(self._filesystem.join(self._results_directory_path, dir)) if self._filesystem.isdir(full_dir_path):
if self._results_directory in full_dir_path:
results_directories.append(full_dir_path)
results_directories.sort(reverse=True, key=lambda x: self._filesystem.mtime(x)) results_directories.sort(reverse=True, key=lambda x: self._filesystem.mtime(x))
with open(self._filesystem.join(results_directories[0], 'failing_results.json'), "r") as file: current_failing_results_json_file = self._filesystem.join(results_directories[0], 'failing_results.json')
with self._filesystem.open_text_file_for_reading(current_failing_results_json_file) as file:
input_json_string = file.readline() input_json_string = file.readline()
input_json_string = input_json_string[12:-2] # Remove preceding string ADD_RESULTS( and ); at the end input_json_string = input_json_string[12:-2] # Remove preceding string ADD_RESULTS( and ); at the end
self._current_result_json_dict['tests'] = json.loads(input_json_string)['tests'] self._current_result_json_dict['tests'] = json.loads(input_json_string)['tests']
...@@ -127,21 +131,26 @@ class GenerateDashBoard(object): ...@@ -127,21 +131,26 @@ class GenerateDashBoard(object):
# Load the remaining stale layout test results Json's to create the dashboard # Load the remaining stale layout test results Json's to create the dashboard
for json_file in results_directories: for json_file in results_directories:
with open(self._filesystem.join(json_file, 'failing_results.json'), "r") as file: failing_json_file_path = self._filesystem.join(json_file, 'failing_results.json')
full_json_file_path = self._filesystem.join(json_file, 'full_results.json')
with self._filesystem.open_text_file_for_reading(failing_json_file_path) as file:
json_string = file.readline() json_string = file.readline()
json_string = json_string[12:-2] # Remove preceding string ADD_RESULTS( and ); at the end json_string = json_string[12:-2] # Remove preceding string ADD_RESULTS( and ); at the end
self._old_failing_results_list.append(json.loads(json_string)) self._old_failing_results_list.append(json.loads(json_string))
with open(self._filesystem.join(json_file, 'full_results.json'), "r") as full_file: with self._filesystem.open_text_file_for_reading(full_json_file_path) as full_file:
json_string_full_result = full_file.readline() json_string_full_result = full_file.readline()
self._old_full_results_list.append(json.loads(json_string_full_result)) self._old_full_results_list.append(json.loads(json_string_full_result))
self._copy_dashboard_html() self._copy_dashboard_html()
def generate(self): def generate(self):
self._initialize() self._initialize()
process_json_data = ProcessJsonData(self._current_result_json_dict, self._old_failing_results_list, self._old_full_results_list)
self._final_result = process_json_data.generate_archived_result() # There must be at least one archived result to be processed
final_json = json.dumps(self._final_result) if self._current_result_json_dict:
final_json = 'ADD_RESULTS(' + final_json + ');' process_json_data = ProcessJsonData(self._current_result_json_dict, self._old_failing_results_list, self._old_full_results_list)
with open(self._filesystem.join(self._results_directory, 'archived_results.json'), "w") as file: self._final_result = process_json_data.generate_archived_result()
file.write(final_json) final_json = json.dumps(self._final_result)
final_json = 'ADD_RESULTS(' + final_json + ');'
with self._filesystem.open_text_file_for_writing(self._filesystem.join(self._results_directory, 'archived_results.json')) as file:
file.write(final_json)
...@@ -77,9 +77,8 @@ def main(argv, stdout, stderr): ...@@ -77,9 +77,8 @@ def main(argv, stdout, stderr):
bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging) bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
bot_printer.print_results(run_details) bot_printer.print_results(run_details)
if options.enable_versioned_results: gen_dash_board = GenerateDashBoard(port)
gen_dash_board = GenerateDashBoard(port) gen_dash_board.generate()
gen_dash_board.generate()
return run_details.exit_code return run_details.exit_code
...@@ -135,8 +134,6 @@ def parse_args(args): ...@@ -135,8 +134,6 @@ def parse_args(args):
help="Use the specified port's baselines first"), help="Use the specified port's baselines first"),
optparse.make_option("--driver-name", type="string", optparse.make_option("--driver-name", type="string",
help="Alternative driver binary to use"), help="Alternative driver binary to use"),
optparse.make_option("--enable-versioned-results", action="store_true",
default=False, help="Archive the test results for later access."),
optparse.make_option("--full-results-html", action="store_true", optparse.make_option("--full-results-html", action="store_true",
default=False, default=False,
help="Show all failures in results.html, rather than only regressions"), help="Show all failures in results.html, rather than only regressions"),
......
...@@ -78,9 +78,7 @@ class Printer(object): ...@@ -78,9 +78,7 @@ class Printer(object):
self._print_default("Using port '%s'" % self._port.name()) self._print_default("Using port '%s'" % self._port.name())
self._print_default("Test configuration: %s" % self._port.test_configuration()) self._print_default("Test configuration: %s" % self._port.test_configuration())
self._print_default("View the test results at file://%s/results.html" % results_directory) self._print_default("View the test results at file://%s/results.html" % results_directory)
self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory)
if self._options.enable_versioned_results:
self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory)
# FIXME: should these options be in printing_options? # FIXME: should these options be in printing_options?
if self._options.new_baseline: if self._options.new_baseline:
......
...@@ -126,7 +126,6 @@ class Testprinter(unittest.TestCase): ...@@ -126,7 +126,6 @@ class Testprinter(unittest.TestCase):
printer, err = self.get_printer() printer, err = self.get_printer()
# FIXME: it's lame that i have to set these options directly. # FIXME: it's lame that i have to set these options directly.
printer._options.pixel_tests = True printer._options.pixel_tests = True
printer._options.enable_versioned_results = True
printer._options.new_baseline = True printer._options.new_baseline = True
printer._options.time_out_ms = 6000 printer._options.time_out_ms = 6000
printer._options.slow_time_out_ms = 12000 printer._options.slow_time_out_ms = 12000
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment