Rework run-mojo-python-tests to use layout-test-compatible json output.
This patch adds the ability to record the results of a test run into a JSON file that can be uploaded to the flakiness dashboard.

R=viettrungluu@chromium.org, phajdan.jr@chromium.org, ojan@chromium.org, szager@chromium.org
BUG=364709

Review URL: https://codereview.chromium.org/344233008

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@279836 0039d316-1c4b-4281-b951-d872f2087c98
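For context, the file written via --write-full-results-to follows the Chromium JSON test results format (version 3) consumed by the flakiness dashboard. The sketch below is illustrative only: the keys mirror those set in _FullResults in the diff, but the test names, metadata, and timestamp are invented for the example.

# Illustrative sketch only (not part of the patch): the shape of the JSON
# emitted by --write-full-results-to for a hypothetical run of two tests,
# one of which fails, invoked as e.g.:
#   run_mojo_python_tests.py --metadata foo=bar --write-full-results-to out.json
example_full_results = {
    'version': 3,
    'interrupted': False,
    'path_delimiter': '.',
    'seconds_since_epoch': 1403740000.0,  # time.time() when the run started
    'foo': 'bar',                         # each --metadata key=value lands at top level
    'num_failures_by_type': {
        'Failure': 1,
        'Pass': 1,
    },
    # 'tests' is a trie: dotted test names are split on path_delimiter.
    'tests': {
        'mojom_parser_unittest': {
            'ParserTest': {
                'testBase': {'expected': 'PASS', 'actual': 'PASS'},
                'testEnum': {'expected': 'PASS', 'actual': 'FAIL'},
            },
        },
    },
}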
@@ -3,29 +3,39 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-import optparse
+import argparse
+import json
 import os
 import re
 import sys
+import time
 import unittest
+
+
 def main():
-  parser = optparse.OptionParser()
+  parser = argparse.ArgumentParser()
   parser.usage = 'run_mojo_python_tests.py [options] [tests...]'
-  parser.add_option('-v', '--verbose', action='count', default=0)
-  parser.add_option('--unexpected-failures', metavar='FILENAME', action='store',
-                    help=('path to write a list of any tests that fail '
-                          'unexpectedly.'))
-  parser.epilog = ('If --unexpected-failures is passed, a list of the tests '
-                   'that failed (one per line) will be written to the file. '
-                   'If no tests failed, the file will be truncated (empty). '
-                   'If the test run did not complete properly, or something '
-                   'else weird happened, any existing file will be left '
-                   'unmodified. '
-                   'If --unexpected-failures is *not* passed, any existing '
-                   'file will be ignored and left unmodified.')
-  options, args = parser.parse_args()
+  parser.add_argument('-v', '--verbose', action='count', default=0)
+  parser.add_argument('--metadata', action='append', default=[],
+                      help=('optional key=value metadata that will be stored '
+                            'in the results files (can be used for revision '
+                            'numbers, etc.)'))
+  parser.add_argument('--write-full-results-to', metavar='FILENAME',
+                      action='store',
+                      help='path to write the list of full results to.')
+  parser.add_argument('tests', nargs='*')
+
+  args = parser.parse_args()
+
+  bad_metadata = False
+  for val in args.metadata:
+    if '=' not in val:
+      print >> sys.stderr, ('Error: malformed metadata "%s"' % val)
+      bad_metadata = True
+  if bad_metadata:
+    print >> sys.stderr
+    parser.print_help()
+    return 2

   chromium_src_dir = os.path.join(os.path.dirname(__file__),
                                   os.pardir,
@@ -36,38 +46,102 @@ def main():

   pylib_dir = os.path.join(chromium_src_dir, 'mojo', 'public',
                            'tools', 'bindings', 'pylib')
-  if args:
-    if not pylib_dir in sys.path:
+  if args.tests:
+    if pylib_dir not in sys.path:
       sys.path.append(pylib_dir)
     suite = unittest.TestSuite()
-    for test_name in args:
+    for test_name in args.tests:
       suite.addTests(loader.loadTestsFromName(test_name))
   else:
     suite = loader.discover(pylib_dir, pattern='*_unittest.py')

-  runner = unittest.runner.TextTestRunner(verbosity=(options.verbose + 1))
+  runner = unittest.runner.TextTestRunner(verbosity=(args.verbose + 1))
   result = runner.run(suite)

-  if options.unexpected_failures:
-    WriteUnexpectedFailures(result, options.unexpected_failures)
+  full_results = _FullResults(suite, result, args.metadata)
+  if args.write_full_results_to:
+    with open(args.write_full_results_to, 'w') as fp:
+      json.dump(full_results, fp, indent=2)
+      fp.write("\n")

   return 0 if result.wasSuccessful() else 1


-def WriteUnexpectedFailures(result, path):
+TEST_SEPARATOR = '.'
+
+
+def _FullResults(suite, result, metadata):
+  """Convert the unittest results to the Chromium JSON test result format.
+
+  This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
+  """
+
+  full_results = {}
+  full_results['interrupted'] = False
+  full_results['path_delimiter'] = TEST_SEPARATOR
+  full_results['version'] = 3
+  full_results['seconds_since_epoch'] = time.time()
+  for md in metadata:
+    key, val = md.split('=', 1)
+    full_results[key] = val
+
+  all_test_names = _AllTestNames(suite)
+  failed_test_names = _FailedTestNames(result)
+
+  full_results['num_failures_by_type'] = {
+      'Failure': len(failed_test_names),
+      'Pass': len(all_test_names) - len(failed_test_names),
+  }
+
+  full_results['tests'] = {}
+
+  for test_name in all_test_names:
+    value = {
+        'expected': 'PASS',
+        'actual': 'FAIL' if (test_name in failed_test_names) else 'PASS',
+    }
+    _AddPathToTrie(full_results['tests'], test_name, value)
+
+  return full_results
+
+
+def _AllTestNames(suite):
+  test_names = []
+  # _tests is protected  pylint: disable=W0212
+  for test in suite._tests:
+    if isinstance(test, unittest.suite.TestSuite):
+      test_names.extend(_AllTestNames(test))
+    else:
+      test_names.append(_UnitTestName(test))
+  return test_names
+
+
+def _FailedTestNames(result):
+  failed_test_names = set()
+  for (test, _) in result.failures + result.errors:
+    failed_test_names.add(_UnitTestName(test))
+  return failed_test_names
+
+
+def _AddPathToTrie(trie, path, value):
+  if TEST_SEPARATOR not in path:
+    trie[path] = value
+    return
+  directory, rest = path.split(TEST_SEPARATOR, 1)
+  if directory not in trie:
+    trie[directory] = {}
+  _AddPathToTrie(trie[directory], rest, value)
+
+
+_UNITTEST_NAME_REGEX = re.compile("(\w+) \(([\w.]+)\)")
+
+
+def _UnitTestName(test):
   # This regex and UnitTestName() extracts the test_name in a way
   # that can be handed back to the loader successfully.
-  test_description = re.compile("(\w+) \(([\w.]+)\)")
-
-  def UnitTestName(test):
-    m = test_description.match(str(test))
-    return "%s.%s" % (m.group(2), m.group(1))
-
-  with open(path, 'w') as fp:
-    for (test, _) in result.failures + result.errors:
-      fp.write(UnitTestName(test) + '\n')
+  m = _UNITTEST_NAME_REGEX.match(str(test))
+  assert m, "could not find test name from test description %s" % str(test)
+  return "%s.%s" % (m.group(2), m.group(1))


 if __name__ == '__main__':
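A note on the trie construction: _AddPathToTrie splits each dotted test name on TEST_SEPARATOR and nests one dict per component, which is what produces the hierarchical "tests" object in the results file. A minimal standalone sketch of that behavior, with invented test names:

TEST_SEPARATOR = '.'

def _AddPathToTrie(trie, path, value):
  # Recurse component by component, creating intermediate dicts as needed,
  # and store the leaf value under the final name component.
  if TEST_SEPARATOR not in path:
    trie[path] = value
    return
  directory, rest = path.split(TEST_SEPARATOR, 1)
  if directory not in trie:
    trie[directory] = {}
  _AddPathToTrie(trie[directory], rest, value)

trie = {}
_AddPathToTrie(trie, 'pkg.SomeTest.testA', {'expected': 'PASS', 'actual': 'PASS'})
_AddPathToTrie(trie, 'pkg.SomeTest.testB', {'expected': 'PASS', 'actual': 'FAIL'})
# trie is now:
#   {'pkg': {'SomeTest': {'testA': {... 'PASS'}, 'testB': {... 'FAIL'}}}}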
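Likewise, _UnitTestName relies on unittest's default string form for a test, "testMethod (package.module.TestClass)", and reorders it into the dotted name that the loader accepts back. A hedged sketch of just that transformation (the sample description string is made up):

import re

_UNITTEST_NAME_REGEX = re.compile(r"(\w+) \(([\w.]+)\)")

def _UnitTestName(description):
  # str(test) for a TestCase looks like "testFoo (pkg.mod.Class)"; flip it
  # into "pkg.mod.Class.testFoo" so it can be handed back to the loader.
  m = _UNITTEST_NAME_REGEX.match(description)
  assert m, "could not find test name from test description %s" % description
  return "%s.%s" % (m.group(2), m.group(1))

print _UnitTestName("testBase (mojom_parser_unittest.ParserTest)")
# prints: mojom_parser_unittest.ParserTest.testBase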