
[telemetry] Refactor run_tests to remove old cruft.

- Remove dead code and circuitous code paths.
- Make its API consistent with run_benchmark, taking advantage of command_line.OptparseCommand.
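
With this change the per-directory run_tests entry points collapse to the same
pattern run_benchmark uses. A condensed sketch (the real scripts in the diff
below also append tools/telemetry to sys.path before importing run_tests):

    #!/usr/bin/env python
    import os
    import sys

    from telemetry.unittest import run_tests

    if __name__ == '__main__':
      # Tell run_tests where the project lives and which directories to search
      # for *_unittest.py files, then hand argument parsing and execution to
      # the OptparseCommand-based RunTestsCommand. Its return value becomes
      # the process exit status.
      base_dir = os.path.dirname(os.path.realpath(__file__))
      run_tests.environment = run_tests.Environment(base_dir, [base_dir])
      sys.exit(run_tests.RunTestsCommand.main())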


BUG=346956
TEST=tools/telemetry/run_tests && tools/perf/run_tests

Review URL: https://codereview.chromium.org/228073002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@280232 0039d316-1c4b-4281-b951-d872f2087c98
Author: dtu@chromium.org
Date:   2014-06-27 03:58:23 +00:00
Parent: bf300e5c8f
Commit: 8465b58fd7

12 changed files with 166 additions and 184 deletions

@@ -15,17 +15,10 @@ This script DOES NOT run benchmarks. run_gpu_tests does that.
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, 'tools', 'telemetry'))
from telemetry.unittest import gtest_testrunner
from telemetry.unittest import run_tests
if __name__ == '__main__':
top_level_dir = os.path.abspath(os.path.dirname(__file__))
runner = gtest_testrunner.GTestTestRunner(print_result_after_run=False)
ret = run_tests.Main(sys.argv[1:], top_level_dir, top_level_dir, runner)
if runner.result:
runner.result.PrintSummary()
sys.exit(min(ret + runner.result.num_errors, 255))
else:
sys.exit(ret)
base_dir = os.path.dirname(os.path.realpath(__file__))
run_tests.environment = run_tests.Environment(base_dir, [base_dir])
sys.exit(run_tests.RunTestsCommand.main())

@@ -14,7 +14,7 @@ import unittest
from telemetry import test
from telemetry.core import discover
from telemetry.page import page_measurement
from telemetry.unittest import gtest_testrunner
from telemetry.unittest import gtest_unittest_results
from telemetry.unittest import options_for_unittests
@@ -55,7 +55,7 @@ def SmokeTestGenerator(benchmark):
def load_tests(_, _2, _3):
suite = gtest_testrunner.GTestTestSuite()
suite = gtest_unittest_results.GTestTestSuite()
benchmarks_dir = os.path.dirname(__file__)
top_level_dir = os.path.dirname(benchmarks_dir)

@@ -5,24 +5,18 @@
"""This script runs unit tests of the code in the perf directory.
This script DOES NOT run benchmarks. run_benchmarks and run_measurement do that.
This script DOES NOT run benchmarks. run_benchmark and run_measurement do that.
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))
from telemetry.unittest import gtest_testrunner
from telemetry.unittest import run_tests
if __name__ == '__main__':
top_level_dir = os.path.abspath(os.path.dirname(__file__))
runner = gtest_testrunner.GTestTestRunner(print_result_after_run=False)
ret = run_tests.Main(sys.argv[1:], top_level_dir, top_level_dir, runner)
if runner.result:
runner.result.PrintSummary()
sys.exit(min(ret + runner.result.num_errors, 255))
else:
sys.exit(ret)
base_dir = os.path.dirname(os.path.realpath(__file__))
run_tests.environment = run_tests.Environment(base_dir, [base_dir])
sys.exit(run_tests.RunTestsCommand.main())

@@ -6,17 +6,10 @@
import os
import sys
from telemetry.unittest import gtest_testrunner
from telemetry.unittest import run_tests
if __name__ == '__main__':
top_level_dir = os.path.abspath(os.path.dirname(__file__))
runner = gtest_testrunner.GTestTestRunner(print_result_after_run=False)
ret = run_tests.Main(sys.argv[1:], top_level_dir, top_level_dir, runner)
if runner.result:
runner.result.PrintSummary()
sys.exit(min(ret + runner.result.num_errors, 255))
else:
sys.exit(ret)
base_dir = os.path.dirname(os.path.realpath(__file__))
run_tests.environment = run_tests.Environment(base_dir, [base_dir])
sys.exit(run_tests.RunTestsCommand.main())

@@ -108,4 +108,4 @@ class SubcommandCommand(Command):
args.command.ProcessCommandLineArgs(parser, args)
def Run(self, args):
args.command().Run(args)
return args.command().Run(args)

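Forwarding the subcommand's return value is what lets test failures become a
non-zero process exit status: RunTestsCommand.Run (further down) returns a
failure/error count, and the new entry points pass it straight to sys.exit().
A simplified, self-contained sketch of that chain (the class here is a
stand-in, assuming OptparseCommand.main() hands back whatever Run() returns):

    import sys

    class RunTestsCommandSketch(object):
      # Stand-in for the real OptparseCommand subclass; no option parsing here.
      def Run(self, args):
        failures, errors = 1, 0        # hypothetical result counts
        return failures + errors       # the real Run() returns the same sum

      @classmethod
      def main(cls):
        return cls().Run(args=None)    # simplified: the real main() parses argv first

    sys.exit(RunTestsCommandSketch.main())  # exits non-zero when any test failed
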
@@ -16,6 +16,9 @@ class TestOutputStream(object):
assert isinstance(data, str)
self.output_data.append(data)
def flush(self):
pass
class BaseTestResultsUnittest(unittest.TestCase):

@@ -3,7 +3,6 @@
# found in the LICENSE file.
import logging
import sys
import time
from telemetry.results import page_test_results
@@ -25,7 +24,7 @@ class GTestTestResults(page_test_results.PageTestResults):
print >> self._output_stream, self._GetStringFromExcInfo(err)
print >> self._output_stream, '[ FAILED ]', page.display_name, (
'(%0.f ms)' % self._GetMs())
sys.stdout.flush()
self._output_stream.flush()
def AddError(self, page, err):
super(GTestTestResults, self).AddError(page, err)
@@ -37,27 +36,25 @@ class GTestTestResults(page_test_results.PageTestResults):
def StartTest(self, page):
super(GTestTestResults, self).StartTest(page)
print >> self._output_stream, '[ RUN ]', (
page.display_name)
sys.stdout.flush()
print >> self._output_stream, '[ RUN ]', page.display_name
self._output_stream.flush()
self._timestamp = time.time()
def AddSuccess(self, page):
super(GTestTestResults, self).AddSuccess(page)
test_name = page.display_name
print >> self._output_stream, '[ OK ]', test_name, (
print >> self._output_stream, '[ OK ]', page.display_name, (
'(%0.f ms)' % self._GetMs())
sys.stdout.flush()
self._output_stream.flush()
def AddSkip(self, page, reason):
super(GTestTestResults, self).AddSkip(page, reason)
test_name = page.display_name
logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason)
logging.warning('===== SKIPPING TEST %s: %s =====',
page.display_name, reason)
if self._timestamp == None:
self._timestamp = time.time()
print >> self._output_stream, '[ OK ]', test_name, (
print >> self._output_stream, '[ OK ]', page.display_name, (
'(%0.f ms)' % self._GetMs())
sys.stdout.flush()
self._output_stream.flush()
def PrintSummary(self):
unit = 'test' if len(self.successes) == 1 else 'tests'
@@ -77,4 +74,4 @@ class GTestTestResults(page_test_results.PageTestResults):
unit = 'TEST' if count == 1 else 'TESTS'
print >> self._output_stream, '%d FAILED %s' % (count, unit)
print >> self._output_stream
sys.stdout.flush()
self._output_stream.flush()

@@ -1,48 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a unittest TestRunner with GTest output.
This output is ported from gtest.cc's PrettyUnitTestResultPrinter, but
designed to be a drop-in replacement for unittest's TextTestRunner.
"""
import sys
import time
import unittest
from telemetry.unittest import gtest_unittest_results
class GTestTestSuite(unittest.TestSuite):
def __call__(self, *args, **kwargs):
result = args[0]
timestamp = time.time()
unit = 'test' if len(self._tests) == 1 else 'tests'
if not any(isinstance(x, unittest.TestSuite) for x in self._tests):
print '[----------] %d %s' % (len(self._tests), unit)
for test in self._tests:
if result.shouldStop:
break
test(result)
endts = time.time()
ms = (endts - timestamp) * 1000
if not any(isinstance(x, unittest.TestSuite) for x in self._tests):
print '[----------] %d %s (%d ms total)' % (len(self._tests), unit, ms)
print
return result
class GTestTestRunner(object):
def __init__(self, print_result_after_run=True):
self.print_result_after_run = print_result_after_run
self.result = gtest_unittest_results.GTestUnittestResults(sys.stdout)
def run(self, test):
"Run the given test case or test suite."
test(self.result)
if self.print_result_after_run:
self.result.PrintSummary()
return self.result

@@ -8,11 +8,20 @@ import time
import unittest
class GTestTestSuite(unittest.TestSuite):
def run(self, result): # pylint: disable=W0221
result.StartTestSuite(self)
result = super(GTestTestSuite, self).run(result)
result.StopTestSuite(self)
return result
class GTestUnittestResults(unittest.TestResult):
def __init__(self, output_stream):
super(GTestUnittestResults, self).__init__()
self._output_stream = output_stream
self._timestamp = None
self._test_start_time = None
self._test_suite_start_time = None
self._successes_count = 0
@property
@@ -20,7 +29,7 @@ class GTestUnittestResults(unittest.TestResult):
return self._successes_count
def _GetMs(self):
return (time.time() - self._timestamp) * 1000
return (time.time() - self._test_start_time) * 1000
@property
def num_errors(self):
@@ -51,7 +60,7 @@ class GTestUnittestResults(unittest.TestResult):
print >> self._output_stream, '[ RUN ]', (
GTestUnittestResults._formatTestname(test))
sys.stdout.flush()
self._timestamp = time.time()
self._test_start_time = time.time()
def addSuccess(self, test):
super(GTestUnittestResults, self).addSuccess(test)
@@ -65,12 +74,31 @@ class GTestUnittestResults(unittest.TestResult):
super(GTestUnittestResults, self).addSkip(test, reason)
test_name = GTestUnittestResults._formatTestname(test)
logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason)
if self._timestamp == None:
self._timestamp = time.time()
if self._test_start_time == None:
self._test_start_time = time.time()
print >> self._output_stream, '[ OK ]', test_name, (
'(%0.f ms)' % self._GetMs())
sys.stdout.flush()
def StartTestSuite(self, suite):
contains_test_suites = any(isinstance(test, unittest.TestSuite)
for test in suite)
if not contains_test_suites:
test_count = len([test for test in suite])
unit = 'test' if test_count == 1 else 'tests'
print '[----------]', test_count, unit
self._test_suite_start_time = time.time()
def StopTestSuite(self, suite):
contains_test_suites = any(isinstance(test, unittest.TestSuite)
for test in suite)
if not contains_test_suites:
elapsed_ms = (time.time() - self._test_suite_start_time) * 1000
test_count = len([test for test in suite])
unit = 'test' if test_count == 1 else 'tests'
print '[----------]', test_count, unit, '(%d ms total)' % elapsed_ms
print
def PrintSummary(self):
unit = 'test' if self._successes_count == 1 else 'tests'
print >> self._output_stream, '[ PASSED ]', (

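With gtest_testrunner.py deleted, callers now drive a GTestTestSuite directly
with a GTestUnittestResults object, which is exactly what RunTestsCommand.Run
does further down. A minimal standalone sketch (SampleTest is a hypothetical
placeholder, not part of this change):

    import sys
    import unittest

    from telemetry.unittest import gtest_unittest_results

    class SampleTest(unittest.TestCase):
      def testPasses(self):
        self.assertTrue(True)

    suite = gtest_unittest_results.GTestTestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(SampleTest))
    result = gtest_unittest_results.GTestUnittestResults(sys.stdout)
    suite(result)          # runs the suite; prints the [ RUN ] / [ OK ] lines per test
    result.PrintSummary()  # prints the final [ PASSED ] summary
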
@@ -9,14 +9,16 @@ if unit tests are not running.
This allows multiple unit tests to use a specific
browser, in face of multiple options."""
_options = None
_browser_type = None
def Set(options, browser_type):
def Set(options):
global _options
global _browser_type
_options = options
_browser_type = browser_type
def GetCopy():
if not _options:
@@ -24,10 +26,8 @@ def GetCopy():
return _options.Copy()
def AreSet():
if _options:
return True
return False
def GetBrowserType():
return _browser_type

@@ -3,73 +3,86 @@
# found in the LICENSE file.
import logging
import sys
import unittest
from telemetry import decorators
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.core import util
from telemetry.unittest import gtest_testrunner
from telemetry.unittest import gtest_unittest_results
from telemetry.unittest import options_for_unittests
class Environment(object):
def __init__(self, top_level_dir, test_dirs):
self._top_level_dir = top_level_dir
self._test_dirs = tuple(test_dirs)
@property
def top_level_dir(self):
return self._top_level_dir
@property
def test_dirs(self):
return self._test_dirs
def Discover(start_dir, top_level_dir=None, pattern='test*.py'):
loader = unittest.defaultTestLoader
loader.suiteClass = gtest_testrunner.GTestTestSuite
subsuites = []
loader.suiteClass = gtest_unittest_results.GTestTestSuite
test_suites = []
modules = discover.DiscoverModules(start_dir, top_level_dir, pattern)
for module in modules:
if hasattr(module, 'suite'):
new_suite = module.suite()
suite = module.suite()
else:
new_suite = loader.loadTestsFromModule(module)
if new_suite.countTestCases():
subsuites.append(new_suite)
return gtest_testrunner.GTestTestSuite(subsuites)
suite = loader.loadTestsFromModule(module)
if suite.countTestCases():
test_suites.append(suite)
return test_suites
def FilterSuite(suite, predicate):
new_suite = suite.__class__()
for x in suite:
if isinstance(x, unittest.TestSuite):
subsuite = FilterSuite(x, predicate)
if subsuite.countTestCases() == 0:
continue
new_suite.addTest(subsuite)
continue
assert isinstance(x, unittest.TestCase)
if predicate(x):
new_suite.addTest(x)
for test in suite:
if isinstance(test, unittest.TestSuite):
subsuite = FilterSuite(test, predicate)
if subsuite.countTestCases():
new_suite.addTest(subsuite)
else:
assert isinstance(test, unittest.TestCase)
if predicate(test):
new_suite.addTest(test)
return new_suite
def DiscoverAndRunTests(dir_name, args, top_level_dir, possible_browser,
default_options, runner):
if not runner:
runner = gtest_testrunner.GTestTestRunner(print_result_after_run=True)
suite = Discover(dir_name, top_level_dir, '*_unittest.py')
def DiscoverTests(search_dirs, top_level_dir, possible_browser,
selected_tests=None, run_disabled_tests=False):
def IsTestSelected(test):
if len(args) != 0:
if selected_tests:
found = False
for name in args:
for name in selected_tests:
if name in test.id():
found = True
if not found:
return False
if default_options.run_disabled_tests:
if run_disabled_tests:
return True
# pylint: disable=W0212
if not hasattr(test, '_testMethodName'):
return True
method = getattr(test, test._testMethodName)
return decorators.IsEnabled(method, possible_browser)
filtered_suite = FilterSuite(suite, IsTestSelected)
test_result = runner.run(filtered_suite)
return test_result
wrapper_suite = gtest_unittest_results.GTestTestSuite()
for search_dir in search_dirs:
wrapper_suite.addTests(Discover(search_dir, top_level_dir, '*_unittest.py'))
return FilterSuite(wrapper_suite, IsTestSelected)
def RestoreLoggingLevel(func):
@@ -87,53 +100,61 @@ def RestoreLoggingLevel(func):
return _LoggingRestoreWrapper
@RestoreLoggingLevel
def Main(args, start_dir, top_level_dir, runner=None):
"""Unit test suite that collects all test cases for telemetry."""
# Add unittest_data to the path so we can import packages from it.
util.AddDirToPythonPath(util.GetUnittestDataDir())
environment = None
default_options = browser_options.BrowserFinderOptions()
default_options.browser_type = 'any'
parser = default_options.CreateParser('run_tests [options] [test names]')
parser.add_option('--repeat-count', dest='run_test_repeat_count',
type='int', default=1,
help='Repeats each a provided number of times.')
parser.add_option('-d', '--also-run-disabled-tests',
dest='run_disabled_tests',
action='store_true', default=False,
help='Ignore @Disabled and @Enabled restrictions.')
class RunTestsCommand(command_line.OptparseCommand):
"""Run unit tests"""
_, args = parser.parse_args(args)
usage = '[test_name ...] [<options>]'
if default_options.verbosity == 0:
logging.getLogger().setLevel(logging.WARN)
@classmethod
def CreateParser(cls):
options = browser_options.BrowserFinderOptions()
options.browser_type = 'any'
parser = options.CreateParser('%%prog %s' % cls.usage)
return parser
from telemetry.core import browser_finder
try:
browser_to_create = browser_finder.FindBrowser(default_options)
except browser_finder.BrowserFinderException, ex:
logging.error(str(ex))
return 1
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option('--repeat-count', dest='run_test_repeat_count',
type='int', default=1,
help='Repeats each a provided number of times.')
parser.add_option('-d', '--also-run-disabled-tests',
dest='run_disabled_tests',
action='store_true', default=False,
help='Ignore @Disabled and @Enabled restrictions.')
if browser_to_create == None:
logging.error('No browser found of type %s. Cannot run tests.',
default_options.browser_type)
logging.error('Re-run with --browser=list to see available browser types.')
return 1
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
if args.verbosity == 0:
logging.getLogger().setLevel(logging.WARN)
options_for_unittests.Set(default_options,
browser_to_create.browser_type)
try:
success = True
for _ in xrange(default_options.run_test_repeat_count):
success = success and DiscoverAndRunTests(
start_dir, args, top_level_dir, browser_to_create, default_options,
runner)
if success:
return 0
finally:
options_for_unittests.Set(None, None)
try:
possible_browser = browser_finder.FindBrowser(args)
except browser_finder.BrowserFinderException, ex:
parser.error(ex)
return 1
if not possible_browser:
parser.error('No browser found of type %s. Cannot run tests.\n'
'Re-run with --browser=list to see '
'available browser types.' % args.browser_type)
cls.test_suite = DiscoverTests(
environment.test_dirs, environment.top_level_dir, possible_browser,
args.positional_args, args.run_disabled_tests)
@RestoreLoggingLevel
def Run(self, args):
util.AddDirToPythonPath(util.GetUnittestDataDir())
result = gtest_unittest_results.GTestUnittestResults(sys.stdout)
try:
options_for_unittests.Set(args)
for _ in xrange(args.run_test_repeat_count):
self.test_suite(result)
finally:
options_for_unittests.Set(None)
result.PrintSummary()
return len(result.failures) + len(result.errors)

@@ -32,8 +32,9 @@ class MockPlatform(object):
class RunTestsUnitTest(unittest.TestCase):
def setUp(self):
self.suite = run_tests.Discover(
util.GetTelemetryDir(), util.GetTelemetryDir(), 'disabled_cases.py')
self.suite = unittest.TestSuite()
self.suite.addTests(run_tests.Discover(
util.GetTelemetryDir(), util.GetTelemetryDir(), 'disabled_cases.py'))
def _GetEnabledTests(self, browser_type, os_name, os_version_name,
supports_tab_control):
@@ -45,8 +46,8 @@ class RunTestsUnitTest(unittest.TestCase):
enabled_tests = set()
for i in run_tests.FilterSuite(self.suite, MockPredicate)._tests:
for j in i._tests:
for k in j._tests:
for j in i:
for k in j:
enabled_tests.add(k._testMethodName)
return enabled_tests