0

[Android] Implement instrumentation tests in platform mode.

BUG=428729

Review URL: https://codereview.chromium.org/794923003

Cr-Commit-Position: refs/heads/master@{#311189}
This commit is contained in:
jbudorick
2015-01-12 18:51:06 -08:00
committed by Commit bot
parent f8330b75d7
commit 911be58db8
12 changed files with 710 additions and 189 deletions

@ -63,7 +63,8 @@ def CommonChecks(input_api, output_api):
unit_tests=[
J('pylib', 'device', 'device_utils_test.py'),
J('pylib', 'gtest', 'gtest_test_instance_test.py'),
J('pylib', 'instrumentation', 'test_runner_test.py'),
J('pylib', 'instrumentation',
'instrumentation_test_instance_test.py'),
J('pylib', 'results', 'json_results_test.py'),
J('pylib', 'utils', 'md5sum_test.py'),
],

@ -64,6 +64,11 @@ class BaseTestResult(object):
"""Get the test name."""
return self._name
def SetType(self, test_type):
  """Record |test_type| as this result's type.

  Asserts that |test_type| is one of the values in ResultType.GetTypes().
  """
  assert test_type in ResultType.GetTypes()
  self._test_type = test_type
def GetType(self):
  """Return the previously recorded test result type."""
  return self._test_type

@ -4,6 +4,7 @@
from pylib import constants
from pylib.gtest import gtest_test_instance
from pylib.instrumentation import instrumentation_test_instance
from pylib.utils import isolator
from pylib.uirobot import uirobot_test_instance
@ -14,8 +15,11 @@ def CreateTestInstance(args, error_func):
if args.command == 'gtest':
return gtest_test_instance.GtestTestInstance(
args, isolator.Isolator(constants.ISOLATE_DEPS_DIR), error_func)
if args.command == 'uirobot':
elif args.command == 'instrumentation':
return instrumentation_test_instance.InstrumentationTestInstance(
args, isolator.Isolator(constants.ISOLATE_DEPS_DIR), error_func)
elif args.command == 'uirobot':
return uirobot_test_instance.UirobotTestInstance(args)
# TODO(jbudorick) Add instrumentation test instance.
error_func('Unable to create %s test instance.' % args.command)

@ -4,7 +4,9 @@
from pylib.gtest import gtest_test_instance
from pylib.gtest import local_device_gtest_run
from pylib.instrumentation import instrumentation_test_instance
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_instrumentation_test_run
from pylib.remote.device import remote_device_environment
from pylib.remote.device import remote_device_gtest_run
from pylib.remote.device import remote_device_uirobot_run
@ -15,16 +17,19 @@ def CreateTestRun(_args, env, test_instance, error_func):
if isinstance(env, local_device_environment.LocalDeviceEnvironment):
if isinstance(test_instance, gtest_test_instance.GtestTestInstance):
return local_device_gtest_run.LocalDeviceGtestRun(env, test_instance)
if isinstance(test_instance,
instrumentation_test_instance.InstrumentationTestInstance):
return (local_device_instrumentation_test_run
.LocalDeviceInstrumentationTestRun(env, test_instance))
if isinstance(env, remote_device_environment.RemoteDeviceEnvironment):
if isinstance(test_instance, gtest_test_instance.GtestTestInstance):
return remote_device_gtest_run.RemoteDeviceGtestRun(env, test_instance)
# TODO(rnephew): Add remote_device instrumentation test runs.
if isinstance(test_instance, uirobot_test_instance.UirobotTestInstance):
return remote_device_uirobot_run.RemoteDeviceUirobotRun(
env, test_instance)
# TODO(jbudorick): Add local instrumentation test runs.
# TODO(rnephew): Add remote_device instrumentation test runs.
error_func('Unable to create test run for %s tests in %s environment'
% (str(test_instance), str(env)))

@ -0,0 +1,473 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pickle
import sys
from pylib import cmd_helper
from pylib import constants
from pylib import flag_changer
from pylib.base import base_test_result
from pylib.base import test_instance
from pylib.instrumentation import test_result
from pylib.utils import apk_helper
from pylib.utils import md5sum
from pylib.utils import proguard
sys.path.append(
os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib', 'common'))
import unittest_util
_DEFAULT_ANNOTATIONS = [
'Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
'EnormousTest', 'IntegrationTest']
_PICKLE_FORMAT_VERSION = 10
# TODO(jbudorick): Make these private class methods of
# InstrumentationTestInstance once the instrumentation test_runner is
# deprecated.
def ParseAmInstrumentRawOutput(raw_output):
  """Parses the line-oriented output of an |am instrument -r| call.

  Args:
    raw_output: the output of an |am instrument -r| call as a list of lines.

  Returns:
    A 3-tuple containing:
      - the instrumentation code as an integer (None if never reported)
      - the instrumentation result as a list of lines
      - the instrumentation statuses received as a list of 2-tuples
        containing:
        - the status code as an integer
        - the bundle dump as a dict mapping string keys to a list of
          strings, one for each line.
  """
  PREFIX_STATUS = 'INSTRUMENTATION_STATUS: '
  PREFIX_STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE: '
  PREFIX_RESULT = 'INSTRUMENTATION_RESULT: '
  PREFIX_CODE = 'INSTRUMENTATION_CODE: '

  instr_code = None
  result_lines = []
  statuses = []
  bundle = {}
  # Which prefix the previous recognized line carried, so that bare
  # continuation lines can be appended to the right accumulator.
  previous = None
  continuation_key = None

  for line in raw_output:
    if line.startswith(PREFIX_STATUS):
      payload = line[len(PREFIX_STATUS):]
      if '=' in payload:
        key, _, value = payload.partition('=')
        bundle[key] = [value]
        previous = PREFIX_STATUS
        continuation_key = key
      else:
        logging.debug('Unknown "%s" line: %s', PREFIX_STATUS, line)
    elif line.startswith(PREFIX_STATUS_CODE):
      # A status code terminates the bundle accumulated so far.
      statuses.append((int(line[len(PREFIX_STATUS_CODE):]), bundle))
      bundle = {}
      previous = PREFIX_STATUS_CODE
    elif line.startswith(PREFIX_RESULT):
      result_lines.append(line[len(PREFIX_RESULT):])
      previous = PREFIX_RESULT
    elif line.startswith(PREFIX_CODE):
      instr_code = int(line[len(PREFIX_CODE):])
      previous = PREFIX_CODE
    elif previous == PREFIX_STATUS:
      # Continuation of a multi-line bundle value.
      bundle[continuation_key].append(line)
    elif previous == PREFIX_RESULT:
      result_lines.append(line)

  return (instr_code, result_lines, statuses)
def GenerateTestResult(test_name, instr_statuses, start_ms, duration_ms):
  """Builds an InstrumentationTestResult for |test_name| from |instr_statuses|.

  Args:
    test_name: The name of the test as "class#method".
    instr_statuses: A list of 2-tuples containing:
      - the status code as an integer
      - the bundle dump as a dict mapping string keys to string values
      Note that this is the same as the third item in the 3-tuple returned by
      |_ParseAmInstrumentRawOutput|.
    start_ms: The start time of the test in milliseconds.
    duration_ms: The duration of the test in milliseconds.

  Returns:
    An InstrumentationTestResult object.
  """
  STATUS_START = 1
  STATUS_OK = 0
  STATUS_ERROR = -1
  STATUS_FAIL = -2

  log = ''
  result_type = base_test_result.ResultType.UNKNOWN

  for status_code, bundle in instr_statuses:
    if status_code == STATUS_START:
      continue
    if status_code == STATUS_OK:
      reported_test = '%s#%s' % (
          ''.join(bundle.get('class', [''])),
          ''.join(bundle.get('test', [''])))
      skipped = ''.join(bundle.get('test_skipped', ['']))

      # Only the first OK for the expected test counts as a pass; later
      # statuses must not upgrade a result that was already determined.
      if (reported_test == test_name
          and result_type == base_test_result.ResultType.UNKNOWN):
        result_type = base_test_result.ResultType.PASS
      elif skipped.lower() in ('true', '1', 'yes'):
        result_type = base_test_result.ResultType.SKIP
        logging.info('Skipped ' + test_name)
    else:
      if status_code not in (STATUS_ERROR, STATUS_FAIL):
        logging.error('Unrecognized status code %d. Handling as an error.',
                      status_code)
      result_type = base_test_result.ResultType.FAIL
      if 'stack' in bundle:
        log = '\n'.join(bundle['stack'])

  return test_result.InstrumentationTestResult(
      test_name, result_type, start_ms, duration_ms, log=log)
class InstrumentationTestInstance(test_instance.TestInstance):
  """A TestInstance describing a configuration of instrumentation tests."""

  def __init__(self, args, isolate_delegate, error_func):
    """Initializes the instance from parsed command-line |args|.

    Args:
      args: The parsed command-line arguments.
      isolate_delegate: An isolator delegate used for isolate-based data
        dependencies.
      error_func: Called with an error message on unrecoverable setup
        problems (e.g. missing APKs).
    """
    super(InstrumentationTestInstance, self).__init__()

    self._apk_under_test = None
    self._package_info = None
    self._test_apk = None
    self._test_jar = None
    self._test_package = None
    self._test_runner = None
    self._test_support_apk = None
    # BUG FIX: this previously called self.__inititalizeApkAttributes (note
    # the extra "it"), which does not exist — with name mangling it resolves
    # to a different attribute than the method defined below, so constructing
    # the instance raised AttributeError.
    self.__initializeApkAttributes(args, error_func)

    self._data_deps = None
    self._isolate_abs_path = None
    self._isolate_delegate = None
    self._isolated_abs_path = None
    self._test_data = None
    self.__initializeDataDependencyAttributes(args, isolate_delegate)

    self._annotations = None
    self._excluded_annotations = None
    self._test_filter = None
    self.__initializeTestFilterAttributes(args)

    self._flags = None
    self.__initializeFlagAttributes(args)

  def __initializeApkAttributes(self, args, error_func):
    """Resolves APK/JAR paths and package metadata from |args|."""
    if args.apk_under_test.endswith('.apk'):
      self._apk_under_test = args.apk_under_test
    else:
      self._apk_under_test = os.path.join(
          constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
          '%s.apk' % args.apk_under_test)

    if not os.path.exists(self._apk_under_test):
      error_func('Unable to find APK under test: %s' % self._apk_under_test)

    if args.test_apk.endswith('.apk'):
      test_apk_root = os.path.splitext(os.path.basename(args.test_apk))[0]
      self._test_apk = args.test_apk
    else:
      test_apk_root = args.test_apk
      self._test_apk = os.path.join(
          constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
          '%s.apk' % args.test_apk)

    self._test_jar = os.path.join(
        constants.GetOutDirectory(), constants.SDK_BUILD_TEST_JAVALIB_DIR,
        '%s.jar' % test_apk_root)
    self._test_support_apk = os.path.join(
        constants.GetOutDirectory(), constants.SDK_BUILD_TEST_JAVALIB_DIR,
        '%sSupport.apk' % test_apk_root)

    if not os.path.exists(self._test_apk):
      error_func('Unable to find test APK: %s' % self._test_apk)
    if not os.path.exists(self._test_jar):
      error_func('Unable to find test JAR: %s' % self._test_jar)

    self._test_package = apk_helper.GetPackageName(self.test_apk)
    self._test_runner = apk_helper.GetInstrumentationName(self.test_apk)

    self._package_info = None
    for package_info in constants.PACKAGE_INFO.itervalues():
      if self._test_package == package_info.test_package:
        self._package_info = package_info
    if not self._package_info:
      error_func('Unable to find package info for %s' % self._test_package)

  def __initializeDataDependencyAttributes(self, args, isolate_delegate):
    """Sets up isolate- and --test-data-based data dependencies."""
    self._data_deps = []
    if args.isolate_file_path:
      self._isolate_abs_path = os.path.abspath(args.isolate_file_path)
      self._isolate_delegate = isolate_delegate
      self._isolated_abs_path = os.path.join(
          constants.GetOutDirectory(), '%s.isolated' % self._test_package)
    else:
      self._isolate_delegate = None

    # TODO(jbudorick): Deprecate and remove --test-data once data dependencies
    # are fully converted to isolate.
    if args.test_data:
      logging.info('Data dependencies specified via --test-data')
      self._test_data = args.test_data
    else:
      self._test_data = None

    if not self._isolate_delegate and not self._test_data:
      logging.warning('No data dependencies will be pushed.')

  def __initializeTestFilterAttributes(self, args):
    """Computes the test filter and annotation inclusion/exclusion maps."""
    self._test_filter = args.test_filter

    def annotation_dict_element(a):
      a = a.split('=')
      return (a[0], a[1] if len(a) == 2 else None)

    if args.annotation_str:
      self._annotations = dict(
          annotation_dict_element(a)
          for a in args.annotation_str.split(','))
    elif not self._test_filter:
      # With neither a filter nor explicit annotations, use the defaults.
      self._annotations = dict(
          annotation_dict_element(a)
          for a in _DEFAULT_ANNOTATIONS)
    else:
      self._annotations = {}

    if args.exclude_annotation_str:
      self._excluded_annotations = dict(
          annotation_dict_element(a)
          for a in args.exclude_annotation_str.split(','))
    else:
      self._excluded_annotations = {}

  def __initializeFlagAttributes(self, args):
    """Collects command-line flags to pass to the app under test."""
    self._flags = ['--disable-fre', '--enable-test-intents']
    # TODO(jbudorick): Transition "--device-flags" to "--device-flags-file"
    if hasattr(args, 'device_flags') and args.device_flags:
      with open(args.device_flags) as device_flags_file:
        stripped_lines = (l.strip() for l in device_flags_file)
        self._flags.extend([flag for flag in stripped_lines if flag])
    if hasattr(args, 'device_flags_file') and args.device_flags_file:
      with open(args.device_flags_file) as device_flags_file:
        stripped_lines = (l.strip() for l in device_flags_file)
        self._flags.extend([flag for flag in stripped_lines if flag])

  @property
  def apk_under_test(self):
    return self._apk_under_test

  @property
  def flags(self):
    return self._flags

  @property
  def package_info(self):
    return self._package_info

  @property
  def test_apk(self):
    return self._test_apk

  @property
  def test_jar(self):
    return self._test_jar

  @property
  def test_support_apk(self):
    return self._test_support_apk

  @property
  def test_package(self):
    return self._test_package

  @property
  def test_runner(self):
    return self._test_runner

  #override
  def TestType(self):
    return 'instrumentation'

  #override
  def SetUp(self):
    """Remaps isolate dependencies and registers --test-data dependencies."""
    if self._isolate_delegate:
      self._isolate_delegate.Remap(
          self._isolate_abs_path, self._isolated_abs_path)
      self._isolate_delegate.MoveOutputDeps()
      self._data_deps.extend([(constants.ISOLATE_DEPS_DIR, None)])

    # TODO(jbudorick): Convert existing tests that depend on the --test-data
    # mechanism to isolate, then remove this.
    if self._test_data:
      for t in self._test_data:
        device_rel_path, host_rel_path = t.split(':')
        host_abs_path = os.path.join(constants.DIR_SOURCE_ROOT, host_rel_path)
        self._data_deps.extend(
            [(host_abs_path,
              [None, 'chrome', 'test', 'data', device_rel_path])])

  def GetDataDependencies(self):
    """Returns the list of (host path, device path) dependency tuples."""
    return self._data_deps

  def GetTests(self):
    """Returns the filtered, inflated list of tests to run.

    Uses the cached proguard pickle when it is fresh; otherwise regenerates
    it from the test JAR via proguard.
    """
    pickle_path = '%s-proguard.pickle' % self.test_jar
    try:
      tests = self._GetTestsFromPickle(pickle_path, self.test_jar)
    except self.ProguardPickleException as e:
      logging.info('Getting tests from JAR via proguard. (%s)' % str(e))
      tests = self._GetTestsFromProguard(self.test_jar)
      self._SaveTestsToPickle(pickle_path, self.test_jar, tests)
    return self._InflateTests(self._FilterTests(tests))

  class ProguardPickleException(Exception):
    """Raised when the cached proguard pickle cannot be used."""
    pass

  def _GetTestsFromPickle(self, pickle_path, jar_path):
    """Loads the cached test list, validating freshness, version and MD5."""
    if not os.path.exists(pickle_path):
      raise self.ProguardPickleException('%s does not exist.' % pickle_path)
    if os.path.getmtime(pickle_path) <= os.path.getmtime(jar_path):
      raise self.ProguardPickleException(
          '%s newer than %s.' % (jar_path, pickle_path))

    with open(pickle_path, 'r') as pickle_file:
      pickle_data = pickle.loads(pickle_file.read())
    jar_md5, _ = md5sum.CalculateHostMd5Sums(jar_path)[0]

    try:
      if pickle_data['VERSION'] != _PICKLE_FORMAT_VERSION:
        raise self.ProguardPickleException('PICKLE_FORMAT_VERSION has changed.')
      if pickle_data['JAR_MD5SUM'] != jar_md5:
        raise self.ProguardPickleException('JAR file MD5 sum differs.')
      return pickle_data['TEST_METHODS']
    except TypeError as e:
      logging.error(pickle_data)
      raise self.ProguardPickleException(str(e))

  def _GetTestsFromProguard(self, jar_path):
    """Dumps |jar_path| via proguard and extracts test classes and methods."""
    p = proguard.Dump(jar_path)

    def is_test_class(c):
      return c['class'].endswith('Test')

    def is_test_method(m):
      return m['method'].startswith('test')

    class_lookup = dict((c['class'], c) for c in p['classes'])
    def recursive_get_class_annotations(c):
      # Annotations are inherited: merge superclass annotations first so the
      # subclass's own annotations take precedence.
      s = c['superclass']
      if s in class_lookup:
        a = recursive_get_class_annotations(class_lookup[s])
      else:
        a = {}
      a.update(c['annotations'])
      return a

    def stripped_test_class(c):
      return {
        'class': c['class'],
        'annotations': recursive_get_class_annotations(c),
        'methods': [m for m in c['methods'] if is_test_method(m)],
      }

    return [stripped_test_class(c) for c in p['classes']
            if is_test_class(c)]

  def _SaveTestsToPickle(self, pickle_path, jar_path, tests):
    """Caches |tests| keyed by pickle format version and the JAR's MD5 sum."""
    jar_md5, _ = md5sum.CalculateHostMd5Sums(jar_path)[0]
    pickle_data = {
      'VERSION': _PICKLE_FORMAT_VERSION,
      'JAR_MD5SUM': jar_md5,
      'TEST_METHODS': tests,
    }
    with open(pickle_path, 'w') as pickle_file:
      pickle.dump(pickle_data, pickle_file)

  def _FilterTests(self, tests):
    """Applies the gtest-style name filter and annotation filters."""

    def gtest_filter(c, m):
      t = ['%s.%s' % (c['class'].split('.')[-1], m['method'])]
      return (not self._test_filter
              or unittest_util.FilterTestNames(t, self._test_filter))

    def annotation_filter(all_annotations):
      if not self._annotations:
        return True
      return any_annotation_matches(self._annotations, all_annotations)

    def excluded_annotation_filter(all_annotations):
      if not self._excluded_annotations:
        return True
      return not any_annotation_matches(self._excluded_annotations,
                                        all_annotations)

    def any_annotation_matches(annotations, all_annotations):
      return any(
          ak in all_annotations and (av is None or av == all_annotations[ak])
          for ak, av in annotations.iteritems())

    filtered_classes = []
    for c in tests:
      filtered_methods = []
      for m in c['methods']:
        # Gtest filtering
        if not gtest_filter(c, m):
          continue

        # Method annotations override class annotations of the same name.
        all_annotations = dict(c['annotations'])
        all_annotations.update(m['annotations'])
        if (not annotation_filter(all_annotations)
            or not excluded_annotation_filter(all_annotations)):
          continue

        filtered_methods.append(m)

      if filtered_methods:
        filtered_class = dict(c)
        filtered_class['methods'] = filtered_methods
        filtered_classes.append(filtered_class)

    return filtered_classes

  def _InflateTests(self, tests):
    """Expands class-grouped tests into one dict per test method."""
    inflated_tests = []
    for c in tests:
      for m in c['methods']:
        a = dict(c['annotations'])
        a.update(m['annotations'])
        inflated_tests.append({
            'class': c['class'],
            'method': m['method'],
            'annotations': a,
        })
    return inflated_tests

  @staticmethod
  def ParseAmInstrumentRawOutput(raw_output):
    """See the module-level |ParseAmInstrumentRawOutput|."""
    return ParseAmInstrumentRawOutput(raw_output)

  @staticmethod
  def GenerateTestResult(test_name, instr_statuses, start_ms, duration_ms):
    """See the module-level |GenerateTestResult|."""
    return GenerateTestResult(test_name, instr_statuses, start_ms, duration_ms)

  #override
  def TearDown(self):
    """Releases the isolate remap, if one was created in SetUp."""
    if self._isolate_delegate:
      self._isolate_delegate.Clear()

@ -14,25 +14,22 @@ import unittest
from pylib import constants
from pylib.base import base_test_result
from pylib.instrumentation import test_runner
from pylib.instrumentation import instrumentation_test_instance
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
class InstrumentationTestRunnerTest(unittest.TestCase):
class InstrumentationTestInstanceTest(unittest.TestCase):
def setUp(self):
options = mock.Mock()
options.tool = ''
package = mock.Mock()
self.instance = test_runner.TestRunner(
options, '123456789abcdef0', 0, package)
def testParseAmInstrumentRawOutput_nothing(self):
code, result, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(['']))
instrumentation_test_instance.ParseAmInstrumentRawOutput(['']))
self.assertEqual(None, code)
self.assertEqual([], result)
self.assertEqual([], statuses)
@ -48,7 +45,7 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
]
code, result, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
self.assertEqual(None, code)
self.assertEqual([], result)
self.assertEqual([], statuses)
@ -61,7 +58,7 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
]
code, result, _ = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
self.assertEqual(-1, code)
self.assertEqual(['foo', 'bar'], result)
@ -76,7 +73,7 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
]
_, _, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
expected = [
(0, {
@ -106,7 +103,7 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
]
_, _, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
expected = [
(1, {'class': ['foo'], 'test': ['bar'],}),
@ -133,14 +130,14 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
]
code, result, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
self.assertEqual(0, code)
self.assertEqual(['hello', 'world', '', ''], result)
self.assertEqual([(1, {'class': ['foo'], 'test': ['bar']})], statuses)
def testGenerateTestResult_noStatus(self):
result = self.instance._GenerateTestResult(
result = instrumentation_test_instance.GenerateTestResult(
'test.package.TestClass#testMethod', [], 0, 1000)
self.assertEqual('test.package.TestClass#testMethod', result.GetName())
self.assertEqual(base_test_result.ResultType.UNKNOWN, result.GetType())
@ -158,7 +155,7 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
'test': ['testMethod'],
}),
]
result = self.instance._GenerateTestResult(
result = instrumentation_test_instance.GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.PASS, result.GetType())
@ -176,7 +173,7 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
'test': ['testMethod'],
}),
]
result = self.instance._GenerateTestResult(
result = instrumentation_test_instance.GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.SKIP, result.GetType())
@ -194,7 +191,7 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
'test_skipped': ['true'],
}),
]
result = self.instance._GenerateTestResult(
result = instrumentation_test_instance.GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.SKIP, result.GetType())
@ -212,7 +209,7 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
'test': ['testMethod'],
}),
]
result = self.instance._GenerateTestResult(
result = instrumentation_test_instance.GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.PASS, result.GetType())
@ -227,49 +224,10 @@ class InstrumentationTestRunnerTest(unittest.TestCase):
'test': ['testMethod'],
}),
]
self.instance.device.old_interface.DismissCrashDialogIfNeeded = mock.Mock(
return_value=None)
result = self.instance._GenerateTestResult(
result = instrumentation_test_instance.GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.FAIL, result.GetType())
def testGenerateTestResult_testCrashed(self):
self.instance.test_pkg.GetPackageName = mock.Mock(
return_value='generate.test.result.test.package')
self.instance.device.old_interface.DismissCrashDialogIfNeeded = mock.Mock(
return_value='generate.test.result.test.package')
statuses = [
(1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
(-1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
'stack': ['', 'foo/bar.py (27)', 'hello/world.py (42)'],
}),
]
result = self.instance._GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.CRASH, result.GetType())
self.assertEqual('\nfoo/bar.py (27)\nhello/world.py (42)', result.GetLog())
def test_RunTest_verifyAdbShellCommand(self):
self.instance.options.test_runner = 'MyTestRunner'
self.instance.device.StartInstrumentation = mock.Mock()
self.instance.test_pkg.GetPackageName = mock.Mock(
return_value='test.package')
self.instance._GetInstrumentationArgs = mock.Mock(
return_value={'test_arg_key': 'test_arg_value'})
self.instance._RunTest('test.package.TestClass#testMethod', 100)
self.instance.device.StartInstrumentation.assert_called_with(
'test.package/MyTestRunner', raw=True,
extras={
'test_arg_key': 'test_arg_value',
'class': 'test.package.TestClass#testMethod'
},
timeout=100, retries=0)
if __name__ == '__main__':
unittest.main(verbosity=2)

@ -16,8 +16,10 @@ from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.instrumentation import instrumentation_test_instance
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result
from pylib.local.device import local_device_instrumentation_test_run
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common'))
@ -319,124 +321,6 @@ class TestRunner(base_test_runner.BaseTestRunner):
'%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner),
raw=True, extras=extras, timeout=timeout, retries=0)
@staticmethod
def _ParseAmInstrumentRawOutput(raw_output):
"""Parses the output of an |am instrument -r| call.
Args:
raw_output: the output of an |am instrument -r| call as a list of lines
Returns:
A 3-tuple containing:
- the instrumentation code as an integer
- the instrumentation result as a list of lines
- the instrumentation statuses received as a list of 2-tuples
containing:
- the status code as an integer
- the bundle dump as a dict mapping string keys to a list of
strings, one for each line.
"""
INSTR_STATUS = 'INSTRUMENTATION_STATUS: '
INSTR_STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE: '
INSTR_RESULT = 'INSTRUMENTATION_RESULT: '
INSTR_CODE = 'INSTRUMENTATION_CODE: '
last = None
instr_code = None
instr_result = []
instr_statuses = []
bundle = {}
for line in raw_output:
if line.startswith(INSTR_STATUS):
instr_var = line[len(INSTR_STATUS):]
if '=' in instr_var:
k, v = instr_var.split('=', 1)
bundle[k] = [v]
last = INSTR_STATUS
last_key = k
else:
logging.debug('Unknown "%s" line: %s' % (INSTR_STATUS, line))
elif line.startswith(INSTR_STATUS_CODE):
instr_status = line[len(INSTR_STATUS_CODE):]
instr_statuses.append((int(instr_status), bundle))
bundle = {}
last = INSTR_STATUS_CODE
elif line.startswith(INSTR_RESULT):
instr_result.append(line[len(INSTR_RESULT):])
last = INSTR_RESULT
elif line.startswith(INSTR_CODE):
instr_code = int(line[len(INSTR_CODE):])
last = INSTR_CODE
elif last == INSTR_STATUS:
bundle[last_key].append(line)
elif last == INSTR_RESULT:
instr_result.append(line)
return (instr_code, instr_result, instr_statuses)
def _GenerateTestResult(self, test, instr_statuses, start_ms, duration_ms):
"""Generate the result of |test| from |instr_statuses|.
Args:
instr_statuses: A list of 2-tuples containing:
- the status code as an integer
- the bundle dump as a dict mapping string keys to string values
Note that this is the same as the third item in the 3-tuple returned by
|_ParseAmInstrumentRawOutput|.
start_ms: The start time of the test in milliseconds.
duration_ms: The duration of the test in milliseconds.
Returns:
An InstrumentationTestResult object.
"""
INSTR_STATUS_CODE_START = 1
INSTR_STATUS_CODE_OK = 0
INSTR_STATUS_CODE_ERROR = -1
INSTR_STATUS_CODE_FAIL = -2
log = ''
result_type = base_test_result.ResultType.UNKNOWN
for status_code, bundle in instr_statuses:
if status_code == INSTR_STATUS_CODE_START:
pass
elif status_code == INSTR_STATUS_CODE_OK:
bundle_test = '%s#%s' % (
''.join(bundle.get('class', [''])),
''.join(bundle.get('test', [''])))
skipped = ''.join(bundle.get('test_skipped', ['']))
if (test == bundle_test and
result_type == base_test_result.ResultType.UNKNOWN):
result_type = base_test_result.ResultType.PASS
elif skipped.lower() in ('true', '1', 'yes'):
result_type = base_test_result.ResultType.SKIP
logging.info('Skipped ' + test)
else:
if status_code not in (INSTR_STATUS_CODE_ERROR,
INSTR_STATUS_CODE_FAIL):
logging.info('Unrecognized status code %d. Handling as an error.',
status_code)
result_type = base_test_result.ResultType.FAIL
if 'stack' in bundle:
log = '\n'.join(bundle['stack'])
# Dismiss any error dialogs. Limit the number in case we have an error
# loop or we are failing to dismiss.
for _ in xrange(10):
package = self.device.old_interface.DismissCrashDialogIfNeeded()
if not package:
break
# Assume test package convention of ".test" suffix
if package in self.test_pkg.GetPackageName():
result_type = base_test_result.ResultType.CRASH
break
return test_result.InstrumentationTestResult(
test, result_type, start_ms, duration_ms, log=log)
#override
def RunTest(self, test):
results = base_test_result.TestRunResults()
@ -458,8 +342,13 @@ class TestRunner(base_test_runner.BaseTestRunner):
duration_ms = time_ms() - start_ms
# Parse the test output
_, _, statuses = self._ParseAmInstrumentRawOutput(raw_output)
result = self._GenerateTestResult(test, statuses, start_ms, duration_ms)
_, _, statuses = (
instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
result = instrumentation_test_instance.GenerateTestResult(
test, statuses, start_ms, duration_ms)
if local_device_instrumentation_test_run.DidPackageCrashOnDevice(
self.test_pkg.GetPackageName(), self.device):
result.SetType(base_test_result.ResultType.CRASH)
results.AddResult(result)
except device_errors.CommandTimeoutError as e:
results.AddResult(test_result.InstrumentationTestResult(

@ -0,0 +1,159 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
from pylib import flag_changer
from pylib.base import base_test_result
from pylib.base import test_run
from pylib.local.device import local_device_test_run
TIMEOUT_ANNOTATIONS = [
('Manual', 10 * 60 * 60),
('IntegrationTest', 30 * 60),
('External', 10 * 60),
('EnormousTest', 10 * 60),
('LargeTest', 5 * 60),
('MediumTest', 3 * 60),
('SmallTest', 1 * 60),
]
# TODO(jbudorick): Make this private once the instrumentation test_runner is
# deprecated.
def DidPackageCrashOnDevice(package_name, device):
  """Checks whether a crash dialog for |package_name| is showing on |device|.

  Dismisses any error dialogs found. The number of dismissals is bounded in
  case dialogs are stuck in an error loop or dismissal keeps failing.

  Args:
    package_name: Name of the package under test. The dialog's package is
      matched as a substring (test packages conventionally append a ".test"
      suffix to the package they instrument).
    device: The device to query for crash dialogs.

  Returns:
    True if a dismissed dialog belonged to |package_name|, False otherwise.
  """
  # Use range instead of the Python-2-only xrange: identical behavior for 10
  # iterations and keeps this helper usable under Python 3.
  for _ in range(10):
    package = device.old_interface.DismissCrashDialogIfNeeded()
    if not package:
      return False
    # Assume test package convention of ".test" suffix
    if package in package_name:
      return True
  return False
class LocalDeviceInstrumentationTestRun(
local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
super(LocalDeviceInstrumentationTestRun, self).__init__(env, test_instance)
self._flag_changers = {}
def TestPackage(self):
return None
def SetUp(self):
def substitute_external_storage(d, external_storage):
if not d:
return external_storage
elif isinstance(d, list):
return '/'.join(p if p else external_storage for p in d)
else:
return d
def individual_device_set_up(dev, host_device_tuples):
dev.Install(self._test_instance.apk_under_test)
dev.Install(self._test_instance.test_apk)
external_storage = dev.GetExternalStoragePath()
host_device_tuples = [
(h, substitute_external_storage(d, external_storage))
for h, d in host_device_tuples]
logging.info('instrumentation data deps:')
for h, d in host_device_tuples:
logging.info('%r -> %r', h, d)
dev.PushChangedFiles(host_device_tuples)
if self._test_instance.flags:
if not self._test_instance.package_info:
logging.error("Couldn't set flags: no package info")
elif not self._test_instance.package_info.cmdline_file:
logging.error("Couldn't set flags: no cmdline_file")
else:
self._flag_changers[str(dev)] = flag_changer.FlagChanger(
dev, self._test_instance.package_info.cmdline_file)
logging.debug('Attempting to set flags: %r',
self._test_instance.flags)
self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)
self._env.parallel_devices.pMap(
individual_device_set_up,
self._test_instance.GetDataDependencies())
def TearDown(self):
def individual_device_tear_down(dev):
if str(dev) in self._flag_changers:
self._flag_changers[str(dev)].Restore()
self._env.parallel_devices.pMap(individual_device_tear_down)
#override
def _CreateShards(self, tests):
return tests
#override
def _GetTests(self):
return self._test_instance.GetTests()
#override
def _GetTestName(self, test):
return '%s#%s' % (test['class'], test['method'])
#override
def _RunTest(self, device, test):
test_name = self._GetTestName(test)
logging.info('preparing to run %s: %s' % (test_name, test))
extras = {
'class': test_name,
'org.chromium.chrome.test.ChromeInstrumentationTestRunner'
'.EnableTestHttpServer': '',
}
timeout = self._GetTimeoutFromAnnotations(test['annotations'], test_name)
time_ms = lambda: int(time.time() * 1e3)
start_ms = time_ms()
output = device.StartInstrumentation(
'%s/%s' % (self._test_instance.test_package,
self._test_instance.test_runner),
raw=True, extras=extras, timeout=timeout, retries=0)
duration_ms = time_ms() - start_ms
# TODO(jbudorick): Make instrumentation tests output a JSON so this
# doesn't have to parse the output.
logging.info('output from %s:' % test_name)
for l in output:
logging.info(' %s' % l)
_, _, statuses = self._test_instance.ParseAmInstrumentRawOutput(output)
result = self._test_instance.GenerateTestResult(
test_name, statuses, start_ms, duration_ms)
if DidPackageCrashOnDevice(self._test_instance.test_package, device):
result.SetType(base_test_result.ResultType.CRASH)
return result
#override
def _ShouldShard(self):
return True
@staticmethod
def _GetTimeoutFromAnnotations(annotations, test_name):
  """Determines the timeout (in seconds) for a test from its annotations.

  The first entry in TIMEOUT_ANNOTATIONS whose key appears in
  |annotations| supplies the base timeout; if none match, a default of
  60 seconds is used. The base timeout is then multiplied by the
  optional integer 'TimeoutScale' annotation.

  Args:
    annotations: Dict of the test's annotations.
    test_name: The test's name, used in log messages.

  Returns:
    The timeout in seconds.
  """
  # Fix: the previous version did not break out of this loop on a
  # match, so the *last* TIMEOUT_ANNOTATIONS entry always decided the
  # timeout (clobbering earlier matches back to the 60s default) and a
  # spurious default-timeout warning was logged for every entry that
  # did not match.
  for k, v in TIMEOUT_ANNOTATIONS:
    if k in annotations:
      timeout = v
      break
  else:
    logging.warning('Using default 1 minute timeout for %s', test_name)
    timeout = 60

  try:
    scale = int(annotations.get('TimeoutScale', 1))
  except ValueError as e:
    logging.warning("Non-integer value of TimeoutScale ignored. (%s)", str(e))
    scale = 1

  return timeout * scale

@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from pylib import valgrind_tools
from pylib.base import base_test_result
@ -35,8 +36,12 @@ class LocalDeviceTestRun(test_run.TestRun):
tries = 0
results = base_test_result.TestRunResults()
fail_results = []
all_fail_results = {}
while tries < self._env.max_tries and tests:
logging.debug('try %d, will run %d tests:', tries, len(tests))
for t in tests:
logging.debug(' %s', t)
if self._ShouldShard():
tc = test_collection.TestCollection(self._CreateShards(tests))
try_results = self._env.parallel_devices.pMap(
@ -44,26 +49,31 @@ class LocalDeviceTestRun(test_run.TestRun):
else:
try_results = self._env.parallel_devices.pMap(
run_tests_on_device, tests).pGet(None)
fail_results = []
for try_result in try_results:
for result in try_result.GetAll():
if result.GetType() in (base_test_result.ResultType.PASS,
base_test_result.ResultType.SKIP):
results.AddResult(result)
else:
fail_results.append(result)
all_fail_results[result.GetName()] = result
results_names = set(r.GetName() for r in results.GetAll())
tests = [t for t in tests if t not in results_names]
tries += 1
if tests:
all_unknown_test_names = set(tests)
all_failed_test_names = set(all_fail_results.iterkeys())
unknown_tests = all_unknown_test_names.difference(all_failed_test_names)
failed_tests = all_failed_test_names.intersection(all_unknown_test_names)
if unknown_tests:
results.AddResults(
base_test_result.BaseTestResult(
t, base_test_result.ResultType.UNKNOWN)
for t in tests)
if fail_results:
results.AddResults(fail_results)
if failed_tests:
results.AddResults(all_fail_results[f] for f in failed_tests)
return results
def GetTool(self, device):

@ -293,6 +293,8 @@ def AddInstrumentationTestOptions(parser):
group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true',
help='Wait for debugger.')
group.add_argument('--apk-under-test', dest='apk_under_test',
help=('the name of the apk under test.'))
group.add_argument('--test-apk', dest='test_apk', required=True,
help=('The name of the apk containing the tests '
'(without the .apk extension; '
@ -303,6 +305,9 @@ def AddInstrumentationTestOptions(parser):
group.add_argument('--device-flags', dest='device_flags', default='',
help='The relative filepath to a file containing '
'command-line flags to set on the device')
group.add_argument('--device-flags-file', default='',
help='The relative filepath to a file containing '
'command-line flags to set on the device')
group.add_argument('--isolate_file_path',
'--isolate-file-path',
dest='isolate_file_path',
@ -898,7 +903,9 @@ def RunTestsCommand(args, parser):
_SUPPORTED_IN_PLATFORM_MODE = [
# TODO(jbudorick): Add support for more test types.
'gtest', 'uirobot',
'gtest',
'instrumentation',
'uirobot',
]
@ -913,7 +920,7 @@ def RunTestsInPlatformMode(args, parser):
args, env, test, parser.error) as test_run:
results = test_run.RunTests()
if args.trigger:
if args.environment == 'remote_device' and args.trigger:
return 0 # Not returning results, only triggering.
report_results.LogFull(

@ -6,7 +6,11 @@
['OS=="android"', {
'variables': {
'files': [
'<(DEPTH)/chrome/test/data/android/contextmenu/',
'<(DEPTH)/chrome/test/data/android/device_files/',
'<(DEPTH)/chrome/test/data/geolocation/geolocation_on_load.html',
'<(DEPTH)/chrome/test/data/popup_blocker/popup-window-open.html',
'<(DEPTH)/chrome/test/data/translate/fr_test.html',
],
},
}],

@ -7,6 +7,12 @@
'variables': {
'files': [
'<(DEPTH)/content/test/data/android/device_files/',
'<(DEPTH)/net/data/ssl/certificates/crit-codeSigning-chain.pem',
'<(DEPTH)/net/data/ssl/certificates/eku-test-root.pem',
'<(DEPTH)/net/data/ssl/certificates/invalid_key_usage_cert.der',
'<(DEPTH)/net/data/ssl/certificates/non-crit-codeSigning-chain.pem',
'<(DEPTH)/net/data/ssl/certificates/ok_cert.pem',
'<(DEPTH)/net/data/ssl/certificates/root_ca_cert.pem',
],
},
}],