
Reland "Remove perf_expectations"

This is a reland of 9d5ef42ab6

Original change's description:
> Remove perf_expectations
>
> This is a modified reland of [1] with most of the changes to sizes.py reverted.
> Those changes are still used by the perf dashboard so must be kept around for
> the time being.
>
> [1] 5f552b3996
>
> R=dpranke
> Bug: 572393
> Cq-Include-Trybots: luci.chromium.try:linux_chromium_archive_rel_ng;luci.chromium.try:mac_chromium_archive_rel_ng;luci.chromium.try:win_archive;luci.chromium.try:win_x64_archive;master.tryserver.chromium.android:android_archive_rel_ng
>
> Change-Id: I459387d50f4a328192110c7b75983523a14f787d
> Reviewed-on: https://chromium-review.googlesource.com/c/1255303
> Commit-Queue: Thomas Anderson <thomasanderson@chromium.org>
> Reviewed-by: Dirk Pranke <dpranke@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#597614}

R=dpranke
Bug: 572393
Cq-Include-Trybots: luci.chromium.try:linux_chromium_archive_rel_ng;luci.chromium.try:mac_chromium_archive_rel_ng;luci.chromium.try:win_archive;luci.chromium.try:win_x64_archive;master.tryserver.chromium.android:android_archive_rel_ng;master.luci.chromium.try:android_cronet

Change-Id: I19d2e305ff842b61aa518b2a3f4d63a393126981
Reviewed-on: https://chromium-review.googlesource.com/c/1269104
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Commit-Queue: Thomas Anderson <thomasanderson@chromium.org>
Cr-Commit-Position: refs/heads/master@{#597738}
Tom Anderson authored 2018-10-08 23:49:23 +00:00, committed by Commit Bot
parent 4337c799f5
commit 104673f5cd
14 changed files with 56 additions and 1179 deletions

@@ -9,7 +9,7 @@ http://neugierig.org/software/chromium/notes/2011/08/static-initializers.html
# How Static Initializers are Checked
* For Linux and Mac:
* The expected count is stored in [//tools/perf_expectations/perf_expectations.json](https://cs.chromium.org/chromium/src/tools/perf_expectations/perf_expectations.json)
* The expected count is stored in [//infra/scripts/legacy/scripts/slave/chromium/sizes.py](https://cs.chromium.org/chromium/src/infra/scripts/legacy/scripts/slave/chromium/sizes.py)
* For Android:
* The expected count is stored in the build target [//chrome/android:monochrome_static_initializers](https://cs.chromium.org/chromium/src/chrome/android/BUILD.gn)

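For context, the sketch below (not part of this change) shows the comparison sizes.py now makes for Linux binaries; the EXPECTED_LINUX_SI_COUNTS table, the 125 result code, and the failure message are taken from the hunks that follow, while the standalone check_si_count() helper is purely illustrative.

EXPECTED_LINUX_SI_COUNTS = {
    'chrome': 8,
    'nacl_helper': 6,
    'nacl_helper_bootstrap': 0,
}

def check_si_count(binary_name, si_count):
    # Returns (exit_code, failure_message); 125 mirrors the code sizes.py now
    # uses to flag an expectation failure without aborting the whole run.
    if (binary_name in EXPECTED_LINUX_SI_COUNTS and
            si_count > EXPECTED_LINUX_SI_COUNTS[binary_name]):
        return 125, ('Expected <= %d static initializers in %s, but found %d' %
                     (EXPECTED_LINUX_SI_COUNTS[binary_name], binary_name,
                      si_count))
    return 0, None

print(check_si_count('chrome', 10))  # (125, 'Expected <= 8 static initializers in chrome, but found 10')
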
@@ -29,10 +29,17 @@ from slave import build_directory
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', '..'))
EXPECTED_LINUX_SI_COUNTS = {
'chrome': 8,
'nacl_helper': 6,
'nacl_helper_bootstrap': 0,
}
class ResultsCollector(object):
def __init__(self):
self.results = {}
self.failures = []
def add_result(self, name, identifier, value, units):
assert name not in self.results
@@ -45,6 +52,9 @@ class ResultsCollector(object):
# Legacy printing, previously used for parsing the text logs.
print 'RESULT %s: %s= %s %s' % (name, identifier, value, units)
def add_failure(self, failure):
self.failures.append(failure)
def get_size(filename):
return os.stat(filename)[stat.ST_SIZE]
@@ -165,6 +175,10 @@ def main_mac(options, args, results_collector):
# For Release builds only, use dump-static-initializers.py to print the
# list of static initializers.
if si_count > 0 and options.target == 'Release':
result = 125
results_collector.add_failure(
'Expected 0 static initializers in %s, but found %d' %
(chromium_framework_executable, si_count))
print '\n# Static initializers in %s:' % chromium_framework_executable
# First look for a dSYM to get information about the initializers. If
@@ -230,7 +244,7 @@ def main_mac(options, args, results_collector):
return 66
def check_linux_binary(target_dir, binary_name, options):
def check_linux_binary(target_dir, binary_name, options, results_collector):
"""Collect appropriate size information about the built Linux binary given.
Returns a tuple (result, sizes). result is the first non-zero exit
@@ -302,16 +316,23 @@ def check_linux_binary(target_dir, binary_name, options):
# For Release builds only, use dump-static-initializers.py to print the list
# of static initializers.
if si_count > 0 and options.target == 'Release':
build_dir = os.path.dirname(target_dir)
dump_static_initializers = os.path.join(os.path.dirname(build_dir),
'tools', 'linux',
'dump-static-initializers.py')
result, stdout = run_process(result, [dump_static_initializers,
'-d', binary_file])
print '\n# Static initializers in %s:' % binary_file
print_si_fail_hint('tools/linux/dump-static-initializers.py')
print stdout
if options.target == 'Release':
if (binary_name in EXPECTED_LINUX_SI_COUNTS and
si_count > EXPECTED_LINUX_SI_COUNTS[binary_name]):
result = 125
results_collector.add_failure(
'Expected <= %d static initializers in %s, but found %d' %
(EXPECTED_LINUX_SI_COUNTS[binary_name], binary_name, si_count))
if si_count > 0:
build_dir = os.path.dirname(target_dir)
dump_static_initializers = os.path.join(os.path.dirname(build_dir),
'tools', 'linux',
'dump-static-initializers.py')
result, stdout = run_process(result, [dump_static_initializers,
'-d', binary_file])
print '\n# Static initializers in %s:' % binary_file
print_si_fail_hint('tools/linux/dump-static-initializers.py')
print stdout
# Determine if the binary has the DT_TEXTREL marker.
result, stdout = run_process(result, ['readelf', '-Wd', binary_file])
@@ -350,7 +371,8 @@ def main_linux(options, args, results_collector):
totals = {}
for binary in binaries:
this_result, this_sizes = check_linux_binary(target_dir, binary, options)
this_result, this_sizes = check_linux_binary(target_dir, binary, options,
results_collector)
if result == 0:
result = this_result
for name, identifier, totals_id, value, units in this_sizes:
@@ -399,7 +421,8 @@ def check_android_binaries(binaries, target_dir, options, results_collector,
binaries_to_print = binaries
for (binary, binary_to_print) in zip(binaries, binaries_to_print):
this_result, this_sizes = check_linux_binary(target_dir, binary, options)
this_result, this_sizes = check_linux_binary(target_dir, binary, options,
results_collector)
if result == 0:
result = this_result
for name, identifier, _, value, units in this_sizes:
@@ -534,6 +557,8 @@ def main():
help='specify platform (%s) [default: %%default]'
% ', '.join(platforms))
option_parser.add_option('--json', help='Path to JSON output file')
option_parser.add_option('--failures',
help='Path to JSON output file for failures')
options, args = option_parser.parse_args()
@@ -554,6 +579,10 @@ def main():
with open(options.json, 'w') as f:
json.dump(results_collector.results, f)
if options.failures:
with open(options.failures, 'w') as f:
json.dump(results_collector.failures, f)
return rc

@@ -44,51 +44,19 @@ def main_run(script_args):
os.path.join(
common.SRC_DIR, 'infra', 'scripts', 'legacy', 'scripts', 'slave',
'chromium', 'sizes.py'),
'--json', tempfile_path
'--failures', tempfile_path
]
if args.platform:
sizes_cmd.extend(['--platform', args.platform])
rc = common.run_runtest(script_args, runtest_args + sizes_cmd)
with open(tempfile_path) as f:
results = json.load(f)
with open(os.path.join(common.SRC_DIR, 'tools', 'perf_expectations',
'perf_expectations.json')) as f:
perf_expectations = json.load(f)
valid = (rc == 0)
failures = []
for name, result in results.iteritems():
fqtn = '%s/%s/%s' % (args.prefix, name, result['identifier'])
if fqtn not in perf_expectations:
continue
if perf_expectations[fqtn]['type'] != 'absolute':
print 'ERROR: perf expectation %r is not yet supported' % fqtn
valid = False
continue
actual = result['value']
expected = perf_expectations[fqtn]['regress']
better = perf_expectations[fqtn]['better']
check_result = ((actual <= expected) if better == 'lower'
else (actual >= expected))
if not check_result:
failures.append(fqtn)
print 'FAILED %s: actual %s, expected %s, better %s' % (
fqtn, actual, expected, better)
failures = json.load(f)
json.dump({
'valid': valid,
'valid': (rc == 0 or rc == 125),
'failures': failures,
}, script_args.output)
# sizes.py itself doesn't fail on regressions.
if failures and rc == 0:
rc = 1
return rc

@@ -38,8 +38,10 @@ NOTES = {
IS_GIT_WORKSPACE = (subprocess.Popen(
['git', 'rev-parse'], stderr=subprocess.PIPE).wait() == 0)
class Demangler(object):
"""A wrapper around c++filt to provide a function to demangle symbols."""
def __init__(self, toolchain):
self.cppfilt = subprocess.Popen([toolchain + 'c++filt'],
stdin=subprocess.PIPE,
@@ -50,6 +52,7 @@ class Demangler(object):
self.cppfilt.stdin.write(sym + '\n')
return self.cppfilt.stdout.readline().strip()
# Matches for example: "cert_logger.pb.cc", capturing "cert_logger".
protobuf_filename_re = re.compile(r'(.*)\.pb\.cc$')
def QualifyFilenameAsProto(filename):
@@ -72,6 +75,7 @@ def QualifyFilenameAsProto(filename):
candidate = line.strip()
return candidate
# Regex matching the substring of a symbol's demangled text representation most
# likely to appear in a source file.
# Example: "v8::internal::Builtins::InitBuiltinFunctionTable()" becomes
@@ -99,6 +103,7 @@ def QualifyFilename(filename, symbol):
candidate = line.strip()
return candidate
# Regex matching nm output for the symbols we're interested in.
# See test_ParseNmLine for examples.
nm_re = re.compile(r'(\S+) (\S+) t (?:_ZN12)?_GLOBAL__(?:sub_)?I_(.*)')
@@ -123,6 +128,7 @@ def test_ParseNmLine():
'_GLOBAL__sub_I_extension_specifics.pb.cc')
assert parse == ('extension_specifics.pb.cc', 40607408, 36), parse
# Just always run the test; it is fast enough.
test_ParseNmLine()
@@ -136,6 +142,7 @@ def ParseNm(toolchain, binary):
if parse:
yield parse
# Regex matching objdump output for the symbols we're interested in.
# Example line:
# 12354ab: (disassembly, including <FunctionReference>)
@@ -158,13 +165,14 @@ def ExtractSymbolReferences(toolchain, binary, start, end):
if ref.startswith('.LC') or ref.startswith('_DYNAMIC'):
# Ignore these, they are uninformative.
continue
if ref.startswith('_GLOBAL__I_'):
if re.match('_GLOBAL__(?:sub_)?I_', ref):
# Probably a relative jump within this function.
continue
refs.add(ref)
return sorted(refs)
def main():
parser = optparse.OptionParser(usage='%prog [option] filename')
parser.add_option('-d', '--diffable', dest='diffable',
@@ -236,5 +244,6 @@ def main():
return 0
if '__main__' == __name__:
sys.exit(main())

@@ -1,3 +0,0 @@
jochen@chromium.org
thakis@chromium.org
thestig@chromium.org

@@ -1,44 +0,0 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for perf_expectations.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into depot_tools.
"""
PERF_EXPECTATIONS = 'tools/perf_expectations/perf_expectations.json'
CONFIG_FILE = 'tools/perf_expectations/chromium_perf_expectations.cfg'
def CheckChangeOnUpload(input_api, output_api):
run_tests = False
for path in input_api.LocalPaths():
path = path.replace('\\', '/')
if (PERF_EXPECTATIONS == path or CONFIG_FILE == path):
run_tests = True
output = []
if run_tests:
whitelist = [r'.+_unittest\.py$']
output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, 'tests', whitelist))
return output
def CheckChangeOnCommit(input_api, output_api):
run_tests = False
for path in input_api.LocalPaths():
path = path.replace('\\', '/')
if (PERF_EXPECTATIONS == path or CONFIG_FILE == path):
run_tests = True
output = []
if run_tests:
whitelist = [r'.+_unittest\.py$']
output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, 'tests', whitelist))
output.extend(input_api.canned_checks.CheckDoNotSubmit(input_api,
output_api))
return output

@@ -1,24 +0,0 @@
This is where old perf machinery used to live, keeping track of binary sizes,
etc. Now that lives elsewhere and has a team to support it (see
https://www.chromium.org/developers/tree-sheriffs/perf-sheriffs). This code
remains to ensure that no static initializers get into Chromium.
Because this code has this history, it's far more complicated than it needs to
be. TODO(dpranke): Simplify it. https://crbug.com/572393
In the meanwhile, if you're trying to update perf_expectations.json, there are
no instructions for doing so, and the tools that you used to use don't work
because they rely on data files that were last updated at the end of 2015. So
here's what to do to reset the expected static initializer count value.
The expected static initializer count value is in the "regress" field for the
platform. In addition, each platform has a checksum in the "sha1" field to
ensure that you properly used the magic tools. Since the magic tools don't work
anymore, dpranke added a bypass to the verification. If you run:
> tools/perf_expectations/make_expectations.py --checksum --verbose
the script will tell you what the checksum *should* be. Alter the "sha1" field
to be that value, and you can commit changes to that file.
Please see https://crbug.com/572393 for more information.
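The "sha1" value the README talks about is a truncated digest of the row's own fields; the following sketch (assuming Python 3; the row_digest() helper is illustrative) follows GetRowData()/GetRowDigest() from make_expectations.py, deleted further down in this commit, using the linux-release-64 chrome-si row from the removed perf_expectations.json as sample input.

import hashlib

def row_digest(row, key):
    # Serialize selected fields in a fixed order (as GetRowData does), then
    # hash the serialized list together with the row's key; the first 8 hex
    # characters are what gets stored in the "sha1" field.
    rowdata = []
    for subkey in ('reva', 'revb'):
        if subkey in row:
            rowdata.append('"%s": %s' % (subkey, row[subkey]))
    for subkey in ('type', 'better'):
        if subkey in row:
            rowdata.append('"%s": "%s"' % (subkey, row[subkey]))
    for subkey in ('improve', 'regress', 'tolerance'):
        if subkey in row:
            rowdata.append('"%s": %s' % (subkey, row[subkey]))
    return hashlib.sha1((str(rowdata) + key).encode('ascii')).hexdigest()[:8]

key = 'linux-release-64/sizes/chrome-si/initializers'
row = {'reva': 480969, 'revb': 480969, 'type': 'absolute', 'better': 'lower',
       'improve': 8, 'regress': 8, 'tolerance': 0}
print(row_digest(row, key))  # prints an 8-character hex digest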

@@ -1,4 +0,0 @@
{
"base_url": "http://build.chromium.org/f/chromium/perf",
"perf_file": "perf_expectations.json"
}

@@ -1,383 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# For instructions see:
# http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs
import hashlib
import math
import optparse
import os
import re
import subprocess
import sys
import time
import urllib2
try:
import json
except ImportError:
import simplejson as json
__version__ = '1.0'
EXPECTATIONS_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_CONFIG_FILE = os.path.join(EXPECTATIONS_DIR,
'chromium_perf_expectations.cfg')
DEFAULT_TOLERANCE = 0.05
USAGE = ''
def ReadFile(filename):
try:
file = open(filename, 'rb')
except IOError, e:
print >> sys.stderr, ('I/O Error reading file %s(%s): %s' %
(filename, e.errno, e.strerror))
raise e
contents = file.read()
file.close()
return contents
def ConvertJsonIntoDict(string):
"""Read a JSON string and convert its contents into a Python datatype."""
if len(string) == 0:
print >> sys.stderr, ('Error could not parse empty string')
raise Exception('JSON data missing')
try:
jsondata = json.loads(string)
except ValueError, e:
print >> sys.stderr, ('Error parsing string: "%s"' % string)
raise e
return jsondata
# Floating point representation of last time we fetched a URL.
last_fetched_at = None
def FetchUrlContents(url):
global last_fetched_at
if last_fetched_at and ((time.time() - last_fetched_at) <= 0.5):
# Sleep for half a second to avoid overloading the server.
time.sleep(0.5)
try:
last_fetched_at = time.time()
connection = urllib2.urlopen(url)
except urllib2.HTTPError, e:
if e.code == 404:
return None
raise e
text = connection.read().strip()
connection.close()
return text
def GetRowData(data, key):
rowdata = []
# reva and revb always come first.
for subkey in ['reva', 'revb']:
if subkey in data[key]:
rowdata.append('"%s": %s' % (subkey, data[key][subkey]))
# Strings, like type, come next.
for subkey in ['type', 'better']:
if subkey in data[key]:
rowdata.append('"%s": "%s"' % (subkey, data[key][subkey]))
# Finally the main numbers come last.
for subkey in ['improve', 'regress', 'tolerance']:
if subkey in data[key]:
rowdata.append('"%s": %s' % (subkey, data[key][subkey]))
return rowdata
def GetRowDigest(rowdata, key):
sha1 = hashlib.sha1()
rowdata = [str(possibly_unicode_string).encode('ascii')
for possibly_unicode_string in rowdata]
sha1.update(str(rowdata) + key)
return sha1.hexdigest()[0:8]
def WriteJson(filename, data, keys, calculate_sha1=True):
"""Write a list of |keys| in |data| to the file specified in |filename|."""
try:
file = open(filename, 'wb')
except IOError, e:
print >> sys.stderr, ('I/O Error writing file %s(%s): %s' %
(filename, e.errno, e.strerror))
return False
jsondata = []
for key in keys:
rowdata = GetRowData(data, key)
if calculate_sha1:
# Include an updated checksum.
rowdata.append('"sha1": "%s"' % GetRowDigest(rowdata, key))
else:
if 'sha1' in data[key]:
rowdata.append('"sha1": "%s"' % (data[key]['sha1']))
jsondata.append('"%s": {%s}' % (key, ', '.join(rowdata)))
jsondata.append('"load": true')
jsontext = '{%s\n}' % ',\n '.join(jsondata)
file.write(jsontext + '\n')
file.close()
return True
def FloatIsInt(f):
epsilon = 1.0e-10
return abs(f - int(f)) <= epsilon
last_key_printed = None
def Main(args):
def OutputMessage(message, verbose_message=True):
global last_key_printed
if not options.verbose and verbose_message:
return
if key != last_key_printed:
last_key_printed = key
print '\n' + key + ':'
print ' %s' % message
parser = optparse.OptionParser(usage=USAGE, version=__version__)
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='enable verbose output')
parser.add_option('-s', '--checksum', action='store_true',
help='test if any changes are pending')
parser.add_option('-c', '--config', dest='config_file',
default=DEFAULT_CONFIG_FILE,
help='set the config file to FILE', metavar='FILE')
options, args = parser.parse_args(args)
if options.verbose:
print 'Verbose output enabled.'
config = ConvertJsonIntoDict(ReadFile(options.config_file))
# Get the list of summaries for a test.
base_url = config['base_url']
# Make the perf expectations file relative to the path of the config file.
perf_file = os.path.join(
os.path.dirname(options.config_file), config['perf_file'])
perf = ConvertJsonIntoDict(ReadFile(perf_file))
# Fetch graphs.dat for this combination.
perfkeys = perf.keys()
# In perf_expectations.json, ignore the 'load' key.
perfkeys.remove('load')
perfkeys.sort()
write_new_expectations = False
found_checksum_mismatch = False
for key in perfkeys:
value = perf[key]
tolerance = value.get('tolerance', DEFAULT_TOLERANCE)
better = value.get('better', None)
# Verify the checksum.
original_checksum = value.get('sha1', '')
if 'sha1' in value:
del value['sha1']
rowdata = GetRowData(perf, key)
computed_checksum = GetRowDigest(rowdata, key)
if original_checksum == computed_checksum:
OutputMessage('checksum matches, skipping')
continue
elif options.checksum:
OutputMessage('checksum mismatch, original = %s, computed = %s' %
(original_checksum, computed_checksum))
found_checksum_mismatch = True
continue
# Skip expectations that are missing a reva or revb. We can't generate
# expectations for those.
if not(value.has_key('reva') and value.has_key('revb')):
OutputMessage('missing revision range, skipping')
continue
revb = int(value['revb'])
reva = int(value['reva'])
# Ensure that reva is less than revb.
if reva > revb:
temp = reva
reva = revb
revb = temp
# Get the system/test/graph/tracename and reftracename for the current key.
matchData = re.match(r'^([^/]+)\/([^/]+)\/([^/]+)\/([^/]+)$', key)
if not matchData:
OutputMessage('cannot parse key, skipping')
continue
system = matchData.group(1)
test = matchData.group(2)
graph = matchData.group(3)
tracename = matchData.group(4)
reftracename = tracename + '_ref'
# Create the summary_url and get the json data for that URL.
# FetchUrlContents() may sleep to avoid overloading the server with
# requests.
summary_url = '%s/%s/%s/%s-summary.dat' % (base_url, system, test, graph)
summaryjson = FetchUrlContents(summary_url)
if not summaryjson:
OutputMessage('ERROR: cannot find json data, please verify',
verbose_message=False)
return 0
# Set value's type to 'relative' by default.
value_type = value.get('type', 'relative')
summarylist = summaryjson.split('\n')
trace_values = {}
traces = [tracename]
if value_type == 'relative':
traces += [reftracename]
for trace in traces:
trace_values.setdefault(trace, {})
# Find the high and low values for each of the traces.
scanning = False
for line in summarylist:
jsondata = ConvertJsonIntoDict(line)
try:
rev = int(jsondata['rev'])
except ValueError:
print ('Warning: skipping rev %r because could not be parsed '
'as an integer.' % jsondata['rev'])
continue
if rev <= revb:
scanning = True
if rev < reva:
break
# We found the upper revision in the range. Scan for trace data until we
# find the lower revision in the range.
if scanning:
for trace in traces:
if trace not in jsondata['traces']:
OutputMessage('trace %s missing' % trace)
continue
if type(jsondata['traces'][trace]) != type([]):
OutputMessage('trace %s format not recognized' % trace)
continue
try:
tracevalue = float(jsondata['traces'][trace][0])
except ValueError:
OutputMessage('trace %s value error: %s' % (
trace, str(jsondata['traces'][trace][0])))
continue
for bound in ['high', 'low']:
trace_values[trace].setdefault(bound, tracevalue)
trace_values[trace]['high'] = max(trace_values[trace]['high'],
tracevalue)
trace_values[trace]['low'] = min(trace_values[trace]['low'],
tracevalue)
if 'high' not in trace_values[tracename]:
OutputMessage('no suitable traces matched, skipping')
continue
if value_type == 'relative':
# Calculate assuming high deltas are regressions and low deltas are
# improvements.
regress = (float(trace_values[tracename]['high']) -
float(trace_values[reftracename]['low']))
improve = (float(trace_values[tracename]['low']) -
float(trace_values[reftracename]['high']))
elif value_type == 'absolute':
# Calculate assuming high absolutes are regressions and low absolutes are
# improvements.
regress = float(trace_values[tracename]['high'])
improve = float(trace_values[tracename]['low'])
# So far we've assumed better is lower (regress > improve). If the actual
# values for regress and improve are equal, though, and better was not
# specified, alert the user so we don't let them create a new file with
# ambiguous rules.
if better == None and regress == improve:
OutputMessage('regress (%s) is equal to improve (%s), and "better" is '
'unspecified, please fix by setting "better": "lower" or '
'"better": "higher" in this perf trace\'s expectation' % (
regress, improve), verbose_message=False)
return 1
# If the existing values assume regressions are low deltas relative to
# improvements, swap our regress and improve. This value must be a
# scores-like result.
if 'regress' in perf[key] and 'improve' in perf[key]:
if perf[key]['regress'] < perf[key]['improve']:
assert(better != 'lower')
better = 'higher'
temp = regress
regress = improve
improve = temp
else:
# Sometimes values are equal, e.g., when they are both 0,
# 'better' may still be set to 'higher'.
assert(better != 'higher' or
perf[key]['regress'] == perf[key]['improve'])
better = 'lower'
# If both were ints keep as int, otherwise use the float version.
originally_ints = False
if FloatIsInt(regress) and FloatIsInt(improve):
originally_ints = True
if better == 'higher':
if originally_ints:
regress = int(math.floor(regress - abs(regress*tolerance)))
improve = int(math.ceil(improve + abs(improve*tolerance)))
else:
regress = regress - abs(regress*tolerance)
improve = improve + abs(improve*tolerance)
else:
if originally_ints:
improve = int(math.floor(improve - abs(improve*tolerance)))
regress = int(math.ceil(regress + abs(regress*tolerance)))
else:
improve = improve - abs(improve*tolerance)
regress = regress + abs(regress*tolerance)
# Calculate the new checksum to test if this is the only thing that may have
# changed.
checksum_rowdata = GetRowData(perf, key)
new_checksum = GetRowDigest(checksum_rowdata, key)
if ('regress' in perf[key] and 'improve' in perf[key] and
perf[key]['regress'] == regress and perf[key]['improve'] == improve and
original_checksum == new_checksum):
OutputMessage('no change')
continue
write_new_expectations = True
OutputMessage('traces: %s' % trace_values, verbose_message=False)
OutputMessage('before: %s' % perf[key], verbose_message=False)
perf[key]['regress'] = regress
perf[key]['improve'] = improve
OutputMessage('after: %s' % perf[key], verbose_message=False)
if options.checksum:
if found_checksum_mismatch:
return 1
else:
return 0
if write_new_expectations:
print '\nWriting expectations... ',
WriteJson(perf_file, perf, perfkeys)
print 'done'
else:
if options.verbose:
print ''
print 'No changes.'
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv))

@@ -1,9 +0,0 @@
{"linux-release-64/sizes/chrome-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 8, "regress": 8, "tolerance": 0, "sha1": "3c815259"},
"linux-release-64/sizes/nacl_helper-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 6, "regress": 8, "sha1": "8416f450"},
"linux-release-64/sizes/nacl_helper_bootstrap-si/initializers": {"reva": 114822, "revb": 115019, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "228221af"},
"linux-release/sizes/chrome-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 9, "regress": 9, "tolerance": 0, "sha1": "03dc3cfd"},
"linux-release/sizes/nacl_helper-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 7, "regress": 9, "sha1": "1a3c5b2b"},
"linux-release/sizes/nacl_helper_bootstrap-si/initializers": {"reva": 114822, "revb": 115019, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "dd908f29"},
"mac-release/sizes/chrome-si/initializers": {"reva": 281731, "revb": 281731, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "tolerance": 0, "sha1": "01759b7f"},
"load": true
}
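For reference, a minimal sketch (the passes() helper is hypothetical) of how one of these "absolute" rows was compared against a reported value by the expectation-checking code removed earlier in this diff:

def passes(expectation, actual):
    # Only 'absolute' expectations were supported; 'regress' is the threshold
    # and 'better' says which direction counts as an improvement.
    assert expectation['type'] == 'absolute'
    expected = expectation['regress']
    if expectation['better'] == 'lower':
        return actual <= expected
    return actual >= expected

row = {'type': 'absolute', 'better': 'lower', 'improve': 8, 'regress': 8}
print(passes(row, 8))  # True: at or below the allowed count
print(passes(row, 9))  # False: one static initializer over the threshold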

@@ -1,28 +0,0 @@
{"linux-release/media_tests_av_perf/audio_latency/latency": {"reva": 180005, "revb": 180520, "type": "absolute", "better": "lower", "improve": 190, "regress": 222, "sha1": "fc9815d5"},
"linux-release/media_tests_av_perf/dropped_fps/tulip2.wav": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "fb8157f9"},
"linux-release/media_tests_av_perf/dropped_fps/tulip2.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "c0fb3421"},
"linux-release/media_tests_av_perf/dropped_frames/crowd1080.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "fa9582d3"},
"linux-release/media_tests_av_perf/dropped_frames/crowd2160.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 166, "regress": 231, "sha1": "ca3a7a47"},
"linux-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.mp3": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.mp4": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 32, "regress": 28},
"linux-release/media_tests_av_perf/fps/tulip2.ogg": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.ogv": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 32, "regress": 28},
"linux-release/media_tests_av_perf/fps/tulip2.wav": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"win-release/media_tests_av_perf/dropped_fps/tulip2.wav": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "646c02f2"},
"win-release/media_tests_av_perf/dropped_fps/tulip2.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "46c97b57"},
"win-release/media_tests_av_perf/dropped_frames/crowd1080.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "9b709aab"},
"win-release/media_tests_av_perf/dropped_frames/crowd2160.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 174, "regress": 204, "sha1": "4c0270a6"},
"win-release/media_tests_av_perf/fps/crowd1080.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 53, "regress": 43, "sha1": "7ad49461"},
"win-release/media_tests_av_perf/fps/crowd2160.webm": {"reva": 176330, "revb": 176978, "type": "absolute", "better": "higher", "improve": 26.0399945997, "regress": 25.9062437562, "sha1": "700526a9"},
"win-release/media_tests_av_perf/fps/crowd360.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 51, "regress": 47, "sha1": "7f8ef21c"},
"win-release/media_tests_av_perf/fps/crowd480.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 50, "regress": 47, "sha1": "5dc96881"},
"win-release/media_tests_av_perf/fps/crowd720.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 52, "regress": 47, "sha1": "4fcfb653"},
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "54d94538"},
"win-release/media_tests_av_perf/fps/tulip2.mp3": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "113aef17"},
"win-release/media_tests_av_perf/fps/tulip2.mp4": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 30, "regress": 28, "sha1": "a22847d0"},
"win-release/media_tests_av_perf/fps/tulip2.ogg": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "6ee2e716"},
"win-release/media_tests_av_perf/fps/tulip2.ogv": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 32, "regress": 26, "sha1": "dfadb872"},
"win-release/media_tests_av_perf/fps/tulip2.wav": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "530c5bf5"},
"win-release/media_tests_av_perf/fps/tulip2.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 30, "regress": 28, "sha1": "35b91c8e"}
}

@@ -1,167 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verify perf_expectations.json can be loaded using simplejson.
perf_expectations.json is a JSON-formatted file. This script verifies
that simplejson can load it correctly. It should catch most common
formatting problems.
"""
import subprocess
import sys
import os
import unittest
import re
simplejson = None
def OnTestsLoad():
old_path = sys.path
script_path = os.path.dirname(sys.argv[0])
load_path = None
global simplejson
# This test script should be stored in src/tools/perf_expectations/. That
# directory will most commonly live in 2 locations:
#
# - a regular Chromium checkout, in which case src/third_party
# is where to look for simplejson
#
# - a buildbot checkout, in which case .../pylibs is where
# to look for simplejson
#
# Locate and install the correct path based on what we can find.
#
for path in ('../../../third_party', '../../../../../pylibs'):
path = os.path.join(script_path, path)
if os.path.exists(path) and os.path.isdir(path):
load_path = os.path.abspath(path)
break
if load_path is None:
msg = "%s expects to live within a Chromium checkout" % sys.argv[0]
raise Exception, "Error locating simplejson load path (%s)" % msg
# Try importing simplejson once. If this succeeds, we found it and will
# load it again later properly. Fail if we cannot load it.
sys.path.append(load_path)
try:
import simplejson as Simplejson
simplejson = Simplejson
except ImportError, e:
msg = "%s expects to live within a Chromium checkout" % sys.argv[0]
raise Exception, "Error trying to import simplejson from %s (%s)" % \
(load_path, msg)
finally:
sys.path = old_path
return True
def LoadJsonFile(filename):
f = open(filename, 'r')
try:
data = simplejson.load(f)
except ValueError, e:
f.seek(0)
print "Error reading %s:\n%s" % (filename,
f.read()[:50]+'...')
raise e
f.close()
return data
OnTestsLoad()
CONFIG_JSON = os.path.join(os.path.dirname(sys.argv[0]),
'../chromium_perf_expectations.cfg')
MAKE_EXPECTATIONS = os.path.join(os.path.dirname(sys.argv[0]),
'../make_expectations.py')
PERF_EXPECTATIONS = os.path.join(os.path.dirname(sys.argv[0]),
'../perf_expectations.json')
class PerfExpectationsUnittest(unittest.TestCase):
def testPerfExpectations(self):
# Test data is dictionary.
perf_data = LoadJsonFile(PERF_EXPECTATIONS)
if not isinstance(perf_data, dict):
raise Exception('perf expectations is not a dict')
# Test the 'load' key.
if not 'load' in perf_data:
raise Exception("perf expectations is missing a load key")
if not isinstance(perf_data['load'], bool):
raise Exception("perf expectations load key has non-bool value")
# Test all key values are dictionaries.
bad_keys = []
for key in perf_data:
if key == 'load':
continue
if not isinstance(perf_data[key], dict):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations keys have non-dict values"
raise Exception("%s: %s" % (msg, bad_keys))
# Test all key values have delta and var keys.
for key in perf_data:
if key == 'load':
continue
# First check if regress/improve is in the key's data.
if 'regress' in perf_data[key]:
if 'improve' not in perf_data[key]:
bad_keys.append(key)
if (not isinstance(perf_data[key]['regress'], int) and
not isinstance(perf_data[key]['regress'], float)):
bad_keys.append(key)
if (not isinstance(perf_data[key]['improve'], int) and
not isinstance(perf_data[key]['improve'], float)):
bad_keys.append(key)
else:
# Otherwise check if delta/var is in the key's data.
if 'delta' not in perf_data[key] or 'var' not in perf_data[key]:
bad_keys.append(key)
if (not isinstance(perf_data[key]['delta'], int) and
not isinstance(perf_data[key]['delta'], float)):
bad_keys.append(key)
if (not isinstance(perf_data[key]['var'], int) and
not isinstance(perf_data[key]['var'], float)):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations key values missing or invalid delta/var"
raise Exception("%s: %s" % (msg, bad_keys))
# Test all keys have the correct format.
for key in perf_data:
if key == 'load':
continue
# tools/buildbot/scripts/master/log_parser.py should have a matching
# regular expression.
if not re.match(r"^([\w\.-]+)/([\w\.-]+)/([\w\.-]+)/([\w\.-]+)$", key):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations keys in bad format, expected a/b/c/d"
raise Exception("%s: %s" % (msg, bad_keys))
def testNoUpdatesNeeded(self):
p = subprocess.Popen([MAKE_EXPECTATIONS, '-s'], stdout=subprocess.PIPE)
p.wait();
self.assertEqual(p.returncode, 0,
msg='Update expectations first by running ./make_expectations.py')
def testConfigFile(self):
# Test that the config file can be parsed as JSON.
config = LoadJsonFile(CONFIG_JSON)
# Require the following keys.
if 'base_url' not in config:
raise Exception('base_url not specified in config file')
if 'perf_file' not in config:
raise Exception('perf_file not specified in config file')
if __name__ == '__main__':
unittest.main()

@@ -1,263 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepare tests that require re-baselining for input to make_expectations.py.
The regularly running perf-AV tests require re-baselineing of expectations
about once a week. The steps involved in rebaselining are:
1.) Identify the tests to update, based off reported e-mail results.
2.) Figure out reva and revb values, which is the starting and ending revision
numbers for the range that we should use to obtain new thresholds.
3.) Modify lines in perf_expectations.json referring to the tests to be updated,
so that they may be used as input to make_expectations.py.
This script automates the last step above.
Here's a sample line from perf_expectations.json:
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, \
"revb": 164141, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0, "sha1": "54d94538"},
To get the above test ready for input to make_expectations.py, it should become:
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": <new reva>, \
"revb": <new revb>, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0},
Examples:
1.) To update the test specified above and get baseline
values using the revision range 12345 and 23456, run this script with a command
line like this:
python update_perf_expectations.py -f \
win-release/media_tests_av_perf/fps/tulip2.m4a --reva 12345 --revb 23456
Or, using an input file,
where the input file contains a single line with text
win-release/media_tests_av_perf/fps/tulip2.m4a
run with this command line:
python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
2.) Let's say you want to update all seek tests on windows, and get baseline
values using the revision range 12345 and 23456.
Run this script with this command line:
python update_perf_expectations.py -f win-release/media_tests_av_perf/seek/ \
--reva 12345 --revb 23456
Or:
python update_perf_expectations.py -f win-release/.*/seek/ --reva 12345 \
--revb 23456
Or, using an input file,
where the input file contains a single line with text win-release/.*/seek/:
python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
3.) Similarly, if you want to update seek tests on all platforms
python update_perf_expectations.py -f .*-release/.*/seek/ --reva 12345 \
--revb 23456
"""
import logging
from optparse import OptionParser
import os
import re
import make_expectations as perf_ex_lib
# Default logging is INFO. Use --verbose to enable DEBUG logging.
_DEFAULT_LOG_LEVEL = logging.INFO
def GetTestsToUpdate(contents, all_test_keys):
"""Parses input contents and obtains tests to be re-baselined.
Args:
contents: string containing contents of input file.
all_test_keys: list of keys of test dictionary.
Returns:
A list of keys for tests that should be updated.
"""
# Each line of the input file specifies a test case to update.
tests_list = []
for test_case_filter in contents.splitlines():
# Skip any empty lines.
if test_case_filter:
# Sample expected line:
# win-release/media_tests_av_perf/seek/\
# CACHED_BUFFERED_SEEK_NoConstraints_crowd1080.ogv
# Or, if reg-ex, then sample line:
# win-release/media-tests_av_perf/seek*
# Skip any leading spaces if they exist in the input file.
logging.debug('Trying to match %s', test_case_filter)
tests_list.extend(GetMatchingTests(test_case_filter.strip(),
all_test_keys))
return tests_list
def GetMatchingTests(tests_to_update, all_test_keys):
"""Parses input reg-ex filter and obtains tests to be re-baselined.
Args:
tests_to_update: reg-ex string specifying tests to be updated.
all_test_keys: list of keys of tests dictionary.
Returns:
A list of keys for tests that should be updated.
"""
tests_list = []
search_string = re.compile(tests_to_update)
# Get matching tests from the dictionary of tests
for test_key in all_test_keys:
if search_string.match(test_key):
tests_list.append(test_key)
logging.debug('%s will be updated', test_key)
logging.info('%s tests found matching reg-ex: %s', len(tests_list),
tests_to_update)
return tests_list
def PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb):
"""Modifies value of tests that are to re-baselined:
Set reva and revb values to specified new values. Remove sha1.
Args:
tests_to_update: list of tests to be updated.
all_tests: dictionary of all tests.
reva: oldest revision in range to use for new values.
revb: newest revision in range to use for new values.
Raises:
ValueError: If reva or revb are not valid ints, or if either
of them are negative.
"""
reva = int(reva)
revb = int(revb)
if reva < 0 or revb < 0:
raise ValueError('Revision values should be positive.')
# Ensure reva is less than revb.
# (this is similar to the check done in make_expectations.py)
if revb < reva:
temp = revb
revb = reva
reva = temp
for test_key in tests_to_update:
# Get original test from the dictionary of tests
test_value = all_tests[test_key]
if test_value:
# Sample line in perf_expectations.json:
# "linux-release/media_tests _av_perf/dropped_frames/crowd360.webm":\
# {"reva": 155180, "revb": 155280, "type": "absolute", \
# "better": "lower", "improve": 0, "regress": 3, "sha1": "276ba29c"},
# Set new revision range
test_value['reva'] = reva
test_value['revb'] = revb
# Remove sha1 to indicate this test requires an update
# Check first to make sure it exist.
if 'sha1' in test_value:
del test_value['sha1']
else:
logging.warning('%s does not exist.', test_key)
logging.info('Done preparing tests for update.')
def GetCommandLineOptions():
"""Parse command line arguments.
Returns:
An options object containing command line arguments and their values.
"""
parser = OptionParser()
parser.add_option('--reva', dest='reva', type='int',
help='Starting revision of new range.',
metavar='START_REVISION')
parser.add_option('--revb', dest='revb', type='int',
help='Ending revision of new range.',
metavar='END_REVISION')
parser.add_option('-f', dest='tests_filter',
help='Regex to use for filtering tests to be updated. '
'At least one of -filter or -input_file must be provided. '
'If both are provided, then input-file is used.',
metavar='FILTER', default='')
parser.add_option('-i', dest='input_file',
help='Optional path to file with reg-exes for tests to'
' update. If provided, it overrides the filter argument.',
metavar='INPUT_FILE', default='')
parser.add_option('--config', dest='config_file',
default=perf_ex_lib.DEFAULT_CONFIG_FILE,
help='Set the config file to FILE.', metavar='FILE')
parser.add_option('-v', dest='verbose', action='store_true', default=False,
help='Enable verbose output.')
options = parser.parse_args()[0]
return options
def Main():
"""Main driver function."""
options = GetCommandLineOptions()
_SetLogger(options.verbose)
# Do some command-line validation
if not options.input_file and not options.tests_filter:
logging.error('At least one of input-file or test-filter must be provided.')
exit(1)
if options.input_file and options.tests_filter:
logging.error('Specify only one of input file or test-filter.')
exit(1)
if not options.reva or not options.revb:
logging.error('Start and end revision of range must be specified.')
exit(1)
# Load config.
config = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile(options.config_file))
# Obtain the perf expectations file from the config file.
perf_file = os.path.join(
os.path.dirname(options.config_file), config['perf_file'])
# We should have all the information we require now.
# On to the real thang.
# First, get all the existing tests from the original perf_expectations file.
all_tests = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile(perf_file))
all_test_keys = all_tests.keys()
# Remove the load key, because we don't want to modify it.
all_test_keys.remove('load')
# Keep tests sorted, like in the original file.
all_test_keys.sort()
# Next, get all tests that have been identified for an update.
tests_to_update = []
if options.input_file:
# Tests to update have been specified in an input_file.
# Get contents of file.
tests_filter = perf_ex_lib.ReadFile(options.input_file)
elif options.tests_filter:
# Tests to update have been specified as a reg-ex filter.
tests_filter = options.tests_filter
# Get tests to update based on filter specified.
tests_to_update = GetTestsToUpdate(tests_filter, all_test_keys)
logging.info('Done obtaining matching tests.')
# Now, prepare tests for update.
PrepareTestsForUpdate(tests_to_update, all_tests, options.reva, options.revb)
# Finally, write modified tests back to perf_expectations file.
perf_ex_lib.WriteJson(perf_file, all_tests, all_test_keys,
calculate_sha1=False)
logging.info('Done writing tests for update to %s.', perf_file)
def _SetLogger(verbose):
log_level = _DEFAULT_LOG_LEVEL
if verbose:
log_level = logging.DEBUG
logging.basicConfig(level=log_level, format='%(message)s')
if __name__ == '__main__':
Main()

@@ -1,204 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for update_perf_expectations."""
import copy
from StringIO import StringIO
import unittest
import make_expectations as perf_ex_lib
import update_perf_expectations as upe_mod
# A separate .json file contains the list of test cases we'll use.
# The tests used to be defined inline here, but are >80 characters in length.
# Now they are expected to be defined in file ./sample_test_cases.json.
# Create a dictionary of tests using .json file.
all_tests = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile('sample_test_cases.json'))
# Get all keys.
all_tests_keys = all_tests.keys()
def VerifyPreparedTests(self, tests_to_update, reva, revb):
# Work with a copy of the set of tests.
all_tests_copy = copy.deepcopy(all_tests)
upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests_copy, reva, revb)
# Make sure reva < revb
if reva > revb:
temp = reva
reva = revb
revb = temp
# Run through all tests and make sure only those that were
# specified to be modified had their 'sha1' value removed.
for test_key in all_tests_keys:
new_test_value = all_tests_copy[test_key]
original_test_value = all_tests[test_key]
if test_key in tests_to_update:
# Make sure there is no "sha1".
self.assertFalse('sha1' in new_test_value)
# Make sure reva and revb values are correctly set.
self.assertEqual(reva, new_test_value['reva'])
self.assertEqual(revb, new_test_value['revb'])
else:
# Make sure there is an "sha1" value
self.assertTrue('sha1' in new_test_value)
# Make sure the sha1, reva and revb values have not changed.
self.assertEqual(original_test_value['sha1'], new_test_value['sha1'])
self.assertEqual(original_test_value['reva'], new_test_value['reva'])
self.assertEqual(original_test_value['revb'], new_test_value['revb'])
class UpdatePerfExpectationsTest(unittest.TestCase):
def testFilterMatch(self):
"""Verifies different regular expressions test filter."""
self.maxDiff = None
# Tests to update specified by a single literal string.
tests_to_update = 'win-release/media_tests_av_perf/fps/tulip2.webm'
expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
self.assertEqual(expected_tests_list,
upe_mod.GetMatchingTests(tests_to_update,
all_tests_keys))
# Tests to update specified by a single reg-ex
tests_to_update = 'win-release/media_tests_av_perf/fps.*'
expected_tests_list = ['win-release/media_tests_av_perf/fps/crowd1080.webm',
'win-release/media_tests_av_perf/fps/crowd2160.webm',
'win-release/media_tests_av_perf/fps/crowd360.webm',
'win-release/media_tests_av_perf/fps/crowd480.webm',
'win-release/media_tests_av_perf/fps/crowd720.webm',
'win-release/media_tests_av_perf/fps/tulip2.m4a',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetMatchingTests(tests_to_update,
all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
# Tests to update are specified by a single reg-ex, spanning multiple OSes.
tests_to_update = '.*-release/media_tests_av_perf/fps.*'
expected_tests_list = ['linux-release/media_tests_av_perf/fps/tulip2.m4a',
'linux-release/media_tests_av_perf/fps/tulip2.mp3',
'linux-release/media_tests_av_perf/fps/tulip2.mp4',
'linux-release/media_tests_av_perf/fps/tulip2.ogg',
'linux-release/media_tests_av_perf/fps/tulip2.ogv',
'linux-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/crowd1080.webm',
'win-release/media_tests_av_perf/fps/crowd2160.webm',
'win-release/media_tests_av_perf/fps/crowd360.webm',
'win-release/media_tests_av_perf/fps/crowd480.webm',
'win-release/media_tests_av_perf/fps/crowd720.webm',
'win-release/media_tests_av_perf/fps/tulip2.m4a',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetMatchingTests(tests_to_update,
all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
def testLinesFromInputFile(self):
"""Verifies different string formats specified in input file."""
# Tests to update have been specified by a single literal string in
# an input file.
# Use the StringIO class to mock a file object.
lines_from_file = StringIO(
'win-release/media_tests_av_perf/fps/tulip2.webm')
contents = lines_from_file.read()
expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
lines_from_file.close()
# Tests to update specified by a single reg-ex in an input file.
lines_from_file = StringIO('win-release/media_tests_av_perf/fps/tulip2.*\n')
contents = lines_from_file.read()
expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.m4a',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
lines_from_file.close()
# Tests to update specified by multiple lines in an input file.
lines_from_file = StringIO(
'.*-release/media_tests_av_perf/fps/tulip2.*\n'
'win-release/media_tests_av_perf/dropped_fps/tulip2.*\n'
'linux-release/media_tests_av_perf/audio_latency/latency')
contents = lines_from_file.read()
expected_tests_list = [
'linux-release/media_tests_av_perf/audio_latency/latency',
'linux-release/media_tests_av_perf/fps/tulip2.m4a',
'linux-release/media_tests_av_perf/fps/tulip2.mp3',
'linux-release/media_tests_av_perf/fps/tulip2.mp4',
'linux-release/media_tests_av_perf/fps/tulip2.ogg',
'linux-release/media_tests_av_perf/fps/tulip2.ogv',
'linux-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
'win-release/media_tests_av_perf/fps/tulip2.m4a',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
lines_from_file.close()
def testPreparingForUpdate(self):
"""Verifies that tests to be modified are changed as expected."""
tests_to_update = [
'linux-release/media_tests_av_perf/audio_latency/latency',
'linux-release/media_tests_av_perf/fps/tulip2.m4a',
'linux-release/media_tests_av_perf/fps/tulip2.mp3',
'linux-release/media_tests_av_perf/fps/tulip2.mp4',
'linux-release/media_tests_av_perf/fps/tulip2.ogg',
'linux-release/media_tests_av_perf/fps/tulip2.ogv',
'linux-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
# Test regular positive integers.
reva = 12345
revb = 54321
VerifyPreparedTests(self, tests_to_update, reva, revb)
# Test negative values.
reva = -54321
revb = 12345
with self.assertRaises(ValueError):
upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
# Test reva greater than revb.
reva = 54321
revb = 12345
upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
# Test non-integer values
reva = 'sds'
revb = 12345
with self.assertRaises(ValueError):
upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
if __name__ == '__main__':
unittest.main()