Commit 7315d7b3 authored by Vadim Gorbachev (bmsdave), committed by Commit Bot

Preparing v8 to use with python3 /tools

There are now fewer than 400 days until the end of life
of Python 2 (aka _legacy_ Python) https://pythonclock.org/ .
The code compatibility check for python2 and python3
used the following tools: futurize, flake8
You can see the reports here: https://travis-ci.com/bmsdave/v8/builds

This CL was uploaded by git cl split.

Bug: v8:8594
Change-Id: I661c52a70527e8ddde841fee6d4dcba282b4a938
Reviewed-on: https://chromium-review.googlesource.com/c/1470123
Commit-Queue: Sergiy Belozorov <sergiyb@chromium.org>
Reviewed-by: Sergiy Belozorov <sergiyb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59675}
parent ab2180cd
......@@ -165,6 +165,7 @@ Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Tobias Burnus <burnus@net-b.de>
Tobias Nießen <tniessen@tnie.de>
Ujjwal Sharma <usharma1998@gmail.com>
Vadim Gorbachev <bmsdave@gmail.com>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
......
......@@ -35,6 +35,9 @@
# and output special error string in case of non-zero exit code.
# Then we parse the output of 'adb shell' and look for that error string.
# for py2/py3 compatibility
from __future__ import print_function
import os
from os.path import join, dirname, abspath
import subprocess
......@@ -58,8 +61,8 @@ def Execute(cmdline):
exit_code = process.wait()
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
output = open(outname).read()
errors = open(errname).read()
os.unlink(outname)
os.unlink(errname)
sys.stdout.write(output)
......
#!/usr/bin/env python3
#!/usr/bin/env python
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
......@@ -22,6 +23,9 @@ will output
[10/10] [default] : avg 22,885.80 stddev 1,941.80 ( 17,584.00 - 24,266.00) Kps
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import math
import re
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import math
import multiprocessing
......@@ -294,7 +297,7 @@ def WrapRunOne(args):
return RunOne(*args)
def RunAll(args):
for op in args.op:
for r in xrange(args.runs):
for r in range(args.runs):
yield (op, args.num_inputs, args.binary)
def Main():
......
......@@ -17,6 +17,9 @@ Commands:
For each command, you can try ./runtime-call-stats.py help command.
'''
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import json
import os
......@@ -46,7 +49,7 @@ def print_command(cmd_args):
elif ' ' in arg:
arg = "'{}'".format(arg)
return arg
print " ".join(map(fix_for_printing, cmd_args))
print(" ".join(map(fix_for_printing, cmd_args)))
def start_replay_server(args, sites, discard_output=True):
......@@ -66,15 +69,15 @@ def start_replay_server(args, sites, discard_output=True):
"--inject_scripts=deterministic.js,{}".format(injection),
args.replay_wpr,
]
print "=" * 80
print("=" * 80)
print_command(cmd_args)
if discard_output:
with open(os.devnull, 'w') as null:
server = subprocess.Popen(cmd_args, stdout=null, stderr=null)
else:
server = subprocess.Popen(cmd_args)
print "RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid)
print "=" * 80
print("RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid))
print("=" * 80)
return {'process': server, 'injection': injection}
......@@ -85,7 +88,7 @@ def stop_replay_server(server):
def generate_injection(f, sites, refreshes=0):
print >> f, """\
print("""\
(function() {
var s = window.sessionStorage.getItem("refreshCounter");
var refreshTotal = """, refreshes, """;
......@@ -127,7 +130,7 @@ def generate_injection(f, sites, refreshes=0):
var sites =
""", json.dumps(sites), """;
onLoad(window.location.href);
})();"""
})();""", file=f)
def get_chrome_flags(js_flags, user_data_dir, arg_delimiter=""):
return [
......@@ -161,9 +164,9 @@ def get_chrome_replay_flags(args, arg_delimiter=""):
]
def run_site(site, domain, args, timeout=None):
print "="*80
print "RUNNING DOMAIN %s" % domain
print "="*80
print("="*80)
print("RUNNING DOMAIN %s" % domain)
print("="*80)
result_template = "{domain}#{count}.txt" if args.repeat else "{domain}.txt"
count = 0
if timeout is None: timeout = args.timeout
......@@ -196,9 +199,9 @@ def run_site(site, domain, args, timeout=None):
"timeout", str(timeout),
args.with_chrome
] + chrome_flags + [ site ]
print "- " * 40
print("- " * 40)
print_command(cmd_args)
print "- " * 40
print("- " * 40)
with open(result, "wt") as f:
with open(args.log_stderr or os.devnull, 'at') as err:
status = subprocess.call(cmd_args, stdout=f, stderr=err)
......@@ -212,8 +215,8 @@ def run_site(site, domain, args, timeout=None):
if os.path.isfile(result) and os.path.getsize(result) > 0:
if args.print_url:
with open(result, "at") as f:
print >> f
print >> f, "URL: {}".format(site)
print(file=f)
print("URL: {}".format(site), file=f)
retries_since_good_run = 0
break
if retries_since_good_run > MAX_NOF_RETRIES:
......@@ -294,7 +297,7 @@ def do_run(args):
# Run them.
for site, domain, count, timeout in L:
if count is not None: domain = "{}%{}".format(domain, count)
print(site, domain, timeout)
print((site, domain, timeout))
run_site(site, domain, args, timeout)
finally:
if replay_server:
......@@ -459,11 +462,11 @@ def print_stats(S, args):
def stats(s, units=""):
conf = "{:0.1f}({:0.2f}%)".format(s['ci']['abs'], s['ci']['perc'])
return "{:8.1f}{} +/- {:15s}".format(s['average'], units, conf)
print "{:>50s} {} {}".format(
print("{:>50s} {} {}".format(
key,
stats(value['time_stat'], units="ms"),
stats(value['count_stat'])
)
))
# Print and calculate partial sums, if necessary.
for i in range(low, high):
print_entry(*L[i])
......@@ -479,7 +482,7 @@ def print_stats(S, args):
partial['count_list'][j] += v
# Print totals, if necessary.
if args.totals:
print '-' * 80
print('-' * 80)
if args.limit != 0 and not args.aggregate:
partial['time_stat'] = statistics(partial['time_list'])
partial['count_stat'] = statistics(partial['count_list'])
......@@ -500,9 +503,9 @@ def do_stats(args):
create_total_page_stats(domains, args)
for i, domain in enumerate(sorted(domains)):
if len(domains) > 1:
if i > 0: print
print "{}:".format(domain)
print '=' * 80
if i > 0: print()
print("{}:".format(domain))
print('=' * 80)
domain_stats = domains[domain]
for key in domain_stats:
domain_stats[key]['time_stat'] = \
......@@ -575,7 +578,7 @@ def do_json(args):
entry.append(round(s['ci']['perc'], 2))
stats.append(entry)
domains[domain] = stats
print json.dumps(versions, separators=(',', ':'))
print(json.dumps(versions, separators=(',', ':')))
# Help.
......
......@@ -2,7 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
print """
# for py2/py3 compatibility
from __future__ import print_function
print("""
1
v8-foozzie source: name/to/a/file.js
2
......@@ -11,4 +14,4 @@ v8-foozzie source: name/to/file.js
^
3
unknown
"""
""")
......@@ -2,7 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
print """
# for py2/py3 compatibility
from __future__ import print_function
print("""
1
v8-foozzie source: name/to/a/file.js
2
......@@ -11,4 +14,4 @@ v8-foozzie source: name/to/file.js
^
3
unknown
"""
""")
......@@ -2,7 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
print """
# for py2/py3 compatibility
from __future__ import print_function
print("""
1
v8-foozzie source: name/to/a/file.js
2
......@@ -11,4 +14,4 @@ v8-foozzie source: name/to/file.js
^
3
not unknown
"""
""")
......@@ -7,6 +7,9 @@
V8 correctness fuzzer launcher script.
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import hashlib
import itertools
......@@ -227,8 +230,8 @@ def content_bailout(content, ignore_fun):
"""Print failure state and return if ignore_fun matches content."""
bug = (ignore_fun(content) or '').strip()
if bug:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', source_key='', suppression=bug)
print(FAILURE_HEADER_TEMPLATE % dict(
configs='', source_key='', suppression=bug))
return True
return False
......@@ -238,10 +241,10 @@ def pass_bailout(output, step_number):
if output.HasTimedOut():
# Dashed output, so that no other clusterfuzz tools can match the
# words timeout or crash.
print '# V8 correctness - T-I-M-E-O-U-T %d' % step_number
print('# V8 correctness - T-I-M-E-O-U-T %d' % step_number)
return True
if output.HasCrashed():
print '# V8 correctness - C-R-A-S-H %d' % step_number
print('# V8 correctness - C-R-A-S-H %d' % step_number)
return True
return False
......@@ -250,8 +253,8 @@ def fail_bailout(output, ignore_by_output_fun):
"""Print failure state and return if ignore_by_output_fun matches output."""
bug = (ignore_by_output_fun(output.stdout) or '').strip()
if bug:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', source_key='', suppression=bug)
print(FAILURE_HEADER_TEMPLATE % dict(
configs='', source_key='', suppression=bug))
return True
return False
......@@ -289,7 +292,7 @@ def main():
if options.first_arch != options.second_arch:
preamble.append(ARCH_MOCKS)
args = [d8] + config_flags + preamble + [options.testcase]
print " ".join(args)
print(" ".join(args))
if d8.endswith('.py'):
# Wrap with python in tests.
args = [sys.executable] + args
......@@ -333,7 +336,7 @@ def main():
# will require changes on the clusterfuzz side.
first_config_label = '%s,%s' % (options.first_arch, options.first_config)
second_config_label = '%s,%s' % (options.second_arch, options.second_config)
print (FAILURE_TEMPLATE % dict(
print((FAILURE_TEMPLATE % dict(
configs='%s:%s' % (first_config_label, second_config_label),
source_key=source_key,
suppression='', # We can't tie bugs to differences.
......@@ -347,14 +350,14 @@ def main():
second_config_output.stdout.decode('utf-8', 'replace'),
source=source,
difference=difference.decode('utf-8', 'replace'),
)).encode('utf-8', 'replace')
)).encode('utf-8', 'replace'))
return RETURN_FAIL
# TODO(machenbach): Figure out if we could also return a bug in case there's
# no difference, but one of the line suppressions has matched - and without
# the match there would be a difference.
print '# V8 correctness - pass'
print('# V8 correctness - pass')
return RETURN_PASS
......@@ -364,17 +367,17 @@ if __name__ == "__main__":
except SystemExit:
# Make sure clusterfuzz reports internal errors and wrong usage.
# Use one label for all internal and usage errors.
print FAILURE_HEADER_TEMPLATE % dict(
configs='', source_key='', suppression='wrong_usage')
print(FAILURE_HEADER_TEMPLATE % dict(
configs='', source_key='', suppression='wrong_usage'))
result = RETURN_FAIL
except MemoryError:
# Running out of memory happens occasionally but is not actionable.
print '# V8 correctness - pass'
print('# V8 correctness - pass')
result = RETURN_PASS
except Exception as e:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', source_key='', suppression='internal_error')
print '# Internal error: %s' % e
print(FAILURE_HEADER_TEMPLATE % dict(
configs='', source_key='', suppression='internal_error'))
print('# Internal error: %s' % e)
traceback.print_exc(file=sys.stdout)
result = RETURN_FAIL
......
......@@ -35,6 +35,9 @@
# on all supported build platforms, but Python is, and hence this provides
# us with an easy and uniform way of doing this on all platforms.
# for py2/py3 compatibility
from __future__ import print_function
import optparse
......@@ -49,7 +52,7 @@ def Concatenate(filenames):
True, if the operation was successful.
"""
if len(filenames) < 2:
print "An error occurred generating %s:\nNothing to do." % filenames[-1]
print("An error occurred generating %s:\nNothing to do." % filenames[-1])
return False
try:
......@@ -59,7 +62,7 @@ def Concatenate(filenames):
target.write(current.read())
return True
except IOError as e:
print "An error occurred when writing %s:\n%s" % (filenames[-1], e)
print("An error occurred when writing %s:\n%s" % (filenames[-1], e))
return False
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
from datetime import datetime
import re
......
......@@ -15,6 +15,10 @@ The example usage is as follows:
If no <arch> is given, it generates tags file for all arches:
$ tools/dev/gen-tags.py
"""
# for py2/py3 compatibility
from __future__ import print_function
import os
import subprocess
import sys
......
......@@ -38,6 +38,9 @@ v8gen.py list
-------------------------------------------------------------------------------
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import re
......@@ -144,8 +147,8 @@ class GenerateGnArgs(object):
# Check for builder/config in mb config.
if self._options.builder not in self._mbw.masters[self._options.master]:
print '%s does not exist in %s for %s' % (
self._options.builder, CONFIG, self._options.master)
print('%s does not exist in %s for %s' % (
self._options.builder, CONFIG, self._options.master))
return 1
# TODO(machenbach): Check if the requested configurations has switched to
......@@ -186,19 +189,19 @@ class GenerateGnArgs(object):
return 0
def cmd_list(self):
print '\n'.join(sorted(self._mbw.masters[self._options.master]))
print('\n'.join(sorted(self._mbw.masters[self._options.master])))
return 0
def verbose_print_1(self, text):
if self._options.verbosity >= 1:
print '#' * 80
print text
print('#' * 80)
print(text)
def verbose_print_2(self, text):
if self._options.verbosity >= 2:
indent = ' ' * 2
for l in text.splitlines():
print indent + l
print(indent + l)
def _call_cmd(self, args):
self.verbose_print_1(' '.join(args))
......@@ -290,9 +293,9 @@ class GenerateGnArgs(object):
self._mbw.ReadConfigFile()
if not self._options.master in self._mbw.masters:
print '%s not found in %s\n' % (self._options.master, CONFIG)
print 'Choose one of:\n%s\n' % (
'\n'.join(sorted(self._mbw.masters.keys())))
print('%s not found in %s\n' % (self._options.master, CONFIG))
print('Choose one of:\n%s\n' % (
'\n'.join(sorted(self._mbw.masters.keys()))))
return 1
return self._options.func()
......
......@@ -6,6 +6,9 @@
# This script executes dumpcpp.js, collects all dumped C++ symbols,
# and merges them back into v8 log.
# for py2/py3 compatibility
from __future__ import print_function
import os
import platform
import re
......@@ -44,10 +47,10 @@ if __name__ == '__main__':
if d8_line:
d8_exec = d8_line.group(1)
if not is_file_executable(d8_exec):
print 'd8 binary path found in {} is not executable.'.format(log_file)
print('d8 binary path found in {} is not executable.'.format(log_file))
sys.exit(-1)
else:
print 'No d8 binary path found in {}.'.format(log_file)
print('No d8 binary path found in {}.'.format(log_file))
sys.exit(-1)
args = [d8_exec] + JS_FILES + ['--'] + args
......@@ -57,9 +60,9 @@ if __name__ == '__main__':
stdin=f)
out, err = sp.communicate()
if debug:
print err
print(err)
if sp.returncode != 0:
print out
print(out)
exit(-1)
if on_windows and out:
......
......@@ -7,10 +7,14 @@
"""This script is used to analyze GCTracer's NVP output."""
# for py2/py3 compatibility
from __future__ import print_function
from argparse import ArgumentParser
from copy import deepcopy
from gc_nvp_common import split_nvp
from math import ceil,log
from math import ceil, log
from sys import stdin
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import subprocess
import sys
......
......@@ -4,6 +4,9 @@
"""Small utility function to find depot_tools and add it to the python path.
"""
# for py2/py3 compatibility
from __future__ import print_function
import os
import sys
......@@ -36,5 +39,5 @@ def add_depot_tools_to_path():
return i
previous_dir = root_dir
root_dir = os.path.dirname(root_dir)
print >> sys.stderr, 'Failed to find depot_tools'
print('Failed to find depot_tools', file=sys.stderr)
return None
......@@ -11,20 +11,25 @@
# Usage: gc-nvp-to-csv.py <GC-trace-filename>
#
# for py2/py3 compatibility
from __future__ import print_function
import sys
import gc_nvp_common
def process_trace(filename):
trace = gc_nvp_common.parse_gc_trace(filename)
if len(trace):
keys = trace[0].keys()
print ', '.join(keys)
print(', '.join(keys))
for entry in trace:
print ', '.join(map(lambda key: str(entry[key]), keys))
print(', '.join(map(lambda key: str(entry[key]), keys)))
if len(sys.argv) != 2:
print "Usage: %s <GC-trace-filename>" % sys.argv[0]
print("Usage: %s <GC-trace-filename>" % sys.argv[0])
sys.exit(1)
process_trace(sys.argv[1])
......@@ -37,10 +37,21 @@
#
# for py2/py3 compatibility
from __future__ import with_statement
from __future__ import print_function
from functools import reduce
import sys, types, subprocess, math
import gc_nvp_common
try:
long # Python 2
except NameError:
long = int # Python 3
def flatten(l):
flat = []
for i in l: flat.extend(i)
......@@ -62,7 +73,7 @@ class Item(object):
self.title = title
self.axis = axis
self.props = keywords
if type(field) is types.ListType:
if type(field) is list:
self.field = field
else:
self.field = [field]
......@@ -135,7 +146,7 @@ def is_y2_used(plot):
def get_field(trace_line, field):
t = type(field)
if t is types.StringType:
if t is bytes:
return trace_line[field]
elif t is types.FunctionType:
return field(trace_line)
......@@ -177,7 +188,7 @@ def plot_all(plots, trace, prefix):
outfilename = "%s_%d.png" % (prefix, len(charts))
charts.append(outfilename)
script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
print 'Plotting %s...' % outfilename
print('Plotting %s...' % outfilename)
gnuplot(script)
return charts
......@@ -350,10 +361,10 @@ def process_trace(filename):
out.write('<img src="%s">' % chart)
out.write('</body></html>')
print "%s generated." % (filename + '.html')
print("%s generated." % (filename + '.html'))
if len(sys.argv) != 2:
print "Usage: %s <GC-trace-filename>" % sys.argv[0]
print("Usage: %s <GC-trace-filename>" % sys.argv[0])
sys.exit(1)
process_trace(sys.argv[1])
......@@ -20,6 +20,9 @@ ______________ file2
______________ finish <exit code of clang --opt file2> ______________
"""
# for py2/py3 compatibility
from __future__ import print_function
import itertools
import multiprocessing
import subprocess
......@@ -39,6 +42,6 @@ if __name__ == '__main__':
cmdlines = ["%s %s" % (sys.argv[1], filename) for filename in sys.argv[2:]]
for filename, result in itertools.izip(
sys.argv[2:], pool.imap(invoke, cmdlines)):
print "______________ %s" % filename
print result[0]
print "______________ finish %d ______________" % result[1]
print("______________ %s" % filename)
print(result[0])
print("______________ finish %d ______________" % result[1])
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import os
import os.path
import signal
......@@ -19,8 +22,8 @@ BASE_PATH = os.path.dirname(os.path.dirname(GCMOLE_PATH))
assert len(sys.argv) == 2
if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-from-dsl.h"):
print "Expected generated headers in out/Release/gen."
print "Either build v8 in out/Release or change gcmole.lua:115"
print("Expected generated headers in out/Release/gen.")
print("Either build v8 in out/Release or change gcmole.lua:115")
sys.exit(-1)
proc = subprocess.Popen(
......
......@@ -25,12 +25,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import re
import tempfile
import os
import subprocess
import time
import gdb
kSmiTag = 0
kSmiTagSize = 1
......
#!/usr/bin/env python3
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
from collections import namedtuple
import textwrap
......
......@@ -46,6 +46,9 @@
# the generated libv8 binary.
#
# for py2/py3 compatibility
from __future__ import print_function
import re
import sys
......@@ -628,7 +631,7 @@ def emit_set(out, consts):
# Emit the whole output file.
#
def emit_config():
out = file(sys.argv[1], 'w');
out = open(sys.argv[1], 'w');
out.write(header);
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import json
import optparse
import os
......
......@@ -13,6 +13,9 @@ BUILD.gn. Just compile to check whether there are any violations to the rule
that each header must be includable in isolation.
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import os.path
......@@ -58,7 +61,7 @@ def parse_args():
def printv(line):
if args.verbose:
print line
print(line)
def find_all_headers():
......
......@@ -8,6 +8,9 @@ This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
# for py2/py3 compatibility
from __future__ import print_function
import sys
......@@ -21,23 +24,23 @@ def print_landmines(): # pylint: disable=invalid-name
# dependency problems, fix the dependency problems instead of adding a
# landmine.
# See the Chromium version in src/build/get_landmines.py for usage examples.
print 'Need to clobber after ICU52 roll.'
print 'Landmines test.'
print 'Activating MSVS 2013.'
print 'Revert activation of MSVS 2013.'
print 'Activating MSVS 2013 again.'
print 'Clobber after ICU roll.'
print 'Moar clobbering...'
print 'Remove build/android.gypi'
print 'Cleanup after windows ninja switch attempt.'
print 'Switching to pinned msvs toolchain.'
print 'Clobbering to hopefully resolve problem with mksnapshot'
print 'Clobber after ICU roll.'
print 'Clobber after Android NDK update.'
print 'Clober to fix windows build problems.'
print 'Clober again to fix windows build problems.'
print 'Clobber to possibly resolve failure on win-32 bot.'
print 'Clobber for http://crbug.com/668958.'
print('Need to clobber after ICU52 roll.')
print('Landmines test.')
print('Activating MSVS 2013.')
print('Revert activation of MSVS 2013.')
print('Activating MSVS 2013 again.')
print('Clobber after ICU roll.')
print('Moar clobbering...')
print('Remove build/android.gypi')
print('Cleanup after windows ninja switch attempt.')
print('Switching to pinned msvs toolchain.')
print('Clobbering to hopefully resolve problem with mksnapshot')
print('Clobber after ICU roll.')
print('Clobber after Android NDK update.')
print('Clober to fix windows build problems.')
print('Clober again to fix windows build problems.')
print('Clobber to possibly resolve failure on win-32 bot.')
print('Clobber for http://crbug.com/668958.')
return 0
......
This diff is collapsed.
......@@ -5,6 +5,9 @@
# found in the LICENSE file.
#
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import heapq
import json
......@@ -54,8 +57,8 @@ def warn_if_counter_may_have_saturated(dispatches_table):
for source, counters_from_source in iteritems(dispatches_table):
for destination, counter in iteritems(counters_from_source):
if counter == __COUNTER_MAX:
print "WARNING: {} -> {} may have saturated.".format(source,
destination)
print("WARNING: {} -> {} may have saturated.".format(source,
destination))
def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
......@@ -71,9 +74,9 @@ def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
def print_top_bytecode_dispatch_pairs(dispatches_table, top_count):
top_bytecode_dispatch_pairs = (
find_top_bytecode_dispatch_pairs(dispatches_table, top_count))
print "Top {} bytecode dispatch pairs:".format(top_count)
print("Top {} bytecode dispatch pairs:".format(top_count))
for source, destination, counter in top_bytecode_dispatch_pairs:
print "{:>12d}\t{} -> {}".format(counter, source, destination)
print("{:>12d}\t{} -> {}".format(counter, source, destination))
def find_top_bytecodes(dispatches_table):
......@@ -87,9 +90,9 @@ def find_top_bytecodes(dispatches_table):
def print_top_bytecodes(dispatches_table):
top_bytecodes = find_top_bytecodes(dispatches_table)
print "Top bytecodes:"
print("Top bytecodes:")
for bytecode, counter in top_bytecodes:
print "{:>12d}\t{}".format(counter, bytecode)
print("{:>12d}\t{}".format(counter, bytecode))
def find_top_dispatch_sources_and_destinations(
......@@ -116,13 +119,13 @@ def print_top_dispatch_sources_and_destinations(dispatches_table, bytecode,
top_count, sort_relative):
top_sources, top_destinations = find_top_dispatch_sources_and_destinations(
dispatches_table, bytecode, top_count, sort_relative)
print "Top sources of dispatches to {}:".format(bytecode)
print("Top sources of dispatches to {}:".format(bytecode))
for source_name, counter, ratio in top_sources:
print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name)
print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name))
print "\nTop destinations of dispatches from {}:".format(bytecode)
print("\nTop destinations of dispatches from {}:".format(bytecode))
for destination_name, counter, ratio in top_destinations:
print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name)
print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name))
def build_counters_matrix(dispatches_table):
......
#! /usr/bin/python2
#! /usr/bin/python
#
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import collections
import os
......@@ -94,18 +97,18 @@ def print_disassembly_annotation(offset_counts, bytecode_disassembly):
return offsets.pop() if offsets else -1
current_offset = next_offset()
print current_offset;
print(current_offset);
for line in bytecode_disassembly:
disassembly_offset = int(line.split()[1])
if disassembly_offset == current_offset:
count = offset_counts[current_offset]
percentage = 100.0 * count / total
print "{:>8d} ({:>5.1f}%) ".format(count, percentage),
print("{:>8d} ({:>5.1f}%) ".format(count, percentage), end=' ')
current_offset = next_offset()
else:
print " ",
print line
print(" ", end=' ')
print(line)
if offsets:
print ("WARNING: Offsets not empty. Output is most likely invalid due to "
......
......@@ -31,6 +31,9 @@
# char arrays. It is used for embedded JavaScript code in the V8
# library.
# for py2/py3 compatibility
from functools import reduce
import os, re
import optparse
import textwrap
......@@ -249,7 +252,7 @@ def BuildMetadata(sources, source_bytes, native_type):
get_script_name_cases = []
get_script_source_cases = []
offset = 0
for i in xrange(len(sources.modules)):
for i in range(len(sources.modules)):
native_name = "native %s.js" % sources.names[i]
d = {
"i": i,
......@@ -290,7 +293,7 @@ def PutInt(blob_file, value):
value_with_length = (value << 2) | (size - 1)
byte_sequence = bytearray()
for i in xrange(size):
for i in range(size):
byte_sequence.append(value_with_length & 255)
value_with_length >>= 8;
blob_file.write(byte_sequence)
......@@ -312,7 +315,7 @@ def WriteStartupBlob(sources, startup_blob):
output = open(startup_blob, "wb")
PutInt(output, len(sources.names))
for i in xrange(len(sources.names)):
for i in range(len(sources.names)):
PutStr(output, sources.names[i]);
PutStr(output, sources.modules[i]);
......
......@@ -27,6 +27,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import bisect
import collections
import ctypes
......@@ -157,7 +161,7 @@ class Code(object):
# Print annotated lines.
address = lines[0][0]
total_count = 0
for i in xrange(len(lines)):
for i in range(len(lines)):
start_offset = lines[i][0] - address
if i == len(lines) - 1:
end_offset = self.end_address - self.start_address
......@@ -183,10 +187,10 @@ class Code(object):
# 6 for the percentage number, incl. the '.'
# 1 for the '%' sign
# => 15
print "%5d | %6.2f%% %x(%d): %s" % (count, percent, offset, offset, lines[i][1])
print("%5d | %6.2f%% %x(%d): %s" % (count, percent, offset, offset, lines[i][1]))
else:
print "%s %x(%d): %s" % (" " * 15, offset, offset, lines[i][1])
print
print("%s %x(%d): %s" % (" " * 15, offset, offset, lines[i][1]))
print()
assert total_count == self.self_ticks, \
"Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
......@@ -267,9 +271,8 @@ class CodeMap(object):
pages = 0
while page_id < limit_id:
if max_pages >= 0 and pages > max_pages:
print >>sys.stderr, \
"Warning: page limit (%d) reached for %s [%s]" % (
max_pages, code.name, code.origin)
print("Warning: page limit (%d) reached for %s [%s]" % (
max_pages, code.name, code.origin), file=sys.stderr)
break
if page_id in self.pages:
page = self.pages[page_id]
......@@ -309,7 +312,7 @@ class CodeMap(object):
def Print(self):
for code in self.AllCode():
print code
print(code)
def Find(self, pc):
if pc < self.min_address or pc >= self.max_address:
......@@ -411,7 +414,7 @@ class LogReader(object):
continue
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
print("Warning: Not found %x" % old_start_address, file=sys.stderr)
continue
assert code.start_address == old_start_address, \
"Inexact move address %x for %s" % (old_start_address, code)
......@@ -591,7 +594,7 @@ class TraceReader(object):
self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
print >>sys.stderr, "Warning: unsupported trace header magic"
print("Warning: unsupported trace header magic", file=sys.stderr)
self.offset = self.trace_header.data_offset
self.limit = self.trace_header.data_offset + self.trace_header.data_size
assert self.limit <= self.trace.size(), \
......@@ -642,7 +645,7 @@ class TraceReader(object):
return sample
sample.ips = []
offset += self.header_size + ctypes.sizeof(sample)
for _ in xrange(sample.nr):
for _ in range(sample.nr):
sample.ips.append(
self.ip_struct.from_buffer(self.trace, offset).value)
offset += self.ip_size
......@@ -786,7 +789,7 @@ class LibraryRepo(object):
def _LoadKernelSymbols(self, code_map):
if not os.path.exists(KERNEL_ALLSYMS_FILE):
print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
print("Warning: %s not found" % KERNEL_ALLSYMS_FILE, file=sys.stderr)
return False
kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
code = None
......@@ -804,35 +807,35 @@ class LibraryRepo(object):
def PrintReport(code_map, library_repo, arch, ticks, options):
print "Ticks per symbol:"
print("Ticks per symbol:")
used_code = [code for code in code_map.UsedCode()]
used_code.sort(key=lambda x: x.self_ticks, reverse=True)
for i, code in enumerate(used_code):
code_ticks = code.self_ticks
print "%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
code.FullName(), code.origin)
print("%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
code.FullName(), code.origin))
if options.disasm_all or i < options.disasm_top:
code.PrintAnnotated(arch, options)
print
print "Ticks per library:"
print()
print("Ticks per library:")
mmap_infos = [m for m in library_repo.infos if m.ticks > 0]
mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
for mmap_info in mmap_infos:
mmap_ticks = mmap_info.ticks
print "%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
mmap_info.unique_name)
print("%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
mmap_info.unique_name))
def PrintDot(code_map, options):
  """Print the call graph of profiled code objects in Graphviz DOT format."""
  print("digraph G {")
  for code in code_map.UsedCode():
    # Skip rarely-sampled code objects to keep the graph readable.
    if code.self_ticks < 10:
      continue
    print("n%d [shape=box,label=\"%s\"];" % (code.id, code.name))
    if code.callee_ticks:
      # dict.items() works on both py2 and py3; iteritems() is py2-only
      # and would raise AttributeError under py3.
      for callee, ticks in code.callee_ticks.items():
        print("n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks))
  print("}")
if __name__ == "__main__":
......@@ -877,8 +880,8 @@ if __name__ == "__main__":
options, args = parser.parse_args()
if not options.quiet:
print "V8 log: %s, %s.ll" % (options.log, options.log)
print "Perf trace file: %s" % options.trace
print("V8 log: %s, %s.ll" % (options.log, options.log))
print("Perf trace file: %s" % options.trace)
V8_GC_FAKE_MMAP = options.gc_fake_mmap
HOST_ROOT = options.host_root
......@@ -886,7 +889,7 @@ if __name__ == "__main__":
disasm.OBJDUMP_BIN = options.objdump
OBJDUMP_BIN = options.objdump
else:
print "Cannot find %s, falling back to default objdump" % options.objdump
print("Cannot find %s, falling back to default objdump" % options.objdump)
# Stats.
events = 0
......@@ -904,8 +907,8 @@ if __name__ == "__main__":
log_reader = LogReader(log_name=options.log + ".ll",
code_map=code_map)
if not options.quiet:
print "Generated code architecture: %s" % log_reader.arch
print
print("Generated code architecture: %s" % log_reader.arch)
print()
sys.stdout.flush()
# Process the code and trace logs.
......@@ -968,11 +971,11 @@ if __name__ == "__main__":
def PrintTicks(number, total, description):
  """Print one stats line: tick count, share of total in percent, label."""
  share = 100.0 * number / total
  print("%10d %5.1f%% ticks in %s" % (number, share, description))
print
print "Stats:"
print "%10d total trace events" % events
print "%10d total ticks" % ticks
print "%10d ticks not in symbols" % missed_ticks
print()
print("Stats:")
print("%10d total trace events" % events)
print("%10d total ticks" % ticks)
print("%10d ticks not in symbols" % missed_ticks)
unaccounted = "unaccounted ticks"
if really_missed_ticks > 0:
unaccounted += " (probably in the kernel, try --kernel)"
......@@ -980,10 +983,10 @@ if __name__ == "__main__":
PrintTicks(optimized_ticks, ticks, "ticks in optimized code")
PrintTicks(generated_ticks, ticks, "ticks in other lazily compiled code")
PrintTicks(v8_internal_ticks, ticks, "ticks in v8::internal::*")
print "%10d total symbols" % len([c for c in code_map.AllCode()])
print "%10d used symbols" % len([c for c in code_map.UsedCode()])
print "%9.2fs library processing time" % mmap_time
print "%9.2fs tick processing time" % sample_time
print("%10d total symbols" % len([c for c in code_map.AllCode()]))
print("%10d used symbols" % len([c for c in code_map.UsedCode()]))
print("%9.2fs library processing time" % mmap_time)
print("%9.2fs tick processing time" % sample_time)
log_reader.Dispose()
trace_reader.Dispose()
......@@ -5,6 +5,9 @@
# Load this file by adding this to your ~/.lldbinit:
# command script import <this_dir>/lldb_commands.py
# for py2/py3 compatibility
from __future__ import print_function
import lldb
import re
......
#!/usr/bin/env python3
#!/usr/bin/env python
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
......@@ -7,6 +8,9 @@
Consult --help for more information.
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import json
import os
......
......@@ -10,6 +10,7 @@ MB is a wrapper script for GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
......@@ -36,6 +37,12 @@ sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path
import gn_helpers
# Python 3 removed the cmp() builtin; probe for it and supply a drop-in
# replacement when it is missing. Python 2 keeps its builtin untouched.
try:
  cmp  # Probe: succeeds on Python 2.
except NameError:  # Python 3
  def cmp(lhs, rhs):  # pylint: disable=redefined-builtin
    """Return -1, 0 or 1 as lhs is less than, equal to, or greater than rhs."""
    return (lhs > rhs) - (lhs < rhs)
def main(args):
mbw = MetaBuildWrapper()
......@@ -1155,7 +1162,7 @@ class MetaBuildWrapper(object):
def MaybeMakeDirectory(self, path):
  """Create `path` (including parents) if it does not exist yet.

  A concurrent creator racing us is fine; only errors other than
  "already exists" are propagated.
  """
  try:
    os.makedirs(path)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise
......
......@@ -284,7 +284,7 @@ class UnitTest(unittest.TestCase):
self.assertEqual(['all', 'foo_unittests'], out['compile_targets'])
def test_analyze_handles_way_too_many_results(self):
too_many_files = ', '.join(['"//foo:foo%d"' % i for i in xrange(4 * 1024)])
too_many_files = ', '.join(['"//foo:foo%d"' % i for i in range(4 * 1024)])
files = {'/tmp/in.json': '''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
......
......@@ -9,6 +9,9 @@ Use this script to fetch all dependencies for V8 to run build_gn.py.
Usage: fetch_deps.py <v8-path>
"""
# for py2/py3 compatibility
from __future__ import print_function
import os
import subprocess
import sys
......@@ -52,9 +55,9 @@ def EnsureGit(v8_path):
expected_git_dir = os.path.join(v8_path, ".git")
actual_git_dir = git("rev-parse --absolute-git-dir")
if expected_git_dir == actual_git_dir:
print "V8 is tracked stand-alone by git."
print("V8 is tracked stand-alone by git.")
return False
print "Initializing temporary git repository in v8."
print("Initializing temporary git repository in v8.")
git("init")
git("config user.name \"Ada Lovelace\"")
git("config user.email ada@lovela.ce")
......@@ -71,7 +74,7 @@ def FetchDeps(v8_path):
temporary_git = EnsureGit(v8_path)
try:
print "Fetching dependencies."
print("Fetching dependencies.")
env = os.environ.copy()
# gclient needs to have depot_tools in the PATH.
env["PATH"] = depot_tools + os.pathsep + env["PATH"]
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import os
import pipes
import shutil
......@@ -23,7 +26,7 @@ def EnsureDepotTools(v8_path, fetch_if_not_exist):
except:
pass
if fetch_if_not_exist:
print "Checking out depot_tools."
print("Checking out depot_tools.")
# shell=True needed on Windows to resolve git.bat.
subprocess.check_call("git clone {} {}".format(
pipes.quote(DEPOT_TOOLS_URL),
......@@ -36,14 +39,14 @@ def EnsureDepotTools(v8_path, fetch_if_not_exist):
return None
depot_tools = _Get(v8_path)
assert depot_tools is not None
print "Using depot tools in %s" % depot_tools
print("Using depot tools in %s" % depot_tools)
return depot_tools
def UninitGit(v8_path):
print "Uninitializing temporary git repository"
print("Uninitializing temporary git repository")
target = os.path.join(v8_path, ".git")
if os.path.isdir(target):
print ">> Cleaning up %s" % target
print(">> Cleaning up %s" % target)
def OnRmError(func, path, exec_info):
# This might happen on Windows
os.chmod(path, stat.S_IWRITE)
......
......@@ -23,6 +23,9 @@ Optional flags:
--with-patch Also include currently staged files in the V8 checkout.
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import shutil
......@@ -57,7 +60,7 @@ FILES_TO_KEEP = [ "gypfiles" ]
def RunGclient(path):
assert os.path.isdir(path)
print ">> Running gclient sync"
print(">> Running gclient sync")
subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path)
def CommitPatch(options):
......@@ -67,7 +70,7 @@ def CommitPatch(options):
the fake git clone fetch it into node.js. We can leave the commit, as
bot_update will ensure a clean state on each run.
"""
print ">> Committing patch"
print(">> Committing patch")
subprocess.check_call(
["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
"commit", "--allow-empty", "-m", "placeholder-commit"],
......@@ -77,8 +80,8 @@ def CommitPatch(options):
def UpdateTarget(repository, options, files_to_keep):
source = os.path.join(options.v8_path, *repository)
target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
print ">> Updating target directory %s" % target
print ">> from active branch at %s" % source
print(">> Updating target directory %s" % target)
print(">> from active branch at %s" % source)
if not os.path.exists(target):
os.makedirs(target)
# Remove possible remnants of previous incomplete runs.
......@@ -111,17 +114,17 @@ def UpdateTarget(repository, options, files_to_keep):
def UpdateGitIgnore(options):
file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore")
assert os.path.isfile(file_name)
print ">> Updating .gitignore with lines"
print(">> Updating .gitignore with lines")
with open(file_name) as gitignore:
content = gitignore.readlines()
content = [x.strip() for x in content]
for x in DELETE_FROM_GITIGNORE:
if x in content:
print "- %s" % x
print("- %s" % x)
content.remove(x)
for x in ADD_TO_GITIGNORE:
if x not in content:
print "+ %s" % x
print("+ %s" % x)
content.append(x)
content.sort(key=lambda x: x[1:] if x.startswith("!") else x)
with open(file_name, "w") as gitignore:
......@@ -129,7 +132,7 @@ def UpdateGitIgnore(options):
gitignore.write("%s\n" % x)
def CreateCommit(options):
print ">> Creating commit."
print(">> Creating commit.")
# Find git hash from source.
githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
cwd=options.v8_path).strip()
......
......@@ -11,6 +11,9 @@ Examples:
%prog -t "x64 results" ../result.json master.json -o results.html
'''
# for py2/py3 compatibility
from __future__ import print_function
from collections import OrderedDict
import json
import math
......@@ -418,7 +421,7 @@ def Render(args):
run_names[run_name] = 0
for error in data["errors"]:
print "Error:", error
print("Error:", error)
for trace in data["traces"]:
suite_name = trace["graphs"][0]
......
......@@ -14,11 +14,16 @@ The command is run up to three times and the printed allocation hash is
compared. Differences are reported as errors.
"""
# for py2/py3 compatibility
from __future__ import print_function
import sys
from testrunner.local import command
from testrunner.local import utils
MAX_TRIES = 3
TIMEOUT = 120
......@@ -36,19 +41,19 @@ def main(args):
previous_allocations = None
for run in range(1, MAX_TRIES + 1):
print '### Predictable run #%d' % run
print('### Predictable run #%d' % run)
output = cmd.execute()
if output.stdout:
print '### Stdout:'
print output.stdout
print('### Stdout:')
print(output.stdout)
if output.stderr:
print '### Stderr:'
print output.stderr
print '### Return code: %s' % output.exit_code
print('### Stderr:')
print(output.stderr)
print('### Return code: %s' % output.exit_code)
if output.HasTimedOut():
# If we get a timeout in any run, we are in an unpredictable state. Just
# report it as a failure and don't rerun.
print '### Test timed out'
print('### Test timed out')
return 1
allocations = allocation_str(output.stdout)
if not allocations:
......@@ -57,7 +62,7 @@ def main(args):
'--verify-predictable is passed at the cmd line.')
return 2
if previous_allocations and previous_allocations != allocations:
print '### Allocations differ'
print('### Allocations differ')
return 3
if run >= MAX_TRIES:
# No difference on the last run -> report a success.
......
......@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import json
import os
......@@ -63,7 +66,7 @@ class LastReleaseBailout(Step):
format="%H", git_hash="%s..%s" % (last_release, self["candidate"]))
if not commits:
print "Already pushed current candidate %s" % self["candidate"]
print("Already pushed current candidate %s" % self["candidate"])
return True
......@@ -71,7 +74,7 @@ class CreateRelease(Step):
MESSAGE = "Creating release if specified."
def RunStep(self):
print "Creating release for %s." % self["candidate"]
print("Creating release for %s." % self["candidate"])
args = [
"--author", self._options.author,
......@@ -96,7 +99,7 @@ class AutoPush(ScriptsBase):
def _ProcessOptions(self, options):
if not options.author or not options.reviewer: # pragma: no cover
print "You need to specify author and reviewer."
print("You need to specify author and reviewer.")
return False
options.requires_editor = False
return True
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import sys
......@@ -160,9 +163,9 @@ class UploadCL(Step):
cq=self._options.use_commit_queue,
cq_dry_run=self._options.use_dry_run,
cwd=cwd)
print "CL uploaded."
print("CL uploaded.")
else:
print "Dry run - don't upload."
print("Dry run - don't upload.")
self.GitCheckout("master", cwd=cwd)
self.GitDeleteBranch("work-branch", cwd=cwd)
......@@ -205,7 +208,7 @@ class AutoRoll(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
if not options.author or not options.reviewer:
print "A reviewer (-r) and an author (-a) are required."
print("A reviewer (-r) and an author (-a) are required.")
return False
options.requires_editor = False
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import sys
......@@ -15,7 +18,7 @@ class Preparation(Step):
def RunStep(self):
# TODO(machenbach): Remove after the git switch.
if self.Config("PERSISTFILE_BASENAME") == "/tmp/v8-auto-tag-tempfile":
print "This script is disabled until after the v8 git migration."
print("This script is disabled until after the v8 git migration.")
return True
self.CommonPrepare()
......@@ -80,7 +83,7 @@ class GetOldestUntaggedVersion(Step):
self["candidate_version"] = version
if not self["candidate"] or not self["candidate_version"]:
print "Nothing found to tag."
print("Nothing found to tag.")
self.CommonCleanup()
return True
......@@ -120,18 +123,18 @@ class CalculateTagRevision(Step):
# Don't include the version change commit itself if there is no upper
# limit yet.
candidate_svn = str(int(candidate_svn) + 1)
next_svn = sys.maxint
next_svn = sys.maxsize
lkgr_svn = self.LastLKGR(candidate_svn, next_svn)
if not lkgr_svn:
print "There is no lkgr since the candidate version yet."
print("There is no lkgr since the candidate version yet.")
self.CommonCleanup()
return True
# Let's check if the lkgr is at least three hours old.
self["lkgr"] = self.vc.SvnGit(lkgr_svn)
if not self["lkgr"]:
print "Couldn't find git hash for lkgr %s" % lkgr_svn
print("Couldn't find git hash for lkgr %s" % lkgr_svn)
self.CommonCleanup()
return True
......@@ -139,11 +142,11 @@ class CalculateTagRevision(Step):
current_utc_time = self._side_effect_handler.GetUTCStamp()
if current_utc_time < lkgr_utc_time + 10800:
print "Candidate lkgr %s is too recent for tagging." % lkgr_svn
print("Candidate lkgr %s is too recent for tagging." % lkgr_svn)
self.CommonCleanup()
return True
print "Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"])
print("Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"]))
class MakeTag(Step):
......@@ -172,7 +175,7 @@ class AutoTag(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
if not options.dry_run and not options.author:
print "Specify your chromium.org email with -a"
print("Specify your chromium.org email with -a")
return False
options.wait_for_lgtm = False
options.force_readline_defaults = True
......
......@@ -13,6 +13,8 @@ written to public logs. Public automated callers of this script should
suppress stdout and stderr and only process contents of the results_file.
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import httplib
......@@ -222,7 +224,7 @@ def Main():
with open(options.results_file, "w") as f:
f.write(json.dumps(results))
else:
print results
print(results)
if __name__ == "__main__":
......
......@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import datetime
import httplib
......@@ -199,8 +202,8 @@ def Command(cmd, args="", prefix="", pipe=True, cwd=None):
cwd = cwd or os.getcwd()
# TODO(machenbach): Use timeout.
cmd_line = "%s %s %s" % (prefix, cmd, args)
print "Command: %s" % cmd_line
print "in %s" % cwd
print("Command: %s" % cmd_line)
print("in %s" % cwd)
sys.stdout.flush()
try:
if pipe:
......@@ -272,8 +275,8 @@ class SideEffectHandler(object): # pragma: no cover
try:
return json.loads(data)
except:
print data
print "ERROR: Could not read response. Is your key valid?"
print(data)
print("ERROR: Could not read response. Is your key valid?")
raise
def Sleep(self, seconds):
......@@ -448,7 +451,7 @@ class Step(GitRecipesMixin):
if not self._state and os.path.exists(state_file):
self._state.update(json.loads(FileToText(state_file)))
print ">>> Step %d: %s" % (self._number, self._text)
print(">>> Step %d: %s" % (self._number, self._text))
try:
return self.RunStep()
finally:
......@@ -484,16 +487,16 @@ class Step(GitRecipesMixin):
raise Exception("Retried too often. Giving up. Reason: %s" %
str(got_exception))
wait_time = wait_plan.pop()
print "Waiting for %f seconds." % wait_time
print("Waiting for %f seconds." % wait_time)
self._side_effect_handler.Sleep(wait_time)
print "Retrying..."
print("Retrying...")
else:
return result
def ReadLine(self, default=None):
# Don't prompt in forced mode.
if self._options.force_readline_defaults and default is not None:
print "%s (forced)" % default
print("%s (forced)" % default)
return default
else:
return self._side_effect_handler.ReadLine()
......@@ -529,8 +532,8 @@ class Step(GitRecipesMixin):
def Die(self, msg=""):
  """Print an error message (if any) and abort the script via an exception."""
  if msg != "":
    print("Error: %s" % msg)
  print("Exiting")
  raise Exception(msg)
def DieNoManualMode(self, msg=""):
......@@ -539,7 +542,7 @@ class Step(GitRecipesMixin):
self.Die(msg)
def Confirm(self, msg):
  """Ask the user a yes/no question; an empty reply counts as yes.

  Returns True for "", "Y" or "y"; False for anything else.
  """
  # end=' ' keeps the prompt and the typed answer on the same line (the
  # py3 replacement for the trailing comma of a py2 print statement).
  print("%s [Y/n] " % msg, end=' ')
  answer = self.ReadLine(default="Y")
  return answer == "" or answer == "Y" or answer == "y"
......@@ -549,7 +552,7 @@ class Step(GitRecipesMixin):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
self.GitDeleteBranch(name, cwd=cwd)
print "Branch %s deleted." % name
print("Branch %s deleted." % name)
else:
msg = "Can't continue. Please delete branch %s and try again." % name
self.Die(msg)
......@@ -612,10 +615,10 @@ class Step(GitRecipesMixin):
"change the headline of the uploaded CL.")
answer = ""
while answer != "LGTM":
print "> ",
print("> ", end=' ')
answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
if answer != "LGTM":
print "That was not 'LGTM'."
print("That was not 'LGTM'.")
def WaitForResolvingConflicts(self, patch_file):
print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
......@@ -627,8 +630,8 @@ class Step(GitRecipesMixin):
if answer == "ABORT":
self.Die("Applying the patch failed.")
if answer != "":
print "That was not 'RESOLVED' or 'ABORT'."
print "> ",
print("That was not 'RESOLVED' or 'ABORT'.")
print("> ", end=' ')
answer = self.ReadLine()
# Takes a file containing the patch to apply as first argument.
......@@ -769,16 +772,18 @@ class UploadStep(Step):
def RunStep(self):
reviewer = None
if self._options.reviewer:
print "Using account %s for review." % self._options.reviewer
print("Using account %s for review." % self._options.reviewer)
reviewer = self._options.reviewer
tbr_reviewer = None
if self._options.tbr_reviewer:
print "Using account %s for TBR review." % self._options.tbr_reviewer
print("Using account %s for TBR review." % self._options.tbr_reviewer)
tbr_reviewer = self._options.tbr_reviewer
if not reviewer and not tbr_reviewer:
print "Please enter the email address of a V8 reviewer for your patch: ",
print(
"Please enter the email address of a V8 reviewer for your patch: ",
end=' ')
self.DieNoManualMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
......@@ -854,7 +859,7 @@ class ScriptsBase(object):
# Process common options.
if options.step < 0: # pragma: no cover
print "Bad step number %d" % options.step
print("Bad step number %d" % options.step)
parser.print_help()
return None
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import sys
......@@ -27,7 +30,7 @@ class PrepareBranchRevision(Step):
self["push_hash"] = (self._options.revision or
self.GitLog(n=1, format="%H", branch="origin/master"))
assert self["push_hash"]
print "Release revision %s" % self["push_hash"]
print("Release revision %s" % self["push_hash"])
class IncrementVersion(Step):
......@@ -138,7 +141,7 @@ class PushBranchRef(Step):
def RunStep(self):
cmd = "push origin %s:refs/heads/%s" % (self["push_hash"], self["version"])
if self._options.dry_run:
print "Dry run. Command:\ngit %s" % cmd
print("Dry run. Command:\ngit %s" % cmd)
else:
self.Git(cmd)
......@@ -216,7 +219,7 @@ class LandBranch(Step):
def RunStep(self):
if self._options.dry_run:
print "Dry run - upload CL."
print("Dry run - upload CL.")
else:
self.GitUpload(force=True,
bypass_hooks=True,
......@@ -224,7 +227,7 @@ class LandBranch(Step):
message_file=self.Config("COMMITMSG_FILE"))
cmd = "cl land --bypass-hooks -f"
if self._options.dry_run:
print "Dry run. Command:\ngit %s" % cmd
print("Dry run. Command:\ngit %s" % cmd)
else:
self.Git(cmd)
......@@ -270,7 +273,7 @@ class CreateRelease(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
if not options.author or not options.reviewer:
print "Reviewer (-r) and author (-a) are required."
print("Reviewer (-r) and author (-a) are required.")
return False
return True
......
......@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
from collections import OrderedDict
import sys
......@@ -186,10 +189,10 @@ class CleanUp(Step):
def RunStep(self):
self.CommonCleanup()
print "*** SUMMARY ***"
print "branch: %s" % self["merge_to_branch"]
print("*** SUMMARY ***")
print("branch: %s" % self["merge_to_branch"])
if self["revision_list"]:
print "patches: %s" % self["revision_list"]
print("patches: %s" % self["revision_list"])
class MergeToBranch(ScriptsBase):
......@@ -215,10 +218,10 @@ class MergeToBranch(ScriptsBase):
def _ProcessOptions(self, options):
if len(options.revisions) < 1:
if not options.patch:
print "Either a patch file or revision numbers must be specified"
print("Either a patch file or revision numbers must be specified")
return False
if not options.message:
print "You must specify a merge comment if no patches are specified"
print("You must specify a merge comment if no patches are specified")
return False
options.bypass_upload_hooks = True
# CC ulan to make sure that fixes are merged to Google3.
......@@ -233,8 +236,8 @@ class MergeToBranch(ScriptsBase):
for revision in options.revisions:
if (IsSvnNumber(revision) or
(revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
print "Please provide full git hashes of the patches to merge."
print "Got: %s" % revision
print("Please provide full git hashes of the patches to merge.")
print("Got: %s" % revision)
return False
return True
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import sys
......@@ -77,32 +80,32 @@ def get_first_v8_version(branches):
return version
def print_analysis(git_working_dir, hash_to_search):
  """Print a human-readable report about a commit: the commit itself,
  branch/release information, follow-ups/reverts/ports, and merges.

  Relies on sibling helpers in this file (describe_commit,
  get_branches_for_commit, is_lkgr, get_first_canary,
  get_first_v8_version, get_followup_commits, get_merge_commits,
  git_execute).
  """
  print('1.) Searching for "' + hash_to_search + '"')
  print('=====================ORIGINAL COMMIT START===================')
  print(describe_commit(git_working_dir, hash_to_search))
  print('=====================ORIGINAL COMMIT END=====================')
  print('2.) General information:')
  branches = get_branches_for_commit(git_working_dir, hash_to_search)
  print('Is LKGR: ' + str(is_lkgr(branches)))
  print('Is on Canary: ' + str(get_first_canary(branches)))
  print('First V8 branch: ' + str(get_first_v8_version(branches)) +
        ' (Might not be the rolled version)')
  print('3.) Found follow-up commits, reverts and ports:')
  followups = get_followup_commits(git_working_dir, hash_to_search)
  for followup in followups:
    print(describe_commit(git_working_dir, followup, True))

  print('4.) Found merges:')
  merges = get_merge_commits(git_working_dir, hash_to_search)
  for currentMerge in merges:
    print(describe_commit(git_working_dir, currentMerge, True))
    print('---Merged to:')
    mergeOutput = git_execute(git_working_dir, ['branch',
                                                '--contains',
                                                currentMerge,
                                                '-r']).strip()
    print(mergeOutput)
  print('Finished successfully')
if __name__ == '__main__': # pragma: no cover
parser = argparse.ArgumentParser('Tool to check where a git commit was'
......
......@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import sys
......@@ -46,7 +49,7 @@ class Preparation(Step):
if(self["current_branch"] == self.Config("CANDIDATESBRANCH")
or self["current_branch"] == self.Config("BRANCHNAME")):
print "Warning: Script started on branch %s" % self["current_branch"]
print("Warning: Script started on branch %s" % self["current_branch"])
self.PrepareBranch()
self.DeleteBranch(self.Config("CANDIDATESBRANCH"))
......@@ -347,10 +350,10 @@ class PushToCandidates(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
if not options.manual and not options.reviewer:
print "A reviewer (-r) is required in (semi-)automatic mode."
print("A reviewer (-r) is required in (semi-)automatic mode.")
return False
if not options.manual and not options.author:
print "Specify your chromium.org email with -a in (semi-)automatic mode."
print("Specify your chromium.org email with -a in (semi-)automatic mode.")
return False
options.tbr_commit = not options.manual
......
......@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
from collections import OrderedDict
import sys
......@@ -202,7 +205,7 @@ class TagRevision(Step):
MESSAGE = "Create the tag."
def RunStep(self):
print "Creating tag %s" % self["version"]
print("Creating tag %s" % self["version"])
self.vc.Tag(self["version"],
self.vc.RemoteBranch(self["merge_to_branch"]),
self["commit_title"])
......@@ -213,11 +216,11 @@ class CleanUp(Step):
def RunStep(self):
self.CommonCleanup()
print "*** SUMMARY ***"
print "version: %s" % self["version"]
print "branch: %s" % self["merge_to_branch"]
print("*** SUMMARY ***")
print("version: %s" % self["version"])
print("branch: %s" % self["merge_to_branch"])
if self["revision_list"]:
print "patches: %s" % self["revision_list"]
print("patches: %s" % self["revision_list"])
class RollMerge(ScriptsBase):
......@@ -241,10 +244,10 @@ class RollMerge(ScriptsBase):
def _ProcessOptions(self, options):
if len(options.revisions) < 1:
if not options.patch:
print "Either a patch file or revision numbers must be specified"
print("Either a patch file or revision numbers must be specified")
return False
if not options.message:
print "You must specify a merge comment if no patches are specified"
print("You must specify a merge comment if no patches are specified")
return False
options.bypass_upload_hooks = True
# CC ulan to make sure that fixes are merged to Google3.
......@@ -254,8 +257,8 @@ class RollMerge(ScriptsBase):
for revision in options.revisions:
if (IsSvnNumber(revision) or
(revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
print "Please provide full git hashes of the patches to merge."
print "Got: %s" % revision
print("Please provide full git hashes of the patches to merge.")
print("Got: %s" % revision)
return False
return True
......
......@@ -29,6 +29,9 @@
# Wraps test execution with a coverage analysis. To get the best speed, the
# native python coverage version >= 3.7.1 should be installed.
# for py2/py3 compatibility
from __future__ import print_function
import coverage
import os
import unittest
......@@ -46,7 +49,7 @@ def Main(argv):
])
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(alltests))
cov.stop()
print cov.report()
print(cov.report())
if __name__ == '__main__':
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import operator
import os
......@@ -17,7 +20,7 @@ def search_all_related_commits(
all_commits_raw = _find_commits_inbetween(
start_hash, until, git_working_dir, verbose)
if verbose:
print "All commits between <of> and <until>: " + all_commits_raw
print("All commits between <of> and <until>: " + all_commits_raw)
# Adding start hash too
all_commits = [start_hash]
......@@ -61,7 +64,7 @@ def _search_related_commits(
commit_position = matches.group(2)
if verbose:
print "1.) Commit position to look for: " + commit_position
print("1.) Commit position to look for: " + commit_position)
search_range = start_hash + ".." + until
......@@ -78,13 +81,13 @@ def _search_related_commits(
git_working_dir, git_args(start_hash), verbose).strip()
if verbose:
print "2.) Found by hash: " + found_by_hash
print("2.) Found by hash: " + found_by_hash)
found_by_commit_pos = git_execute(
git_working_dir, git_args(commit_position), verbose).strip()
if verbose:
print "3.) Found by commit position: " + found_by_commit_pos
print("3.) Found by commit position: " + found_by_commit_pos)
# Replace brackets or else they are wrongly interpreted by --grep
title = title.replace("[", "\\[")
......@@ -94,7 +97,7 @@ def _search_related_commits(
git_working_dir, git_args(title), verbose).strip()
if verbose:
print "4.) Found by title: " + found_by_title
print("4.) Found by title: " + found_by_title)
hits = (
_convert_to_array(found_by_hash) +
......@@ -132,8 +135,8 @@ def _remove_duplicates(array):
def git_execute(working_dir, args, verbose=False):
command = ["git", "-C", working_dir] + args
if verbose:
print "Git working dir: " + working_dir
print "Executing git command:" + str(command)
print("Git working dir: " + working_dir)
print("Executing git command:" + str(command))
p = Popen(args=command, stdin=PIPE,
stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
......@@ -141,7 +144,7 @@ def git_execute(working_dir, args, verbose=False):
if rc != 0:
raise Exception(err)
if verbose:
print "Git return value: " + output
print("Git return value: " + output)
return output
def _pretty_print_entry(hash, git_dir, pre_text, verbose):
......@@ -215,4 +218,4 @@ if __name__ == "__main__": # pragma: no cover
args = sys.argv[1:]
options = parser.parse_args(args)
for current_line in main(options):
print current_line
print(current_line)
......@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import os
import shutil
import tempfile
......@@ -383,11 +386,11 @@ class ScriptTest(unittest.TestCase):
return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
def Call(self, fun, *args, **kwargs):
print "Calling %s with %s and %s" % (str(fun), str(args), str(kwargs))
print("Calling %s with %s and %s" % (str(fun), str(args), str(kwargs)))
def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
print "%s %s" % (cmd, args)
print "in %s" % cwd
print("%s %s" % (cmd, args))
print("in %s" % cwd)
return self._mock.Call("command", cmd + " " + args, cwd=cwd)
def ReadLine(self):
......
......@@ -4,6 +4,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import json
import multiprocessing
import optparse
......@@ -376,29 +379,29 @@ def main():
options.build_folder = DetectBuildFolder()
if not CheckClangTidy():
print 'Could not find clang-tidy'
print('Could not find clang-tidy')
elif options.build_folder is None or not os.path.isdir(options.build_folder):
print 'Please provide a build folder with -b'
print('Please provide a build folder with -b')
elif options.gen_compdb:
GenerateCompileCommands(options.build_folder)
elif not CheckCompDB(options.build_folder):
print 'Could not find compilation database, ' \
'please generate it with --gen-compdb'
print('Could not find compilation database, ' \
'please generate it with --gen-compdb')
else:
print 'Using build folder:', options.build_folder
print('Using build folder:', options.build_folder)
if options.full:
print 'Running clang-tidy - full'
print('Running clang-tidy - full')
ClangTidyRunFull(options.build_folder,
options.no_output_filter,
options.checks,
options.auto_fix)
elif options.aggregate:
print 'Running clang-tidy - aggregating warnings'
print('Running clang-tidy - aggregating warnings')
if options.auto_fix:
print 'Auto fix not working in aggregate mode, running without.'
print('Auto fix not working in aggregate mode, running without.')
ClangTidyRunAggregate(options.build_folder, options.show_loc)
elif options.single:
print 'Running clang-tidy - single on ' + options.file_name
print('Running clang-tidy - single on ' + options.file_name)
if options.file_name is not None:
line_ranges = []
for match in re.findall(r'(\[.*?\])', options.line_ranges):
......@@ -409,9 +412,9 @@ def main():
options.auto_fix,
line_ranges)
else:
print 'Filename provided, please specify a filename with --file'
print('Filename provided, please specify a filename with --file')
else:
print 'Running clang-tidy'
print('Running clang-tidy')
ClangTidyRunDiff(options.build_folder,
options.diff_branch,
options.auto_fix)
......
......@@ -99,6 +99,10 @@ Path pieces are concatenated. D8 is always run with the suite's path as cwd.
The test flags are passed to the js test file after '--'.
"""
# for py2/py3 compatibility
from __future__ import print_function
from functools import reduce
from collections import OrderedDict
import json
import logging
......@@ -114,6 +118,11 @@ from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils
try:
basestring # Python 2
except NameError: # Python 3
basestring = str
ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
"ia32",
......@@ -1074,7 +1083,7 @@ def Main(args):
def Runner():
"""Output generator that reruns several times."""
total_runs = runnable.run_count * options.run_count_multiplier
for i in xrange(0, max(1, total_runs)):
for i in range(0, max(1, total_runs)):
# TODO(machenbach): Allow timeout per arch like with run_count per
# arch.
try:
......@@ -1092,12 +1101,12 @@ def Main(args):
if options.json_test_results:
results.WriteToFile(options.json_test_results)
else: # pragma: no cover
print results
print(results)
if options.json_test_results_secondary:
results_secondary.WriteToFile(options.json_test_results_secondary)
else: # pragma: no cover
print results_secondary
print(results_secondary)
if results.errors or have_failed_tests[0]:
return 1
......
......@@ -39,6 +39,10 @@ directory. It's not checked out by default and must be added as a custom deps:
'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
"""
# for py2/py3 compatibility
from __future__ import print_function
from functools import reduce
import argparse
import json
import logging
......@@ -426,26 +430,26 @@ def main(args=None):
options.build_dir = os.path.abspath(options.build_dir)
if options.action.lower() == 'all':
if not options.json_output:
print '--json-output is required'
print('--json-output is required')
return 1
write_instrumented(options)
elif options.action.lower() == 'merge':
if not options.coverage_dir:
print '--coverage-dir is required'
print('--coverage-dir is required')
return 1
if not options.json_input:
print '--json-input is required'
print('--json-input is required')
return 1
if not options.json_output:
print '--json-output is required'
print('--json-output is required')
return 1
merge(options)
elif options.action.lower() == 'split':
if not options.json_input:
print '--json-input is required'
print('--json-input is required')
return 1
if not options.output_dir:
print '--output-dir is required'
print('--output-dir is required')
return 1
split(options)
return 0
......
......@@ -106,7 +106,7 @@ def generate_inputs(keep, coverage_dir, file_map, cpus):
n = max(2, int(math.ceil(len(files) / float(cpus))))
# Chop files into buckets.
buckets = [files[i:i+n] for i in xrange(0, len(files), n)]
buckets = [files[i:i+n] for i in range(0, len(files), n)]
# Inputs for multiprocessing. List of tuples containing:
# Keep-files option, base path, executable name, index of bucket,
......
......@@ -5,7 +5,10 @@
"""Corrects objdump output. The logic is from sancov.py, see comments there."""
import sys;
# for py2/py3 compatibility
from __future__ import print_function
import sys
for line in sys.stdin:
print '0x%x' % (int(line.strip(), 16) + 4)
print('0x%x' % (int(line.strip(), 16) + 4))
......@@ -34,6 +34,9 @@ The stats viewer reads counters from a binary file and displays them
in a window, re-reading and re-displaying with regular intervals.
"""
# for py2/py3 compatibility
from __future__ import print_function
import mmap
import optparse
import os
......@@ -100,7 +103,7 @@ class StatsViewer(object):
if not os.path.exists(self.data_name):
maps_name = "/proc/%s/maps" % self.data_name
if not os.path.exists(maps_name):
print "\"%s\" is neither a counter file nor a PID." % self.data_name
print("\"%s\" is neither a counter file nor a PID." % self.data_name)
sys.exit(1)
maps_file = open(maps_name, "r")
try:
......@@ -110,7 +113,7 @@ class StatsViewer(object):
self.data_name = m.group(0)
break
if self.data_name is None:
print "Can't find counter file in maps for PID %s." % self.data_name
print("Can't find counter file in maps for PID %s." % self.data_name)
sys.exit(1)
finally:
maps_file.close()
......@@ -123,7 +126,7 @@ class StatsViewer(object):
return CounterCollection(data_access)
elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
return ChromeCounterCollection(data_access)
print "File %s is not stats data." % self.data_name
print("File %s is not stats data." % self.data_name)
sys.exit(1)
def CleanUp(self):
......@@ -143,7 +146,7 @@ class StatsViewer(object):
self.RefreshCounters()
changed = True
else:
for i in xrange(self.data.CountersInUse()):
for i in range(self.data.CountersInUse()):
counter = self.data.Counter(i)
name = counter.Name()
if name in self.ui_counters:
......@@ -188,7 +191,7 @@ class StatsViewer(object):
sorted by prefix.
"""
names = {}
for i in xrange(self.data.CountersInUse()):
for i in range(self.data.CountersInUse()):
counter = self.data.Counter(i)
name = counter.Name()
names[name] = counter
......@@ -233,7 +236,7 @@ class StatsViewer(object):
text=counter_name)
name.grid(row=index, column=0, padx=1, pady=1)
count = len(counter_objs)
for i in xrange(count):
for i in range(count):
counter = counter_objs[i]
name = counter.Name()
var = Tkinter.StringVar()
......@@ -435,7 +438,7 @@ class ChromeCounterCollection(object):
def CountersInUse(self):
"""Return the number of counters in active use."""
for i in xrange(self.max_counters):
for i in range(self.max_counters):
name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
if self.data.ByteAt(name_offset) == 0:
return i
......
......@@ -6,6 +6,9 @@
"""This program either generates the parser files for Torque, generating
the source and header files directly in V8's src directory."""
# for py2/py3 compatibility
from __future__ import print_function
import subprocess
import sys
import re
......@@ -96,13 +99,13 @@ def process(filename, lint, should_format):
output, err = p.communicate(preprocess(content))
output = postprocess(output)
rc = p.returncode
if (rc <> 0):
print "error code " + str(rc) + " running clang-format. Exiting..."
if (rc != 0):
print("error code " + str(rc) + " running clang-format. Exiting...")
sys.exit(rc);
if lint:
if (output != original_input):
print >>sys.stderr, filename + ' requires formatting'
print(filename + ' requires formatting', file=sys.stderr)
if should_format:
output_file = open(filename, 'w')
......@@ -110,14 +113,14 @@ def process(filename, lint, should_format):
output_file.close()
def print_usage():
print 'format-torque -i file1[, file2[, ...]]'
print ' format and overwrite input files'
print 'format-torque -l file1[, file2[, ...]]'
print ' merely indicate which files need formatting'
print('format-torque -i file1[, file2[, ...]]')
print(' format and overwrite input files')
print('format-torque -l file1[, file2[, ...]]')
print(' merely indicate which files need formatting')
def Main():
if len(sys.argv) < 3:
print "error: at least 2 arguments required"
print("error: at least 2 arguments required")
print_usage();
sys.exit(-1)
......@@ -137,7 +140,7 @@ def Main():
lint = True
should_format = True
else:
print "error: -i and/or -l flags must be specified"
print("error: -i and/or -l flags must be specified")
print_usage();
sys.exit(-1);
......
......@@ -23,7 +23,7 @@ result = subprocess.call(cargs)
os.chdir(cwd)
def fix_file(filename):
is_header = re.search(r'\.h', filename) <> None;
is_header = re.search(r'\.h', filename) is not None;
header_macro = filename.upper();
header_macro = re.sub('\.', '_', header_macro);
header_macro = "V8_TORQUE_" + header_macro + '_';
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import sys
......@@ -169,4 +172,4 @@ elif action == "count":
reasons_list.append("%8d %s" % (reasons[r], r))
reasons_list.sort(reverse=True)
for r in reasons_list[:20]:
print r
print(r)
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import subprocess
......@@ -67,11 +70,11 @@ def main():
help='Add %s trybot.' % BOTS[option])
options = parser.parse_args()
if not options.bots:
print 'No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS)
print('No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS))
options.bots = DEFAULT_BOTS
if not options.benchmarks:
print 'Please specify the benchmarks to run as arguments.'
print('Please specify the benchmarks to run as arguments.')
return 1
for benchmark in options.benchmarks:
......@@ -79,7 +82,7 @@ def main():
print ('%s not found in our benchmark list. The respective trybot might '
'fail, unless you run something this script isn\'t aware of. '
'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
print 'Proceed anyways? [Y/n] ',
print('Proceed anyways? [Y/n] ', end=' ')
answer = sys.stdin.readline().strip()
if answer != "" and answer != "Y" and answer != "y":
return 1
......@@ -100,7 +103,7 @@ def main():
cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
if options.verbose:
cmd.append('-vv')
print 'Running %s' % ' '.join(cmd)
print('Running %s' % ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
if __name__ == '__main__': # pragma: no cover
......
......@@ -2,6 +2,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import os
import sys
import json
......@@ -25,7 +28,7 @@ def trace_begin():
known_addrs.add(result.group(0))
def trace_end():
print json.dumps(json_obj)
print(json.dumps(json_obj))
def process_event(param_dict):
addr = "0x%x" % int(param_dict['sample']['ip'])
......
......@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
from collections import namedtuple
import coverage
import json
......@@ -103,8 +106,8 @@ class PerfTest(unittest.TestCase):
@classmethod
def tearDownClass(cls):
cls._cov.stop()
print ""
print cls._cov.report()
print("")
print(cls._cov.report())
def setUp(self):
self.maxDiff = None
......
......@@ -17,6 +17,9 @@ with different test suite extensions and build configurations.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
# for py2/py3 compatibility
from __future__ import print_function
import collections
import contextlib
import json
......@@ -126,7 +129,7 @@ class SystemTest(unittest.TestCase):
import coverage
if int(coverage.__version__.split('.')[0]) < 4:
cls._cov = None
print 'Python coverage version >= 4 required.'
print('Python coverage version >= 4 required.')
raise ImportError()
cls._cov = coverage.Coverage(
source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
......@@ -142,7 +145,7 @@ class SystemTest(unittest.TestCase):
cls._cov.exclude('assert False')
cls._cov.start()
except ImportError:
print 'Running without python coverage.'
print('Running without python coverage.')
sys.path.append(TOOLS_ROOT)
global standard_runner
from testrunner import standard_runner
......@@ -157,8 +160,8 @@ class SystemTest(unittest.TestCase):
def tearDownClass(cls):
if cls._cov:
cls._cov.stop()
print ''
print cls._cov.report(show_missing=True)
print('')
print(cls._cov.report(show_missing=True))
def testPass(self):
"""Test running only passing tests in two variants.
......
......@@ -3,5 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
print 'Richards: 1.2'
print 'DeltaBlue: 2.1'
# for py2/py3 compatibility
from __future__ import print_function
print('Richards: 1.2')
print('DeltaBlue: 2.1')
......@@ -3,8 +3,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
print 'Richards1: 1'
print 'DeltaBlue1: 1'
print 'Richards2: 0.2'
print 'DeltaBlue2: 1.0'
print 'DeltaBlue3: 0.1'
# for py2/py3 compatibility
from __future__ import print_function
print('Richards1: 1')
print('DeltaBlue1: 1')
print('Richards2: 0.2')
print('DeltaBlue2: 1.0')
print('DeltaBlue3: 0.1')
......@@ -3,22 +3,25 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import sys
assert len(sys.argv) == 3
if sys.argv[1] == 'equal':
# 1. Scenario: print equal allocation hashes.
print '### Allocations = 9497, hash = 0xc322c6b0'
print('### Allocations = 9497, hash = 0xc322c6b0')
elif sys.argv[1] == 'differ':
# 2. Scenario: print different allocation hashes. This prints a different
# hash on the second run, based on the content of a semaphore file. This
# file is expected to be empty in the beginning.
with open(sys.argv[2]) as f:
if f.read():
print '### Allocations = 9497, hash = 0xc322c6b0'
print('### Allocations = 9497, hash = 0xc322c6b0')
else:
print '### Allocations = 9497, hash = 0xc322c6b1'
print('### Allocations = 9497, hash = 0xc322c6b1')
with open(sys.argv[2], 'w') as f:
f.write('something')
else:
......
......@@ -7,6 +7,9 @@
Fake results processor for testing that just sums some things up.
"""
# for py2/py3 compatibility
from __future__ import print_function
import fileinput
import re
......@@ -21,5 +24,5 @@ for line in fileinput.input():
if match:
deltablue += float(match.group(1))
print 'Richards: %f' % richards
print 'DeltaBlue: %f' % deltablue
print('Richards: %f' % richards)
print('DeltaBlue: %f' % deltablue)
......@@ -6,10 +6,13 @@
Dummy d8 replacement. Just passes all test, except if 'berries' is in args.
"""
# for py2/py3 compatibility
from __future__ import print_function
import sys
args = ' '.join(sys.argv[1:])
print args
print(args)
# Let all berries fail.
if 'berries' in args:
sys.exit(1)
......
......@@ -6,12 +6,15 @@
Dummy d8 replacement for flaky tests.
"""
# for py2/py3 compatibility
from __future__ import print_function
import os
import sys
PATH = os.path.dirname(os.path.abspath(__file__))
print ' '.join(sys.argv[1:])
print(' '.join(sys.argv[1:]))
# Test files ending in 'flakes' should first fail then pass. We store state in
# a file side by side with the executable. No clean-up required as all tests
......
#!/usr/bin/env python3
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
......
......@@ -27,10 +27,14 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
try:
import hashlib
md5er = hashlib.md5
except ImportError, e:
except ImportError as e:
import md5
md5er = md5.new
......@@ -84,7 +88,7 @@ def CppLintWorker(command):
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
if error_count == -1:
print "Failed to process %s" % command.pop()
print("Failed to process %s" % command.pop())
return 1
break
m = LINT_OUTPUT_PATTERN.match(out_line)
......@@ -268,7 +272,7 @@ class CacheableSourceFileProcessor(SourceFileProcessor):
files = cache.FilterUnchangedFiles(files)
if len(files) == 0:
print 'No changes in %s files detected. Skipping check' % self.file_type
print('No changes in %s files detected. Skipping check' % self.file_type)
return True
files_requiring_changes = self.DetectFilesToChange(files)
......@@ -293,7 +297,7 @@ class CacheableSourceFileProcessor(SourceFileProcessor):
try:
results = pool.map_async(worker, commands).get(timeout=240)
except KeyboardInterrupt:
print "\nCaught KeyboardInterrupt, terminating workers."
print("\nCaught KeyboardInterrupt, terminating workers.")
pool.terminate()
pool.join()
sys.exit(1)
......@@ -487,12 +491,12 @@ class SourceProcessor(SourceFileProcessor):
base = basename(name)
if not base in SourceProcessor.IGNORE_TABS:
if '\t' in contents:
print "%s contains tabs" % name
print("%s contains tabs" % name)
result = False
if not base in SourceProcessor.IGNORE_COPYRIGHTS and \
not SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORY in name:
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print "%s is missing a correct copyright header." % name
print("%s is missing a correct copyright header." % name)
result = False
if ' \n' in contents or contents.endswith(' '):
line = 0
......@@ -505,34 +509,34 @@ class SourceProcessor(SourceFileProcessor):
lines.append(str(line))
linenumbers = ', '.join(lines)
if len(lines) > 1:
print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
print("%s has trailing whitespaces in lines %s." % (name, linenumbers))
else:
print "%s has trailing whitespaces in line %s." % (name, linenumbers)
print("%s has trailing whitespaces in line %s." % (name, linenumbers))
result = False
if not contents.endswith('\n') or contents.endswith('\n\n'):
print "%s does not end with a single new line." % name
print("%s does not end with a single new line." % name)
result = False
# Sanitize flags for fuzzer.
if ".js" in name and ("mjsunit" in name or "debugger" in name):
match = FLAGS_LINE.search(contents)
if match:
print "%s Flags should use '-' (not '_')" % name
print("%s Flags should use '-' (not '_')" % name)
result = False
if not "mjsunit/mjsunit.js" in name:
if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
not FLAGS_ENABLE_OPT.search(contents):
print "%s Flag --opt should be set if " \
"assertOptimized() is used" % name
print("%s Flag --opt should be set if " \
"assertOptimized() is used" % name)
result = False
if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
not FLAGS_NO_ALWAYS_OPT.search(contents):
print "%s Flag --no-always-opt should be set if " \
"assertUnoptimized() is used" % name
print("%s Flag --no-always-opt should be set if " \
"assertUnoptimized() is used" % name)
result = False
match = self.runtime_function_call_pattern.search(contents)
if match:
print "%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1))
print("%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1)))
result = False
return result
......@@ -548,7 +552,7 @@ class SourceProcessor(SourceFileProcessor):
violations += 1
finally:
handle.close()
print "Total violating files: %s" % violations
print("Total violating files: %s" % violations)
return success
def _CheckStatusFileForDuplicateKeys(filepath):
......@@ -655,7 +659,7 @@ def PyTests(workspace):
join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
]:
print 'Running ' + script
print('Running ' + script)
result &= subprocess.call(
[sys.executable, script], stdout=subprocess.PIPE) == 0
......@@ -677,22 +681,22 @@ def Main():
parser = GetOptions()
(options, args) = parser.parse_args()
success = True
print "Running checkdeps..."
print("Running checkdeps...")
success &= CheckDeps(workspace)
use_linter_cache = not options.no_linter_cache
if not options.no_lint:
print "Running C++ lint check..."
print("Running C++ lint check...")
success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
print "Running Torque formatting check..."
print("Running Torque formatting check...")
success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
print("Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check...")
success &= SourceProcessor().RunOnPath(workspace)
print "Running status-files check..."
print("Running status-files check...")
success &= StatusFilesProcessor().RunOnPath(workspace)
print "Running python tests..."
print("Running python tests...")
success &= PyTests(workspace)
if success:
return 0
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment