#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from collections import OrderedDict
import itertools
import multiprocessing
import optparse
import os
from os.path import join
import platform
import random
import shlex
import subprocess
import sys
import time

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local.testsuite import VARIANT_FLAGS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context


# Architecture detected from the host; used when --arch is 'auto'/'native'.
ARCH_GUESS = utils.DefaultArch()

# Suites executed when no test names are given on the command line.
# ProcessOptions() may append "intl" unless --no-i18n is set.
DEFAULT_TESTS = [
  "mjsunit",
  "unittests",
  "cctest",
  "message",
  "preparser",
]

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
  "default": [
    "mjsunit",
    "cctest",
    "message",
    "preparser",
  ],
  "optimize_for_size": [
    "mjsunit",
    "cctest",
    "webkit",
  ],
  "unittests": [
    "unittests",
  ],
}
# Base per-test timeout in seconds; Execute() scales it by architecture
# (SLOW_ARCHS) and by the mode's timeout_scalefactor.
TIMEOUT_DEFAULT = 60

# Testing variants run by default; overridden by --variants, --no-variants,
# --no-stress, --stress-only, --quickcheck, --tsan and --predictable.
VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
# V8 flags appended to every test command in debug-like modes.
DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
               "--nofold-constants", "--enable-slow-asserts",
               "--debug-code", "--verify-heap"]
# V8 flags appended to every test command in release-like modes.
RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants"]

# Per-mode configuration:
#   flags:               extra V8 flags for every test,
#   timeout_scalefactor: multiplier applied to the base timeout,
#   status_mode:         mode name used when evaluating status files,
#   execution_mode:      mode name handed to the execution context,
#   output_folder:       build output directory suffix.
MODES = {
  "debug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "debug",
  },
  "optdebug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "optdebug",
  },
  "release": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "release",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "tryrelease": {
    "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
    "timeout_scalefactor": 2,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
}

# Extra V8 flags enabled by --gc-stress.
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]

# All architectures accepted by --arch.
SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "arm",
                   "ia32",
                   "x87",
                   "mips",
                   "mipsel",
                   "mips64el",
                   "nacl_ia32",
                   "nacl_x64",
                   "ppc",
                   "ppc64",
                   "x64",
                   "x32",
                   "arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
              "android_arm64",
              "android_ia32",
              "arm",
              "mips",
              "mipsel",
              "mips64el",
              "nacl_ia32",
              "nacl_x64",
              "x87",
              "arm64"]

def BuildOptions():
  """Build and return the optparse parser with all supported flags.

  Returns:
    An optparse.OptionParser; callers run parse_args() on it and then
    validate/normalize the result via ProcessOptions().
  """
  result = optparse.OptionParser()
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--dcheck-always-on",
                    help="Indicates that V8 was compiled with DCHECKs enabled",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--flaky-tests",
                    help="Regard tests marked as flaky (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated)",
                    default="release,debug")
  result.add_option("--no-harness", "--noharness",
                    help="Run without test harness of a given suite",
                    default=False, action="store_true")
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
  # Network distribution is only available on Linux, hence the dynamic default.
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last run.",
                    default=False, dest="no_sorting", action="store_true")
  result.add_option("--no-stress", "--nostress",
                    help="Don't run crankshaft --always-opt --stress-op test",
                    default=False, dest="no_stress", action="store_true")
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
  result.add_option("--variants",
                    help="Comma-separated list of testing variants")
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("--predictable",
                    help="Compare output of several reruns of each test",
                    default=False, action="store_true")
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow/flaky tests)"))
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  result.add_option("--rerun-failures-count",
                    help=("Number of times to rerun each failing test case. "
                          "Very slow tests will be rerun only once."),
                    default=0, type="int")
  result.add_option("--rerun-failures-max",
                    help="Maximum number of failing test cases to rerun.",
                    default=100, type="int")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
  result.add_option("--stress-only",
                    help="Only run tests with --always-opt --stress-opt",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  # -1 means "not set"; ProcessOptions/Execute pick a default based on arch.
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default=-1, type="int")
  result.add_option("--tsan",
                    help="Regard test expectations for TSAN",
                    default=False, action="store_true")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
  # 0 means "not set"; ProcessOptions replaces it with a random seed.
  result.add_option("--random-seed", default=0, dest="random_seed",
                    help="Default seed for initializing random generator")
  result.add_option("--msan",
                    help="Regard test expectations for MSAN",
                    default=False, action="store_true")
  return result


def ProcessOptions(options):
  """Validate and normalize parsed command line options in place.

  Splits comma-separated arch/mode values, derives arch_and_mode pairs,
  adjusts the global VARIANTS list according to variant-related flags and
  applies sanitizer/valgrind/quickcheck side effects.

  Returns:
    True if the options are consistent, False otherwise (caller should
    print usage and exit).
  """
  global VARIANT_FLAGS
  global VARIANTS

  # Architecture and mode related stuff.
  if options.arch_and_mode:
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if not mode.lower() in MODES:
      print("Unknown mode %s" % mode)
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if not arch in SUPPORTED_ARCHS:
      print("Unknown architecture %s" % arch)
      return False

  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
    options.arch_and_mode = itertools.product(options.arch, options.mode)

  # Special processing of other options, sorted alphabetically.

  if options.buildbot:
    # Buildbots run presubmit tests as a separate step.
    options.no_presubmit = True
    options.no_network = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = shlex.split(options.extra_flags)

  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS

  if options.asan:
    options.extra_flags.append("--invoke-weak-callbacks")

  if options.tsan:
    VARIANTS = ["default"]
    suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                     'sanitizers', 'tsan_suppressions.txt')
    tsan_options = '%s suppressions=%s' % (
        os.environ.get('TSAN_OPTIONS', ''), suppressions_file)
    os.environ['TSAN_OPTIONS'] = tsan_options

  if options.j == 0:
    # Default to one worker per CPU.
    options.j = multiprocessing.cpu_count()

  # 0 is the "unset" marker, so loop until we draw a non-zero seed.
  while options.random_seed == 0:
    options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)

  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    return sum(args) <= 1

  if not excl(options.no_stress, options.stress_only, options.no_variants,
              bool(options.variants)):
    print("Use only one of --no-stress, --stress-only, --no-variants, "
          "or --variants.")
    return False
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.flaky_tests = "skip"
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
  if options.no_stress:
    VARIANTS = ["default", "nocrankshaft"]
  if options.no_variants:
    VARIANTS = ["default"]
  if options.stress_only:
    VARIANTS = ["stress"]
  if options.variants:
    VARIANTS = options.variants.split(",")
    if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
      print("All variants must be in %s" % str(VARIANT_FLAGS.keys()))
      return False
  if options.predictable:
    VARIANTS = ["default"]
    options.extra_flags.append("--predictable")
    options.extra_flags.append("--verify_predictable")
    options.extra_flags.append("--no-inline-new")

  if not options.shell_dir:
    if options.shell:
      print("Warning: --shell is deprecated, use --shell-dir instead.")
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)

  def CheckTestMode(name, option):
    # Shared validator for the three run|skip|dontcare option values.
    if not option in ["run", "skip", "dontcare"]:
      print("Unknown %s mode %s" % (name, option))
      return False
    return True
  if not CheckTestMode("flaky test", options.flaky_tests):
    return False
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
    return False
  if not options.no_i18n:
    DEFAULT_TESTS.append("intl")
  return True


def ShardTests(tests, shard_count, shard_run):
  """Return the subset of |tests| belonging to 1-based shard |shard_run|.

  Tests are distributed round-robin over |shard_count| shards: test i goes
  to shard (i % shard_count) + 1. With shard_count < 2 or an out-of-range
  shard_run, the full list is returned (the latter with a warning).
  """
  if shard_count < 2:
    return tests
  if shard_run < 1 or shard_run > shard_count:
    print("shard-run not a valid number, should be in [1:shard-count]")
    print("defaulting back to running all tests")
    return tests
  return [test for position, test in enumerate(tests)
          if position % shard_count == shard_run - 1]


def Main():
  """Entry point: parse options, load suites, run per arch/mode.

  Returns:
    Process exit code: 0 on success, 1 on bad options, 2 on interrupt,
    otherwise the first non-zero code from presubmit or a test run.
  """
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1

  exit_code = 0
  workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
  if not options.no_presubmit:
    print(">>> running presubmit tests")
    exit_code = subprocess.call(
        [sys.executable, join(workspace, "tools", "presubmit.py")])

  suite_paths = utils.GetSuitePaths(join(workspace, "test"))

  # Expand arguments with grouped tests. The args should reflect the list of
  # suites as otherwise filters would break.
  def ExpandTestGroups(name):
    if name in TEST_MAP:
      return [suite for suite in TEST_MAP[name]]
    else:
      return [name]
  args = [suite
          for arg in args
          for suite in ExpandTestGroups(arg)]

  if len(args) == 0:
    suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
  else:
    args_suites = OrderedDict() # Used as set
    for arg in args:
      args_suites[arg.split(os.path.sep)[0]] = True
    suite_paths = [ s for s in args_suites if s in suite_paths ]

  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(workspace, "test", root))
    if suite:
      suites.append(suite)

  if options.download_data:
    for s in suites:
      s.DownloadData()

  for (arch, mode) in options.arch_and_mode:
    try:
      code = Execute(arch, mode, args, options, suites, workspace)
    except KeyboardInterrupt:
      return 2
    # Keep the first non-zero exit code.
    exit_code = exit_code or code
  return exit_code


def Execute(arch, mode, args, options, suites, workspace):
  """Run all selected tests for one (arch, mode) combination.

  Locates the shell binaries, builds the execution context, reads and
  filters test cases from every suite, then runs them locally or
  distributed over the network.

  Returns:
    The runner's exit code (0 when all tests pass).
  """
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(
          workspace,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  shell_dir = os.path.relpath(shell_dir)

  # Populate context object.
  mode_flags = MODES[mode]["flags"]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT
    else:
      timeout = TIMEOUT_DEFAULT

  timeout *= MODES[mode]["timeout_scalefactor"]

  if options.predictable:
    # Predictable mode is slower.
    timeout *= 2

  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
                        mode_flags, options.verbose,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness)

  # TODO(all): Combine "simulator" and "simulator_run".
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
               'ppc', 'ppc64'] and \
      ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "byteorder": sys.byteorder,
  }
  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                              options.slow_tests, options.pass_fail_tests)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    # Multiply each test by the variants it should run in.
    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
    s.tests = [ t.CopyAddingFlags(v)
                for t in s.tests
                for v in s.VariantFlags(t, variant_flags) ]
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  if num_tests == 0:
    print("No tests to run.")
    return 0

  # Run the tests, either locally or distributed on the network.
  start_time = time.time()
  progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
  if options.junitout:
    progress_indicator = progress.JUnitTestProgressIndicator(
        progress_indicator, options.junitout, options.junittestsuite)
  if options.json_test_results:
    progress_indicator = progress.JsonTestProgressIndicator(
        progress_indicator, options.json_test_results, arch,
        MODES[mode]["execution_mode"])

  run_networked = not options.no_network
  if not run_networked:
    print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, workspace)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code


# Script entry point: propagate the runner's result as the process exit code.
if __name__ == "__main__":
  sys.exit(Main())