#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


31
from collections import OrderedDict
32
import itertools
33 34 35 36
import multiprocessing
import optparse
import os
from os.path import join
37
import platform
38
import random
39
import shlex
40 41 42 43 44 45 46
import subprocess
import sys
import time

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
47
from testrunner.local.testsuite import VARIANT_FLAGS
48 49 50 51 52 53 54
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context


ARCH_GUESS = utils.DefaultArch()
55 56 57 58 59 60 61
DEFAULT_TESTS = [
  "mjsunit",
  "unittests",
  "cctest",
  "message",
  "preparser",
]
62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in seperate steps on the bots.
TEST_MAP = {
  "default": [
    "mjsunit",
    "cctest",
    "message",
    "preparser",
  ],
  "optimize_for_size": [
    "mjsunit",
    "cctest",
    "webkit",
  ],
  "unittests": [
79
    "unittests",
80 81 82
  ],
}

83 84
TIMEOUT_DEFAULT = 60

85
VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
86

87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123
DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
               "--nofold-constants", "--enable-slow-asserts",
               "--debug-code", "--verify-heap"]
RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants"]

MODES = {
  "debug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "debug",
  },
  "optdebug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "optdebug",
  },
  "release": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "release",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "tryrelease": {
    "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
    "timeout_scalefactor": 2,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
}
124

125 126 127 128 129
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]

130
SUPPORTED_ARCHS = ["android_arm",
131
                   "android_arm64",
132
                   "android_ia32",
133
                   "android_x64",
134 135
                   "arm",
                   "ia32",
danno@chromium.org's avatar
danno@chromium.org committed
136
                   "x87",
137
                   "mips",
138
                   "mipsel",
139
                   "mips64el",
140 141
                   "nacl_ia32",
                   "nacl_x64",
142 143
                   "ppc",
                   "ppc64",
144
                   "x64",
145
                   "x32",
146
                   "arm64"]
147 148
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
149
              "android_arm64",
150
              "android_ia32",
151
              "android_x64",
152
              "arm",
153
              "mips",
154
              "mipsel",
155
              "mips64el",
156
              "nacl_ia32",
157
              "nacl_x64",
danno@chromium.org's avatar
danno@chromium.org committed
158
              "x87",
159
              "arm64"]
160

161 162 163 164 165 166 167 168 169 170

def BuildOptions():
  result = optparse.OptionParser()
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
171 172 173
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
174 175 176
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
177 178 179
  result.add_option("--dcheck-always-on",
                    help="Indicates that V8 was compiled with DCHECKs enabled",
                    default=False, action="store_true")
180 181
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
182 183 184
  result.add_option("--flaky-tests",
                    help="Regard tests marked as flaky (run|skip|dontcare)",
                    default="dontcare")
185 186 187 188 189 190
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
191 192 193
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
194 195 196 197 198 199 200 201 202 203 204 205 206 207 208
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated)",
                    default="release,debug")
209 210 211
  result.add_option("--no-harness", "--noharness",
                    help="Run without test harness of a given suite",
                    default=False, action="store_true")
212 213 214
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
215 216 217 218 219 220 221
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks',
                    default=False, dest="no_presubmit", action="store_true")
222 223 224
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
225 226 227
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last run.",
                    default=False, dest="no_sorting", action="store_true")
228 229 230
  result.add_option("--no-stress", "--nostress",
                    help="Don't run crankshaft --always-opt --stress-op test",
                    default=False, dest="no_stress", action="store_true")
231 232 233
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
234 235
  result.add_option("--variants",
                    help="Comma-separated list of testing variants")
236 237
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
238 239 240
  result.add_option("--predictable",
                    help="Compare output of several reruns of each test",
                    default=False, action="store_true")
241 242 243 244
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
245 246
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow/flaky tests)"))
247 248
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
249 250
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
251 252 253 254 255 256 257
  result.add_option("--rerun-failures-count",
                    help=("Number of times to rerun each failing test case. "
                          "Very slow tests will be rerun only once."),
                    default=0, type="int")
  result.add_option("--rerun-failures-max",
                    help="Maximum number of failing test cases to rerun.",
                    default=100, type="int")
258 259 260 261 262 263 264 265 266
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
267 268 269 270
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
271 272 273 274 275 276 277
  result.add_option("--stress-only",
                    help="Only run tests with --always-opt --stress-opt",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default= -1, type="int")
278 279 280
  result.add_option("--tsan",
                    help="Regard test expectations for TSAN",
                    default=False, action="store_true")
281 282 283 284 285 286
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
287 288 289 290
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
291 292
  result.add_option("--random-seed", default=0, dest="random_seed",
                    help="Default seed for initializing random generator")
293 294 295
  result.add_option("--msan",
                    help="Regard test expectations for MSAN",
                    default=False, action="store_true")
296 297 298 299 300
  return result


def ProcessOptions(options):
  global VARIANT_FLAGS
301
  global VARIANTS
302 303 304

  # Architecture and mode related stuff.
  if options.arch_and_mode:
305 306 307 308
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
309 310
  options.mode = options.mode.split(",")
  for mode in options.mode:
311
    if not mode.lower() in MODES:
312 313 314 315 316 317
      print "Unknown mode %s" % mode
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
318
    if not arch in SUPPORTED_ARCHS:
319 320 321
      print "Unknown architecture %s" % arch
      return False

322 323 324 325 326
  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
    options.arch_and_mode = itertools.product(options.arch, options.mode)

327 328 329 330 331 332 333 334 335 336
  # Special processing of other options, sorted alphabetically.

  if options.buildbot:
    # Buildbots run presubmit tests as a separate step.
    options.no_presubmit = True
    options.no_network = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
337
  options.command_prefix = shlex.split(options.command_prefix)
338
  options.extra_flags = shlex.split(options.extra_flags)
339 340 341 342

  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS

343
  if options.asan:
344
    options.extra_flags.append("--invoke-weak-callbacks")
345

346 347
  if options.tsan:
    VARIANTS = ["default"]
348 349 350 351 352
    suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                     'sanitizers', 'tsan_suppressions.txt')
    tsan_options = '%s suppressions=%s' % (
        os.environ.get('TSAN_OPTIONS', ''), suppressions_file)
    os.environ['TSAN_OPTIONS'] = tsan_options
353

354 355
  if options.j == 0:
    options.j = multiprocessing.cpu_count()
356

357 358 359
  while options.random_seed == 0:
    options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)

360 361 362 363
  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    return reduce(lambda x, y: x + y, args) <= 1

364
  if not excl(options.no_stress, options.stress_only, options.no_variants,
365
              bool(options.variants)):
366
    print("Use only one of --no-stress, --stress-only, --no-variants, "
367
          "or --variants.")
368
    return False
369 370 371 372 373
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.flaky_tests = "skip"
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
374
  if options.no_stress:
375
    VARIANTS = ["default", "nocrankshaft"]
376
  if options.no_variants:
377 378 379 380 381 382 383 384
    VARIANTS = ["default"]
  if options.stress_only:
    VARIANTS = ["stress"]
  if options.variants:
    VARIANTS = options.variants.split(",")
    if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
      print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
      return False
385 386 387 388 389
  if options.predictable:
    VARIANTS = ["default"]
    options.extra_flags.append("--predictable")
    options.extra_flags.append("--verify_predictable")
    options.extra_flags.append("--no-inline-new")
390

391 392 393 394 395 396 397
  if not options.shell_dir:
    if options.shell:
      print "Warning: --shell is deprecated, use --shell-dir instead."
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
398
    options.command_prefix = (["python", "-u", run_valgrind] +
399
                              options.command_prefix)
400 401 402 403 404 405 406 407 408 409
  def CheckTestMode(name, option):
    if not option in ["run", "skip", "dontcare"]:
      print "Unknown %s mode %s" % (name, option)
      return False
    return True
  if not CheckTestMode("flaky test", options.flaky_tests):
    return False
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
410
    return False
411 412
  if not options.no_i18n:
    DEFAULT_TESTS.append("intl")
413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442
  return True


def ShardTests(tests, shard_count, shard_run):
  if shard_count < 2:
    return tests
  if shard_run < 1 or shard_run > shard_count:
    print "shard-run not a valid number, should be in [1:shard-count]"
    print "defaulting back to running all tests"
    return tests
  count = 0
  shard = []
  for test in tests:
    if count % shard_count == shard_run - 1:
      shard.append(test)
    count += 1
  return shard


def Main():
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1

  exit_code = 0
  workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
  if not options.no_presubmit:
    print ">>> running presubmit tests"
443
    exit_code = subprocess.call(
444 445 446 447
        [sys.executable, join(workspace, "tools", "presubmit.py")])

  suite_paths = utils.GetSuitePaths(join(workspace, "test"))

448 449 450 451 452 453 454 455 456 457 458
  # Expand arguments with grouped tests. The args should reflect the list of
  # suites as otherwise filters would break.
  def ExpandTestGroups(name):
    if name in TEST_MAP:
      return [suite for suite in TEST_MAP[arg]]
    else:
      return [name]
  args = reduce(lambda x, y: x + y,
         [ExpandTestGroups(arg) for arg in args],
         [])

459
  if len(args) == 0:
460
    suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
461
  else:
462
    args_suites = OrderedDict() # Used as set
463
    for arg in args:
464
      args_suites[arg.split(os.path.sep)[0]] = True
465
    suite_paths = [ s for s in args_suites if s in suite_paths ]
466 467 468 469 470 471 472 473 474 475 476 477

  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(workspace, "test", root))
    if suite:
      suites.append(suite)

  if options.download_data:
    for s in suites:
      s.DownloadData()

478
  for (arch, mode) in options.arch_and_mode:
479 480 481 482
    try:
      code = Execute(arch, mode, args, options, suites, workspace)
    except KeyboardInterrupt:
      return 2
483
    exit_code = exit_code or code
484 485 486 487 488 489 490 491 492
  return exit_code


def Execute(arch, mode, args, options, suites, workspace):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
493 494
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
495 496 497
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
498 499 500 501 502
      shell_dir = os.path.join(
          workspace,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
503 504 505
  shell_dir = os.path.relpath(shell_dir)

  # Populate context object.
506
  mode_flags = MODES[mode]["flags"]
507 508 509
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
510
    if arch in SLOW_ARCHS:
511
      timeout = 2 * TIMEOUT_DEFAULT;
512 513 514
    else:
      timeout = TIMEOUT_DEFAULT;

515
  timeout *= MODES[mode]["timeout_scalefactor"]
516 517 518 519 520

  if options.predictable:
    # Predictable mode is slower.
    timeout *= 2

521
  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
522
                        mode_flags, options.verbose,
523
                        timeout, options.isolates,
524
                        options.command_prefix,
525
                        options.extra_flags,
526
                        options.no_i18n,
527
                        options.random_seed,
528 529
                        options.no_sorting,
                        options.rerun_failures_count,
530
                        options.rerun_failures_max,
531 532
                        options.predictable,
                        options.no_harness)
533

534 535
  # TODO(all): Combine "simulator" and "simulator_run".
  simulator_run = not options.dont_skip_simulator_slow_tests and \
536 537
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
               'ppc', 'ppc64'] and \
538
      ARCH_GUESS and arch != ARCH_GUESS
539 540 541
  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
542
    "asan": options.asan,
543
    "deopt_fuzzer": False,
544 545
    "gc_stress": options.gc_stress,
    "isolates": options.isolates,
546
    "mode": MODES[mode]["status_mode"],
547
    "no_i18n": options.no_i18n,
548
    "no_snap": options.no_snap,
549
    "simulator_run": simulator_run,
550
    "simulator": utils.UseSimulator(arch),
551
    "system": utils.GuessOS(),
552
    "tsan": options.tsan,
553
    "msan": options.msan,
554
    "dcheck_always_on": options.dcheck_always_on,
555
    "byteorder": sys.byteorder,
556 557 558 559 560 561 562 563 564
  }
  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
565
    all_tests += s.tests
566 567
    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                              options.slow_tests, options.pass_fail_tests)
568 569 570
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
571
    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
572 573
    s.tests = [ t.CopyAddingFlags(v)
                for t in s.tests
574
                for v in s.VariantFlags(t, variant_flags) ]
575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  if num_tests == 0:
    print "No tests to run."
    return 0

  # Run the tests, either locally or distributed on the network.
592 593 594 595 596
  start_time = time.time()
  progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
  if options.junitout:
    progress_indicator = progress.JUnitTestProgressIndicator(
        progress_indicator, options.junitout, options.junittestsuite)
597 598
  if options.json_test_results:
    progress_indicator = progress.JsonTestProgressIndicator(
599 600
        progress_indicator, options.json_test_results, arch,
        MODES[mode]["execution_mode"])
601 602 603 604 605 606 607 608 609 610 611 612

  run_networked = not options.no_network
  if not run_networked:
    print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
613
      run_networked = False
614 615 616 617 618 619 620 621 622 623 624 625 626 627 628
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, workspace)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time
629 630 631 632 633 634 635 636

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code


if __name__ == "__main__":
  sys.exit(Main())