#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from collections import OrderedDict
import itertools
import multiprocessing
import optparse
import os
from os.path import join
import platform
import random
import shlex
import subprocess
import sys
import time

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local.testsuite import VARIANT_FLAGS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context


# Architecture of the machine running this script, auto-detected once at
# module load; used for "auto"/"native" --arch and for simulator detection.
ARCH_GUESS = utils.DefaultArch()

# Test suites run when no suites are named on the command line.
DEFAULT_TESTS = [
  "mjsunit",
  "unittests",
  "cctest",
  "message",
  "preparser",
]

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
  "default": [
    "mjsunit",
    "cctest",
    "message",
    "preparser",
  ],
  "optimize_for_size": [
    "mjsunit",
    "cctest",
    "webkit",
  ],
  "unittests": [
    "unittests",
  ],
}

# Per-test timeout in seconds before mode/arch scale factors are applied.
TIMEOUT_DEFAULT = 60

# Testing variants run by default (see --variants / --no-variants et al.).
VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]

# V8 flags forced on for every test, per build mode.
DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
               "--nofold-constants", "--enable-slow-asserts",
               "--debug-code", "--verify-heap"]
RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants"]

# Per-mode configuration: extra V8 flags, timeout scaling, which status-file
# section applies, which binary flavor to execute and where to find it.
MODES = {
  "debug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "debug",
  },
  "optdebug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "optdebug",
  },
  "release": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "release",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # Normal trybot release configuration. There, dchecks are always on which
  # implies debug is set. Hence, the status file needs to assume debug-like
  # behavior/timeouts.
  "tryrelease": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "slowrelease": {
    "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
    "timeout_scalefactor": 2,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
}

# Extra V8 flags appended when --gc-stress is given.
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]

# All architectures the runner accepts for --arch.
SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "android_x64",
                   "arm",
                   "ia32",
                   "x87",
                   "mips",
                   "mipsel",
                   "mips64el",
                   "nacl_ia32",
                   "nacl_x64",
                   "ppc",
                   "ppc64",
                   "x64",
                   "x32",
                   "arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
              "android_arm64",
              "android_ia32",
              "android_x64",
              "arm",
              "mips",
              "mipsel",
              "mips64el",
              "nacl_ia32",
              "nacl_x64",
              "x87",
              "arm64"]


def BuildOptions():
  """Return an optparse.OptionParser describing all command-line flags.

  Option defaults are adjusted and validated later by ProcessOptions().
  """
  result = optparse.OptionParser()
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--dcheck-always-on",
                    help="Indicates that V8 was compiled with DCHECKs enabled",
                    default=False, action="store_true")
  result.add_option("--novfp3",
                    help="Indicates that V8 was compiled without VFP3 support",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--flaky-tests",
                    help="Regard tests marked as flaky (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--download-data-only",
                    help="Download missing test suite data and exit",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated)",
                    default="release,debug")
  result.add_option("--no-harness", "--noharness",
                    help="Run without test harness of a given suite",
                    default=False, action="store_true")
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last run.",
                    default=False, dest="no_sorting", action="store_true")
  result.add_option("--no-stress", "--nostress",
                    help="Don't run crankshaft --always-opt --stress-op test",
                    default=False, dest="no_stress", action="store_true")
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
  result.add_option("--variants",
                    help="Comma-separated list of testing variants")
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("--predictable",
                    help="Compare output of several reruns of each test",
                    default=False, action="store_true")
  # list() so the choices survive as a sequence on Python 3, where
  # dict.keys() is a view; behavior is unchanged on Python 2.
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=list(progress.PROGRESS_INDICATORS.keys()),
                    default="mono")
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow/flaky tests)"))
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  result.add_option("--rerun-failures-count",
                    help=("Number of times to rerun each failing test case. "
                          "Very slow tests will be rerun only once."),
                    default=0, type="int")
  result.add_option("--rerun-failures-max",
                    help="Maximum number of failing test cases to rerun.",
                    default=100, type="int")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
  result.add_option("--stress-only",
                    help="Only run tests with --always-opt --stress-opt",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default=-1, type="int")
  result.add_option("--tsan",
                    help="Regard test expectations for TSAN",
                    default=False, action="store_true")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
  result.add_option("--random-seed", default=0, dest="random_seed", type="int",
                    help="Default seed for initializing random generator")
  result.add_option("--random-seed-stress-count", default=1, type="int",
                    dest="random_seed_stress_count",
                    help="Number of runs with different random seeds")
  result.add_option("--msan",
                    help="Regard test expectations for MSAN",
                    default=False, action="store_true")
  return result


def RandomSeed():
  """Return a non-zero random seed in the signed 32-bit integer range.

  Uses the OS entropy source (SystemRandom) and retries on the unlikely
  draw of 0, since 0 means "no seed chosen" elsewhere in this script.
  """
  rng = random.SystemRandom()  # hoisted: one generator for all retries
  seed = 0
  while not seed:
    seed = rng.randint(-2147483648, 2147483647)
  return seed


325 326 327 328 329 330 331 332 333
def BuildbotToV8Mode(config):
  """Convert buildbot build configs to configs understood by the v8 runner.

  V8 configs are always lower case and without the additional _x64 suffix for
  64 bit builds on windows with ninja.
  """
  if config.endswith('_x64'):
    config = config[:-4]
  return config.lower()

def ProcessOptions(options):
  """Validate and normalize parsed options in place.

  Splits comma-separated arch/mode values, derives options.arch_and_mode,
  applies sanitizer/variant/stress adjustments (mutating the module-level
  VARIANTS list), and tokenizes command prefixes and extra flags.

  Returns True if the options are usable, False on any validation error
  (after printing a diagnostic).
  """
  global VARIANT_FLAGS
  global VARIANTS

  # Architecture and mode related stuff.
  if options.arch_and_mode:
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if BuildbotToV8Mode(mode) not in MODES:
      print("Unknown mode %s" % mode)
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if arch not in SUPPORTED_ARCHS:
      print("Unknown architecture %s" % arch)
      return False

  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
    options.arch_and_mode = itertools.product(options.arch, options.mode)

  # Special processing of other options, sorted alphabetically.

  if options.buildbot:
    # Buildbots run presubmit tests as a separate step.
    options.no_presubmit = True
    options.no_network = True
  if options.download_data_only:
    options.no_presubmit = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = shlex.split(options.extra_flags)

  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS

  if options.asan:
    options.extra_flags.append("--invoke-weak-callbacks")
    options.extra_flags.append("--omit-quit")

  if options.novfp3:
    options.extra_flags.append("--noenable-vfp3")

  if options.msan:
    VARIANTS = ["default"]

  if options.tsan:
    VARIANTS = ["default"]
    suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                     'sanitizers', 'tsan_suppressions.txt')
    tsan_options = '%s suppressions=%s' % (
        os.environ.get('TSAN_OPTIONS', ''), suppressions_file)
    os.environ['TSAN_OPTIONS'] = tsan_options

  if options.j == 0:
    # Default parallelism: one task per CPU.
    options.j = multiprocessing.cpu_count()

  if options.random_seed_stress_count <= 1 and options.random_seed == 0:
    options.random_seed = RandomSeed()

  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    # sum() over booleans counts the truthy arguments (same result as the
    # former reduce(), and works without Python 2's builtin reduce).
    return sum(args) <= 1

  if not excl(options.no_stress, options.stress_only, options.no_variants,
              bool(options.variants)):
    print("Use only one of --no-stress, --stress-only, --no-variants, "
          "or --variants.")
    return False
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.flaky_tests = "skip"
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
  if options.no_stress:
    VARIANTS = ["default", "nocrankshaft"]
  if options.no_variants:
    VARIANTS = ["default"]
  if options.stress_only:
    VARIANTS = ["stress"]
  if options.variants:
    VARIANTS = options.variants.split(",")
    if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
      print("All variants must be in %s" % str(VARIANT_FLAGS.keys()))
      return False
  if options.predictable:
    VARIANTS = ["default"]
    options.extra_flags.append("--predictable")
    options.extra_flags.append("--verify_predictable")
    options.extra_flags.append("--no-inline-new")

  if not options.shell_dir:
    if options.shell:
      print("Warning: --shell is deprecated, use --shell-dir instead.")
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)

  def CheckTestMode(name, option):
    """Returns True iff option is a valid run/skip/dontcare mode."""
    if option not in ["run", "skip", "dontcare"]:
      print("Unknown %s mode %s" % (name, option))
      return False
    return True
  if not CheckTestMode("flaky test", options.flaky_tests):
    return False
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
    return False
  if not options.no_i18n:
    DEFAULT_TESTS.append("intl")
  return True


def ShardTests(tests, shard_count, shard_run):
  """Return the round-robin subset of tests for the 1-based shard shard_run.

  With shard_count < 2 sharding is a no-op. An out-of-range shard_run is
  reported and all tests are returned, matching the original lenient
  behavior rather than failing the run.
  """
  if shard_count < 2:
    return tests
  if shard_run < 1 or shard_run > shard_count:
    print("shard-run not a valid number, should be in [1:shard-count]")
    print("defaulting back to running all tests")
    return tests
  # enumerate replaces the former manual counter; selection is unchanged.
  return [test for count, test in enumerate(tests)
          if count % shard_count == shard_run - 1]


def Main():
  """Top-level driver: parse options, collect suites, run each arch.mode.

  Returns a process exit code: 1 for bad options, 2 on KeyboardInterrupt,
  otherwise the first non-zero result of presubmit or any test run.
  """
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1

  exit_code = 0
  workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
  if not options.no_presubmit:
    print(">>> running presubmit tests")
    exit_code = subprocess.call(
        [sys.executable, join(workspace, "tools", "presubmit.py")])

  suite_paths = utils.GetSuitePaths(join(workspace, "test"))

  # Expand arguments with grouped tests. The args should reflect the list of
  # suites as otherwise filters would break.
  def ExpandTestGroups(name):
    # BUG FIX: the original indexed TEST_MAP with "arg" (the loop variable
    # leaked from the comprehension below) instead of the parameter "name".
    if name in TEST_MAP:
      return list(TEST_MAP[name])
    else:
      return [name]
  # Flatten the per-argument expansions (replaces the former reduce()).
  args = [suite for arg in args for suite in ExpandTestGroups(arg)]

  if len(args) == 0:
    suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
  else:
    args_suites = OrderedDict() # Used as set
    for arg in args:
      args_suites[arg.split(os.path.sep)[0]] = True
    suite_paths = [ s for s in args_suites if s in suite_paths ]

  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(workspace, "test", root))
    if suite:
      suites.append(suite)

  if options.download_data or options.download_data_only:
    for s in suites:
      s.DownloadData()

  if options.download_data_only:
    return exit_code

  for (arch, mode) in options.arch_and_mode:
    try:
      code = Execute(arch, mode, args, options, suites, workspace)
    except KeyboardInterrupt:
      return 2
    # Keep the first failure; later successes must not reset it.
    exit_code = exit_code or code
  return exit_code


def Execute(arch, mode, args, options, suites, workspace):
  """Run all tests of the given suites for one (arch, mode) combination.

  Locates the shell binaries, builds the execution context, filters and
  expands the test cases (variants, random-seed stress, sharding), then runs
  them locally or distributed over the network. Returns the runner's exit
  code (0 also when --cat only printed sources).
  """
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = BuildbotToV8Mode(mode)
    else:
      shell_dir = os.path.join(
          workspace,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  shell_dir = os.path.relpath(shell_dir)

  # Populate context object.
  mode_flags = MODES[mode]["flags"]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT
    else:
      timeout = TIMEOUT_DEFAULT

  timeout *= MODES[mode]["timeout_scalefactor"]

  if options.predictable:
    # Predictable mode is slower.
    timeout *= 2

  # TODO(machenbach): Remove temporary verbose output on windows after
  # debugging driver-hung-up on XP.
  verbose_output = (
      options.verbose or
      utils.IsWindows() and options.progress == "verbose"
  )
  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
                        mode_flags, verbose_output,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness)

  # TODO(all): Combine "simulator" and "simulator_run".
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
               'ppc', 'ppc64'] and \
      ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
  # These variables parameterize the status-file expectations.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": options.novfp3,
    "byteorder": sys.byteorder,
  }
  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                              options.slow_tests, options.pass_fail_tests)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
    variant_tests = [ t.CopyAddingFlags(v)
                      for t in s.tests
                      for v in s.VariantFlags(t, variant_flags) ]

    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        for i in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in execution.py)
          # or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % RandomSeed()]
      s.tests = [
        t.CopyAddingFlags(v)
        for t in variant_tests
        for v in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests

    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  # Run the tests, either locally or distributed on the network.
  start_time = time.time()
  progress_indicator = progress.IndicatorNotifier()
  progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]())
  if options.junitout:
    progress_indicator.Register(progress.JUnitTestProgressIndicator(
        options.junitout, options.junittestsuite))
  if options.json_test_results:
    progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results, arch, MODES[mode]["execution_mode"]))

  run_networked = not options.no_network
  if not run_networked:
    print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, workspace)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code


# Script entry point: propagate the overall test-run status as exit code.
if __name__ == "__main__":
  sys.exit(Main())