#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


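# Example invocations (illustrative only; assumes the standard checkout layout
# where this script lives in tools/, and uses suite/option names defined below):
#   tools/run-tests.py --arch=x64 --mode=release cctest
#   tools/run-tests.py --arch=ia32,x64 --variants=default,stress mjsunit
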
from collections import OrderedDict
import itertools
import multiprocessing
import optparse
import os
from os.path import join
import platform
import random
import shlex
import subprocess
import sys
import time

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local.testsuite import ALL_VARIANTS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context


# Base dir of the v8 checkout to be used as cwd.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

ARCH_GUESS = utils.DefaultArch()

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
  # This needs to stay in sync with test/bot_default.isolate.
  "bot_default": [
    "mjsunit",
    "cctest",
    "webkit",
    "fuzzer",
    "message",
    "preparser",
    "intl",
    "unittests",
  ],
  # This needs to stay in sync with test/default.isolate.
  "default": [
    "mjsunit",
    "cctest",
    "fuzzer",
    "message",
    "preparser",
    "intl",
    "unittests",
  ],
  # This needs to stay in sync with test/ignition.isolate.
  "ignition": [
    "mjsunit",
    "cctest",
    "webkit",
    "message",
  ],
  # This needs to stay in sync with test/optimize_for_size.isolate.
  "optimize_for_size": [
    "mjsunit",
    "cctest",
    "webkit",
    "intl",
  ],
  "unittests": [
    "unittests",
  ],
}

TIMEOUT_DEFAULT = 60

VARIANTS = ["default", "stress", "turbofan"]

EXHAUSTIVE_VARIANTS = VARIANTS + [
  "ignition",
  "nocrankshaft",
  "turbofan_opt",
]

DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
               "--nofold-constants", "--enable-slow-asserts",
               "--debug-code", "--verify-heap"]
RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants"]

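# Each entry configures one build/test mode:
#   flags               - extra V8 flags passed to every test in that mode
#   timeout_scalefactor - multiplier applied to the base --timeout value
#   status_mode         - whether debug or release status-file expectations apply
#   execution_mode      - mode name handed to the execution context
#   output_folder       - names the build output subdirectory under --outdir
#                         (as <arch>.<output_folder>)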
MODES = {
  "debug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "debug",
  },
  "optdebug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "optdebug",
  },
  "release": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "release",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # Normal trybot release configuration. There, dchecks are always on which
  # implies debug is set. Hence, the status file needs to assume debug-like
  # behavior/timeouts.
  "tryrelease": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "slowrelease": {
    "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
    "timeout_scalefactor": 2,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
}

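# Appended to the extra flags when --gc-stress is given (see ProcessOptions).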
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]

SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "android_x64",
                   "arm",
                   "ia32",
                   "x87",
                   "mips",
                   "mipsel",
                   "mips64",
                   "mips64el",
                   "nacl_ia32",
                   "nacl_x64",
                   "s390",
                   "s390x",
                   "ppc",
                   "ppc64",
                   "x64",
                   "x32",
                   "arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
              "android_arm64",
              "android_ia32",
              "android_x64",
              "arm",
              "mips",
              "mipsel",
              "mips64",
              "mips64el",
              "nacl_ia32",
              "nacl_x64",
              "s390",
              "s390x",
              "x87",
              "arm64"]


def BuildOptions():
  result = optparse.OptionParser()
  result.usage = '%prog [options] [tests]'
  result.description = """TESTS: %s""" % (TEST_MAP["default"])
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect: %s" % SUPPORTED_ARCHS),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
  result.add_option("--sancov-dir",
                    help="Directory where to collect coverage data")
  result.add_option("--cfi-vptr",
                    help="Run tests with UBSAN cfi_vptr option.",
                    default=False, action="store_true")
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--dcheck-always-on",
                    help="Indicates that V8 was compiled with DCHECKs enabled",
                    default=False, action="store_true")
  result.add_option("--novfp3",
                    help="Indicates that V8 was compiled without VFP3 support",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
  result.add_option("--gcov-coverage",
                    help="Uses executables instrumented for gcov coverage",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--download-data-only",
                    help="Download missing test suite data and exit",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--ignition", help="Skip tests which don't run in ignition",
                    default=False, action="store_true")
  result.add_option("--ignition-turbofan",
                    help="Skip tests which don't run in ignition_turbofan",
                    default=False, action="store_true")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated,"
                    " uppercase for ninja and buildbot builds): %s" % MODES.keys(),
                    default="release,debug")
  result.add_option("--no-harness", "--noharness",
                    help="Run without test harness of a given suite",
                    default=False, action="store_true")
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last run.",
                    default=False, dest="no_sorting", action="store_true")
  result.add_option("--no-stress", "--nostress",
                    help="Don't run crankshaft --always-opt --stress-opt test",
                    default=False, dest="no_stress", action="store_true")
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
  result.add_option("--variants",
                    help="Comma-separated list of testing variants: %s" % VARIANTS)
  result.add_option("--exhaustive-variants",
                    default=False, action="store_true",
                    help="Use exhaustive set of default variants.")
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("--predictable",
                    help="Compare output of several reruns of each test",
                    default=False, action="store_true")
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow tests)"))
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  result.add_option("--rerun-failures-count",
                    help=("Number of times to rerun each failing test case. "
                          "Very slow tests will be rerun only once."),
                    default=0, type="int")
  result.add_option("--rerun-failures-max",
                    help="Maximum number of failing test cases to rerun.",
                    default=100, type="int")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
  result.add_option("--stress-only",
                    help="Only run tests with --always-opt --stress-opt",
                    default=False, action="store_true")
  result.add_option("--swarming",
                    help="Indicates running test driver on swarming.",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default=TIMEOUT_DEFAULT, type="int")
  result.add_option("--tsan",
                    help="Regard test expectations for TSAN",
                    default=False, action="store_true")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
  result.add_option("--random-seed", default=0, dest="random_seed", type="int",
                    help="Default seed for initializing random generator")
  result.add_option("--random-seed-stress-count", default=1, type="int",
                    dest="random_seed_stress_count",
                    help="Number of runs with different random seeds")
  result.add_option("--msan",
                    help="Regard test expectations for MSAN",
                    default=False, action="store_true")
  return result


def RandomSeed():
  seed = 0
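  # 0 doubles as "no seed specified" elsewhere in this script, so reject it here.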
  while not seed:
    seed = random.SystemRandom().randint(-2147483648, 2147483647)
  return seed


def BuildbotToV8Mode(config):
  """Convert buildbot build configs to configs understood by the v8 runner.

  V8 configs are always lower case and without the additional _x64 suffix for
  64 bit builds on windows with ninja.
  """
  mode = config[:-4] if config.endswith('_x64') else config
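  # e.g. "Release_x64" -> "release", "Debug" -> "debug".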
  return mode.lower()


def SetupEnvironment(options):
  """Setup additional environment variables."""

  # Many tests assume an English interface.
  os.environ['LANG'] = 'en_US.UTF-8'

  symbolizer = 'external_symbolizer_path=%s' % (
      os.path.join(
          BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
          'llvm-symbolizer',
      )
  )

  if options.asan:
    os.environ['ASAN_OPTIONS'] = symbolizer

  if options.sancov_dir:
    assert os.path.exists(options.sancov_dir)
    os.environ['ASAN_OPTIONS'] = ":".join([
      'coverage=1',
      'coverage_dir=%s' % options.sancov_dir,
      symbolizer,
    ])

  if options.cfi_vptr:
    os.environ['UBSAN_OPTIONS'] = ":".join([
      'print_stacktrace=1',
      'print_summary=1',
      'symbolize=1',
      symbolizer,
    ])

  if options.msan:
    os.environ['MSAN_OPTIONS'] = symbolizer

  if options.tsan:
    suppressions_file = os.path.join(
        BASE_DIR, 'tools', 'sanitizers', 'tsan_suppressions.txt')
    os.environ['TSAN_OPTIONS'] = " ".join([
      symbolizer,
      'suppressions=%s' % suppressions_file,
      'exit_code=0',
      'report_thread_leaks=0',
      'history_size=7',
      'report_destroy_locked=0',
    ])


def ProcessOptions(options):
  global ALL_VARIANTS
  global EXHAUSTIVE_VARIANTS
  global VARIANTS

  # Architecture and mode related stuff.
  if options.arch_and_mode:
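    # e.g. --arch-and-mode=x64.release,arm.debug yields arch "x64,arm"
    # and mode "release,debug".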
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if not BuildbotToV8Mode(mode) in MODES:
      print "Unknown mode %s" % mode
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if not arch in SUPPORTED_ARCHS:
      print "Unknown architecture %s" % arch
      return False

  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
    options.arch_and_mode = itertools.product(options.arch, options.mode)

  # Special processing of other options, sorted alphabetically.

  if options.buildbot:
    # Buildbots run presubmit tests as a separate step.
    options.no_presubmit = True
    options.no_network = True
  if options.download_data_only:
    options.no_presubmit = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = shlex.split(options.extra_flags)

  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS

  if options.asan:
    options.extra_flags.append("--invoke-weak-callbacks")
    options.extra_flags.append("--omit-quit")

  if options.novfp3:
    options.extra_flags.append("--noenable-vfp3")

  if options.exhaustive_variants:
    # This is used on many bots. It includes a larger set of default variants.
    # Other options for manipulating variants still apply afterwards.
    VARIANTS = EXHAUSTIVE_VARIANTS

  if options.msan:
    VARIANTS = ["default"]

  if options.tsan:
    VARIANTS = ["default"]

  if options.j == 0:
    options.j = multiprocessing.cpu_count()

  if options.random_seed_stress_count <= 1 and options.random_seed == 0:
    options.random_seed = RandomSeed()

  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    return reduce(lambda x, y: x + y, args) <= 1

  if not excl(options.no_stress, options.stress_only, options.no_variants,
              bool(options.variants)):
    print("Use only one of --no-stress, --stress-only, --no-variants, "
          "or --variants.")
    return False
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
  if options.no_stress:
    VARIANTS = ["default", "nocrankshaft"]
  if options.no_variants:
    VARIANTS = ["default"]
  if options.stress_only:
    VARIANTS = ["stress"]
  if options.variants:
    VARIANTS = options.variants.split(",")
    if not set(VARIANTS).issubset(ALL_VARIANTS):
      print "All variants must be in %s" % str(ALL_VARIANTS)
      return False
  if options.predictable:
    VARIANTS = ["default"]
    options.extra_flags.append("--predictable")
    options.extra_flags.append("--verify_predictable")
    options.extra_flags.append("--no-inline-new")

  if not options.shell_dir:
    if options.shell:
      print "Warning: --shell is deprecated, use --shell-dir instead."
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)
  def CheckTestMode(name, option):
    if not option in ["run", "skip", "dontcare"]:
      print "Unknown %s mode %s" % (name, option)
      return False
    return True
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
    return False
  if options.no_i18n:
    TEST_MAP["bot_default"].remove("intl")
    TEST_MAP["default"].remove("intl")
  return True


def ShardTests(tests, options):
  # Read gtest shard configuration from environment (e.g. set by swarming).
  # If none is present, use values passed on the command line.
  shard_count = int(os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
  shard_run = os.environ.get('GTEST_SHARD_INDEX')
  if shard_run is not None:
    # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
    shard_run = int(shard_run) + 1
  else:
    shard_run = options.shard_run

  if options.shard_count > 1:
    # Log if a value was passed on the cmd line and it differs from the
    # environment variables.
    if options.shard_count != shard_count:
      print("shard_count from cmd line differs from environment variable "
            "GTEST_TOTAL_SHARDS")
    if options.shard_run > 1 and options.shard_run != shard_run:
      print("shard_run from cmd line differs from environment variable "
            "GTEST_SHARD_INDEX")

  if shard_count < 2:
    return tests
  if shard_run < 1 or shard_run > shard_count:
    print "shard-run not a valid number, should be in [1:shard-count]"
    print "defaulting back to running all tests"
    return tests
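  # Round-robin: test i belongs to this shard iff i % shard_count == shard_run - 1.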
  count = 0
  shard = []
  for test in tests:
    if count % shard_count == shard_run - 1:
      shard.append(test)
    count += 1
  return shard


def Main():
  # Use the v8 root as cwd as some test cases use "load" with relative paths.
  os.chdir(BASE_DIR)

  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1
  SetupEnvironment(options)

  if options.swarming:
    # Swarming doesn't print how isolated commands are called. Let's make this
    # less cryptic by printing it ourselves.
    print ' '.join(sys.argv)

  exit_code = 0
  if not options.no_presubmit:
    print ">>> running presubmit tests"
    exit_code = subprocess.call(
        [sys.executable, join(BASE_DIR, "tools", "presubmit.py")])

  suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))

  # Use default tests if no test configuration was provided at the cmd line.
  if len(args) == 0:
    args = ["default"]

  # Expand arguments with grouped tests. The args should reflect the list of
  # suites as otherwise filters would break.
  def ExpandTestGroups(name):
    if name in TEST_MAP:
      return [suite for suite in TEST_MAP[name]]
    else:
      return [name]
  args = reduce(lambda x, y: x + y,
         [ExpandTestGroups(arg) for arg in args],
         [])

  args_suites = OrderedDict() # Used as set
  for arg in args:
    args_suites[arg.split('/')[0]] = True
  suite_paths = [ s for s in args_suites if s in suite_paths ]

  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(BASE_DIR, "test", root))
    if suite:
      suites.append(suite)

  if options.download_data or options.download_data_only:
    for s in suites:
      s.DownloadData()

  if options.download_data_only:
    return exit_code

  for (arch, mode) in options.arch_and_mode:
    try:
      code = Execute(arch, mode, args, options, suites)
    except KeyboardInterrupt:
      return 2
    exit_code = exit_code or code
  return exit_code


def Execute(arch, mode, args, options, suites):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
      mode = BuildbotToV8Mode(mode)
    else:
      shell_dir = os.path.join(
          BASE_DIR,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  if not os.path.exists(shell_dir):
    raise Exception('Could not find shell_dir: "%s"' % shell_dir)

  # Populate context object.
  mode_flags = MODES[mode]["flags"]

  # Simulators are slow, therefore allow a longer timeout.
  if arch in SLOW_ARCHS:
    options.timeout *= 2

  options.timeout *= MODES[mode]["timeout_scalefactor"]

  if options.predictable:
    # Predictable mode is slower.
    options.timeout *= 2

  # TODO(machenbach): Remove temporary verbose output on windows after
  # debugging driver-hung-up on XP.
  verbose_output = (
      options.verbose or
      utils.IsWindows() and options.progress == "verbose"
  )
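  # "and" binds tighter than "or": always verbose with --verbose, and on
  # Windows also when --progress=verbose is selected.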
  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
                        mode_flags, verbose_output,
                        options.timeout,
                        options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness,
                        use_perf_data=not options.swarming,
                        sancov_dir=options.sancov_dir)

  # TODO(all): Combine "simulator" and "simulator_run".
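  # True when a simulated architecture is tested on a host of a different
  # architecture (unless --dont-skip-slow-simulator-tests is given).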
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
               'ppc', 'ppc64'] and \
      ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
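  # The dict below is handed to each suite's ReadStatusFile(), which lets
  # status-file expectations depend on these values.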
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "gcov_coverage": options.gcov_coverage,
    "ignition": options.ignition,
    "ignition_turbofan": options.ignition_turbofan,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": options.novfp3,
    "predictable": options.predictable,
    "byteorder": sys.byteorder,
  }
  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_gen = s.CreateVariantGenerator(VARIANTS)
    variant_tests = [ t.CopyAddingFlags(v, flags)
                      for t in s.tests
                      for v in variant_gen.FilterVariantsByTest(t)
                      for flags in variant_gen.GetFlagSets(t, v) ]

    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        for i in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in execution.py)
          # or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % RandomSeed()]
      s.tests = [
        t.CopyAddingFlags(t.variant, flags)
        for t in variant_tests
        for flags in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests

    s.tests = ShardTests(s.tests, options)
    num_tests += len(s.tests)

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  # Run the tests, either locally or distributed on the network.
  start_time = time.time()
  progress_indicator = progress.IndicatorNotifier()
  progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]())
  if options.junitout:
    progress_indicator.Register(progress.JUnitTestProgressIndicator(
        options.junitout, options.junittestsuite))
  if options.json_test_results:
    progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results, arch, MODES[mode]["execution_mode"],
        ctx.random_seed))

  run_networked = not options.no_network
  if not run_networked:
    if verbose_output:
      print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, BASE_DIR)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)

  if num_tests == 0:
    print("Warning: no tests were run!")

  if exit_code == 1 and options.json_test_results:
    print("Force exit code 0 after failures. Json test results file generated "
          "with failure information.")
    exit_code = 0

  if options.sancov_dir:
    # If tests ran with sanitizer coverage, merge coverage files in the end.
    try:
      print "Merging sancov files."
      subprocess.check_call([
        sys.executable,
        join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
        "--coverage-dir=%s" % options.sancov_dir])
    except:
      print >> sys.stderr, "Error: Merging sancov files failed."
      exit_code = 1

  return exit_code


if __name__ == "__main__":
  sys.exit(Main())