# base_runner.py
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# for py2/py3 compatibility
from __future__ import print_function
from functools import reduce

from collections import OrderedDict
import json
import multiprocessing
import optparse
import os
import shlex
import sys
import traceback


# Add testrunner to the path.
sys.path.insert(
  0,
  os.path.dirname(
    os.path.dirname(os.path.abspath(__file__))))


from testrunner.local import command
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
from testrunner.testproc import progress
from testrunner.testproc import util
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.sigproc import SignalProc
from testrunner.testproc.timeout import TimeoutProc

38 39 40 41 42 43 44 45 46 47 48 49

BASE_DIR = (
    os.path.dirname(
      os.path.dirname(
        os.path.dirname(
          os.path.abspath(__file__)))))

DEFAULT_OUT_GN = 'out.gn'

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
50 51
# The mapping from names used here to GN targets (which must stay in sync)
# is defined in infra/mb/gn_isolate_map.pyl.
52
TEST_MAP = {
53
  # This needs to stay in sync with group("v8_bot_default") in test/BUILD.gn.
54 55 56 57 58 59 60 61
  "bot_default": [
    "debugger",
    "mjsunit",
    "cctest",
    "wasm-spec-tests",
    "inspector",
    "webkit",
    "mkgrokdump",
62
    "wasm-js",
63 64 65 66
    "fuzzer",
    "message",
    "intl",
    "unittests",
67
    "wasm-api-tests",
68
  ],
69
  # This needs to stay in sync with group("v8_default") in test/BUILD.gn.
70 71 72 73 74 75 76
  "default": [
    "debugger",
    "mjsunit",
    "cctest",
    "wasm-spec-tests",
    "inspector",
    "mkgrokdump",
77
    "wasm-js",
78 79 80 81
    "fuzzer",
    "message",
    "intl",
    "unittests",
82
    "wasm-api-tests",
83
  ],
84
  # This needs to stay in sync with group("v8_d8_default") in test/BUILD.gn.
85
  "d8_default": [
86
    "debugger",
87 88
    "mjsunit",
    "webkit",
89 90
    "message",
    "intl",
91
  ],
92
  # This needs to stay in sync with "v8_optimize_for_size" in test/BUILD.gn.
93 94 95 96 97 98 99 100 101 102 103 104 105
  "optimize_for_size": [
    "debugger",
    "mjsunit",
    "cctest",
    "inspector",
    "webkit",
    "intl",
  ],
  "unittests": [
    "unittests",
  ],
}

106 107 108 109 110 111 112 113 114 115 116
# Increase the timeout for these:
SLOW_ARCHS = [
  "arm",
  "arm64",
  "mips",
  "mipsel",
  "mips64",
  "mips64el",
  "s390",
  "s390x",
]
117

118

119 120 121 122 123 124 125 126
class ModeConfig(object):
  def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
    self.flags = flags
    self.timeout_scalefactor = timeout_scalefactor
    self.status_mode = status_mode
    self.execution_mode = execution_mode


127 128
DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
RELEASE_FLAGS = ["--nohard-abort"]
129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165
MODES = {
  "debug": ModeConfig(
    flags=DEBUG_FLAGS,
    timeout_scalefactor=4,
    status_mode="debug",
    execution_mode="debug",
  ),
  "optdebug": ModeConfig(
    flags=DEBUG_FLAGS,
    timeout_scalefactor=4,
    status_mode="debug",
    execution_mode="debug",
  ),
  "release": ModeConfig(
    flags=RELEASE_FLAGS,
    timeout_scalefactor=1,
    status_mode="release",
    execution_mode="release",
  ),
  # Normal trybot release configuration. There, dchecks are always on which
  # implies debug is set. Hence, the status file needs to assume debug-like
  # behavior/timeouts.
  "tryrelease": ModeConfig(
    flags=RELEASE_FLAGS,
    timeout_scalefactor=1,
    status_mode="debug",
    execution_mode="release",
  ),
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "slowrelease": ModeConfig(
    flags=RELEASE_FLAGS + ["--enable-slow-asserts"],
    timeout_scalefactor=2,
    status_mode="debug",
    execution_mode="release",
  ),
}

166 167
PROGRESS_INDICATORS = {
  'verbose': progress.VerboseProgressIndicator,
168
  'ci': progress.CIProgressIndicator,
169 170 171
  'dots': progress.DotsProgressIndicator,
  'color': progress.ColorProgressIndicator,
  'mono': progress.MonochromeProgressIndicator,
172
  'stream': progress.StreamProgressIndicator,
173
}
174

175 176 177 178
class TestRunnerError(Exception):
  pass


179 180 181 182 183 184 185 186 187 188 189 190
class BuildConfig(object):
  def __init__(self, build_config):
    # In V8 land, GN's x86 is called ia32.
    if build_config['v8_target_cpu'] == 'x86':
      self.arch = 'ia32'
    else:
      self.arch = build_config['v8_target_cpu']

    self.asan = build_config['is_asan']
    self.cfi_vptr = build_config['is_cfi']
    self.dcheck_always_on = build_config['dcheck_always_on']
    self.gcov_coverage = build_config['is_gcov_coverage']
191
    self.is_android = build_config['is_android']
192
    self.is_clang = build_config['is_clang']
193
    self.is_debug = build_config['is_debug']
194
    self.is_full_debug = build_config['is_full_debug']
195 196 197
    self.msan = build_config['is_msan']
    self.no_i18n = not build_config['v8_enable_i18n_support']
    self.predictable = build_config['v8_enable_verify_predictable']
198 199
    self.simulator_run = (build_config['target_cpu'] !=
                          build_config['v8_target_cpu'])
200
    self.tsan = build_config['is_tsan']
201
    # TODO(machenbach): We only have ubsan not ubsan_vptr.
202
    self.ubsan_vptr = build_config['is_ubsan_vptr']
203
    self.verify_csa = build_config['v8_enable_verify_csa']
204
    self.lite_mode = build_config['v8_enable_lite_mode']
205
    self.pointer_compression = build_config['v8_enable_pointer_compression']
206 207 208 209
    # Export only for MIPS target
    if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
      self.mips_arch_variant = build_config['mips_arch_variant']
      self.mips_use_msa = build_config['mips_use_msa']
210

211 212 213 214 215
  @property
  def use_sanitizer(self):
    return (self.asan or self.cfi_vptr or self.msan or self.tsan or
            self.ubsan_vptr)

216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236
  def __str__(self):
    detected_options = []

    if self.asan:
      detected_options.append('asan')
    if self.cfi_vptr:
      detected_options.append('cfi_vptr')
    if self.dcheck_always_on:
      detected_options.append('dcheck_always_on')
    if self.gcov_coverage:
      detected_options.append('gcov_coverage')
    if self.msan:
      detected_options.append('msan')
    if self.no_i18n:
      detected_options.append('no_i18n')
    if self.predictable:
      detected_options.append('predictable')
    if self.tsan:
      detected_options.append('tsan')
    if self.ubsan_vptr:
      detected_options.append('ubsan_vptr')
237 238
    if self.verify_csa:
      detected_options.append('verify_csa')
239 240
    if self.lite_mode:
      detected_options.append('lite_mode')
241 242
    if self.pointer_compression:
      detected_options.append('pointer_compression')
243 244 245

    return '\n'.join(detected_options)

246

247
class BaseTestRunner(object):
248 249
  def __init__(self, basedir=None):
    self.basedir = basedir or BASE_DIR
250
    self.outdir = None
251 252 253
    self.build_config = None
    self.mode_name = None
    self.mode_options = None
254
    self.target_os = None
255

256 257 258 259 260
  @property
  def framework_name(self):
    """String name of the base-runner subclass, used in test results."""
    raise NotImplementedError()

261 262 263
  def execute(self, sys_args=None):
    if sys_args is None:  # pragma: no cover
      sys_args = sys.argv[1:]
264
    try:
265
      parser = self._create_parser()
266
      options, args = self._parse_args(parser, sys_args)
267 268 269
      if options.swarming:
        # Swarming doesn't print how isolated commands are called. Lets make
        # this less cryptic by printing it ourselves.
270
        print(' '.join(sys.argv))
271

272 273 274
        # Kill stray processes from previous tasks on swarming.
        util.kill_processes_linux()

275
      self._load_build_config(options)
276
      command.setup(self.target_os, options.device)
277 278 279 280 281 282 283 284

      try:
        self._process_default_options(options)
        self._process_options(options)
      except TestRunnerError:
        parser.print_help()
        raise

285
      args = self._parse_test_args(args)
286
      tests = self._load_testsuite_generators(args, options)
287
      self._setup_env()
288 289
      print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                            self.mode_name))
290 291 292 293 294 295
      exit_code = self._do_execute(tests, args, options)
      if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
        print("Force exit code 0 after failures. Json test results file "
              "generated with failure information.")
        exit_code = utils.EXIT_CODE_PASS
      return exit_code
296
    except TestRunnerError:
297
      traceback.print_exc()
298
      return utils.EXIT_CODE_INTERNAL_ERROR
299
    except KeyboardInterrupt:
300
      return utils.EXIT_CODE_INTERRUPTED
301 302 303
    except Exception:
      traceback.print_exc()
      return utils.EXIT_CODE_INTERNAL_ERROR
304 305
    finally:
      command.tear_down()
306

307
  def _create_parser(self):
308 309 310 311 312
    parser = optparse.OptionParser()
    parser.usage = '%prog [options] [tests]'
    parser.description = """TESTS: %s""" % (TEST_MAP["default"])
    self._add_parser_default_options(parser)
    self._add_parser_options(parser)
313
    return parser
314 315 316 317 318 319 320 321

  def _add_parser_default_options(self, parser):
    parser.add_option("--gn", help="Scan out.gn for the last built"
                      " configuration",
                      default=False, action="store_true")
    parser.add_option("--outdir", help="Base directory with compile output",
                      default="out")
    parser.add_option("--arch",
322
                      help="The architecture to run tests for")
323
    parser.add_option("-m", "--mode",
324 325
                      help="The test mode in which to run (uppercase for builds"
                      " in CI): %s" % MODES.keys())
326 327
    parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
                      "directory will be used")
328 329
    parser.add_option("--test-root", help="Root directory of the test suites",
                      default=os.path.join(self.basedir, 'test'))
330 331
    parser.add_option("--total-timeout-sec", default=0, type="int",
                      help="How long should fuzzer run")
332 333 334 335 336
    parser.add_option("--swarming", default=False, action="store_true",
                      help="Indicates running test driver on swarming.")

    parser.add_option("-j", help="The number of parallel tasks to run",
                      default=0, type=int)
337 338 339
    parser.add_option("-d", "--device",
                      help="The device ID to run Android tests on. If not "
                           "given it will be autodetected.")
340 341 342 343 344 345

    # Shard
    parser.add_option("--shard-count", default=1, type=int,
                      help="Split tests into this number of shards")
    parser.add_option("--shard-run", default=1, type=int,
                      help="Run this shard from the split up tests.")
346

347 348 349 350 351 352 353
    # Progress
    parser.add_option("-p", "--progress",
                      choices=PROGRESS_INDICATORS.keys(), default="mono",
                      help="The style of progress indicator (verbose, dots, "
                           "color, mono)")
    parser.add_option("--json-test-results",
                      help="Path to a file for storing json results.")
354 355
    parser.add_option('--slow-tests-cutoff', type="int", default=100,
                      help='Collect N slowest tests')
356 357 358
    parser.add_option("--exit-after-n-failures", type="int", default=100,
                      help="Exit after the first N failures instead of "
                           "running all tests. Pass 0 to disable this feature.")
359 360 361 362
    parser.add_option("--ci-test-completion",
                      help="Path to a file for logging test completion in the "
                           "context of CI progress indicator. Ignored if "
                           "progress indicator is other than 'ci'.")
363 364

    # Rerun
365 366 367 368 369
    parser.add_option("--rerun-failures-count", default=0, type=int,
                      help="Number of times to rerun each failing test case. "
                           "Very slow tests will be rerun only once.")
    parser.add_option("--rerun-failures-max", default=100, type=int,
                      help="Maximum number of failing test cases to rerun")
370

371
    # Test config
372 373
    parser.add_option("--command-prefix", default="",
                      help="Prepended to each shell command used to run a test")
374 375 376 377
    parser.add_option('--dont-skip-slow-simulator-tests',
                      help='Don\'t skip more slow tests when using a'
                      ' simulator.', default=False, action='store_true',
                      dest='dont_skip_simulator_slow_tests')
378 379 380 381 382 383 384
    parser.add_option("--extra-flags", action="append", default=[],
                      help="Additional flags to pass to each test command")
    parser.add_option("--isolates", action="store_true", default=False,
                      help="Whether to test isolates")
    parser.add_option("--no-harness", "--noharness",
                      default=False, action="store_true",
                      help="Run without test harness of a given suite")
385 386
    parser.add_option("--random-seed", default=0, type=int,
                      help="Default seed for initializing random generator")
387 388
    parser.add_option("--run-skipped", help="Also run skipped tests.",
                      default=False, action="store_true")
389 390 391 392
    parser.add_option("-t", "--timeout", default=60, type=int,
                      help="Timeout for single test in seconds")
    parser.add_option("-v", "--verbose", default=False, action="store_true",
                      help="Verbose output")
393 394
    parser.add_option('--regenerate-expected-files', default=False, action='store_true',
                      help='Regenerate expected files')
395

396 397
    # TODO(machenbach): Temporary options for rolling out new test runner
    # features.
398 399 400 401 402 403
    parser.add_option("--mastername", default='',
                      help="Mastername property from infrastructure. Not "
                           "setting this option indicates manual usage.")
    parser.add_option("--buildername", default='',
                      help="Buildername property from infrastructure. Not "
                           "setting this option indicates manual usage.")
404

405 406 407
  def _add_parser_options(self, parser):
    pass

408 409
  def _parse_args(self, parser, sys_args):
    options, args = parser.parse_args(sys_args)
410

411
    if any(map(lambda v: v and ',' in v,
412
                [options.arch, options.mode])):  # pragma: no cover
413
      print('Multiple arch/mode are deprecated')
414
      raise TestRunnerError()
415

416
    return options, args
417

418 419 420
  def _load_build_config(self, options):
    for outdir in self._possible_outdirs(options):
      try:
421
        self.build_config = self._do_load_build_config(outdir, options.verbose)
422 423
      except TestRunnerError:
        pass
424

425
    if not self.build_config:  # pragma: no cover
426
      print('Failed to load build config')
427 428
      raise TestRunnerError

429
    print('Build found: %s' % self.outdir)
430
    if str(self.build_config):
431 432
      print('>>> Autodetected:')
      print(self.build_config)
433

434 435 436 437 438 439 440
    # Represents the OS where tests are run on. Same as host OS except for
    # Android, which is determined by build output.
    if self.build_config.is_android:
      self.target_os = 'android'
    else:
      self.target_os = utils.GuessOS()

441 442 443 444
  # Returns possible build paths in order:
  # gn
  # outdir
  # outdir/arch.mode
445
  # Each path is provided in two versions: <path> and <path>/mode for bots.
446
  def _possible_outdirs(self, options):
447 448 449 450 451 452 453 454 455
    def outdirs():
      if options.gn:
        yield self._get_gn_outdir()
        return

      yield options.outdir
      if options.arch and options.mode:
        yield os.path.join(options.outdir,
                          '%s.%s' % (options.arch, options.mode))
456

457
    for outdir in outdirs():
458
      yield os.path.join(self.basedir, outdir)
459

460
      # bot option
461
      if options.mode:
462
        yield os.path.join(self.basedir, outdir, options.mode)
463

464
  def _get_gn_outdir(self):
465
    gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
466 467 468 469 470 471 472 473 474 475 476 477 478
    latest_timestamp = -1
    latest_config = None
    for gn_config in os.listdir(gn_out_dir):
      gn_config_dir = os.path.join(gn_out_dir, gn_config)
      if not os.path.isdir(gn_config_dir):
        continue
      if os.path.getmtime(gn_config_dir) > latest_timestamp:
        latest_timestamp = os.path.getmtime(gn_config_dir)
        latest_config = gn_config
    if latest_config:
      print(">>> Latest GN build found: %s" % latest_config)
      return os.path.join(DEFAULT_OUT_GN, latest_config)

479 480
  def _do_load_build_config(self, outdir, verbose=False):
    build_config_path = os.path.join(outdir, "v8_build_config.json")
481
    if not os.path.exists(build_config_path):
482 483
      if verbose:
        print("Didn't find build config: %s" % build_config_path)
484
      raise TestRunnerError()
485 486 487

    with open(build_config_path) as f:
      try:
488
        build_config_json = json.load(f)
489
      except Exception:  # pragma: no cover
490 491 492 493
        print("%s exists but contains invalid json. Is your build up-to-date?"
              % build_config_path)
        raise TestRunnerError()

494
    # In auto-detect mode the outdir is always where we found the build config.
495 496
    # This ensures that we'll also take the build products from there.
    self.outdir = os.path.dirname(build_config_path)
497

498
    return BuildConfig(build_config_json)
499

500 501
  def _process_default_options(self, options):
    # We don't use the mode for more path-magic.
502
    # Therefore transform the bot mode here to fix build_config value.
503
    if options.mode:
504
      options.mode = self._bot_to_v8_mode(options.mode)
505

506 507
    build_config_mode = 'debug' if self.build_config.is_debug else 'release'
    if options.mode:
508
      if options.mode not in MODES:  # pragma: no cover
509
        print('%s mode is invalid' % options.mode)
510
        raise TestRunnerError()
511 512 513 514 515 516
      if MODES[options.mode].execution_mode != build_config_mode:
        print ('execution mode (%s) for %s is inconsistent with build config '
               '(%s)' % (
            MODES[options.mode].execution_mode,
            options.mode,
            build_config_mode))
517
        raise TestRunnerError()
518

519 520 521 522 523 524 525 526 527 528 529
      self.mode_name = options.mode
    else:
      self.mode_name = build_config_mode

    self.mode_options = MODES[self.mode_name]

    if options.arch and options.arch != self.build_config.arch:
      print('--arch value (%s) inconsistent with build config (%s).' % (
        options.arch, self.build_config.arch))
      raise TestRunnerError()

530
    if options.shell_dir:  # pragma: no cover
531 532
      print('Warning: --shell-dir is deprecated. Searching for executables in '
            'build directory (%s) instead.' % self.outdir)
533

534
    if options.j == 0:
535 536 537 538 539
      if self.build_config.is_android:
        # Adb isn't happy about multi-processed file pushing.
        options.j = 1
      else:
        options.j = multiprocessing.cpu_count()
540

541 542 543
    options.command_prefix = shlex.split(options.command_prefix)
    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])

544 545
  def _bot_to_v8_mode(self, config):
    """Convert build configs from bots to configs understood by the v8 runner.
546 547 548 549 550 551 552 553 554

    V8 configs are always lower case and without the additional _x64 suffix
    for 64 bit builds on windows with ninja.
    """
    mode = config[:-4] if config.endswith('_x64') else config
    return mode.lower()

  def _process_options(self, options):
    pass
555

556 557
  def _setup_env(self):
    # Use the v8 root as cwd as some test cases use "load" with relative paths.
558
    os.chdir(self.basedir)
559 560 561 562 563 564 565

    # Many tests assume an English interface.
    os.environ['LANG'] = 'en_US.UTF-8'

    symbolizer_option = self._get_external_symbolizer_option()

    if self.build_config.asan:
566 567 568 569 570
      asan_options = [
          symbolizer_option,
          'allow_user_segv_handler=1',
          'allocator_may_return_null=1',
      ]
571 572 573
      if not utils.GuessOS() in ['macos', 'windows']:
        # LSAN is not available on mac and windows.
        asan_options.append('detect_leaks=1')
574 575
      else:
        asan_options.append('detect_leaks=0')
576 577 578
      if utils.GuessOS() == 'windows':
        # https://crbug.com/967663
        asan_options.append('detect_stack_use_after_return=0')
579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599
      os.environ['ASAN_OPTIONS'] = ":".join(asan_options)

    if self.build_config.cfi_vptr:
      os.environ['UBSAN_OPTIONS'] = ":".join([
        'print_stacktrace=1',
        'print_summary=1',
        'symbolize=1',
        symbolizer_option,
      ])

    if self.build_config.ubsan_vptr:
      os.environ['UBSAN_OPTIONS'] = ":".join([
        'print_stacktrace=1',
        symbolizer_option,
      ])

    if self.build_config.msan:
      os.environ['MSAN_OPTIONS'] = symbolizer_option

    if self.build_config.tsan:
      suppressions_file = os.path.join(
600
          self.basedir,
601 602 603 604 605 606 607 608 609 610 611 612 613 614
          'tools',
          'sanitizers',
          'tsan_suppressions.txt')
      os.environ['TSAN_OPTIONS'] = " ".join([
        symbolizer_option,
        'suppressions=%s' % suppressions_file,
        'exit_code=0',
        'report_thread_leaks=0',
        'history_size=7',
        'report_destroy_locked=0',
      ])

  def _get_external_symbolizer_option(self):
    external_symbolizer_path = os.path.join(
615
        self.basedir,
616 617 618 619 620 621 622 623 624 625 626 627 628
        'third_party',
        'llvm-build',
        'Release+Asserts',
        'bin',
        'llvm-symbolizer',
    )

    if utils.IsWindows():
      # Quote, because sanitizers might confuse colon as option separator.
      external_symbolizer_path = '"%s.exe"' % external_symbolizer_path

    return 'external_symbolizer_path=%s' % external_symbolizer_path

629
  def _parse_test_args(self, args):
630 631 632 633 634 635 636 637
    if not args:
      args = self._get_default_suite_names()

    # Expand arguments with grouped tests. The args should reflect the list
    # of suites as otherwise filters would break.
    def expand_test_group(name):
      return TEST_MAP.get(name, [name])

638
    return reduce(list.__add__, map(expand_test_group, args), [])
639

640
  def _args_to_suite_names(self, args, test_root):
641
    # Use default tests if no test configuration was provided at the cmd line.
642
    all_names = set(utils.GetSuitePaths(test_root))
643 644 645 646 647 648
    args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # set
    return [name for name in args_names if name in all_names]

  def _get_default_suite_names(self):
    return []

649
  def _load_testsuite_generators(self, args, options):
650
    names = self._args_to_suite_names(args, options.test_root)
651
    test_config = self._create_test_config(options)
652
    variables = self._get_statusfile_variables(options)
653 654 655

    # Head generator with no elements
    test_chain = testsuite.TestGenerator(0, [], [])
656
    for name in names:
657
      if options.verbose:
658
        print('>>> Loading test suite: %s' % name)
659
      suite = testsuite.TestSuite.Load(
660 661
          os.path.join(options.test_root, name), test_config,
          self.framework_name)
662 663

      if self._is_testsuite_supported(suite, options):
664 665
        tests = suite.load_tests_from_disk(variables)
        test_chain.merge(tests)
666

667
    return test_chain
668 669 670 671 672

  def _is_testsuite_supported(self, suite, options):
    """A predicate that can be overridden to filter out unsupported TestSuite
    instances (see NumFuzzer for usage)."""
    return True
673 674 675 676 677 678 679

  def _get_statusfile_variables(self, options):
    simd_mips = (
      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
      self.build_config.mips_arch_variant == "r6" and
      self.build_config.mips_use_msa)

680 681 682 683
    mips_arch_variant = (
      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
      self.build_config.mips_arch_variant)

684 685 686 687
    return {
      "arch": self.build_config.arch,
      "asan": self.build_config.asan,
      "byteorder": sys.byteorder,
688
      "cfi_vptr": self.build_config.cfi_vptr,
689 690
      "dcheck_always_on": self.build_config.dcheck_always_on,
      "deopt_fuzzer": False,
691
      "endurance_fuzzer": False,
692 693 694 695
      "gc_fuzzer": False,
      "gc_stress": False,
      "gcov_coverage": self.build_config.gcov_coverage,
      "isolates": options.isolates,
696
      "is_clang": self.build_config.is_clang,
697
      "is_full_debug": self.build_config.is_full_debug,
698
      "mips_arch_variant": mips_arch_variant,
699 700 701
      "mode": self.mode_options.status_mode
              if not self.build_config.dcheck_always_on
              else "debug",
702 703 704 705
      "msan": self.build_config.msan,
      "no_harness": options.no_harness,
      "no_i18n": self.build_config.no_i18n,
      "novfp3": False,
706
      "optimize_for_size": "--optimize-for-size" in options.extra_flags,
707 708
      "predictable": self.build_config.predictable,
      "simd_mips": simd_mips,
709 710
      "simulator_run": self.build_config.simulator_run and
                       not options.dont_skip_simulator_slow_tests,
711
      "system": self.target_os,
712 713
      "tsan": self.build_config.tsan,
      "ubsan_vptr": self.build_config.ubsan_vptr,
714
      "verify_csa": self.build_config.verify_csa,
715
      "lite_mode": self.build_config.lite_mode,
716
      "pointer_compression": self.build_config.pointer_compression,
717 718
    }

719 720 721 722
  def _runner_flags(self):
    """Extra default flags specific to the test runner implementation."""
    return []

723
  def _create_test_config(self, options):
724 725 726 727 728
    timeout = options.timeout * self._timeout_scalefactor(options)
    return TestConfig(
        command_prefix=options.command_prefix,
        extra_flags=options.extra_flags,
        isolates=options.isolates,
729
        mode_flags=self.mode_options.flags + self._runner_flags(),
730 731 732
        no_harness=options.no_harness,
        noi18n=self.build_config.no_i18n,
        random_seed=options.random_seed,
733
        run_skipped=options.run_skipped,
734 735 736
        shell_dir=self.outdir,
        timeout=timeout,
        verbose=options.verbose,
737
        regenerate_expected_files=options.regenerate_expected_files,
738 739 740
    )

  def _timeout_scalefactor(self, options):
741
    """Increases timeout for slow build configurations."""
742 743
    factor = self.mode_options.timeout_scalefactor
    if self.build_config.arch in SLOW_ARCHS:
744
      factor *= 4.5
745
    if self.build_config.lite_mode:
746 747
      factor *= 2
    if self.build_config.predictable:
748
      factor *= 4
749 750
    if self.build_config.use_sanitizer:
      factor *= 1.5
751
    if self.build_config.is_full_debug:
752
      factor *= 4
753 754

    return factor
755

756
  # TODO(majeski): remove options & args parameters
757
  def _do_execute(self, suites, args, options):
758
    raise NotImplementedError()
759

760 761
  def _prepare_procs(self, procs):
    procs = filter(None, procs)
762
    for i in range(0, len(procs) - 1):
763 764 765
      procs[i].connect_to(procs[i + 1])
    procs[0].setup()

766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802
  def _create_shard_proc(self, options):
    myid, count = self._get_shard_info(options)
    if count == 1:
      return None
    return ShardProc(myid - 1, count)

  def _get_shard_info(self, options):
    """
    Returns pair:
      (id of the current shard [1; number of shards], number of shards)
    """
    # Read gtest shard configuration from environment (e.g. set by swarming).
    # If none is present, use values passed on the command line.
    shard_count = int(
      os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
    shard_run = os.environ.get('GTEST_SHARD_INDEX')
    if shard_run is not None:
      # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
      shard_run = int(shard_run) + 1
    else:
      shard_run = options.shard_run

    if options.shard_count > 1:
      # Log if a value was passed on the cmd line and it differs from the
      # environment variables.
      if options.shard_count != shard_count:  # pragma: no cover
        print("shard_count from cmd line differs from environment variable "
              "GTEST_TOTAL_SHARDS")
      if (options.shard_run > 1 and
          options.shard_run != shard_run):  # pragma: no cover
        print("shard_run from cmd line differs from environment variable "
              "GTEST_SHARD_INDEX")

    if shard_run < 1 or shard_run > shard_count:
      # TODO(machenbach): Turn this into an assert. If that's wrong on the
      # bots, printing will be quite useless. Or refactor this code to make
      # sure we get a return code != 0 after testing if we got here.
803 804
      print("shard-run not a valid number, should be in [1:shard-count]")
      print("defaulting back to running all tests")
805 806 807
      return 1, 1

    return shard_run, shard_count
808

809
  def _create_progress_indicators(self, test_count, options):
810 811 812
    procs = [PROGRESS_INDICATORS[options.progress]()]
    if options.json_test_results:
      procs.append(progress.JsonTestProgressIndicator(
813
        self.framework_name,
814 815
        self.build_config.arch,
        self.mode_options.execution_mode))
816

817 818 819
    for proc in procs:
      proc.configure(options)

820 821 822 823 824 825
    for proc in procs:
      try:
        proc.set_test_count(test_count)
      except AttributeError:
        pass

826 827
    return procs

828 829 830
  def _create_result_tracker(self, options):
    return progress.ResultsTracker(options.exit_after_n_failures)

831 832 833 834
  def _create_timeout_proc(self, options):
    if not options.total_timeout_sec:
      return None
    return TimeoutProc(options.total_timeout_sec)
835 836 837

  def _create_signal_proc(self):
    return SignalProc()
838 839 840 841 842 843

  def _create_rerun_proc(self, options):
    if not options.rerun_failures_count:
      return None
    return RerunProc(options.rerun_failures_count,
                     options.rerun_failures_max)