#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run_perf.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "owners": [<list of email addresses of benchmark owners (required)>],
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be moved to android device>, ...]
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "process_size": <flag - collect maximum memory used by the process>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
      "process_size": <flag - collect maximum memory used by the process>,
    }, ...
  ]
}

The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.

A suite's results_regexp is expected to have one string placeholder
"%s" for the trace name. A trace's results_regexp overrides the suite
default.
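For example, a results_regexp of "^%s: (.+)$" combined with a trace named
"Richards" yields the effective regexp "^Richards: (.+)$".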

A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests (with a path relative to the
suite level's path). It is expected to read the measurement's output text
on stdin and print the processed output to stdout.

The results_regexp will be applied to the processed output.
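
A minimal results_processor (illustrative only; real processors are
suite-specific) could look like this:

  import sys
  # Forward the benchmark output unchanged; a real processor would rewrite it.
  sys.stdout.write(sys.stdin.read())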

A suite without "tests" is considered a performance test itself.

Full example (suite with one runner):
{
  "path": ["."],
  "owners": ["username@chromium.org"],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "owners": ["username@chromium.org", "otherowner@google.com"],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
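E.g. with "flags": ["--expose-gc"], "main": "run.js" and "test_flags": ["5"],
d8 is invoked as: d8 --expose-gc run.js -- 5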
"""

from collections import OrderedDict
import json
import logging
import math
import optparse
import os
import re
import subprocess
import sys

from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils

ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "x64",
                   "arm64"]

GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
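# Example lines matched by the generic result regexps above (illustrative):
#   RESULT Infra: Total= 123 ms
#   RESULT Infra: Total= {123,4.5} ms
#   RESULT Infra: Total= [1.0,2.0,3.0] ms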
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))


def GeometricMean(values):
  """Returns the geometric mean of a list of values.

  The mean is calculated using log to avoid overflow.
  """
  values = map(float, values)
  return str(math.exp(sum(map(math.log, values)) / len(values)))


class Results(object):
  """Place holder for result traces."""
  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    self.traces += other.traces
    self.errors += other.errors
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())


class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repeated runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    self.name = '/'.join(graphs)
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""
    self.process_size = False

  def ConsumeOutput(self, stdout):
    try:
      result = re.search(self.results_regexp, stdout, re.M).group(1)
      self.results.append(str(float(result)))
    except ValueError:
      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                         % (self.results_regexp, self.name))
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))

    try:
      if self.stddev_regexp and self.stddev:
        self.errors.append("Test %s should only run once since a stddev "
                           "is provided by the test." % self.name)
      if self.stddev_regexp:
        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.stddev_regexp, self.name))
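
  # Example of the single trace dict produced by GetResults() (illustrative):
  #   {"graphs": ["v8", "Octane", "Richards"], "units": "score",
  #    "results": ["1234.0", "1240.0"], "stddev": ""}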

  def GetResults(self):
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)


class NullMeasurement(object):
  """Null object to avoid having extra logic for configurations that don't
  require a secondary run, e.g. CI bots.
  """
  def ConsumeOutput(self, stdout):
    pass

  def GetResults(self):
    return Results()


def Unzip(iterable):
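  """Splits an iterable of pairs into two callables, each returning an
  iterator over one side of the pairs."""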
  left = []
  right = []
  for l, r in iterable:
    left.append(l)
    right.append(r)
  return lambda: iter(left), lambda: iter(right)


def RunResultsProcessor(results_processor, stdout, count):
  # Dummy pass through for null-runs.
  if stdout is None:
    return None

  # We assume the results processor is relative to the suite.
  assert os.path.exists(results_processor)
  p = subprocess.Popen(
      [sys.executable, results_processor],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
  )
  result, _ = p.communicate(input=stdout)
  logging.info(">>> Processed stdout (#%d):\n%s", count, result)
  return result


def AccumulateResults(
    graph_names, trace_configs, iter_output, perform_measurement, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    perform_measurement: Whether to actually run tests and perform measurements.
                         This is needed so that we reuse this script for both CI
                         and trybot, but want to ignore second run on CI without
                         having to spread this logic throughout the script.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
    trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())

  if not res.traces or not calc_total:
    return res

  # Assume all traces have the same structure.
  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(t["results"][i] for t in res.traces)
                   for i in range(0, n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res


def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if match:
        stddev = ""
        graph = match.group(1)
        trace = match.group(2)
        body = match.group(3)
        units = match.group(4)
        match_stddev = RESULT_STDDEV_RE.match(body)
        match_list = RESULT_LIST_RE.match(body)
        errors = []
        if match_stddev:
          result, stddev = map(str.strip, match_stddev.group(1).split(","))
          results = [result]
        elif match_list:
          results = map(str.strip, match_list.group(1).split(","))
        else:
          results = [body.strip()]

        try:
          results = map(lambda r: str(float(r)), results)
        except ValueError:
          results = []
          errors = ["Found non-numeric in %s" %
                    "/".join(graph_names + [graph, trace])]

        trace_result = traces.setdefault(trace, Results([{
          "graphs": graph_names + [graph, trace],
          "units": (units or suite_units).strip(),
          "results": [],
          "stddev": "",
        }], errors))
        trace_result.traces[0]["results"].extend(results)
        trace_result.traces[0]["stddev"] = stddev

  return reduce(lambda r, t: r + t, traces.itervalues(), Results())


class Node(object):
  """Represents a node in the suite tree structure."""
  def __init__(self, *args):
    self._children = []

  def AppendChild(self, child):
    self._children.append(child)


class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self, binary = "d8"):
    super(DefaultSentinel, self).__init__()
    self.binary = binary
    self.run_count = 10
    self.timeout = 60
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.process_size = False
    self.resources = []
    self.results_processor = None
    self.results_regexp = None
    self.stddev_regexp = None
    self.units = "score"
    self.total = False
    self.owners = []


class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite.get("owners", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])
    self.owners = parent.owners[:] + suite.get("owners", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults).
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)
    self.results_processor = suite.get(
        "results_processor", parent.results_processor)
    self.process_size = suite.get("process_size", parent.process_size)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string placeholder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)


class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    assert self.results_regexp
    assert self.owners

  def CreateMeasurement(self, perform_measurement):
    if not perform_measurement:
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )


class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    return self._suite.get("main", "")

  def PostProcess(self, stdouts_iter):
    if self.results_processor:
      def it():
        for i, stdout in enumerate(stdouts_iter()):
          yield RunResultsProcessor(self.results_processor, stdout, i + 1)
      return it
    else:
      return stdouts_iter

  def ChangeCWD(self, suite_path):
    """Changes the cwd to to path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    if self.binary != 'd8' and '--prof' in extra_flags:
      logging.info("Profiler supported only on a benchmark run with d8")

    if self.process_size:
      cmd_prefix = ["/usr/bin/time", "--format=MaxMemory: %MKB"] + cmd_prefix
    if self.binary.endswith('.py'):
      # Copy cmd_prefix instead of update (+=).
      cmd_prefix = cmd_prefix + [sys.executable]

    return command.Command(
        cmd_prefix=cmd_prefix,
        shell=os.path.join(shell_dir, self.binary),
        args=self.GetCommandFlags(extra_flags=extra_flags),
        timeout=self.timeout or 60)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    stdout, stdout_secondary = Unzip(runner())
    return (
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout),
            perform_measurement=True,
            calc_total=self.total,
        ),
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout_secondary),
            perform_measurement=trybot,  # only run second time on trybots
            calc_total=self.total,
        ),
    )


class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    measurement = self.CreateMeasurement(perform_measurement=True)
    measurement_secondary = self.CreateMeasurement(perform_measurement=trybot)
    for stdout, stdout_secondary in runner():
      measurement.ConsumeOutput(stdout)
      measurement_secondary.ConsumeOutput(stdout_secondary)
    return (
        measurement.GetResults(),
        measurement_secondary.GetResults(),
    )


class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    stdout, stdout_secondary = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, stdout),
        AccumulateGenericResults(self.graphs, self.units, stdout_secondary),
    )


def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects."""
  if isinstance(parent, RunnableConfig):
    # Below a runnable can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


class Platform(object):
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    self.shell_dir_secondary = options.shell_dir_secondary
    self.extra_flags = options.extra_flags.split()
    self.options = options

  @staticmethod
  def ReadBuildConfig(options):
    config_path = os.path.join(options.shell_dir, 'v8_build_config.json')
    if not os.path.isfile(config_path):
      return {}
    with open(config_path) as f:
      return json.load(f)

  @staticmethod
  def GetPlatform(options):
    if Platform.ReadBuildConfig(options).get('is_android', False):
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, secondary=False):
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_secondary is specified, the benchmark is run twice,
    e.g. with and without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the two benchmark outputs. The latter will be None if
             options.shell_dir_secondary was not specified.
    """
    stdout = self._Run(runnable, count, secondary=False)
    if self.shell_dir_secondary:
      return stdout, self._Run(runnable, count, secondary=True)
    else:
      return stdout, None


class DesktopPlatform(Platform):
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)
    self.command_prefix = []

    # Setup command class to OS specific version.
    command.setup(utils.GuessOS())

    if options.prioritize or options.affinitize != None:
      self.command_prefix = ["schedtool"]
      if options.prioritize:
        self.command_prefix += ["-n", "-20"]
      if options.affinitize != None:
        # schedtool expects a bit pattern when setting affinity, where each
        # bit set to '1' corresponds to a core on which the process may run.
        # The first bit corresponds to CPU 0. Since the 'affinitize' parameter
        # is a core number, we need to map it to said bit pattern.
        cpu = int(options.affinitize)
        core = 1 << cpu
        self.command_prefix += ["-a", ("0x%x" % core)]
      self.command_prefix += ["-e"]

  def PreExecution(self):
    pass

  def PostExecution(self):
    pass

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, secondary=False):
    suffix = ' - secondary' if secondary else ''
    shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
    try:
      output = cmd.execute()
    except OSError:  # pragma: no cover
      logging.exception(title % "OSError")
      return ""

    logging.info(title % "Stdout" + "\n%s", output.stdout)
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      logging.info(title % "Stderr" + "\n%s", output.stderr)
    if output.timed_out:
      logging.warning(">>> Test timed out after %ss.", runnable.timeout)
    if '--prof' in self.extra_flags:
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        logging.warning(
            "Profiler option currently supported on Linux and Mac OS.")

    # /usr/bin/time writes its output to stderr.
    if runnable.process_size:
      return output.stdout + output.stderr
    return output.stdout


class AndroidPlatform(Platform):  # pragma: no cover

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    self.driver = android.android_driver(options.device)

  def PreExecution(self):
    self.driver.set_high_perf_mode()

  def PostExecution(self):
    self.driver.set_default_perf_mode()
    self.driver.tear_down()

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self.driver.push_executable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_secondary:
      self.driver.push_executable(
          self.shell_dir_secondary, "bin_secondary", node.binary)

    if isinstance(node, RunnableConfig):
      self.driver.push_file(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self.driver.push_file(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, secondary=False):
    suffix = ' - secondary' if secondary else ''
    target_dir = "bin_secondary" if secondary else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    self.driver.drop_ram_caches()

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    logcat_file = None
    if self.options.dump_logcats_to:
      runnable_name = '-'.join(runnable.graphs)
      logcat_file = os.path.join(
          self.options.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
            runnable_name, count + 1, '-secondary' if secondary else ''))
      logging.debug('Dumping logcat into %s', logcat_file)

    try:
      stdout = self.driver.run(
          target_dir=target_dir,
          binary=runnable.binary,
          args=runnable.GetCommandFlags(self.extra_flags),
          rel_path=bench_rel,
          timeout=runnable.timeout,
          logcat_file=logcat_file,
      )
      logging.info(title % "Stdout" + "\n%s", stdout)
    except android.CommandFailedException as e:
      logging.info(title % "Stdout" + "\n%s", e.output)
      raise
    except android.TimeoutException:
      logging.warning(">>> Test timed out after %ss.", runnable.timeout)
      stdout = ""
    if runnable.process_size:
      return stdout + "MaxMemory: Unsupported"
    return stdout

class CustomMachineConfiguration:
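  """Context manager that optionally disables ASLR and pins the CPU governor
  for the duration of the benchmark runs, restoring the previous settings on
  exit."""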
  def __init__(self, disable_aslr = False, governor = None):
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor != None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, type, value, traceback):
    if self.aslr_backup != None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup != None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

  @staticmethod
  def GetASLR():
    try:
      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
        return int(f.readline().strip())
    except Exception:
      logging.exception("Failed to get current ASLR settings.")
      raise

  @staticmethod
  def SetASLR(value):
    try:
      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
        f.write(str(value))
    except Exception:
      logging.exception(
          "Failed to update ASLR to %s. Are we running under sudo?", value)
      raise

    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception("Present value is %s" % new_value)

  @staticmethod
  def GetCPUCoresRange():
    try:
      with open("/sys/devices/system/cpu/present", "r") as f:
        indexes = f.readline()
        r = map(int, indexes.split("-"))
        if len(r) == 1:
          return range(r[0], r[0] + 1)
        return range(r[0], r[1] + 1)
    except Exception:
      logging.exception("Failed to retrieve number of CPUs.")
      raise

  @staticmethod
  def GetCPUPathForId(cpu_index):
    ret = "/sys/devices/system/cpu/cpu"
    ret += str(cpu_index)
    ret += "/cpufreq/scaling_governor"
    return ret

  @staticmethod
  def GetCPUGovernor():
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "r") as f:
          # We assume the governors of all CPUs are set to the same value
          val = f.readline().strip()
          if ret == None:
            ret = val
          elif ret != val:
            raise Exception("CPU cores have differing governor settings")
      return ret
    except Exception:
      logging.exception("Failed to get the current CPU governor. Is the CPU "
                        "governor disabled? Check BIOS.")
      raise

  @staticmethod
  def SetCPUGovernor(value):
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "w") as f:
          f.write(value)

    except Exception:
      logging.exception("Failed to change CPU governor to %s. Are we "
                        "running under sudo?", value)
      raise

    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception("Could not set CPU governor. Present value is %s"
                      % cur_value )

def Main(args):
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools", help="Deprecated.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots and adds "
                         "timestamps/level to all logged status messages",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-secondary",
                    "--json-test-results-no-patch",  # TODO(sergiyb): Deprecate.
                    help="Path to a file for storing json results from run "
                         "without patch or for reference build run.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-secondary",
                    "--outdir-no-patch",  # TODO(sergiyb): Deprecate.
                    help="Base directory with compile output without patch or "
                         "for reference build")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                    "architecture-specific build dir. "
                    "Not supported in conjunction with outdir-secondary.")
  parser.add_option("--prioritize",
                    help="Raise the priority to nice -20 for the benchmarking "
                    "process.Requires Linux, schedtool, and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--affinitize",
                    help="Run benchmarking process on the specified core. "
                    "For example: "
                    "--affinitize=0 will run the benchmark process on core 0. "
                    "--affinitize=3 will run the benchmark process on core 3. "
                    "Requires Linux, schedtool, and sudo privileges.",
                    default=None)
  parser.add_option("--noaslr",
                    help="Disable ASLR for the duration of the benchmarked "
                    "process. Requires Linux and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--cpu-governor",
                    help="Set cpu governor to specified policy for the "
                    "duration of the benchmarked process. Typical options: "
                    "'powersave' for more stable results, or 'performance' "
                    "for shorter completion time of suite, with potentially "
                    "more noise in results.")
  parser.add_option("--filter",
                    help="Only run the benchmarks beginning with this string. "
                    "For example: "
                    "--filter=JSTests/TypedArrays/ will run only TypedArray "
                    "benchmarks from the JSTests suite.",
                    default="")
  parser.add_option("--run-count-multiplier", default=1, type="int",
                    help="Multipled used to increase number of times each test "
                    "is retried.")
  parser.add_option("--dump-logcats-to",
                    help="Writes logcat output from each test into specified "
                    "directory. Only supported for android targets.")

  (options, args) = parser.parse_args(args)

  if options.buildbot:
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
  else:
    logging.basicConfig(level=logging.INFO, format="%(message)s")

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if not options.arch in SUPPORTED_ARCHS:  # pragma: no cover
    logging.error("Unknown architecture %s", options.arch)
    return 1

  if (options.json_test_results_secondary and
      not options.outdir_secondary):  # pragma: no cover
    logging.error("For writing secondary json test results, a secondary outdir "
                  "patch must be specified.")
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  if options.binary_override_path == None:
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      logging.error("binary-override-path must be a file name")
      return 1
    if options.outdir_secondary:
      logging.error("specify either binary-override-path or outdir-secondary")
      return 1
    options.shell_dir = os.path.abspath(
        os.path.dirname(options.binary_override_path))
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_secondary:
    options.shell_dir_secondary = os.path.join(
        workspace, options.outdir_secondary, build_config)
  else:
    options.shell_dir_secondary = None

  if options.json_test_results:
    options.json_test_results = os.path.abspath(options.json_test_results)

  if options.json_test_results_secondary:
    options.json_test_results_secondary = os.path.abspath(
        options.json_test_results_secondary)

  # Ensure all arguments have absolute path before we start changing current
  # directory.
  args = map(os.path.abspath, args)

  prev_aslr = None
  prev_cpu_gov = None
  platform = Platform.GetPlatform(options)

  results = Results()
  results_secondary = Results()
  with CustomMachineConfiguration(governor = options.cpu_governor,
                                  disable_aslr = options.noaslr) as conf:
    for path in args:
      if not os.path.exists(path):  # pragma: no cover
        results.errors.append("Configuration file %s does not exist." % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

      # Setup things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, options.arch, default_parent)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and iterate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        runnable_name = "/".join(runnable.graphs)
        if (not runnable_name.startswith(options.filter) and
            runnable_name + "/" != options.filter):
          continue
        logging.info(">>> Running suite: %s", runnable_name)

        def Runner():
          """Output generator that reruns several times."""
          total_runs = runnable.run_count * options.run_count_multiplier
          for i in xrange(0, max(1, total_runs)):
            # TODO(machenbach): Allow timeout per arch like with run_count per
            # arch.
            yield platform.Run(runnable, i)

        # Let runnable iterate over all runs and handle output.
        result, result_secondary = runnable.Run(
          Runner, trybot=options.shell_dir_secondary)
        results += result
        results_secondary += result_secondary
      platform.PostExecution()

    if options.json_test_results:
      results.WriteToFile(options.json_test_results)
    else:  # pragma: no cover
      print results

  if options.json_test_results_secondary:
    results_secondary.WriteToFile(options.json_test_results_secondary)
  else:  # pragma: no cover
    print results_secondary

  return min(1, len(results.errors))

if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))