#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run_perf.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be moved to android device>, ...],
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "process_size": <flag - collect maximum memory used by the process>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
      "process_size": <flag - collect maximum memory used by the process>,
    }, ...
  ]
}

The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.

A suite's results_regexp is expected to have one string placeholder
"%s" for the trace name. A trace's results_regexp overrides the suite
default.
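
For example, with a suite results_regexp of "^%s: (.+)$" and a trace
named "Richards", output lines like "Richards: 123" are matched and
"123" is recorded as the result.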

A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests (with a path relative to
the suite level's path). It is expected to read the measurement's output
text on stdin and print the processed output to stdout.

The results_regexp will be applied to the processed output.
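
A minimal processor script (hypothetical sketch) could look like:

  import sys
  for line in sys.stdin:
    # Transform the raw benchmark output here.
    sys.stdout.write(line)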

A suite without "tests" is considered a performance test itself.

Full example (suite with one runner):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
"""

from collections import OrderedDict
import json
import logging
import math
import optparse
import os
import re
import subprocess
import sys

from testrunner.local import commands
from testrunner.local import utils

ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "x64",
                   "arm64"]

GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
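# Example output line matched by GENERIC_RESULTS_RE (hypothetical):
#   RESULT Octane: Richards= 123 score
# The value part may also be "{mean,stddev}" (RESULT_STDDEV_RE) or a
# list "[v1,v2,...]" (RESULT_LIST_RE).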
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))


def LoadAndroidBuildTools(path):  # pragma: no cover
  assert os.path.exists(path)
  sys.path.insert(0, path)

  import devil_chromium
  from devil.android import device_errors  # pylint: disable=import-error
  from devil.android import device_utils  # pylint: disable=import-error
  from devil.android.sdk import adb_wrapper  # pylint: disable=import-error
  from devil.android.perf import cache_control  # pylint: disable=import-error
  from devil.android.perf import perf_control  # pylint: disable=import-error

  global adb_wrapper
  global cache_control
  global device_errors
  global device_utils
  global perf_control

  devil_chromium.Initialize()


def GeometricMean(values):
  """Returns the geometric mean of a list of values.

  The mean is calculated using log to avoid overflow.
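
  E.g. GeometricMean([2, 8]) returns '4.0'.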
  """
  values = map(float, values)
  return str(math.exp(sum(map(math.log, values)) / len(values)))


class Results(object):
  """Place holder for result traces."""
  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    self.traces += other.traces
    self.errors += other.errors
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())


class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repetitive runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    self.name = '/'.join(graphs)
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""
    self.process_size = False

  def ConsumeOutput(self, stdout):
    try:
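      # re.search returns None if the regexp doesn't match; .group then
      # raises AttributeError, caught by the bare except below.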
      result = re.search(self.results_regexp, stdout, re.M).group(1)
      self.results.append(str(float(result)))
    except ValueError:
      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                         % (self.results_regexp, self.name))
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))

    try:
      if self.stddev_regexp and self.stddev:
        self.errors.append("Test %s should only run once since a stddev "
                           "is provided by the test." % self.name)
      if self.stddev_regexp:
        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.stddev_regexp, self.name))

  def GetResults(self):
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)


class NullMeasurement(object):
  """Null object to avoid having extra logic for configurations that didn't
  run like running without patch on trybots.
  """
  def ConsumeOutput(self, stdout):
    pass

  def GetResults(self):
    return Results()


def Unzip(iterable):
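  """Splits an iterable of pairs into two lazily re-iterable halves.

  E.g. Unzip([(1, 'a'), (2, 'b')]) returns two zero-argument callables
  yielding iterators over [1, 2] and ['a', 'b'] respectively.
  """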
  left = []
  right = []
  for l, r in iterable:
    left.append(l)
    right.append(r)
  return lambda: iter(left), lambda: iter(right)


def RunResultsProcessor(results_processor, stdout, count):
  # Dummy pass through for null-runs.
  if stdout is None:
    return None

  # We assume the results processor is relative to the suite.
  assert os.path.exists(results_processor)
  p = subprocess.Popen(
      [sys.executable, results_processor],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
  )
  result, _ = p.communicate(input=stdout)
  print ">>> Processed stdout (#%d):" % count
  print result
  return result


def AccumulateResults(
    graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    trybot: Indicates that this is run in trybot mode, i.e. run twice,
            once with and once without patch.
    no_patch: Indicates whether this is a trybot run without patch.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
    trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())

  if not res.traces or not calc_total:
    return res

  # Assume all traces have the same structure.
  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(t["results"][i] for t in res.traces)
                   for i in range(0, n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res


def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if match:
        stddev = ""
        graph = match.group(1)
        trace = match.group(2)
        body = match.group(3)
        units = match.group(4)
        match_stddev = RESULT_STDDEV_RE.match(body)
        match_list = RESULT_LIST_RE.match(body)
        errors = []
        if match_stddev:
          result, stddev = map(str.strip, match_stddev.group(1).split(","))
          results = [result]
        elif match_list:
          results = map(str.strip, match_list.group(1).split(","))
        else:
          results = [body.strip()]

        try:
          results = map(lambda r: str(float(r)), results)
        except ValueError:
          results = []
          errors = ["Found non-numeric in %s" %
                    "/".join(graph_names + [graph, trace])]

        trace_result = traces.setdefault(trace, Results([{
          "graphs": graph_names + [graph, trace],
          "units": (units or suite_units).strip(),
          "results": [],
          "stddev": "",
        }], errors))
        trace_result.traces[0]["results"].extend(results)
        trace_result.traces[0]["stddev"] = stddev

  return reduce(lambda r, t: r + t, traces.itervalues(), Results())


class Node(object):
  """Represents a node in the suite tree structure."""
  def __init__(self, *args):
    self._children = []

  def AppendChild(self, child):
    self._children.append(child)


class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self, binary = "d8"):
    super(DefaultSentinel, self).__init__()
    self.binary = binary
    self.run_count = 10
    self.timeout = 60
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.process_size = False
    self.resources = []
    self.results_processor = None
    self.results_regexp = None
    self.stddev_regexp = None
    self.units = "score"
    self.total = False


class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults).
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)
    self.results_processor = suite.get(
        "results_processor", parent.results_processor)
    self.process_size = suite.get("process_size", parent.process_size)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string placeholder for the
    # suite name is expected.
    # TODO(machenbach): Currently this only makes sense for the leaf level.
    # Multiple placeholders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)


class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    assert self.results_regexp

  def CreateMeasurement(self, trybot, no_patch):
    if not trybot and no_patch:
      # Use null object for no-patch logic if this is not a trybot run.
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )


class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    return self._suite.get("main", "")

  def PostProcess(self, stdouts_iter):
    if self.results_processor:
      def it():
        for i, stdout in enumerate(stdouts_iter()):
          yield RunResultsProcessor(self.results_processor, stdout, i + 1)
      return it
    else:
      return stdouts_iter

  def ChangeCWD(self, suite_path):
    """Changes the cwd to to path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
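    """Builds the argument list passed to the executable.

    E.g. flags ["--expose-gc"], main "run.js" and test_flags ["5"]
    yield ["--expose-gc", "run.js", "--", "5"].
    """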
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, shell_dir, extra_flags=None):
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    cmd = [os.path.join(shell_dir, self.binary)]
    if self.binary.endswith(".py"):
      cmd = [sys.executable] + cmd
    if self.binary != 'd8' and '--prof' in extra_flags:
      print "Profiler supported only on a benchmark run with d8"
    return cmd + self.GetCommandFlags(extra_flags=extra_flags)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout_with_patch),
            trybot=trybot,
            no_patch=False,
            calc_total=self.total,
        ),
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout_no_patch),
            trybot=trybot,
            no_patch=True,
            calc_total=self.total,
        ),
    )


class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    measurement_with_patch = self.CreateMeasurement(trybot, False)
    measurement_no_patch = self.CreateMeasurement(trybot, True)
    for stdout_with_patch, stdout_no_patch in runner():
      measurement_with_patch.ConsumeOutput(stdout_with_patch)
      measurement_no_patch.ConsumeOutput(stdout_no_patch)
    return (
        measurement_with_patch.GetResults(),
        measurement_no_patch.GetResults(),
    )


class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
        AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
    )


def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects."""
  if isinstance(parent, RunnableConfig):
    # Below a runnable can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


class Platform(object):
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    self.shell_dir_no_patch = options.shell_dir_no_patch
    self.extra_flags = options.extra_flags.split()

  @staticmethod
  def GetPlatform(options):
    if options.android_build_tools:
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, no_patch=False):
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_no_patch is specified, the benchmark is run once with
    and once without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the benchmark outputs with and without patch. The
             latter will be None if options.shell_dir_no_patch was not
             specified.
    """
    stdout = self._Run(runnable, count, no_patch=False)
    if self.shell_dir_no_patch:
      return stdout, self._Run(runnable, count, no_patch=True)
    else:
      return stdout, None


class DesktopPlatform(Platform):
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)
    self.command_prefix = []

    if options.prioritize or options.affinitize != None:
      self.command_prefix = ["schedtool"]
      if options.prioritize:
        self.command_prefix += ["-n", "-20"]
      if options.affinitize != None:
        # schedtool expects a bit pattern when setting affinity, where each
        # bit set to '1' corresponds to a core the process may run on. The
        # first bit corresponds to CPU 0. Since the 'affinitize' parameter
        # is a core number, we map it to said bit pattern (e.g.
        # --affinitize=3 yields the mask 0x8).
        cpu = int(options.affinitize)
        core = 1 << cpu
        self.command_prefix += ["-a", ("0x%x" % core)]
      self.command_prefix += ["-e"]

  def PreExecution(self):
    pass

  def PostExecution(self):
    pass

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, no_patch=False):
    suffix = ' - without patch' if no_patch else ''
    shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    if runnable.process_size:
      command = ["/usr/bin/time", "--format=MaxMemory: %MKB"]
    else:
      command = []

    command += self.command_prefix + runnable.GetCommand(
        shell_dir, self.extra_flags)
    try:
      output = commands.Execute(
        command,
        timeout=runnable.timeout,
      )
    except OSError as e:  # pragma: no cover
      print title % "OSError"
      print e
      return ""

    print title % "Stdout"
    print output.stdout
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      print title % "Stderr"
      print output.stderr
    if output.timed_out:
      print ">>> Test timed out after %ss." % runnable.timeout
    if '--prof' in self.extra_flags:
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        print "Profiler option currently supported on Linux and Mac OS."

    # /usr/bin/time writes its output to stderr.
    if runnable.process_size:
      return output.stdout + output.stderr
    return output.stdout


class AndroidPlatform(Platform):  # pragma: no cover
  DEVICE_DIR = "/data/local/tmp/v8/"

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    LoadAndroidBuildTools(options.android_build_tools)

    if not options.device:
      # Detect attached device if not specified.
      devices = adb_wrapper.AdbWrapper.Devices()
      assert devices and len(devices) == 1, (
          "None or multiple devices detected. Please specify the device on "
          "the command-line with --device")
      options.device = str(devices[0])
    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
    self.device = device_utils.DeviceUtils(self.adb_wrapper)

  def PreExecution(self):
    perf = perf_control.PerfControl(self.device)
    perf.SetHighPerfMode()

    # Remember what we have already pushed to the device.
    self.pushed = set()

  def PostExecution(self):
    perf = perf_control.PerfControl(self.device)
    perf.SetDefaultPerfMode()
    self.device.RemovePath(
        AndroidPlatform.DEVICE_DIR, force=True, recursive=True)

  def _PushFile(self, host_dir, file_name, target_rel=".",
                skip_if_missing=False):
    file_on_host = os.path.join(host_dir, file_name)
    file_on_device_tmp = os.path.join(
        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
    file_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
    folder_on_device = os.path.dirname(file_on_device)

    # Only attempt to push files that exist.
    if not os.path.exists(file_on_host):
      if not skip_if_missing:
        logging.critical('Missing file on host: %s' % file_on_host)
      return

    # Only push files not yet pushed in one execution.
    if file_on_host in self.pushed:
      return
    else:
      self.pushed.add(file_on_host)

    # Work-around for "text file busy" errors. Push the files to a temporary
    # location and then copy them with a shell command.
    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
    # Errors look like this: "failed to copy  ... ".
    if output and not re.search('^[0-9]', output.splitlines()[-1]):
      logging.critical('PUSH FAILED: ' + output)
    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))

  def _PushExecutable(self, shell_dir, target_dir, binary):
    self._PushFile(shell_dir, binary, target_dir)

    # Push external startup data. Backwards compatible for revisions where
    # these files didn't exist.
    self._PushFile(
        shell_dir,
        "natives_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "icudtl.dat",
        target_dir,
        skip_if_missing=True,
    )

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self._PushExecutable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_no_patch:
      self._PushExecutable(
          self.shell_dir_no_patch, "bin_no_patch", node.binary)

    if isinstance(node, RunnableConfig):
      self._PushFile(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self._PushFile(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, no_patch=False):
    suffix = ' - without patch' if no_patch else ''
    target_dir = "bin_no_patch" if no_patch else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    cache = cache_control.CacheControl(self.device)
    cache.DropRamCaches()
    binary_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    try:
      output = self.device.RunShellCommand(
          cmd,
          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
          check_return=True,
          timeout=runnable.timeout,
          retries=0,
      )
      stdout = "\n".join(output)
      print title % "Stdout"
      print stdout
    except device_errors.CommandTimeoutError:
      print ">>> Test timed out after %ss." % runnable.timeout
      stdout = ""
    if runnable.process_size:
      return stdout + "MaxMemory: Unsupported"
    return stdout

class CustomMachineConfiguration:
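  """Context manager that optionally disables ASLR and pins the CPU
  governor for the duration of a benchmark run, restoring the previous
  settings on exit. Requires Linux and typically sudo privileges.
  """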
  def __init__(self, disable_aslr = False, governor = None):
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor != None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, type, value, traceback):
    if self.aslr_backup != None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup != None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

  @staticmethod
  def GetASLR():
    try:
      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
        return int(f.readline().strip())
    except Exception as e:
      print "Failed to get current ASLR settings."
      raise e

  @staticmethod
  def SetASLR(value):
    try:
      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
        f.write(str(value))
    except Exception as e:
      print "Failed to update ASLR to %s." % value
      print "Are we running under sudo?"
      raise e

    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception("Present value is %s" % new_value)

  @staticmethod
  def GetCPUCoresRange():
    try:
      with open("/sys/devices/system/cpu/present", "r") as f:
        indexes = f.readline()
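        # The file holds a core range like "0-7", or just "0" on a
        # single-core machine.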
        r = map(int, indexes.split("-"))
        if len(r) == 1:
          return range(r[0], r[0] + 1)
        return range(r[0], r[1] + 1)
    except Exception as e:
      print "Failed to retrieve number of CPUs."
      raise e

  @staticmethod
  def GetCPUPathForId(cpu_index):
    ret = "/sys/devices/system/cpu/cpu"
    ret += str(cpu_index)
    ret += "/cpufreq/scaling_governor"
    return ret

  @staticmethod
  def GetCPUGovernor():
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "r") as f:
          # We assume the governors of all CPUs are set to the same value
          val = f.readline().strip()
          if ret == None:
            ret = val
          elif ret != val:
            raise Exception("CPU cores have differing governor settings")
      return ret
    except Exception as e:
      print "Failed to get the current CPU governor."
      print "Is the CPU governor disabled? Check BIOS."
      raise e

  @staticmethod
  def SetCPUGovernor(value):
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "w") as f:
          f.write(value)

    except Exception as e:
      print "Failed to change CPU governor to %s." % value
      print "Are we running under sudo?"
      raise e

    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception("Could not set CPU governor. Present value is %s"
                      % cur_value )

def Main(args):
  logging.getLogger().setLevel(logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools",
                    help="Path to chromium's build/android. Specifying this "
                         "option will run tests using the Android platform.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-no-patch",
                    help="Path to a file for storing json results from run "
                         "without patch.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-no-patch",
                    help="Base directory with compile output without patch")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                    "architecture-specific build dir. "
                    "Not supported in conjunction with outdir-no-patch.")
  parser.add_option("--prioritize",
                    help="Raise the priority to nice -20 for the benchmarking "
                    "process.Requires Linux, schedtool, and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--affinitize",
                    help="Run benchmarking process on the specified core. "
                    "For example: "
                    "--affinitize=0 will run the benchmark process on core 0. "
                    "--affinitize=3 will run the benchmark process on core 3. "
                    "Requires Linux, schedtool, and sudo privileges.",
                    default=None)
  parser.add_option("--noaslr",
                    help="Disable ASLR for the duration of the benchmarked "
                    "process. Requires Linux and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--cpu-governor",
                    help="Set cpu governor to specified policy for the "
                    "duration of the benchmarked process. Typical options: "
                    "'powersave' for more stable results, or 'performance' "
                    "for shorter completion time of suite, with potentially "
                    "more noise in results.")
  parser.add_option("--filter",
                    help="Only run the benchmarks beginning with this string. "
                    "For example: "
                    "--filter=JSTests/TypedArrays/ will run only TypedArray "
                    "benchmarks from the JSTests suite.",
                    default="")

  (options, args) = parser.parse_args(args)

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover
    print "Unknown architecture %s" % options.arch
    return 1

  if options.device and not options.android_build_tools:  # pragma: no cover
    print "Specifying a device requires Android build tools."
    return 1

  if (options.json_test_results_no_patch and
      not options.outdir_no_patch):  # pragma: no cover
    print("For writing json test results without patch, an outdir without "
          "patch must be specified.")
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  if options.binary_override_path is None:
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      print "binary-override-path must be a file name"
      return 1
    if options.outdir_no_patch:
      print "specify either binary-override-path or outdir-no-patch"
      return 1
    options.shell_dir = os.path.abspath(
        os.path.dirname(options.binary_override_path))
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_no_patch:
    options.shell_dir_no_patch = os.path.join(
        workspace, options.outdir_no_patch, build_config)
  else:
    options.shell_dir_no_patch = None

  if options.json_test_results:
    options.json_test_results = os.path.abspath(options.json_test_results)

  if options.json_test_results_no_patch:
    options.json_test_results_no_patch = os.path.abspath(
        options.json_test_results_no_patch)

  # Ensure all arguments have absolute path before we start changing current
  # directory.
  args = map(os.path.abspath, args)

  platform = Platform.GetPlatform(options)

  results = Results()
  results_no_patch = Results()
  with CustomMachineConfiguration(governor = options.cpu_governor,
                                  disable_aslr = options.noaslr) as conf:
    for path in args:
      if not os.path.exists(path):  # pragma: no cover
        results.errors.append("Configuration file %s does not exist." % path)
        continue
      with open(path) as f:
        suite = json.loads(f.read())
      # If no name is given, default to the file name without .json.
      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
      # Setup things common to one test suite.
      platform.PreExecution()
      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, options.arch, default_parent)
      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)
      # Traverse graph/trace tree and iterate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        runnable_name = "/".join(runnable.graphs)
        if not runnable_name.startswith(options.filter):
          continue
        print ">>> Running suite: %s" % runnable_name
        def Runner():
          """Output generator that reruns several times."""
          for i in xrange(0, max(1, runnable.run_count)):
            # TODO(machenbach): Allow timeout per arch like with run_count per
            # arch.
            yield platform.Run(runnable, i)
        # Let runnable iterate over all runs and handle output.
        result, result_no_patch = runnable.Run(
          Runner, trybot=options.shell_dir_no_patch)
        results += result
        results_no_patch += result_no_patch
      platform.PostExecution()

    if options.json_test_results:
      results.WriteToFile(options.json_test_results)
    else:  # pragma: no cover
      print results
  if options.json_test_results_no_patch:
    results_no_patch.WriteToFile(options.json_test_results_no_patch)
  else:  # pragma: no cover
    print results_no_patch

  return min(1, len(results.errors))

if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))