#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run_perf.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be moved to android device>, ...]
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
    }, ...
  ]
}

The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.

A suite's results_regexp is expected to have one string placeholder
"%s" for the trace name. A trace's results_regexp overrides the suite
default.
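
E.g. the suite-level regexp "^%s: (.+)$" with a trace named "Richards" is
expected to match an output line like "Richards: 123".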

A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests like this (with a path
relative to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>

The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.

A suite without "tests" is considered a performance test itself.

Full example (suite with one runner):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
"""

from collections import OrderedDict
import json
import logging
import math
import optparse
import os
import re
import subprocess
import sys

from testrunner.local import commands
from testrunner.local import utils

ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "x64",
                   "arm64"]

GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
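
# For illustration (values hypothetical): GENERIC_RESULTS_RE consumes lines
# printed by tests in the generic format, e.g.
#   RESULT Infra: Constant1= 11.11 score
#   RESULT Infra: Constant2= [10.5,10.3,10.8] ms
#   RESULT Infra: Constant3= {12.2,0.5} ms
# The value part is a plain number, a [v1,v2,...] list (RESULT_LIST_RE) or a
# {mean,stddev} pair (RESULT_STDDEV_RE).
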
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))


def LoadAndroidBuildTools(path):  # pragma: no cover
  assert os.path.exists(path)
  sys.path.insert(0, path)

  import devil_chromium
  from devil.android import device_errors  # pylint: disable=import-error
  from devil.android import device_utils  # pylint: disable=import-error
  from devil.android.sdk import adb_wrapper  # pylint: disable=import-error
  from devil.android.perf import cache_control  # pylint: disable=import-error
  from devil.android.perf import perf_control  # pylint: disable=import-error
  global adb_wrapper
  global cache_control
  global device_errors
  global device_utils
  global perf_control

  devil_chromium.Initialize()


def GeometricMean(values):
  """Returns the geometric mean of a list of values.

  The mean is calculated using log to avoid overflow.
  """
  values = map(float, values)
  return str(math.exp(sum(map(math.log, values)) / len(values)))
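
# For example (illustrative), GeometricMean(["2", "8"]) computes
# exp((ln 2 + ln 8) / 2), i.e. the geometric mean sqrt(2 * 8) = 4, and
# returns it as a string (subject to floating-point rounding).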


class Results(object):
  """Place holder for result traces."""
  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    self.traces += other.traces
    self.errors += other.errors
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())


class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repetitive runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    self.name = graphs[-1]
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""

  def ConsumeOutput(self, stdout):
    try:
      result = re.search(self.results_regexp, stdout, re.M).group(1)
      self.results.append(str(float(result)))
    except ValueError:
      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                         % (self.results_regexp, self.name))
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))

    try:
      if self.stddev_regexp and self.stddev:
        self.errors.append("Test %s should only run once since a stddev "
                           "is provided by the test." % self.name)
      if self.stddev_regexp:
        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.stddev_regexp, self.name))

  def GetResults(self):
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)


class NullMeasurement(object):
  """Null object to avoid having extra logic for configurations that didn't
  run like running without patch on trybots.
  """
  def ConsumeOutput(self, stdout):
    pass

  def GetResults(self):
    return Results()


def Unzip(iterable):
  left = []
  right = []
  for l, r in iterable:
    left.append(l)
    right.append(r)
  return lambda: iter(left), lambda: iter(right)
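
# E.g. Unzip([(1, "a"), (2, "b")]) returns two zero-argument callables that
# produce iterators over (1, 2) and ("a", "b"); used below to split
# (with_patch, no_patch) output pairs.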


def AccumulateResults(
    graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    trybot: Indicates that this is run in trybot mode, i.e. run twice, once
            with and once without patch.
    no_patch: Indicates whether this is a trybot run without patch.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
    trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())

  if not res.traces or not calc_total:
    return res

  # Assume all traces have the same structure.
  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(t["results"][i] for t in res.traces)
                   for i in range(0, n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res
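
# E.g. with graph_names ["v8", "Octane"] and calc_total set, a summary trace
# {"graphs": ["v8", "Octane", "Total"], ...} is appended whose i-th result
# is the geometric mean over the i-th results of all traces.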


def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if match:
        stddev = ""
        graph = match.group(1)
        trace = match.group(2)
        body = match.group(3)
        units = match.group(4)
        match_stddev = RESULT_STDDEV_RE.match(body)
        match_list = RESULT_LIST_RE.match(body)
        errors = []
        if match_stddev:
          result, stddev = map(str.strip, match_stddev.group(1).split(","))
          results = [result]
        elif match_list:
          results = map(str.strip, match_list.group(1).split(","))
        else:
          results = [body.strip()]

        try:
          results = map(lambda r: str(float(r)), results)
        except ValueError:
          results = []
          errors = ["Found non-numeric in %s" %
                    "/".join(graph_names + [graph, trace])]

        trace_result = traces.setdefault(trace, Results([{
          "graphs": graph_names + [graph, trace],
          "units": (units or suite_units).strip(),
          "results": [],
          "stddev": "",
        }], errors))
        trace_result.traces[0]["results"].extend(results)
        trace_result.traces[0]["stddev"] = stddev

  return reduce(lambda r, t: r + t, traces.itervalues(), Results())


class Node(object):
  """Represents a node in the suite tree structure."""
  def __init__(self, *args):
    self._children = []

  def AppendChild(self, child):
    self._children.append(child)


class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self, binary="d8"):
    super(DefaultSentinel, self).__init__()
    self.binary = binary
    self.run_count = 10
    self.timeout = 60
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.resources = []
    self.results_regexp = None
    self.stddev_regexp = None
    self.units = "score"
    self.total = False


class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults).
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string placeholder for the
    # suite name is expected.
    # TODO(machenbach): Currently this only makes sense at the leaf level.
    # Multiple placeholders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)
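
    # E.g. a parent results_regexp of "^%s: (.+)$" combined with the suite
    # name "DeltaBlue" yields the default "^DeltaBlue: (.+)$" (re.escape
    # leaves purely alphanumeric names unchanged).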

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)


class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    assert self.results_regexp

  def CreateMeasurement(self, trybot, no_patch):
    if not trybot and no_patch:
      # Use null object for no-patch logic if this is not a trybot run.
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )


class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    return self._suite.get("main", "")

  def ChangeCWD(self, suite_path):
    """Changes the cwd to to path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix
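
  # E.g. flags ["--expose-gc"], main "run.js" and test_flags ["5"] yield
  # GetCommandFlags() == ["--expose-gc", "run.js", "--", "5"].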

  def GetCommand(self, shell_dir, extra_flags=None):
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    cmd = [os.path.join(shell_dir, self.binary)]
    if self.binary != 'd8' and '--prof' in extra_flags:
      print "Profiler supported only on a benchmark run with d8"
    return cmd + self.GetCommandFlags(extra_flags=extra_flags)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=stdout_with_patch,
            trybot=trybot,
            no_patch=False,
            calc_total=self.total,
        ),
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=stdout_no_patch,
            trybot=trybot,
            no_patch=True,
            calc_total=self.total,
        ),
    )


class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    measurement_with_patch = self.CreateMeasurement(trybot, False)
    measurement_no_patch = self.CreateMeasurement(trybot, True)
    for stdout_with_patch, stdout_no_patch in runner():
      measurement_with_patch.ConsumeOutput(stdout_with_patch)
      measurement_no_patch.ConsumeOutput(stdout_no_patch)
    return (
        measurement_with_patch.GetResults(),
        measurement_no_patch.GetResults(),
    )


class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
        AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
    )


def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects."""
  if isinstance(parent, RunnableConfig):
    # Below a runnable there can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


class Platform(object):
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    self.shell_dir_no_patch = options.shell_dir_no_patch
    self.extra_flags = options.extra_flags.split()

  @staticmethod
  def GetPlatform(options):
    if options.android_build_tools:
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, no_patch=False):
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_no_patch is specified, the benchmark is run once with
    and once without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the benchmark outputs with and without patch. The
             latter will be None if options.shell_dir_no_patch was not
             specified.
    """
    stdout = self._Run(runnable, count, no_patch=False)
    if self.shell_dir_no_patch:
      return stdout, self._Run(runnable, count, no_patch=True)
    else:
      return stdout, None


class DesktopPlatform(Platform):
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)
    self.command_prefix = []

    if options.prioritize or options.affinitize is not None:
      self.command_prefix = ["schedtool"]
      if options.prioritize:
        self.command_prefix += ["-n", "-20"]
      if options.affinitize is not None:
        # schedtool expects a bit pattern when setting affinity, where each
        # bit set to '1' corresponds to a core the process may run on. The
        # first bit corresponds to CPU 0. Since the 'affinitize' parameter
        # is a core number, we need to map it to said bit pattern.
        cpu = int(options.affinitize)
        core = 1 << cpu
        self.command_prefix += ["-a", ("0x%x" % core)]
      self.command_prefix += ["-e"]

  def PreExecution(self):
    pass

  def PostExecution(self):
    pass

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, no_patch=False):
    suffix = ' - without patch' if no_patch else ''
    shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    command = self.command_prefix + runnable.GetCommand(shell_dir,
                                                        self.extra_flags)
    try:
      output = commands.Execute(
        command,
        timeout=runnable.timeout,
      )
    except OSError as e:  # pragma: no cover
      print title % "OSError"
      print e
      return ""

    print title % "Stdout"
    print output.stdout
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      print title % "Stderr"
      print output.stderr
    if output.timed_out:
      print ">>> Test timed out after %ss." % runnable.timeout
    if '--prof' in self.extra_flags:
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        print "Profiler option currently supported on Linux and Mac OS."
    return output.stdout


class AndroidPlatform(Platform):  # pragma: no cover
  DEVICE_DIR = "/data/local/tmp/v8/"

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    LoadAndroidBuildTools(options.android_build_tools)

    if not options.device:
      # Detect attached device if not specified.
      devices = adb_wrapper.AdbWrapper.Devices()
      assert devices and len(devices) == 1, (
          "None or multiple devices detected. Please specify the device on "
          "the command-line with --device")
      options.device = str(devices[0])
    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
    self.device = device_utils.DeviceUtils(self.adb_wrapper)

  def PreExecution(self):
    perf = perf_control.PerfControl(self.device)
    perf.SetHighPerfMode()

    # Remember what we have already pushed to the device.
    self.pushed = set()

  def PostExecution(self):
    perf = perf_control.PerfControl(self.device)
    perf.SetDefaultPerfMode()
    self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])

  def _PushFile(self, host_dir, file_name, target_rel=".",
                skip_if_missing=False):
    file_on_host = os.path.join(host_dir, file_name)
    file_on_device_tmp = os.path.join(
        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
    file_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
    folder_on_device = os.path.dirname(file_on_device)

    # Only attempt to push files that exist.
    if not os.path.exists(file_on_host):
      if not skip_if_missing:
        logging.critical('Missing file on host: %s' % file_on_host)
      return

    # Only push files not yet pushed in one execution.
    if file_on_host in self.pushed:
      return
    else:
      self.pushed.add(file_on_host)

    # Work-around for "text file busy" errors. Push the files to a temporary
    # location and then copy them with a shell command.
    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
    # Errors look like this: "failed to copy  ... ".
    if output and not re.search('^[0-9]', output.splitlines()[-1]):
      logging.critical('PUSH FAILED: ' + output)
    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))

  def _PushExecutable(self, shell_dir, target_dir, binary):
    self._PushFile(shell_dir, binary, target_dir)

    # Push external startup data. Backwards compatible for revisions where
    # these files didn't exist.
    self._PushFile(
        shell_dir,
        "natives_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob_ignition.bin",
        target_dir,
        skip_if_missing=True,
    )

  def PreTests(self, node, path):
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self._PushExecutable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_no_patch:
      self._PushExecutable(
          self.shell_dir_no_patch, "bin_no_patch", node.binary)

    if isinstance(node, RunnableConfig):
      self._PushFile(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self._PushFile(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, no_patch=False):
    suffix = ' - without patch' if no_patch else ''
    target_dir = "bin_no_patch" if no_patch else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    cache = cache_control.CacheControl(self.device)
    cache.DropRamCaches()
    binary_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    try:
      output = self.device.RunShellCommand(
          cmd,
          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
          timeout=runnable.timeout,
          retries=0,
      )
      stdout = "\n".join(output)
      print title % "Stdout"
      print stdout
    except device_errors.CommandTimeoutError:
      print ">>> Test timed out after %ss." % runnable.timeout
      stdout = ""
    return stdout

class CustomMachineConfiguration:
  def __init__(self, disable_aslr=False, governor=None):
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor is not None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, type, value, traceback):
    if self.aslr_backup is not None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup is not None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

  @staticmethod
  def GetASLR():
    try:
      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
        return int(f.readline().strip())
    except Exception as e:
      print "Failed to get current ASLR settings."
      raise e

  @staticmethod
  def SetASLR(value):
    try:
      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
        f.write(str(value))
    except Exception as e:
      print "Failed to update ASLR to %s." % value
      print "Are we running under sudo?"
      raise e

    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception("Present value is %s" % new_value)

  @staticmethod
  def GetCPUCoresRange():
    try:
      with open("/sys/devices/system/cpu/present", "r") as f:
        indexes = f.readline()
        r = map(int, indexes.split("-"))
        if len(r) == 1:
          return range(r[0], r[0] + 1)
        return range(r[0], r[1] + 1)
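        # E.g. a "present" file containing "0-7" yields range(0, 8); a
        # single value "0" yields range(0, 1).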
    except Exception as e:
      print "Failed to retrieve number of CPUs."
      raise e

  @staticmethod
  def GetCPUPathForId(cpu_index):
    ret = "/sys/devices/system/cpu/cpu"
    ret += str(cpu_index)
    ret += "/cpufreq/scaling_governor"
    return ret

  @staticmethod
  def GetCPUGovernor():
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "r") as f:
          # We assume the governors of all CPUs are set to the same value
          val = f.readline().strip()
          if ret == None:
            ret = val
          elif ret != val:
            raise Exception("CPU cores have differing governor settings")
      return ret
    except Exception as e:
      print "Failed to get the current CPU governor."
      print "Is the CPU governor disabled? Check BIOS."
      raise e

  @staticmethod
  def SetCPUGovernor(value):
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "w") as f:
          f.write(value)

    except Exception as e:
      print "Failed to change CPU governor to %s." % value
      print "Are we running under sudo?"
      raise e

    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception("Could not set CPU governor. Present value is %s"
                      % cur_value)
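

# Illustrative use of this context manager (Main below does the equivalent):
#   with CustomMachineConfiguration(governor="powersave", disable_aslr=True):
#     ...  # benchmarks run with a pinned CPU governor and ASLR disabled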

# TODO: Implement results_processor.
def Main(args):
  logging.getLogger().setLevel(logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools",
                    help="Path to chromium's build/android. Specifying this "
                         "option will run tests using android platform.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-no-patch",
                    help="Path to a file for storing json results from run "
                         "without patch.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-no-patch",
                    help="Base directory with compile output without patch")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                    "architecture-specific build dir. "
                    "Not supported in conjunction with outdir-no-patch.")
  parser.add_option("--prioritize",
                    help="Raise the priority to nice -20 for the benchmarking "
                    "process.Requires Linux, schedtool, and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--affinitize",
                    help="Run benchmarking process on the specified core. "
                    "For example: "
                    "--affinitize=0 will run the benchmark process on core 0. "
                    "--affinitize=3 will run the benchmark process on core 3. "
                    "Requires Linux, schedtool, and sudo privileges.",
                    default=None)
  parser.add_option("--noaslr",
                    help="Disable ASLR for the duration of the benchmarked "
                    "process. Requires Linux and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--cpu-governor",
                    help="Set cpu governor to specified policy for the "
                    "duration of the benchmarked process. Typical options: "
                    "'powersave' for more stable results, or 'performance' "
                    "for shorter completion time of suite, with potentially "
                    "more noise in results.")

  (options, args) = parser.parse_args(args)

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover
    print "Unknown architecture %s" % options.arch
    return 1

  if options.device and not options.android_build_tools:  # pragma: no cover
    print "Specifying a device requires Android build tools."
    return 1

  if (options.json_test_results_no_patch and
      not options.outdir_no_patch):  # pragma: no cover
    print("For writing json test results without patch, an outdir without "
          "patch must be specified.")
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  if options.binary_override_path is None:
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      print "binary-override-path must be a file name"
      return 1
    if options.outdir_no_patch:
      print "specify either binary-override-path or outdir-no-patch"
      return 1
    options.shell_dir = os.path.dirname(options.binary_override_path)
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_no_patch:
    options.shell_dir_no_patch = os.path.join(
        workspace, options.outdir_no_patch, build_config)
  else:
    options.shell_dir_no_patch = None

  platform = Platform.GetPlatform(options)

  results = Results()
  results_no_patch = Results()
  with CustomMachineConfiguration(governor=options.cpu_governor,
                                  disable_aslr=options.noaslr) as conf:
    for path in args:
      path = os.path.abspath(path)

      if not os.path.exists(path):  # pragma: no cover
        results.errors.append("Configuration file %s does not exist." % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

      # Set up things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, options.arch, default_parent)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and iterate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        print ">>> Running suite: %s" % "/".join(runnable.graphs)

        def Runner():
          """Output generator that reruns several times."""
          for i in xrange(0, max(1, runnable.run_count)):
            # TODO(machenbach): Allow timeout per arch like with run_count per
            # arch.
            yield platform.Run(runnable, i)

        # Let runnable iterate over all runs and handle output.
        result, result_no_patch = runnable.Run(
          Runner, trybot=options.shell_dir_no_patch)
        results += result
        results_no_patch += result_no_patch
      platform.PostExecution()

    if options.json_test_results:
      results.WriteToFile(options.json_test_results)
    else:  # pragma: no cover
      print results

  if options.json_test_results_no_patch:
    results_no_patch.WriteToFile(options.json_test_results_no_patch)
  else:  # pragma: no cover
    print results_no_patch

  return min(1, len(results.errors))

if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))