run_perf.py 37.4 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run-perf.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
18
  "test_flags": [<flag to the test file>, ...],
19 20
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
21
  "resources": [<js file to be moved to android device>, ...]
22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
    }, ...
  ]
}

The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.

A suite's results_regexp is expected to have one string place holder
"%s" for the trace name. A trace's results_regexp overwrites suite
defaults.

A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests (with a path relative to the
suite level's path). It is expected to read the measurement's output text
on stdin and print the processed output to stdout.

The results_regexp will be applied to the processed output.

A suite without "tests" is considered a performance test itself.

Full example (suite with one runner):
{
  "path": ["."],
  "flags": ["--expose-gc"],
57
  "test_flags": ["5"],
58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
"""

97
from collections import OrderedDict
98
import json
99
import logging
100 101 102 103
import math
import optparse
import os
import re
104
import subprocess
105 106 107 108 109 110
import sys

from testrunner.local import commands
from testrunner.local import utils

# Best guess of the host architecture, used when --arch is not given.
ARCH_GUESS = utils.DefaultArch()
# Architectures a suite may declare support for via its "archs" field.
SUPPORTED_ARCHS = ["arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "x64",
                   "arm64"]

# Matches chromium-style generic perf output lines, e.g.
# "RESULT graph: trace= 1234 score".
GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
# Matches a "{result, stddev}" body in a generic result line.
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
# Matches a "[r1, r2, ...]" list body in a generic result line.
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
# Directory containing this script and the *-tick-processor tools.
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
122

123

124 125 126 127
def LoadAndroidBuildTools(path):  # pragma: no cover
  """Imports the devil device-interaction modules from an android checkout.

  Args:
    path: Existing directory containing the android build tools (devil).

  The imported modules are published as module globals so the rest of this
  file can use them without a hard dependency when not running on android.
  """
  assert os.path.exists(path)
  sys.path.insert(0, path)

  import devil_chromium
  from devil.android import device_errors  # pylint: disable=import-error
  from devil.android import device_utils  # pylint: disable=import-error
  from devil.android.sdk import adb_wrapper  # pylint: disable=import-error
  from devil.android.perf import cache_control  # pylint: disable=import-error
  from devil.android.perf import perf_control  # pylint: disable=import-error

  # The global statements make the from-imports above bind at module scope.
  global adb_wrapper
  global cache_control
  global device_errors
  global device_utils
  global perf_control

  devil_chromium.Initialize()

142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173

def GeometricMean(values):
  """Returns the geometric mean of a list of values.

  The mean is calculated using log to avoid overflow.

  Args:
    values: Non-empty iterable of positive numbers or numeric strings
            (math.log requires values > 0).
  Returns: The geometric mean formatted as a string.
  """
  # Materialize as a list of floats so len() works for any iterable;
  # the former map() call only yielded a list on Python 2.
  values = [float(v) for v in values]
  return str(math.exp(sum(map(math.log, values)) / len(values)))


class Results(object):
  """Accumulator for result traces and error messages."""

  def __init__(self, traces=None, errors=None):
    self.traces = traces if traces else []
    self.errors = errors if errors else []

  def ToDict(self):
    """Returns a plain-dict view suitable for JSON serialization."""
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    """Dumps the results as JSON into the given file."""
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    # NOTE: mutates and returns self instead of building a new instance.
    self.traces.extend(other.traces)
    self.errors.extend(other.errors)
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())


174 175 176 177 178 179 180
class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repetitive runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    """
    Args:
      graphs: List of path names leading to this trace, e.g. ['v8', 'Octane'].
      units: Unit specification for the performance dashboard.
      results_regexp: Regexp with one group capturing a numeric result.
      stddev_regexp: Optional regexp capturing the standard deviation.
    """
    self.name = '/'.join(graphs)
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""

  def ConsumeOutput(self, stdout):
    """Extracts one result (and optional stddev) from a run's stdout.

    Failures to match or parse are recorded in self.errors instead of
    raising.
    """
    # Check the match explicitly rather than using a bare "except:", which
    # also swallowed KeyboardInterrupt and genuine programming errors.
    match = re.search(self.results_regexp, stdout, re.M)
    if match:
      try:
        self.results.append(str(float(match.group(1))))
      except ValueError:
        self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                           % (self.results_regexp, self.name))
    else:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))

    if self.stddev_regexp and self.stddev:
      # A test providing its own stddev must not be run multiple times,
      # since the stddevs cannot be combined.
      self.errors.append("Test %s should only run once since a stddev "
                         "is provided by the test." % self.name)
    if self.stddev_regexp:
      stddev_match = re.search(self.stddev_regexp, stdout, re.M)
      if stddev_match:
        self.stddev = stddev_match.group(1)
      else:
        self.errors.append("Regexp \"%s\" didn't match for test %s."
                           % (self.stddev_regexp, self.name))

  def GetResults(self):
    """Wraps the collected measurements in a Results object."""
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)


220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239
class NullMeasurement(object):
  """No-op stand-in for Measurement.

  Avoids extra logic for configurations that didn't run, e.g. running
  without patch on non-trybot runs.
  """
  def ConsumeOutput(self, stdout):
    """Ignores the output of a run."""
    pass

  def GetResults(self):
    """Returns an empty Results object."""
    return Results()


def Unzip(iterable):
  """Splits an iterable of pairs into two replayable iterator factories.

  Returns a tuple of two zero-argument callables: the first yields all
  first elements, the second all second elements. The input is consumed
  once; each factory can be called repeatedly.
  """
  pairs = list(iterable)
  left = [first for first, _ in pairs]
  right = [second for _, second in pairs]
  return lambda: iter(left), lambda: iter(right)


240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258
def RunResultsProcessor(results_processor, stdout, count):
  """Pipes one run's stdout through an external results-processor script.

  Args:
    results_processor: Path to a python script that reads raw output on
                       stdin and writes processed output to stdout. Must
                       exist.
    stdout: Raw output of one test run, or None for null-runs.
    count: One-based number of the (repeated) run, used for logging only.
  Returns: The processed output, or None if stdout was None.
  """
  # Dummy pass through for null-runs.
  if stdout is None:
    return None

  # We assume the results processor is relative to the suite.
  assert os.path.exists(results_processor)
  p = subprocess.Popen(
      [sys.executable, results_processor],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
  )
  result, _ = p.communicate(input=stdout)
  print ">>> Processed stdout (#%d):" % count
  print result
  return result


259 260
def AccumulateResults(
    graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    trybot: Indicates that this is run in trybot mode, i.e. run twice, once
            with once without patch.
    no_patch: Indicates whether this is a trybot run without patch.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
    trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
  # Feed every run's output to every measurement; each extracts its own
  # trace's value via its regexp.
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  # Merge all per-trace results into one Results object (mutating __add__).
  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())

  if not res.traces or not calc_total:
    return res

  # Assume all traces have the same structure.
  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(t["results"][i] for t in res.traces)
                   for i in range(0, n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res


def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    # Parse chromium-style "RESULT graph: trace= body units" lines.
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if match:
        stddev = ""
        graph = match.group(1)
        trace = match.group(2)
        body = match.group(3)
        units = match.group(4)
        match_stddev = RESULT_STDDEV_RE.match(body)
        match_list = RESULT_LIST_RE.match(body)
        errors = []
        if match_stddev:
          # Body has the shape "{result, stddev}".
          result, stddev = map(str.strip, match_stddev.group(1).split(","))
          results = [result]
        elif match_list:
          # Body has the shape "[r1, r2, ...]".
          results = map(str.strip, match_list.group(1).split(","))
        else:
          results = [body.strip()]

        try:
          results = map(lambda r: str(float(r)), results)
        except ValueError:
          results = []
          errors = ["Found non-numeric in %s" %
                    "/".join(graph_names + [graph, trace])]

        # First sight of a trace name creates its entry; later lines for
        # the same trace extend the existing one.
        trace_result = traces.setdefault(trace, Results([{
          "graphs": graph_names + [graph, trace],
          "units": (units or suite_units).strip(),
          "results": [],
          "stddev": "",
        }], errors))
        trace_result.traces[0]["results"].extend(results)
        trace_result.traces[0]["stddev"] = stddev

  return reduce(lambda r, t: r + t, traces.itervalues(), Results())


361 362 363 364 365 366 367 368 369 370 371
class Node(object):
  """Represents a node in the suite tree structure."""

  def __init__(self, *args):
    # Extra positional args are accepted (and ignored) so subclasses can
    # call super() uniformly.
    self._children = []

  def AppendChild(self, child):
    """Adds a child node to this node."""
    self._children.append(child)


class DefaultSentinel(Node):
  """Fake parent node with all default values.

  Serves as the root parent when building the config tree, so every real
  GraphConfig has defaults to inherit from.
  """
  def __init__(self, binary = "d8"):
    super(DefaultSentinel, self).__init__()
    # Name of the binary to run; "d8" unless overridden.
    self.binary = binary
    # Default number of repeated runs per suite.
    self.run_count = 10
    # Default per-run timeout in seconds.
    self.timeout = 60
    # Accumulated values (path chunks, graph names, flags) start empty.
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.resources = []
    # No results post-processing and no regexps by default.
    self.results_processor = None
    self.results_regexp = None
    self.stddev_regexp = None
    # Default unit specification for the performance dashboard.
    self.units = "score"
    # By default no summary ("Total") trace is calculated.
    self.total = False


389
class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    """
    Args:
      suite: The json suite configuration (dict).
      parent: The parent GraphConfig (or DefaultSentinel) to inherit from.
      arch: Architecture name, used to resolve per-arch overrides like
            "run_count_x64".
    """
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults). Per-arch settings win over
    # the generic ones.
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)
    self.results_processor = suite.get(
        "results_processor", parent.results_processor)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string place holder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)


443 444
class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    # A leaf must know how to extract its result from the output.
    assert self.results_regexp

  def CreateMeasurement(self, trybot, no_patch):
    """Creates the measurement object that consumes this trace's output.

    Returns a NullMeasurement for the no-patch configuration when not
    running on a trybot, since no no-patch run happens in that case.
    """
    if not trybot and no_patch:
      # Use null object for no-patch logic if this is not a trybot run.
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )
460 461


462
class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    # Empty strings are accepted; the suite then consists of the binary
    # alone.
    return self._suite.get("main", "")

  def PostProcess(self, stdouts_iter):
    """Wraps the output iterator with the results processor, if configured.

    Returns a zero-argument callable yielding (possibly processed) stdouts.
    """
    if self.results_processor:
      def it():
        for i, stdout in enumerate(stdouts_iter()):
          yield RunResultsProcessor(self.results_processor, stdout, i + 1)
      return it
    else:
      return stdouts_iter

  def ChangeCWD(self, suite_path):
    """Changes the cwd to the path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
    """Returns d8 flags + extra flags + main file + test flags after '--'."""
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, shell_dir, extra_flags=None):
    """Composes the full command line: binary path plus all flags."""
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    cmd = [os.path.join(shell_dir, self.binary)]
    if self.binary.endswith(".py"):
      # Python-based benchmark runners need the interpreter prepended.
      cmd = [sys.executable] + cmd
    if self.binary != 'd8' and '--prof' in extra_flags:
      print "Profiler supported only on a benchmark run with d8"
    return cmd + self.GetCommandFlags(extra_flags=extra_flags)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    # runner yields (with_patch, no_patch) stdout pairs; split them into
    # two replayable streams.
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout_with_patch),
            trybot=trybot,
            no_patch=False,
            calc_total=self.total,
        ),
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout_no_patch),
            trybot=trybot,
            no_patch=True,
            calc_total=self.total,
        ),
    )
522

523 524

class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    # The leaf itself is the single trace, so it measures its own output
    # directly instead of delegating to child traces.
    measurement_with_patch = self.CreateMeasurement(trybot, False)
    measurement_no_patch = self.CreateMeasurement(trybot, True)
    for stdout_with_patch, stdout_no_patch in runner():
      measurement_with_patch.ConsumeOutput(stdout_with_patch)
      measurement_no_patch.ConsumeOutput(stdout_no_patch)
    return (
        measurement_with_patch.GetResults(),
        measurement_no_patch.GetResults(),
    )
540 541


542
class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Runs and accumulates results parsed from generic RESULT lines."""
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
        AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
    )
553 554 555 556 557


def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects.

  Args:
    suite: The json suite configuration (dict).
    arch: Architecture name the suite is configured for.
    parent: Parent config node; its type influences the chosen class.
  Returns: A GraphConfig (or subclass) instance.
  Raises: Exception for suite configurations matching no known shape.
  """
  if isinstance(parent, RunnableConfig):
    # Below a runnable can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


579
def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.

  Args:
    suite: The json suite configuration (dict), possibly with nested tests.
    arch: Architecture name; suites not listing it in "archs" are skipped.
    parent: Node the newly built graph is appended to.
  Returns: The new graph node, or None if the suite doesn't support arch.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  # Recurse into nested suites; they attach themselves to this graph.
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


595
def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.

  Args:
    node: Root of the (sub)tree to traverse.
    node_cb: Callback invoked once for every visited node (runnable or not).
  Yields: All RunnableConfig nodes in the tree.
  Raises: Exception if a non-Node object is found in the tree.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    # Runnables are not descended into; their children are traces handled
    # by the runnable itself.
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


610
class Platform(object):
  """Abstract base for the environment (desktop or android) tests run on."""
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    # Optional second shell dir for trybot runs without patch.
    self.shell_dir_no_patch = options.shell_dir_no_patch
    self.extra_flags = options.extra_flags.split()

  @staticmethod
  def GetPlatform(options):
    """Factory returning the platform matching the command-line options."""
    if options.android_build_tools:
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, no_patch=False):
    # Runs the benchmark once; implemented by subclasses.
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_no_patch is specified, the benchmark is run once with
    and once without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the benchmark outputs with and without patch. The
             latter will be None if options.shell_dir_no_patch was not
             specified.
    """
    stdout = self._Run(runnable, count, no_patch=False)
    if self.shell_dir_no_patch:
      return stdout, self._Run(runnable, count, no_patch=True)
    else:
      return stdout, None

644 645 646

class DesktopPlatform(Platform):
  """Runs benchmarks directly on the host machine."""
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)
    # Optional schedtool prefix for priority/affinity pinning.
    self.command_prefix = []

    if options.prioritize or options.affinitize != None:
      self.command_prefix = ["schedtool"]
      if options.prioritize:
        # Highest priority (niceness -20); requires privileges.
        self.command_prefix += ["-n", "-20"]
      if options.affinitize != None:
      # schedtool expects a bit pattern when setting affinity, where each
      # bit set to '1' corresponds to a core where the process may run on.
      # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
      # a core number, we need to map to said bit pattern.
        cpu = int(options.affinitize)
        core = 1 << cpu
        self.command_prefix += ["-a", ("0x%x" % core)]
      # "-e" makes schedtool execute the command that follows.
      self.command_prefix += ["-e"]

  def PreExecution(self):
    """No per-execution setup needed on desktop."""
    pass

  def PostExecution(self):
    """No per-execution teardown needed on desktop."""
    pass

  def PreTests(self, node, path):
    """Changes into the benchmark directory before running a runnable."""
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, no_patch=False):
    """Runs the benchmark once and returns its stdout.

    Args:
      runnable: The RunnableConfig to execute.
      count: Zero-based run number (printed one-based).
      no_patch: Whether to use the no-patch shell dir.
    Returns: The run's stdout, or "" on OSError.
    """
    suffix = ' - without patch' if no_patch else ''
    shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    command = self.command_prefix + runnable.GetCommand(shell_dir,
                                                        self.extra_flags)
    try:
      output = commands.Execute(
        command,
        timeout=runnable.timeout,
      )
    except OSError as e:  # pragma: no cover
      print title % "OSError"
      print e
      return ""

    print title % "Stdout"
    print output.stdout
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      print title % "Stderr"
      print output.stderr
    if output.timed_out:
      print ">>> Test timed out after %ss." % runnable.timeout
    if '--prof' in self.extra_flags:
      # Post-process profiler ticks with the platform's tick processor.
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        print "Profiler option currently supported on Linux and Mac OS."
    return output.stdout


708 709 710
class AndroidPlatform(Platform):  # pragma: no cover
  """Runs benchmarks on an attached android device via adb/devil."""
  # Working directory on the device; wiped after execution.
  DEVICE_DIR = "/data/local/tmp/v8/"

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    LoadAndroidBuildTools(options.android_build_tools)

    if not options.device:
      # Detect attached device if not specified.
      devices = adb_wrapper.AdbWrapper.Devices()
      assert devices and len(devices) == 1, (
          "None or multiple devices detected. Please specify the device on "
          "the command-line with --device")
      options.device = str(devices[0])
    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
    self.device = device_utils.DeviceUtils(self.adb_wrapper)

  def PreExecution(self):
    """Switches the device to high-performance mode before running."""
    perf = perf_control.PerfControl(self.device)
    perf.SetHighPerfMode()

    # Remember what we have already pushed to the device.
    self.pushed = set()

  def PostExecution(self):
    """Restores default perf mode and wipes the device working dir."""
    perf = perf_control.PerfControl(self.device)
    perf.SetDefaultPerfMode()
    self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])

  def _PushFile(self, host_dir, file_name, target_rel=".",
                skip_if_missing=False):
    """Pushes one file to the device, deduplicating repeated pushes.

    Args:
      host_dir: Directory on the host containing the file.
      file_name: Name of the file (may contain sub-path components).
      target_rel: Target directory relative to DEVICE_DIR.
      skip_if_missing: If True, a missing host file is silently ignored.
    """
    file_on_host = os.path.join(host_dir, file_name)
    file_on_device_tmp = os.path.join(
        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
    file_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
    folder_on_device = os.path.dirname(file_on_device)

    # Only attempt to push files that exist.
    if not os.path.exists(file_on_host):
      if not skip_if_missing:
        logging.critical('Missing file on host: %s' % file_on_host)
      return

    # Only push files not yet pushed in one execution.
    if file_on_host in self.pushed:
      return
    else:
      self.pushed.add(file_on_host)

    # Work-around for "text file busy" errors. Push the files to a temporary
    # location and then copy them with a shell command.
    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
    # Errors look like this: "failed to copy  ... ".
    if output and not re.search('^[0-9]', output.splitlines()[-1]):
      logging.critical('PUSH FAILED: ' + output)
    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))

  def _PushExecutable(self, shell_dir, target_dir, binary):
    """Pushes the binary and its (optional) startup data files."""
    self._PushFile(shell_dir, binary, target_dir)

    # Push external startup data. Backwards compatible for revisions where
    # these files didn't exist.
    self._PushFile(
        shell_dir,
        "natives_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "icudtl.dat",
        target_dir,
        skip_if_missing=True,
    )

  def PreTests(self, node, path):
    """Pushes binaries, the main file and resources before running tests."""
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self._PushExecutable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_no_patch:
      self._PushExecutable(
          self.shell_dir_no_patch, "bin_no_patch", node.binary)

    if isinstance(node, RunnableConfig):
      self._PushFile(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self._PushFile(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, no_patch=False):
    """Runs the benchmark once on the device and returns its stdout.

    Args:
      runnable: The RunnableConfig to execute.
      count: Zero-based run number (printed one-based).
      no_patch: Whether to use the no-patch binaries ("bin_no_patch").
    Returns: The run's stdout, or "" on timeout.
    """
    suffix = ' - without patch' if no_patch else ''
    target_dir = "bin_no_patch" if no_patch else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    # Drop OS caches so repeated runs are comparable.
    cache = cache_control.CacheControl(self.device)
    cache.DropRamCaches()
    binary_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    try:
      output = self.device.RunShellCommand(
          cmd,
          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
          timeout=runnable.timeout,
          retries=0,
      )
      stdout = "\n".join(output)
      print title % "Stdout"
      print stdout
    except device_errors.CommandTimeoutError:
      print ">>> Test timed out after %ss." % runnable.timeout
      stdout = ""
    return stdout
843

844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893
class CustomMachineConfiguration:
  """Context manager that tweaks ASLR and the CPU governor for benchmarking.

  On entry the requested settings are applied after backing up the current
  ones; on exit the backups are restored. Writing the underlying procfs and
  sysfs files typically requires running under sudo.
  """
  def __init__(self, disable_aslr = False, governor = None):
    # Backups stay None unless __enter__ actually changes the setting.
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor is not None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, type, value, traceback):
    # Only restore what __enter__ changed; no-op otherwise.
    if self.aslr_backup is not None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup is not None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

  @staticmethod
  def GetASLR():
    """Returns the current ASLR mode as an int (randomize_va_space value)."""
    try:
      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
        return int(f.readline().strip())
    except Exception:
      print("Failed to get current ASLR settings.")
      # Bare raise keeps the original traceback (raise e would discard it).
      raise

  @staticmethod
  def SetASLR(value):
    """Writes |value| to randomize_va_space and verifies it took effect.

    Raises:
      Exception: If the kernel reports a different value after the write.
    """
    try:
      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
        f.write(str(value))
    except Exception:
      print("Failed to update ASLR to %s." % value)
      print("Are we running under sudo?")
      raise

    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception("Present value is %s" % new_value)

  @staticmethod
  def GetCPUCoresRange():
    """Returns the range of present CPU indices, e.g. range(0, 8)."""
    try:
      with open("/sys/devices/system/cpu/present", "r") as f:
        indexes = f.readline()
        # The file holds either a single index or a "first-last" range.
        # list() keeps len() working if this ever runs under Python 3.
        r = list(map(int, indexes.split("-")))
        if len(r) == 1:
          return range(r[0], r[0] + 1)
        return range(r[0], r[1] + 1)
    except Exception:
      print("Failed to retrieve number of CPUs.")
      raise

  @staticmethod
  def GetCPUPathForId(cpu_index):
    """Returns the scaling_governor sysfs path for the given CPU index."""
    ret = "/sys/devices/system/cpu/cpu"
    ret += str(cpu_index)
    ret += "/cpufreq/scaling_governor"
    return ret

  @staticmethod
  def GetCPUGovernor():
    """Returns the governor shared by all CPUs; raises if cores disagree."""
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "r") as f:
          # We assume the governors of all CPUs are set to the same value
          val = f.readline().strip()
          if ret is None:
            ret = val
          elif ret != val:
            raise Exception("CPU cores have differing governor settings")
      return ret
    except Exception:
      print("Failed to get the current CPU governor.")
      print("Is the CPU governor disabled? Check BIOS.")
      raise

  @staticmethod
  def SetCPUGovernor(value):
    """Sets every present CPU's governor to |value| and verifies it.

    Raises:
      Exception: If the governor reads back differently after the write.
    """
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "w") as f:
          f.write(value)

    except Exception:
      print("Failed to change CPU governor to %s." % value)
      print("Are we running under sudo?")
      raise

    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception("Could not set CPU governor. Present value is %s"
                      % cur_value )

def Main(args):
  """Parses the command line and runs all given perf suites.

  Args:
    args: Command-line arguments (excluding the program name): options
          followed by one or more suite description JSON files.
  Returns:
    0 on success, 1 on invalid options or if any suite reported errors.
  """
  logging.getLogger().setLevel(logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools",
                    help="Path to chromium's build/android. Specifying this "
                         "option will run tests using android platform.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-no-patch",
                    help="Path to a file for storing json results from run "
                         "without patch.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-no-patch",
                    help="Base directory with compile output without patch")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                    "architecture-specific build dir. "
                    "Not supported in conjunction with outdir-no-patch.")
  parser.add_option("--prioritize",
                    help="Raise the priority to nice -20 for the benchmarking "
                    "process. Requires Linux, schedtool, and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--affinitize",
                    help="Run benchmarking process on the specified core. "
                    "For example: "
                    "--affinitize=0 will run the benchmark process on core 0. "
                    "--affinitize=3 will run the benchmark process on core 3. "
                    "Requires Linux, schedtool, and sudo privileges.",
                    default=None)
  parser.add_option("--noaslr",
                    help="Disable ASLR for the duration of the benchmarked "
                    "process. Requires Linux and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--cpu-governor",
                    help="Set cpu governor to specified policy for the "
                    "duration of the benchmarked process. Typical options: "
                    "'powersave' for more stable results, or 'performance' "
                    "for shorter completion time of suite, with potentially "
                    "more noise in results.")

  (options, args) = parser.parse_args(args)

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover
    print("Unknown architecture %s" % options.arch)
    return 1

  if options.device and not options.android_build_tools:  # pragma: no cover
    print("Specifying a device requires Android build tools.")
    return 1

  if (options.json_test_results_no_patch and
      not options.outdir_no_patch):  # pragma: no cover
    print("For writing json test results without patch, an outdir without "
          "patch must be specified.")
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  if options.binary_override_path is None:
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      print("binary-override-path must be a file name")
      return 1
    if options.outdir_no_patch:
      print("specify either binary-override-path or outdir-no-patch")
      return 1
    options.shell_dir = os.path.dirname(options.binary_override_path)
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_no_patch:
    options.shell_dir_no_patch = os.path.join(
        workspace, options.outdir_no_patch, build_config)
  else:
    options.shell_dir_no_patch = None

  platform = Platform.GetPlatform(options)

  results = Results()
  results_no_patch = Results()
  with CustomMachineConfiguration(governor = options.cpu_governor,
                                  disable_aslr = options.noaslr) as conf:
    for path in args:
      path = os.path.abspath(path)

      if not os.path.exists(path):  # pragma: no cover
        results.errors.append("Configuration file %s does not exist." % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

      # Setup things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, options.arch, default_parent)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and iterate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        print(">>> Running suite: %s" % "/".join(runnable.graphs))

        def Runner():
          """Output generator that reruns several times."""
          for i in range(0, max(1, runnable.run_count)):
            # TODO(machenbach): Allow timeout per arch like with run_count per
            # arch.
            yield platform.Run(runnable, i)

        # Let runnable iterate over all runs and handle output.
        result, result_no_patch = runnable.Run(
          Runner, trybot=options.shell_dir_no_patch)
        results += result
        results_no_patch += result_no_patch
      platform.PostExecution()

    if options.json_test_results:
      results.WriteToFile(options.json_test_results)
    else:  # pragma: no cover
      print(results)

  if options.json_test_results_no_patch:
    results_no_patch.WriteToFile(options.json_test_results_no_patch)
  else:  # pragma: no cover
    print(results_no_patch)

  return min(1, len(results.errors))

# Script entry point: exit with Main's return code (0 on success, 1 on error).
if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))