progress.py
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# for py2/py3 compatibility
from __future__ import print_function

import json
import os
import platform
import subprocess
import sys
import time

from . import base


# Base dir of the build products for Release and Debug.
OUT_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))


def print_failure_header(test):
  if test.output_proc.negative:
    negative_marker = '[negative] '
  else:
    negative_marker = ''
  print("=== %(label)s %(negative)s===" % {
    'label': test,
    'negative': negative_marker,
  })


class ResultsTracker(base.TestProcObserver):
  """Tracks the number of results and stops the run once max_failures is reached."""
  def __init__(self, max_failures):
    super(ResultsTracker, self).__init__()
    self._requirement = base.DROP_OUTPUT

    self.failed = 0
    self.remaining = 0
    self.total = 0
    self.max_failures = max_failures

  def _on_next_test(self, test):
    self.total += 1
    self.remaining += 1

  def _on_result_for(self, test, result):
    self.remaining -= 1
    if result.has_unexpected_output:
      self.failed += 1
      if self.max_failures and self.failed >= self.max_failures:
        print('>>> Too many failures, exiting...')
        self.stop()


class ProgressIndicator(base.TestProcObserver):
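  """Base class for observers that display test results as they arrive."""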
  def finished(self):
    pass


class SimpleProgressIndicator(ProgressIndicator):
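  """Buffers failed tests and prints all their outputs in one summary at the end."""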
  def __init__(self):
    super(SimpleProgressIndicator, self).__init__()
    self._requirement = base.DROP_PASS_OUTPUT

    self._failed = []

  def _on_result_for(self, test, result):
    # TODO(majeski): Support for dummy/grouped results
    if result.has_unexpected_output:
      self._failed.append((test, result))

  def finished(self):
    crashed = 0
    print()
    for test, result in self._failed:
      print_failure_header(test)
      if result.output.stderr:
        print("--- stderr ---")
        print(result.output.stderr.strip())
      if result.output.stdout:
        print("--- stdout ---")
        print(result.output.stdout.strip())
      print("Command: %s" % result.cmd.to_string())
      if result.output.HasCrashed():
        print("exit code: %d" % result.output.exit_code)
        print("--- CRASHED ---")
        crashed += 1
      if result.output.HasTimedOut():
        print("--- TIMEOUT ---")
    if len(self._failed) == 0:
      print("===")
      print("=== All tests succeeded")
      print("===")
    else:
      print()
      print("===")
      print("=== %i tests failed" % len(self._failed))
      if crashed > 0:
        print("=== %i tests CRASHED" % crashed)
      print("===")


class VerboseProgressIndicator(SimpleProgressIndicator):
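  """Prints a line for every finished test and a heartbeat while idle."""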
  def __init__(self):
    super(VerboseProgressIndicator, self).__init__()
    self._last_printed_time = time.time()

  def _print(self, text):
    print(text)
    sys.stdout.flush()
    self._last_printed_time = time.time()

  def _on_result_for(self, test, result):
    super(VerboseProgressIndicator, self)._on_result_for(test, result)
    # TODO(majeski): Support for dummy/grouped results
    if result.has_unexpected_output:
      if result.output.HasCrashed():
        outcome = 'CRASH'
      else:
        outcome = 'FAIL'
    else:
      outcome = 'pass'

    self._print('Done running %s %s: %s' % (
      test, test.variant or 'default', outcome))

  # TODO(machenbach): Remove this platform specific hack and implement a proper
  # feedback channel from the workers, providing which tests are currently run.
  def _print_processes_linux(self):
    if platform.system() == 'Linux':
      try:
        cmd = 'ps -aux | grep "%s"' % OUT_DIR
        # Decode so the output is text on Python 3 as well.
        output = subprocess.check_output(cmd, shell=True).decode()
        self._print('List of processes:')
        for line in (output or '').splitlines():
          # Show command with pid, but other process info cut off.
          self._print('pid: %s cmd: %s' %
                      (line.split()[1], line[line.index(OUT_DIR):]))
      except Exception:
        # This is only a debugging aid; never fail because of it.
        pass

  def _on_heartbeat(self):
    if time.time() - self._last_printed_time > 30:
      # Print something every 30 seconds to not get killed by an output
      # timeout.
      self._print('Still working...')
      self._print_processes_linux()


class DotsProgressIndicator(SimpleProgressIndicator):
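  """Prints one character per test: '.' pass, 'F' fail, 'C' crash, 'T' timeout.

  Wraps the output after every 50 results.
  """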
  def __init__(self):
    super(DotsProgressIndicator, self).__init__()
    self._count = 0

  def _on_result_for(self, test, result):
    super(DotsProgressIndicator, self)._on_result_for(test, result)
    # TODO(majeski): Support for dummy/grouped results
    self._count += 1
    if self._count > 1 and self._count % 50 == 1:
      sys.stdout.write('\n')
    if result.has_unexpected_output:
      if result.output.HasCrashed():
        sys.stdout.write('C')
      elif result.output.HasTimedOut():
        sys.stdout.write('T')
      else:
        sys.stdout.write('F')
    else:
      sys.stdout.write('.')
    sys.stdout.flush()


class CompactProgressIndicator(ProgressIndicator):
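  """Renders progress as a single, self-overwriting status line.

  Subclasses provide the templates, with or without terminal color codes.
  """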
  def __init__(self, templates):
    super(CompactProgressIndicator, self).__init__()
    self._requirement = base.DROP_PASS_OUTPUT

    self._templates = templates
    self._last_status_length = 0
    self._start_time = time.time()

    self._passed = 0
    self._failed = 0
    # Reported separately via set_test_count(); default to 0 so that
    # _print_progress works before the total is known.
    self._total = 0

  def set_test_count(self, test_count):
    self._total = test_count

  def _on_result_for(self, test, result):
    # TODO(majeski): Support for dummy/grouped results
    if result.has_unexpected_output:
      self._failed += 1
    else:
      self._passed += 1

    self._print_progress(str(test))
    if result.has_unexpected_output:
      output = result.output
      stdout = output.stdout.strip()
      stderr = output.stderr.strip()

      self._clear_line(self._last_status_length)
      print_failure_header(test)
      if len(stdout):
        print(self._templates['stdout'] % stdout)
      if len(stderr):
        print(self._templates['stderr'] % stderr)
      print("Command: %s" % result.cmd.to_string(relative=True))
      if output.HasCrashed():
        print("exit code: %d" % output.exit_code)
        print("--- CRASHED ---")
      if output.HasTimedOut():
        print("--- TIMEOUT ---")

  def finished(self):
    self._print_progress('Done')
    print()

  def _print_progress(self, name):
    self._clear_line(self._last_status_length)
    elapsed = time.time() - self._start_time
    if self._total:
      progress = (self._passed + self._failed) * 100 // self._total
    else:
      progress = 0
    status = self._templates['status_line'] % {
      'passed': self._passed,
      'progress': progress,
      'failed': self._failed,
      'test': name,
      # Integer division keeps 'mins' an int on Python 3 as well.
      'mins': int(elapsed) // 60,
      'secs': int(elapsed) % 60
    }
    status = self._truncate(status, 78)
    self._last_status_length = len(status)
    print(status, end='')
    sys.stdout.flush()

  def _truncate(self, string, length):
    if length and len(string) > (length - 3):
      return string[:(length - 3)] + "..."
    else:
      return string

  def _clear_line(self, last_length):
    raise NotImplementedError()


class ColorProgressIndicator(CompactProgressIndicator):
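  """Compact indicator using ANSI escapes: blue progress, green passed, red
  failed, bold stdout ("\033[1m" is bold, "\033[0m" resets attributes)."""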
  def __init__(self):
    templates = {
      'status_line': ("[%(mins)02i:%(secs)02i|"
                      "\033[34m%%%(progress) 4d\033[0m|"
                      "\033[32m+%(passed) 4d\033[0m|"
                      "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
      'stdout': "\033[1m%s\033[0m",
      'stderr': "\033[31m%s\033[0m",
    }
    super(ColorProgressIndicator, self).__init__(templates)

  def _clear_line(self, last_length):
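    # "\033[1K" erases from the start of the line to the cursor; "\r" then
    # returns the cursor to column 0 so the next status line overwrites it.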
    print("\033[1K\r", end='')


class MonochromeProgressIndicator(CompactProgressIndicator):
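  """Compact indicator for terminals without color support.

  The status line renders e.g. as "[01:23|%  42|+ 117|-   3]: test-name".
  """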
  def __init__(self):
    templates = {
      'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
                      "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
      'stdout': '%s',
      'stderr': '%s',
    }
    super(MonochromeProgressIndicator, self).__init__(templates)

  def _clear_line(self, last_length):
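    # No ANSI escapes here: overwrite the previous status with spaces instead.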
    print(("\r" + (" " * last_length) + "\r"), end='')


class JsonTestProgressIndicator(ProgressIndicator):
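  """Collects results, the slowest tests and the mean duration, and appends
  them to a JSON results file."""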
  def __init__(self, framework_name, json_test_results, arch, mode):
    super(JsonTestProgressIndicator, self).__init__()
    # We want to drop stdout/err for all passed tests on the first try, but we
    # need to get outputs for all runs after the first one. To accommodate that,
    # reruns are set to keep the result no matter what requirement says, i.e.
    # keep_output set to True in the RerunProc.
    self._requirement = base.DROP_PASS_STDOUT

    self.framework_name = framework_name
    self.json_test_results = json_test_results
    self.arch = arch
    self.mode = mode
    self.results = []
    self.tests = []

  def _on_result_for(self, test, result):
    if result.is_rerun:
      self.process_results(test, result.results)
    else:
      self.process_results(test, [result])

  def process_results(self, test, results):
    for run, result in enumerate(results):
      # TODO(majeski): Support for dummy/grouped results
      output = result.output
      # Buffer all tests for sorting the durations in the end.
      # TODO(machenbach): Running average + buffer only slowest 20 tests.
      self.tests.append((test, output.duration, result.cmd))

      # Omit tests that run as expected on the first try.
      # Everything that happens after the first run is included in the output
      # even if it flakily passes.
      if not result.has_unexpected_output and run == 0:
        continue

      self.results.append({
        "name": str(test),
        "flags": result.cmd.args,
        "command": result.cmd.to_string(relative=True),
        "run": run + 1,
        "stdout": output.stdout,
        "stderr": output.stderr,
        "exit_code": output.exit_code,
        "result": test.output_proc.get_outcome(output),
        "expected": test.expected_outcomes,
        "duration": output.duration,
        "random_seed": test.random_seed,
        "target_name": test.get_shell(),
        "variant": test.variant,
        "variant_flags": test.variant_flags,
        "framework_name": self.framework_name,
      })

  def finished(self):
    complete_results = []
    if os.path.exists(self.json_test_results):
      with open(self.json_test_results, "r") as f:
        # Buildbot might start out with an empty file.
        complete_results = json.loads(f.read() or "[]")

    duration_mean = None
    if self.tests:
      # Get duration mean.
      duration_mean = (
          sum(duration for (_, duration, cmd) in self.tests) /
          float(len(self.tests)))

    # Sort tests by duration.
    self.tests.sort(key=lambda entry: entry[1], reverse=True)
    slowest_tests = [
      {
        "name": str(test),
        "flags": cmd.args,
        "command": cmd.to_string(relative=True),
        "duration": duration,
        "marked_slow": test.is_slow,
      } for (test, duration, cmd) in self.tests[:20]
    ]

    complete_results.append({
      "arch": self.arch,
      "mode": self.mode,
      "results": self.results,
      "slowest_tests": slowest_tests,
      "duration_mean": duration_mean,
      "test_total": len(self.tests),
    })

    with open(self.json_test_results, "w") as f:
      f.write(json.dumps(complete_results))
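
# The file written above holds a JSON list with one entry per run, shaped
# roughly like the following (illustrative values only):
#
#   [{
#     "arch": "x64",
#     "mode": "release",
#     "results": [{"name": "...", "run": 1, "result": "FAIL", ...}],
#     "slowest_tests": [{"name": "...", "duration": 12.3, ...}],
#     "duration_mean": 1.2,
#     "test_total": 4242,
#   }]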