#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Global system tests for V8 test runners and fuzzers.

This hooks up the framework under tools/testrunner, testing high-level scenarios
with different test suite extensions and build configurations.
"""

# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
# independent.
# TODO(machenbach): Move coverage recording to a global test entry point to
# include other unittest suites in the coverage report.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
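
# Usage (standard unittest invocation, run from a V8 checkout):
#   python tools/unittests/run_tests_test.py
#   python tools/unittests/run_tests_test.py SystemTest.testPass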

import collections
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest

from cStringIO import StringIO

TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')

Result = collections.namedtuple(
    'Result', ['stdout', 'stderr', 'returncode'])

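# Readable string form so a Result can double as the assertion message in the
# tests below, dumping the full runner output when a check fails.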
Result.__str__ = lambda self: (
    '\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
    (self.returncode, self.stdout, self.stderr))


@contextlib.contextmanager
def temp_dir():
  """Wrapper making a temporary directory available."""
  path = None
  try:
    path = tempfile.mkdtemp('v8_test_')
    yield path
  finally:
    if path:
      shutil.rmtree(path)


@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
  """Wrapper that sets up a temporary V8 test root.

  Args:
    baseroot: The folder with the test root blueprint. Relevant files will be
        copied to the temporary test root, to guarantee a fresh setup with no
        dirty state.
  """
  basedir = os.path.join(TEST_DATA_ROOT, baseroot)
  with temp_dir() as tempbase:
    builddir = os.path.join(tempbase, 'out', 'Release')
    testroot = os.path.join(tempbase, 'test')
    os.makedirs(builddir)
    shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
    shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)

    for suite in os.listdir(os.path.join(basedir, 'test')):
      os.makedirs(os.path.join(testroot, suite))
      for entry in os.listdir(os.path.join(basedir, 'test', suite)):
        shutil.copy(
            os.path.join(basedir, 'test', suite, entry),
            os.path.join(testroot, suite))
    yield tempbase


@contextlib.contextmanager
def capture():
  """Wrapper that replaces system stdout/stderr an provides the streams."""
  oldout = sys.stdout
  olderr = sys.stderr
  try:
    stdout = StringIO()
    stderr = StringIO()
    sys.stdout = stdout
    sys.stderr = stderr
    yield stdout, stderr
  finally:
    sys.stdout = oldout
    sys.stderr = olderr


def run_tests(basedir, *args, **kwargs):
  """Executes the test runner with captured output."""
  with capture() as (stdout, stderr):
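    # Prefix every spawned command with the current Python interpreter so the
    # mocked d8 (d8_mocked.py in the fake build dir) runs on any platform.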
    sys_args = ['--command-prefix', sys.executable] + list(args)
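    # Explicitly pick the legacy pipeline or the new test-processor pipeline
    # ("infra staging"), so each test controls which code path it exercises.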
    if kwargs.get('infra_staging', False):
      sys_args.append('--infra-staging')
    else:
      sys_args.append('--no-infra-staging')
    code = standard_runner.StandardTestRunner(
        basedir=basedir).execute(sys_args)
    return Result(stdout.getvalue(), stderr.getvalue(), code)


def override_build_config(basedir, **kwargs):
  """Override the build config with new values provided as kwargs."""
  path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
  with open(path) as f:
    config = json.load(f)
    config.update(kwargs)
  with open(path, 'w') as f:
    json.dump(config, f)


class SystemTest(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    # Try to set up python coverage and run without it if not available.
    cls._cov = None
    try:
      import coverage
      if int(coverage.__version__.split('.')[0]) < 4:
        cls._cov = None
        print 'Python coverage version >= 4 required.'
        raise ImportError()
      cls._cov = coverage.Coverage(
          source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
          omit=['*unittest*', '*__init__.py'],
      )
      cls._cov.exclude('raise NotImplementedError')
      cls._cov.exclude('if __name__ == .__main__.:')
      cls._cov.exclude('except TestRunnerError:')
      cls._cov.exclude('except KeyboardInterrupt:')
      cls._cov.exclude('if options.verbose:')
      cls._cov.exclude('if verbose:')
      cls._cov.exclude('pass')
      cls._cov.exclude('assert False')
      cls._cov.start()
    except ImportError:
      print 'Running without python coverage.'
    sys.path.append(TOOLS_ROOT)
    global standard_runner
    from testrunner import standard_runner
    from testrunner.local import pool
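    # Put the worker pool into its single-process testing mode; multiprocessing
    # doesn't play well with coverage collection (see TODO above).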
    pool.setup_testing()

  @classmethod
  def tearDownClass(cls):
    if cls._cov:
      cls._cov.stop()
      print ''
      print cls._cov.report(show_missing=True)

  def testPass(self):
    """Test running only passing tests in two variants.

    Also test printing durations.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          '--time',
          'sweet/bananas',
          'sweet/raspberries',
      )
      self.assertIn('Running 4 tests', result.stdout, result)
      self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
      self.assertIn('Total time:', result.stderr, result)
      self.assertIn('sweet/bananas', result.stderr, result)
      self.assertEqual(0, result.returncode, result)

  def testShardedProc(self):
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--mode=Release',
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/bananas',
            'sweet/raspberries',
            infra_staging=True,
        )
        # One of the shards gets one variant of each test.
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('2 tests ran', result.stdout, result)
        if shard == 1:
          self.assertIn('Done running sweet/bananas', result.stdout, result)
        else:
          self.assertIn('Done running sweet/raspberries', result.stdout, result)
        self.assertEqual(0, result.returncode, result)

  def testSharded(self):
    """Test running a particular shard."""
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--mode=Release',
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/bananas',
            'sweet/raspberries',
        )
        # One of the shards gets one variant of each test.
        self.assertIn('Running 2 tests', result.stdout, result)
        self.assertIn('Done running sweet/bananas', result.stdout, result)
        self.assertIn('Done running sweet/raspberries', result.stdout, result)
        self.assertEqual(0, result.returncode, result)

  def testFailProc(self):
    self.testFail(infra_staging=True)

  def testFail(self, infra_staging=False):
    """Test running only failing tests in two variants."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/strawberries',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 2 tests', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('2 tests ran', result.stdout, result)
      self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def check_cleaned_json_output(self, expected_results_name, actual_json):
    # Check relevant properties of the json output.
    with open(actual_json) as f:
      json_output = json.load(f)[0]
      pretty_json = json.dumps(json_output, indent=2, sort_keys=True)

    # Replace duration in actual output as it's non-deterministic. Also
    # replace the python executable prefix as it has a different absolute
    # path dependent on where this runs.
    def replace_variable_data(data):
      data['duration'] = 1
      data['command'] = ' '.join(
          ['/usr/bin/python'] + data['command'].split()[1:])
    for data in json_output['slowest_tests']:
      replace_variable_data(data)
    for data in json_output['results']:
      replace_variable_data(data)
    json_output['duration_mean'] = 1

    with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
      expected_test_results = json.load(f)

    msg = None  # Set to pretty_json for bootstrapping.
    self.assertDictEqual(json_output, expected_test_results, msg)

  def testFailWithRerunAndJSONProc(self):
    self.testFailWithRerunAndJSON(infra_staging=True)

  def testFailWithRerunAndJSON(self, infra_staging=False):
    """Test re-running a failing test and output to json."""
    with temp_base() as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet/strawberries',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 1 tests', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('1 tests ran', result.stdout, result)
      self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
      if not infra_staging:
        # We run one test, which fails and gets re-run twice.
        self.assertIn('3 tests failed', result.stdout, result)
      else:
        # With test processors we don't count reruns as separated failures.
        # TODO(majeski): fix it?
        self.assertIn('1 tests failed', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

      # TODO(majeski): Previously we only reported the variant flags in the
      # flags field of the test result.
      # After recent changes we report all flags, including the file names.
      # This is redundant to the command. Needs investigation.
      self.check_cleaned_json_output('expected_test_results1.json', json_path)

  def testFlakeWithRerunAndJSONProc(self):
    self.testFlakeWithRerunAndJSON(infra_staging=True)

  def testFlakeWithRerunAndJSON(self, infra_staging=False):
    """Test re-running a failing test and output to json."""
    with temp_base(baseroot='testroot2') as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 1 tests', result.stdout, result)
        self.assertIn(
            'Done running sweet/bananaflakes: FAIL', result.stdout, result)
        self.assertIn('1 tests failed', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn(
            'Done running sweet/bananaflakes: pass', result.stdout, result)
        self.assertIn('All tests succeeded', result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      self.check_cleaned_json_output('expected_test_results2.json', json_path)

  def testAutoDetect(self):
    """Fake a build with several auto-detected options.

    Using all those options at once doesn't really make much sense. This is
    merely for getting coverage.
    """
    with temp_base() as basedir:
      override_build_config(
          basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
          is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
          v8_enable_i18n_support=False, v8_target_cpu='x86',
          v8_use_snapshot=False)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      expect_text = (
          '>>> Autodetected:\n'
          'asan\n'
          'cfi_vptr\n'
          'dcheck_always_on\n'
          'msan\n'
          'no_i18n\n'
          'no_snap\n'
          'tsan\n'
          'ubsan_vptr\n'
          '>>> Running tests for ia32.release')
      self.assertIn(expect_text, result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      # TODO(machenbach): Test some more implications of the auto-detected
      # options, e.g. that the right env variables are set.

  def testSkipsProc(self):
    self.testSkips(infra_staging=True)

  def testSkips(self, infra_staging=False):
    """Test skipping tests in status file for a specific variant."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=nooptimization',
          'sweet/strawberries',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 0 tests', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('0 tests ran', result.stdout, result)
      self.assertEqual(3, result.returncode, result)

  def testDefaultProc(self):
    self.testDefault(infra_staging=True)

  def testDefault(self, infra_staging=False):
    """Test using default test suites, though no tests are run since they don't
    exist in a test setting.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Warning: no tests were run!', result.stdout, result)
      else:
        self.assertIn('Running 0 base tests', result.stdout, result)
        self.assertIn('0 tests ran', result.stdout, result)
      self.assertEqual(3, result.returncode, result)

  def testNoBuildConfig(self):
    """Test failing run when build config is not found."""
    with temp_base() as basedir:
      result = run_tests(basedir)
      self.assertIn('Failed to load build config', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testGNOption(self):
    """Test using gn option, but no gn build folder is found."""
    with temp_base() as basedir:
      # TODO(machenbach): This should fail gracefully.
      with self.assertRaises(OSError):
        run_tests(basedir, '--gn')

  def testInconsistentMode(self):
    """Test failing run when attempting to wrongly override the mode."""
    with temp_base() as basedir:
      override_build_config(basedir, is_debug=True)
      result = run_tests(basedir, '--mode=Release')
      self.assertIn('execution mode (release) for release is inconsistent '
                    'with build config (debug)', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testInconsistentArch(self):
    """Test failing run when attempting to wrongly override the arch."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--mode=Release', '--arch=ia32')
      self.assertIn(
          '--arch value (ia32) inconsistent with build config (x64).',
          result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testWrongVariant(self):
    """Test using a bogus variant."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--mode=Release', '--variants=meh')
      self.assertEqual(1, result.returncode, result)

  def testModeFromBuildConfig(self):
    """Test auto-detection of mode from build config."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
      self.assertIn('Running tests for x64.release', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testReport(self):
    """Test the report feature.

    This also exercises various paths in statusfile logic.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default',
          'sweet',
          '--report',
      )
      self.assertIn(
          '3 tests are expected to fail that we should fix',
          result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testWarnUnusedRules(self):
    """Test the unused-rules feature."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default,nooptimization',
          'sweet',
          '--warn-unused',
      )
      self.assertIn('Unused rule: carrots', result.stdout, result)
      self.assertIn('Unused rule: regress/', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testCatNoSources(self):
    """Test printing sources, but the suite's tests have none available."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default',
          'sweet/bananas',
          '--cat',
      )
      self.assertIn('begin source: sweet/bananas', result.stdout, result)
      self.assertIn('(no source available)', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testPredictableProc(self):
    self.testPredictable(infra_staging=True)

  def testPredictable(self, infra_staging=False):
    """Test running a test in verify-predictable mode.

    The test will fail because of missing allocation output. We verify that and
    that the predictable flags are passed and printed after failure.
    """
    with temp_base() as basedir:
      override_build_config(basedir, v8_enable_verify_predictable=True)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 1 tests', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('1 tests ran', result.stdout, result)
      self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
      self.assertIn('Test had no allocation output', result.stdout, result)
      self.assertIn('--predictable --verify_predictable', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testSlowArch(self):
    """Test timeout factor manipulation on slow architecture."""
    with temp_base() as basedir:
      override_build_config(basedir, v8_target_cpu='arm64')
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      # TODO(machenbach): We don't have a way for testing if the correct
      # timeout was used.
      self.assertEqual(0, result.returncode, result)

  def testRandomSeedStressWithDefault(self):
    """Test using random-seed-stress feature has the right number of tests."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          'sweet/bananas',
      )
      self.assertIn('Running 2 tests', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testRandomSeedStressWithSeed(self):
    """Test using random-seed-stress feature passing a random seed."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          '--random-seed=123',
          'sweet/strawberries',
      )
      self.assertIn('Running 2 tests', result.stdout, result)
      # We use a failing test so that the command is printed and we can verify
      # that the right random seed was passed.
      self.assertIn('--random-seed=123', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testSpecificVariants(self):
    """Test using NO_VARIANTS modifiers in status files skips the desire tests.

    The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
    But the status file applies a modifier to each skipping one of the
    variants.
    """
    with temp_base() as basedir:
      override_build_config(basedir, v8_use_snapshot=False)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/bananas',
          'sweet/raspberries',
      )
      # Both tests are either marked as running in only default or only
      # slow variant.
      self.assertIn('Running 2 tests', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testStatusFilePresubmit(self):
    """Test that the fake status file is well-formed."""
    with temp_base() as basedir:
      from testrunner.local import statusfile
      self.assertTrue(statusfile.PresubmitCheck(
          os.path.join(basedir, 'test', 'sweet', 'sweet.status')))

  def testDotsProgressProc(self):
    self.testDotsProgress(infra_staging=True)

  def testDotsProgress(self, infra_staging=False):
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=dots',
          'sweet/cherries',
          'sweet/bananas',
          '--no-sorting', '-j1', # make results order deterministic
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 2 tests', result.stdout, result)
      else:
        self.assertIn('Running 2 base tests', result.stdout, result)
        self.assertIn('2 tests ran', result.stdout, result)
      self.assertIn('F.', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testMonoProgressProc(self):
    self._testCompactProgress('mono', True)

  def testMonoProgress(self):
    self._testCompactProgress('mono', False)

  def testColorProgressProc(self):
    self._testCompactProgress('color', True)

  def testColorProgress(self):
    self._testCompactProgress('color', False)

  def _testCompactProgress(self, name, infra_staging):
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=%s' % name,
          'sweet/cherries',
          'sweet/bananas',
          infra_staging=infra_staging,
      )
      if name == 'color':
        expected = ('\033[34m% 100\033[0m|'
                    '\033[32m+   1\033[0m|'
                    '\033[31m-   1\033[0m]: Done')
      else:
        expected = '% 100|+   1|-   1]: Done'
      self.assertIn(expected, result.stdout)
      self.assertIn('sweet/cherries', result.stdout)
      self.assertIn('sweet/bananas', result.stdout)
      self.assertEqual(1, result.returncode, result)

if __name__ == '__main__':
  unittest.main()