Commit a8784a40 authored by Tamer Tas, committed by Commit Bot

[testrunner] remove infra_staging tests for stable behavior

The test runner behaves identically with and without the infra_staging flag for
old features. This CL removes duplicate tests that test the same behavior.

R=machenbach@chromium.org
CC=yangguo@chromium.org,sergiyb@chromium.org

No-Try: true
Bug: v8:8174
Change-Id: Icf7bea15b2343b90697016d050fa0d918a99997d
Reviewed-on: https://chromium-review.googlesource.com/c/1424859
Reviewed-by: Sergiy Belozorov <sergiyb@chromium.org>
Commit-Queue: Tamer Tas <tmrts@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58951}
parent ba565577
...@@ -191,7 +191,7 @@ class SystemTest(unittest.TestCase): ...@@ -191,7 +191,7 @@ class SystemTest(unittest.TestCase):
'--shard-run=%d' % shard, '--shard-run=%d' % shard,
'sweet/bananas', 'sweet/bananas',
'sweet/raspberries', 'sweet/raspberries',
infra_staging=True, infra_staging=False,
) )
# One of the shards gets one variant of each test. # One of the shards gets one variant of each test.
self.assertIn('2 tests ran', result.stdout, result) self.assertIn('2 tests ran', result.stdout, result)
...@@ -222,10 +222,7 @@ class SystemTest(unittest.TestCase): ...@@ -222,10 +222,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Done running sweet/raspberries', result.stdout, result) self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
def testFailProc(self): def testFail(self):
self.testFail(infra_staging=True)
def testFail(self, infra_staging=True):
"""Test running only failing tests in two variants.""" """Test running only failing tests in two variants."""
with temp_base() as basedir: with temp_base() as basedir:
result = run_tests( result = run_tests(
...@@ -234,10 +231,8 @@ class SystemTest(unittest.TestCase): ...@@ -234,10 +231,8 @@ class SystemTest(unittest.TestCase):
'--progress=verbose', '--progress=verbose',
'--variants=default,stress', '--variants=default,stress',
'sweet/strawberries', 'sweet/strawberries',
infra_staging=infra_staging, infra_staging=False,
) )
if infra_staging:
self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result) self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result) self.assertEqual(1, result.returncode, result)
...@@ -268,10 +263,7 @@ class SystemTest(unittest.TestCase): ...@@ -268,10 +263,7 @@ class SystemTest(unittest.TestCase):
msg = None # Set to pretty_json for bootstrapping. msg = None # Set to pretty_json for bootstrapping.
self.assertDictEqual(json_output, expected_test_results, msg) self.assertDictEqual(json_output, expected_test_results, msg)
def testFailWithRerunAndJSONProc(self): def testFailWithRerunAndJSON(self):
self.testFailWithRerunAndJSON(infra_staging=True)
def testFailWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json.""" """Test re-running a failing test and output to json."""
with temp_base() as basedir: with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json') json_path = os.path.join(basedir, 'out.json')
...@@ -284,18 +276,12 @@ class SystemTest(unittest.TestCase): ...@@ -284,18 +276,12 @@ class SystemTest(unittest.TestCase):
'--random-seed=123', '--random-seed=123',
'--json-test-results', json_path, '--json-test-results', json_path,
'sweet/strawberries', 'sweet/strawberries',
infra_staging=infra_staging, infra_staging=False,
) )
if infra_staging:
self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result) self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
if not infra_staging: # With test processors we don't count reruns as separated failures.
# We run one test, which fails and gets re-run twice. # TODO(majeski): fix it?
self.assertIn('3 tests failed', result.stdout, result) self.assertIn('1 tests failed', result.stdout, result)
else:
# With test processors we don't count reruns as separated failures.
# TODO(majeski): fix it?
self.assertIn('1 tests failed', result.stdout, result)
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
# TODO(majeski): Previously we only reported the variant flags in the # TODO(majeski): Previously we only reported the variant flags in the
...@@ -306,10 +292,7 @@ class SystemTest(unittest.TestCase): ...@@ -306,10 +292,7 @@ class SystemTest(unittest.TestCase):
self.check_cleaned_json_output( self.check_cleaned_json_output(
'expected_test_results1.json', json_path, basedir) 'expected_test_results1.json', json_path, basedir)
def testFlakeWithRerunAndJSONProc(self): def testFlakeWithRerunAndJSON(self):
self.testFlakeWithRerunAndJSON(infra_staging=True)
def testFlakeWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json.""" """Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir: with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json') json_path = os.path.join(basedir, 'out.json')
...@@ -322,16 +305,11 @@ class SystemTest(unittest.TestCase): ...@@ -322,16 +305,11 @@ class SystemTest(unittest.TestCase):
'--random-seed=123', '--random-seed=123',
'--json-test-results', json_path, '--json-test-results', json_path,
'sweet', 'sweet',
infra_staging=infra_staging, infra_staging=False,
) )
if not infra_staging: self.assertIn(
self.assertIn( 'Done running sweet/bananaflakes: pass', result.stdout, result)
'Done running sweet/bananaflakes: FAIL', result.stdout, result) self.assertIn('All tests succeeded', result.stdout, result)
self.assertIn('1 tests failed', result.stdout, result)
else:
self.assertIn(
'Done running sweet/bananaflakes: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
self.maxDiff = None self.maxDiff = None
self.check_cleaned_json_output( self.check_cleaned_json_output(
...@@ -374,10 +352,7 @@ class SystemTest(unittest.TestCase): ...@@ -374,10 +352,7 @@ class SystemTest(unittest.TestCase):
# TODO(machenbach): Test some more implications of the auto-detected # TODO(machenbach): Test some more implications of the auto-detected
# options, e.g. that the right env variables are set. # options, e.g. that the right env variables are set.
def testSkipsProc(self): def testSkips(self):
self.testSkips(infra_staging=True)
def testSkips(self, infra_staging=True):
"""Test skipping tests in status file for a specific variant.""" """Test skipping tests in status file for a specific variant."""
with temp_base() as basedir: with temp_base() as basedir:
result = run_tests( result = run_tests(
...@@ -386,11 +361,9 @@ class SystemTest(unittest.TestCase): ...@@ -386,11 +361,9 @@ class SystemTest(unittest.TestCase):
'--progress=verbose', '--progress=verbose',
'--variants=nooptimization', '--variants=nooptimization',
'sweet/strawberries', 'sweet/strawberries',
infra_staging=infra_staging, infra_staging=False,
) )
if infra_staging: self.assertIn('0 tests ran', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result) self.assertEqual(2, result.returncode, result)
def testRunSkips(self): def testRunSkips(self):
...@@ -408,10 +381,7 @@ class SystemTest(unittest.TestCase): ...@@ -408,10 +381,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('1 tests ran', result.stdout, result) self.assertIn('1 tests ran', result.stdout, result)
self.assertEqual(1, result.returncode, result) self.assertEqual(1, result.returncode, result)
def testDefaultProc(self): def testDefault(self):
self.testDefault(infra_staging=True)
def testDefault(self, infra_staging=True):
"""Test using default test suites, though no tests are run since they don't """Test using default test suites, though no tests are run since they don't
exist in a test setting. exist in a test setting.
""" """
...@@ -419,12 +389,9 @@ class SystemTest(unittest.TestCase): ...@@ -419,12 +389,9 @@ class SystemTest(unittest.TestCase):
result = run_tests( result = run_tests(
basedir, basedir,
'--mode=Release', '--mode=Release',
infra_staging=infra_staging, infra_staging=False,
) )
if not infra_staging: self.assertIn('0 tests ran', result.stdout, result)
self.assertIn('Warning: no tests were run!', result.stdout, result)
else:
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result) self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self): def testNoBuildConfig(self):
...@@ -521,10 +488,7 @@ class SystemTest(unittest.TestCase): ...@@ -521,10 +488,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('(no source available)', result.stdout, result) self.assertIn('(no source available)', result.stdout, result)
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
def testPredictableProc(self): def testPredictable(self):
self.testPredictable(infra_staging=True)
def testPredictable(self, infra_staging=True):
"""Test running a test in verify-predictable mode. """Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and The test will fail because of missing allocation output. We verify that and
...@@ -538,10 +502,9 @@ class SystemTest(unittest.TestCase): ...@@ -538,10 +502,9 @@ class SystemTest(unittest.TestCase):
'--progress=verbose', '--progress=verbose',
'--variants=default', '--variants=default',
'sweet/bananas', 'sweet/bananas',
infra_staging=infra_staging, infra_staging=False,
) )
if infra_staging: self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result) self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result) self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify_predictable', result.stdout, result) self.assertIn('--predictable --verify_predictable', result.stdout, result)
...@@ -562,10 +525,7 @@ class SystemTest(unittest.TestCase): ...@@ -562,10 +525,7 @@ class SystemTest(unittest.TestCase):
# timeout was used. # timeout was used.
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithDefaultProc(self): def testRandomSeedStressWithDefault(self):
self.testRandomSeedStressWithDefault(infra_staging=True)
def testRandomSeedStressWithDefault(self, infra_staging=True):
"""Test using random-seed-stress feature has the right number of tests.""" """Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir: with temp_base() as basedir:
result = run_tests( result = run_tests(
...@@ -575,11 +535,9 @@ class SystemTest(unittest.TestCase): ...@@ -575,11 +535,9 @@ class SystemTest(unittest.TestCase):
'--variants=default', '--variants=default',
'--random-seed-stress-count=2', '--random-seed-stress-count=2',
'sweet/bananas', 'sweet/bananas',
infra_staging=infra_staging, infra_staging=False,
) )
if infra_staging: self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self): def testRandomSeedStressWithSeed(self):
...@@ -629,10 +587,7 @@ class SystemTest(unittest.TestCase): ...@@ -629,10 +587,7 @@ class SystemTest(unittest.TestCase):
self.assertTrue(statusfile.PresubmitCheck( self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status'))) os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
def testDotsProgressProc(self): def testDotsProgress(self):
self.testDotsProgress(infra_staging=True)
def testDotsProgress(self, infra_staging=True):
with temp_base() as basedir: with temp_base() as basedir:
result = run_tests( result = run_tests(
basedir, basedir,
...@@ -641,26 +596,19 @@ class SystemTest(unittest.TestCase): ...@@ -641,26 +596,19 @@ class SystemTest(unittest.TestCase):
'sweet/cherries', 'sweet/cherries',
'sweet/bananas', 'sweet/bananas',
'--no-sorting', '-j1', # make results order deterministic '--no-sorting', '-j1', # make results order deterministic
infra_staging=infra_staging, infra_staging=False,
) )
if infra_staging: self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('F.', result.stdout, result) self.assertIn('F.', result.stdout, result)
self.assertEqual(1, result.returncode, result) self.assertEqual(1, result.returncode, result)
def testMonoProgressProc(self):
self._testCompactProgress('mono', True)
def testMonoProgress(self): def testMonoProgress(self):
self._testCompactProgress('mono', False) self._testCompactProgress('mono')
def testColorProgressProc(self):
self._testCompactProgress('color', True)
def testColorProgress(self): def testColorProgress(self):
self._testCompactProgress('color', False) self._testCompactProgress('color')
def _testCompactProgress(self, name, infra_staging): def _testCompactProgress(self, name):
with temp_base() as basedir: with temp_base() as basedir:
result = run_tests( result = run_tests(
basedir, basedir,
...@@ -668,7 +616,7 @@ class SystemTest(unittest.TestCase): ...@@ -668,7 +616,7 @@ class SystemTest(unittest.TestCase):
'--progress=%s' % name, '--progress=%s' % name,
'sweet/cherries', 'sweet/cherries',
'sweet/bananas', 'sweet/bananas',
infra_staging=infra_staging, infra_staging=False,
) )
if name == 'color': if name == 'color':
expected = ('\033[32m+ 1\033[0m|' expected = ('\033[32m+ 1\033[0m|'
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment