Commit 95a85701 authored by Al Muthanna Athamina, committed by V8 LUCI CQ

Allow V8 to run all unittests in presubmit

Search for all files with the test naming convention and run them in v8_presubmit.
Also modify all PRESUBMIT files in the tools directory to include any test file
with the appropriate naming convention.

Bug: chromium:1306474
Change-Id: I61c1b7c71badbbc3b99705289588aa8280824d66
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3532266
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Almothana Athamneh <almuthanna@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79651}
parent 58836f73
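
Note (illustration, not part of the commit): a minimal sketch of how the relaxed files_to_check pattern behaves. The old clusterfuzz/foozzie presubmit listed a single script ('v8_foozzie_test.py$'), while the new pattern r'.+_test\.py$' picks up any file following the *_test.py naming convention. The candidate file names below are examples only, and plain re.match is used purely for illustration rather than as the exact depot_tools matching semantics.

# Illustrative only: compare which file names the old and new patterns accept.
import re

OLD_PATTERN = r'v8_foozzie_test.py$'  # old: one hard-coded test script
NEW_PATTERN = r'.+_test\.py$'         # new: any *_test.py file

candidates = [
    'v8_foozzie_test.py',
    'mb_test.py',
    'pool_test.py',
    'some_helper.py',  # not a test; neither pattern matches
]

for name in candidates:
    print('%-22s old=%-5s new=%s' % (name,
                                     bool(re.match(OLD_PATTERN, name)),
                                     bool(re.match(NEW_PATTERN, name))))
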
......@@ -10,8 +10,10 @@ USE_PYTHON3 = True
def _RunTests(input_api, output_api):
return input_api.RunTests(input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, '.', files_to_check=['v8_foozzie_test.py$']))
return input_api.RunTests(
input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, '.', files_to_check=[r'.+_test\.py$']))
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
......
......@@ -18,8 +18,7 @@ def _CommonChecks(input_api, output_api):
# Run the MB unittests.
results.extend(
input_api.canned_checks.RunUnitTestsInDirectory(input_api, output_api,
'.',
[r'^.+_unittest\.py$']))
'.', [r'^.+_test\.py$']))
# Validate the format of the mb_config.pyl file.
cmd = [input_api.python_executable, 'mb.py', 'validate']
......
......@@ -3,7 +3,6 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for mb.py."""
import json
......@@ -16,6 +15,7 @@ import mb
class FakeMBW(mb.MetaBuildWrapper):
def __init__(self, win32=False):
super(FakeMBW, self).__init__()
......@@ -93,6 +93,7 @@ class FakeMBW(mb.MetaBuildWrapper):
class FakeFile(object):
def __init__(self, files):
self.name = '/tmp/file'
self.buf = ''
......@@ -102,7 +103,7 @@ class FakeFile(object):
self.buf += contents
def close(self):
self.files[self.name] = self.buf
self.files[self.name] = self.buf
TEST_CONFIG = """\
......@@ -152,7 +153,6 @@ TEST_CONFIG = """\
}
"""
TRYSERVER_CONFIG = """\
{
'builder_groups': {
......@@ -177,12 +177,12 @@ TRYSERVER_CONFIG = """\
class UnitTest(unittest.TestCase):
def fake_mbw(self, files=None, win32=False):
mbw = FakeMBW(win32=win32)
mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
mbw.files.setdefault(
mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'),
'''{
mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'), '''{
"foo_unittests": {
"label": "//foo:foo_unittests",
"type": "console_test_launcher",
......@@ -211,69 +211,94 @@ class UnitTest(unittest.TestCase):
return mbw
def test_analyze(self):
files = {'/tmp/in.json': '''{\
files = {
'/tmp/in.json':
'''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
'/tmp/out.json.gn': '''{\
'/tmp/out.json.gn':
'''{\
"status": "Found dependency",
"compile_targets": ["//foo:foo_unittests"],
"test_targets": ["//foo:foo_unittests"]
}'''}
}'''
}
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
self.check([
'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
'/tmp/out.json'
],
mbw=mbw,
ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
self.assertEqual(out, {
'status': 'Found dependency',
'compile_targets': ['foo:foo_unittests'],
'test_targets': ['foo_unittests']
})
self.assertEqual(
out, {
'status': 'Found dependency',
'compile_targets': ['foo:foo_unittests'],
'test_targets': ['foo_unittests']
})
def test_analyze_optimizes_compile_for_all(self):
files = {'/tmp/in.json': '''{\
files = {
'/tmp/in.json':
'''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
'/tmp/out.json.gn': '''{\
'/tmp/out.json.gn':
'''{\
"status": "Found dependency",
"compile_targets": ["//foo:foo_unittests", "all"],
"test_targets": ["//foo:foo_unittests"]
}'''}
}'''
}
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
self.check([
'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
'/tmp/out.json'
],
mbw=mbw,
ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
# check that 'foo_unittests' is not in the compile_targets
self.assertEqual(['all'], out['compile_targets'])
def test_analyze_handles_other_toolchains(self):
files = {'/tmp/in.json': '''{\
files = {
'/tmp/in.json':
'''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
'/tmp/out.json.gn': '''{\
'/tmp/out.json.gn':
'''{\
"status": "Found dependency",
"compile_targets": ["//foo:foo_unittests",
"//foo:foo_unittests(bar)"],
"test_targets": ["//foo:foo_unittests"]
}'''}
}'''
}
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
self.check([
'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
'/tmp/out.json'
],
mbw=mbw,
ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
# crbug.com/736215: If GN returns a label containing a toolchain,
......@@ -285,22 +310,30 @@ class UnitTest(unittest.TestCase):
def test_analyze_handles_way_too_many_results(self):
too_many_files = ', '.join(['"//foo:foo%d"' % i for i in range(4 * 1024)])
files = {'/tmp/in.json': '''{\
files = {
'/tmp/in.json':
'''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
'/tmp/out.json.gn': '''{\
'/tmp/out.json.gn':
'''{\
"status": "Found dependency",
"compile_targets": [''' + too_many_files + '''],
"test_targets": ["//foo:foo_unittests"]
}'''}
}'''
}
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
self.check([
'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
'/tmp/out.json'
],
mbw=mbw,
ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
# If GN returns so many compile targets that we might have command-line
......@@ -312,7 +345,8 @@ class UnitTest(unittest.TestCase):
def test_gen(self):
mbw = self.fake_mbw()
self.check(['gen', '-c', 'debug_goma', '//out/Default', '-g', '/goma'],
mbw=mbw, ret=0)
mbw=mbw,
ret=0)
self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
('goma_dir = "/goma"\n'
'is_debug = true\n'
......@@ -325,18 +359,22 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(win32=True)
self.check(['gen', '-c', 'debug_goma', '-g', 'c:\\goma', '//out/Debug'],
mbw=mbw, ret=0)
mbw=mbw,
ret=0)
self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
('goma_dir = "c:\\\\goma"\n'
'is_debug = true\n'
'use_goma = true\n'))
self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
'--check\n', mbw.out)
self.assertIn(
'c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
'--check\n', mbw.out)
mbw = self.fake_mbw()
self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_bot',
'//out/Debug'],
mbw=mbw, ret=0)
self.check([
'gen', '-m', 'fake_builder_group', '-b', 'fake_args_bot', '//out/Debug'
],
mbw=mbw,
ret=0)
# TODO(almuthanna): disable test temporarily to
# solve this issue https://crbug.com/v8/11102
# self.assertEqual(
......@@ -345,17 +383,23 @@ class UnitTest(unittest.TestCase):
def test_gen_args_file_mixins(self):
mbw = self.fake_mbw()
self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_file',
'//out/Debug'], mbw=mbw, ret=0)
self.check([
'gen', '-m', 'fake_builder_group', '-b', 'fake_args_file', '//out/Debug'
],
mbw=mbw,
ret=0)
self.assertEqual(
mbw.files['/fake_src/out/Debug/args.gn'],
('import("//build/args/fake.gn")\n'
'use_goma = true\n'))
self.assertEqual(mbw.files['/fake_src/out/Debug/args.gn'],
('import("//build/args/fake.gn")\n'
'use_goma = true\n'))
mbw = self.fake_mbw()
self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_file_twice',
'//out/Debug'], mbw=mbw, ret=1)
self.check([
'gen', '-m', 'fake_builder_group', '-b', 'fake_args_file_twice',
'//out/Debug'
],
mbw=mbw,
ret=1)
def test_gen_fails(self):
mbw = self.fake_mbw()
......@@ -364,167 +408,162 @@ class UnitTest(unittest.TestCase):
def test_gen_swarming(self):
files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
'/tmp/swarming_targets':
'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl':
("{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"),
'/fake_src/out/Default/base_unittests.runtime_deps':
("base_unittests\n"),
}
mbw = self.fake_mbw(files)
self.check(['gen',
'-c', 'debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'//out/Default'], mbw=mbw, ret=0)
self.assertIn('/fake_src/out/Default/base_unittests.isolate',
mbw.files)
self.check([
'gen', '-c', 'debug_goma', '--swarming-targets-file',
'/tmp/swarming_targets', '//out/Default'
],
mbw=mbw,
ret=0)
self.assertIn('/fake_src/out/Default/base_unittests.isolate', mbw.files)
self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
mbw.files)
def test_gen_swarming_script(self):
files = {
'/tmp/swarming_targets': 'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'script',"
" 'script': '/fake_src/out/Default/test_script.py',"
" 'args': [],"
"}}\n"
),
'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
"cc_perftests\n"
),
'/tmp/swarming_targets':
'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl':
("{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'script',"
" 'script': '/fake_src/out/Default/test_script.py',"
" 'args': [],"
"}}\n"),
'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps':
("cc_perftests\n"),
}
mbw = self.fake_mbw(files=files, win32=True)
self.check(['gen',
'-c', 'debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl',
'//out/Default'], mbw=mbw, ret=0)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
mbw.files)
self.check([
'gen', '-c', 'debug_goma', '--swarming-targets-file',
'/tmp/swarming_targets', '--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl', '//out/Default'
],
mbw=mbw,
ret=0)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate', mbw.files)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
mbw.files)
def test_multiple_isolate_maps(self):
files = {
'/tmp/swarming_targets': 'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
"{'cc_perftests2': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
"cc_perftests\n"
),
'/tmp/swarming_targets':
'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl':
("{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"),
'/fake_src/testing/buildbot/gn_isolate_map2.pyl':
("{'cc_perftests2': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"),
'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps':
("cc_perftests\n"),
}
mbw = self.fake_mbw(files=files, win32=True)
self.check(['gen',
'-c', 'debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map2.pyl',
'//out/Default'], mbw=mbw, ret=0)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
mbw.files)
self.check([
'gen', '-c', 'debug_goma', '--swarming-targets-file',
'/tmp/swarming_targets', '--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl', '--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map2.pyl', '//out/Default'
],
mbw=mbw,
ret=0)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate', mbw.files)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
mbw.files)
def test_duplicate_isolate_maps(self):
files = {
'/tmp/swarming_targets': 'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
"{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
"cc_perftests\n"
),
'/tmp/swarming_targets':
'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl':
("{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"),
'/fake_src/testing/buildbot/gn_isolate_map2.pyl':
("{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"),
'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps':
("cc_perftests\n"),
}
mbw = self.fake_mbw(files=files, win32=True)
# Check that passing duplicate targets into mb fails.
self.check(['gen',
'-c', 'debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map2.pyl',
'//out/Default'], mbw=mbw, ret=1)
self.check([
'gen', '-c', 'debug_goma', '--swarming-targets-file',
'/tmp/swarming_targets', '--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl', '--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map2.pyl', '//out/Default'
],
mbw=mbw,
ret=1)
def test_isolate(self):
files = {
'/fake_src/out/Default/toolchain.ninja': "",
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
'/fake_src/out/Default/toolchain.ninja':
"",
'/fake_src/testing/buildbot/gn_isolate_map.pyl':
("{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"),
'/fake_src/out/Default/base_unittests.runtime_deps':
("base_unittests\n"),
}
self.check(['isolate', '-c', 'debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
self.check(
['isolate', '-c', 'debug_goma', '//out/Default', 'base_unittests'],
files=files,
ret=0)
# test running isolate on an existing build_dir
files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n'
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
files=files,
ret=0)
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
files=files,
ret=0)
def test_run(self):
files = {
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
'/fake_src/testing/buildbot/gn_isolate_map.pyl':
("{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"),
'/fake_src/out/Default/base_unittests.runtime_deps':
("base_unittests\n"),
}
self.check(['run', '-c', 'debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
self.check(['run', '-c', 'debug_goma', '//out/Default', 'base_unittests'],
files=files,
ret=0)
def test_lookup(self):
self.check(['lookup', '-c', 'debug_goma'], ret=0,
self.check(['lookup', '-c', 'debug_goma'],
ret=0,
out=('\n'
'Writing """\\\n'
'is_debug = true\n'
......@@ -533,12 +572,14 @@ class UnitTest(unittest.TestCase):
'/fake_src/buildtools/linux64/gn gen _path_\n'))
def test_quiet_lookup(self):
self.check(['lookup', '-c', 'debug_goma', '--quiet'], ret=0,
self.check(['lookup', '-c', 'debug_goma', '--quiet'],
ret=0,
out=('is_debug = true\n'
'use_goma = true\n'))
def test_lookup_goma_dir_expansion(self):
self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'],
ret=0,
out=('\n'
'Writing """\\\n'
'enable_doom_melon = true\n'
......@@ -560,43 +601,52 @@ class UnitTest(unittest.TestCase):
def test_multiple_phases(self):
# Check that not passing a --phase to a multi-phase builder fails.
mbw = self.check(['lookup', '-m', 'fake_builder_group',
'-b', 'fake_multi_phase'],
ret=1)
mbw = self.check(
['lookup', '-m', 'fake_builder_group', '-b', 'fake_multi_phase'], ret=1)
self.assertIn('Must specify a build --phase', mbw.out)
# Check that passing a --phase to a single-phase builder fails.
mbw = self.check(['lookup', '-m', 'fake_builder_group',
'-b', 'fake_builder',
'--phase', 'phase_1'], ret=1)
mbw = self.check([
'lookup', '-m', 'fake_builder_group', '-b', 'fake_builder', '--phase',
'phase_1'
],
ret=1)
self.assertIn('Must not specify a build --phase', mbw.out)
# Check that passing a wrong phase key to a multi-phase builder fails.
mbw = self.check(['lookup', '-m', 'fake_builder_group',
'-b', 'fake_multi_phase',
'--phase', 'wrong_phase'], ret=1)
mbw = self.check([
'lookup', '-m', 'fake_builder_group', '-b', 'fake_multi_phase',
'--phase', 'wrong_phase'
],
ret=1)
self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out)
# Check that passing a correct phase key to a multi-phase builder passes.
mbw = self.check(['lookup', '-m', 'fake_builder_group',
'-b', 'fake_multi_phase',
'--phase', 'phase_1'], ret=0)
mbw = self.check([
'lookup', '-m', 'fake_builder_group', '-b', 'fake_multi_phase',
'--phase', 'phase_1'
],
ret=0)
self.assertIn('phase = 1', mbw.out)
mbw = self.check(['lookup', '-m', 'fake_builder_group',
'-b', 'fake_multi_phase',
'--phase', 'phase_2'], ret=0)
mbw = self.check([
'lookup', '-m', 'fake_builder_group', '-b', 'fake_multi_phase',
'--phase', 'phase_2'
],
ret=0)
self.assertIn('phase = 2', mbw.out)
def test_recursive_lookup(self):
files = {
'/fake_src/build/args/fake.gn': (
'enable_doom_melon = true\n'
'enable_antidoom_banana = true\n'
)
'/fake_src/build/args/fake.gn': ('enable_doom_melon = true\n'
'enable_antidoom_banana = true\n')
}
self.check(['lookup', '-m', 'fake_builder_group', '-b', 'fake_args_file',
'--recursive'], files=files, ret=0,
self.check([
'lookup', '-m', 'fake_builder_group', '-b', 'fake_args_file',
'--recursive'
],
files=files,
ret=0,
out=('enable_antidoom_banana = true\n'
'enable_doom_melon = true\n'
'use_goma = true\n'))
......@@ -608,7 +658,8 @@ class UnitTest(unittest.TestCase):
def test_buildbucket(self):
mbw = self.fake_mbw()
mbw.files[mbw.default_config] = TRYSERVER_CONFIG
self.check(['gerrit-buildbucket-config'], mbw=mbw,
self.check(['gerrit-buildbucket-config'],
mbw=mbw,
ret=0,
out=('# This file was generated using '
'"tools/mb/mb.py gerrit-buildbucket-config".\n'
......
......@@ -8,14 +8,16 @@ USE_PYTHON3 = True
def _CommonChecks(input_api, output_api):
return input_api.RunTests(input_api.canned_checks.GetUnitTestsRecursively(
input_api,
output_api,
input_api.os_path.join(input_api.PresubmitLocalPath()),
files_to_check=[r'.+_unittest\.py$'],
files_to_skip=[],
run_on_python2=False,
))
return input_api.RunTests(
input_api.canned_checks.GetUnitTestsRecursively(
input_api,
output_api,
input_api.os_path.join(input_api.PresubmitLocalPath()),
files_to_check=[r'.+_test\.py$'],
files_to_skip=[],
run_on_python2=False,
))
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
......
......@@ -8,18 +8,21 @@ import sys
import unittest
# Needed because the test runner contains relative imports.
TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.local.pool import Pool
def Run(x):
if x == 10:
raise Exception("Expected exception triggered by test.")
return x
class PoolTest(unittest.TestCase):
def testNormal(self):
results = set()
pool = Pool(3)
......@@ -55,9 +58,8 @@ class PoolTest(unittest.TestCase):
if result.value < 30:
pool.add([result.value + 20])
self.assertEqual(
set(range(0, 10)) | set(range(20, 30)) | set(range(40, 50)),
results)
set(range(0, 10)) | set(range(20, 30)) | set(range(40, 50)), results)
if __name__ == '__main__':
unittest.main()
unittest.main()
......@@ -7,20 +7,18 @@ import os
import sys
import unittest
TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.local import statusfile
from testrunner.local.utils import Freeze
TEST_VARIABLES = {
'system': 'linux',
'mode': 'release',
'system': 'linux',
'mode': 'release',
}
TEST_STATUS_FILE = """
[
[ALWAYS, {
......@@ -44,6 +42,7 @@ def make_variables():
class UtilsTest(unittest.TestCase):
def test_freeze(self):
self.assertEqual(2, Freeze({1: [2]})[1][0])
self.assertEqual(set([3]), Freeze({1: [2], 2: set([3])})[2])
......@@ -65,32 +64,29 @@ class UtilsTest(unittest.TestCase):
class StatusFileTest(unittest.TestCase):
def test_eval_expression(self):
variables = make_variables()
variables.update(statusfile.VARIABLES)
self.assertTrue(
statusfile._EvalExpression(
'system==linux and mode==release', variables))
statusfile._EvalExpression('system==linux and mode==release',
variables))
self.assertTrue(
statusfile._EvalExpression(
'system==linux or variant==default', variables))
statusfile._EvalExpression('system==linux or variant==default',
variables))
self.assertFalse(
statusfile._EvalExpression(
'system==linux and mode==debug', variables))
statusfile._EvalExpression('system==linux and mode==debug', variables))
self.assertRaises(
AssertionError,
lambda: statusfile._EvalExpression(
AssertionError, lambda: statusfile._EvalExpression(
'system==linux and mode==foo', variables))
self.assertRaises(
SyntaxError,
lambda: statusfile._EvalExpression(
SyntaxError, lambda: statusfile._EvalExpression(
'system==linux and mode=release', variables))
self.assertEquals(
statusfile.VARIANT_EXPRESSION,
statusfile._EvalExpression(
'system==linux and variant==default', variables)
)
statusfile._EvalExpression('system==linux and variant==default',
variables))
def test_read_statusfile_section_true(self):
rules, prefix_rules = statusfile.ReadStatusFile(
......@@ -98,14 +94,14 @@ class StatusFileTest(unittest.TestCase):
self.assertEquals(
{
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL', 'SLOW']),
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL', 'SLOW']),
},
rules[''],
)
self.assertEquals(
{
'foo/': set(['SLOW', 'FAIL']),
'foo/': set(['SLOW', 'FAIL']),
},
prefix_rules[''],
)
......@@ -118,14 +114,14 @@ class StatusFileTest(unittest.TestCase):
self.assertEquals(
{
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL']),
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL']),
},
rules[''],
)
self.assertEquals(
{
'foo/': set(['PASS', 'SLOW']),
'foo/': set(['PASS', 'SLOW']),
},
prefix_rules[''],
)
......@@ -140,30 +136,30 @@ class StatusFileTest(unittest.TestCase):
self.assertEquals(
{
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL']),
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL']),
},
rules[''],
)
self.assertEquals(
{
'foo/': set(['PASS', 'SLOW']),
'foo/': set(['PASS', 'SLOW']),
},
prefix_rules[''],
)
self.assertEquals(
{
'baz/bar': set(['PASS', 'SLOW']),
'baz/bar': set(['PASS', 'SLOW']),
},
rules['default'],
)
self.assertEquals(
{
'foo/': set(['FAIL']),
'foo/': set(['FAIL']),
},
prefix_rules['default'],
)
if __name__ == '__main__':
unittest.main()
unittest.main()
......@@ -10,8 +10,8 @@ import tempfile
import unittest
# Needed because the test runner contains relative imports.
TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.local.testsuite import TestSuite, TestGenerator
......@@ -20,6 +20,7 @@ from testrunner.test_config import TestConfig
class TestSuiteTest(unittest.TestCase):
def setUp(self):
test_dir = os.path.dirname(__file__)
self.test_root = os.path.join(test_dir, "fake_testsuite")
......@@ -37,8 +38,8 @@ class TestSuiteTest(unittest.TestCase):
verbose=False,
)
self.suite = TestSuite.Load(
self.test_root, self.test_config, "standard_runner")
self.suite = TestSuite.Load(self.test_root, self.test_config,
"standard_runner")
def testLoadingTestSuites(self):
self.assertEquals(self.suite.name, "fake_testsuite")
......@@ -49,8 +50,8 @@ class TestSuiteTest(unittest.TestCase):
self.assertIsNone(self.suite.statusfile)
def testLoadingTestsFromDisk(self):
tests = self.suite.load_tests_from_disk(
statusfile_variables={})
tests = self.suite.load_tests_from_disk(statusfile_variables={})
def is_generator(iterator):
return iterator == iter(iterator)
......@@ -65,10 +66,8 @@ class TestSuiteTest(unittest.TestCase):
self.assertIsNotNone(self.suite.statusfile)
def testMergingTestGenerators(self):
tests = self.suite.load_tests_from_disk(
statusfile_variables={})
more_tests = self.suite.load_tests_from_disk(
statusfile_variables={})
tests = self.suite.load_tests_from_disk(statusfile_variables={})
more_tests = self.suite.load_tests_from_disk(statusfile_variables={})
# Merge the test generators
tests.merge(more_tests)
......@@ -83,4 +82,4 @@ class TestSuiteTest(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
unittest.main()
......@@ -2,7 +2,6 @@
# Copyright 2021 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test integrating the sequence processor into a simple test pipeline.
"""
......@@ -12,8 +11,8 @@ import sys
import unittest
# Needed because the test runner contains relative imports.
TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc import base
......@@ -26,6 +25,7 @@ class FakeExecutionProc(base.TestProc):
Test execution is simulated for each test by calling run().
"""
def __init__(self):
super(FakeExecutionProc, self).__init__()
self.tests = []
......@@ -41,6 +41,7 @@ class FakeExecutionProc(base.TestProc):
class FakeResultObserver(base.TestProcObserver):
"""Observer to track all results sent back through the pipeline."""
def __init__(self):
super(FakeResultObserver, self).__init__()
self.tests = set([])
......@@ -51,6 +52,7 @@ class FakeResultObserver(base.TestProcObserver):
class FakeTest(object):
"""Simple test representation to differentiate light/heavy tests."""
def __init__(self, n, is_heavy):
self.n = n
self.is_heavy = is_heavy
......@@ -58,6 +60,7 @@ class FakeTest(object):
class TestSequenceProc(unittest.TestCase):
def _test(self, tests, batch_size, max_heavy):
# Set up a simple processing pipeline:
# Loader -> observe results -> sequencer -> execution.
......@@ -95,69 +98,70 @@ class TestSequenceProc(unittest.TestCase):
def test_large_batch_light(self):
self._test([
FakeTest(0, False),
FakeTest(1, False),
FakeTest(2, False),
FakeTest(0, False),
FakeTest(1, False),
FakeTest(2, False),
], 4, 1)
def test_small_batch_light(self):
self._test([
FakeTest(0, False),
FakeTest(1, False),
FakeTest(2, False),
FakeTest(0, False),
FakeTest(1, False),
FakeTest(2, False),
], 2, 1)
def test_large_batch_heavy(self):
self._test([
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
], 4, 1)
def test_small_batch_heavy(self):
self._test([
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
], 2, 1)
def test_large_batch_mixed(self):
self._test([
FakeTest(0, True),
FakeTest(1, False),
FakeTest(2, True),
FakeTest(3, False),
FakeTest(0, True),
FakeTest(1, False),
FakeTest(2, True),
FakeTest(3, False),
], 4, 1)
def test_small_batch_mixed(self):
self._test([
FakeTest(0, True),
FakeTest(1, False),
FakeTest(2, True),
FakeTest(3, False),
FakeTest(0, True),
FakeTest(1, False),
FakeTest(2, True),
FakeTest(3, False),
], 2, 1)
def test_large_batch_more_heavy(self):
self._test([
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
FakeTest(3, False),
FakeTest(4, True),
FakeTest(5, True),
FakeTest(6, False),
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
FakeTest(3, False),
FakeTest(4, True),
FakeTest(5, True),
FakeTest(6, False),
], 4, 2)
def test_small_batch_more_heavy(self):
self._test([
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
FakeTest(3, False),
FakeTest(4, True),
FakeTest(5, True),
FakeTest(6, False),
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
FakeTest(3, False),
FakeTest(4, True),
FakeTest(5, True),
FakeTest(6, False),
], 2, 2)
if __name__ == '__main__':
unittest.main()
......@@ -9,14 +9,15 @@ import tempfile
import unittest
# Needed because the test runner contains relative imports.
TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc.shard import radix_hash
class TestRadixHashing(unittest.TestCase):
def test_hash_character_by_radix(self):
self.assertEqual(97, radix_hash(capacity=2**32, key="a"))
......@@ -28,20 +29,20 @@ class TestRadixHashing(unittest.TestCase):
def test_hash_test_id(self):
self.assertEqual(
5,
radix_hash(capacity=7,
key="test262/Map/class-private-method-Variant-0-1"))
5,
radix_hash(
capacity=7, key="test262/Map/class-private-method-Variant-0-1"))
def test_hash_boundaries(self):
total_variants = 5
cases = []
for case in [
"test262/Map/class-private-method",
"test262/Map/class-public-method",
"test262/Map/object-retrieval",
"test262/Map/object-deletion",
"test262/Map/object-creation",
"test262/Map/garbage-collection",
"test262/Map/class-private-method",
"test262/Map/class-public-method",
"test262/Map/object-retrieval",
"test262/Map/object-deletion",
"test262/Map/object-creation",
"test262/Map/garbage-collection",
]:
for variant_index in range(total_variants):
cases.append("%s-Variant-%d" % (case, variant_index))
......
......@@ -7,13 +7,15 @@ import os
import sys
import unittest
TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc.util import FixedSizeTopList
class TestOrderedFixedSizeList(unittest.TestCase):
def test_empty(self):
ofsl = FixedSizeTopList(3)
self.assertEqual(ofsl.as_list(), [])
......@@ -22,7 +24,7 @@ class TestOrderedFixedSizeList(unittest.TestCase):
ofsl = FixedSizeTopList(3)
ofsl.add(1)
ofsl.add(2)
self.assertEqual(ofsl.as_list(), [2,1])
self.assertEqual(ofsl.as_list(), [2, 1])
def test_4321(self):
ofsl = FixedSizeTopList(3)
......@@ -31,7 +33,7 @@ class TestOrderedFixedSizeList(unittest.TestCase):
ofsl.add(2)
ofsl.add(1)
data = ofsl.as_list()
self.assertEqual(data, [4,3,2])
self.assertEqual(data, [4, 3, 2])
def test_544321(self):
ofsl = FixedSizeTopList(4)
......@@ -45,21 +47,21 @@ class TestOrderedFixedSizeList(unittest.TestCase):
self.assertEqual(data, [5, 4, 4, 3])
def test_withkey(self):
ofsl = FixedSizeTopList(3,key=lambda x: x['val'])
ofsl.add({'val':4, 'something': 'four'})
ofsl.add({'val':3, 'something': 'three'})
ofsl.add({'val':-1, 'something': 'minusone'})
ofsl.add({'val':5, 'something': 'five'})
ofsl.add({'val':0, 'something': 'zero'})
ofsl = FixedSizeTopList(3, key=lambda x: x['val'])
ofsl.add({'val': 4, 'something': 'four'})
ofsl.add({'val': 3, 'something': 'three'})
ofsl.add({'val': -1, 'something': 'minusone'})
ofsl.add({'val': 5, 'something': 'five'})
ofsl.add({'val': 0, 'something': 'zero'})
data = [e['something'] for e in ofsl.as_list()]
self.assertEqual(data, ['five', 'four', 'three'])
def test_withkeyclash(self):
# Test that a key clash does not throw an exception
ofsl = FixedSizeTopList(2,key=lambda x: x['val'])
ofsl.add({'val':2, 'something': 'two'})
ofsl.add({'val':2, 'something': 'two'})
ofsl.add({'val':0, 'something': 'zero'})
ofsl = FixedSizeTopList(2, key=lambda x: x['val'])
ofsl.add({'val': 2, 'something': 'two'})
ofsl.add({'val': 2, 'something': 'two'})
ofsl.add({'val': 0, 'something': 'zero'})
data = [e['something'] for e in ofsl.as_list()]
self.assertEqual(data, ['two', 'two'])
......
......@@ -9,8 +9,8 @@ import tempfile
import unittest
# Needed because the test runner contains relative imports.
TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc import base
......@@ -18,6 +18,7 @@ from testrunner.testproc.variant import VariantProc
class FakeResultObserver(base.TestProcObserver):
def __init__(self):
super(FakeResultObserver, self).__init__()
......@@ -28,6 +29,7 @@ class FakeResultObserver(base.TestProcObserver):
class FakeFilter(base.TestProcFilter):
def __init__(self, filter_predicate):
super(FakeFilter, self).__init__()
......@@ -47,11 +49,13 @@ class FakeFilter(base.TestProcFilter):
class FakeSuite(object):
def __init__(self, name):
self.name = name
class FakeTest(object):
def __init__(self, procid):
self.suite = FakeSuite("fake_suite")
self.procid = procid
......@@ -66,6 +70,7 @@ class FakeTest(object):
class FakeVariantGen(object):
def __init__(self, variants):
self._variants = variants
......@@ -75,6 +80,7 @@ class FakeVariantGen(object):
class TestVariantProcLoading(unittest.TestCase):
def setUp(self):
self.test = FakeTest("test")
......@@ -86,12 +92,11 @@ class TestVariantProcLoading(unittest.TestCase):
# Creates a Variant processor containing the possible types of test
# variants.
self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
self.variant_proc._variant_gens = {
"fake_suite": FakeVariantGen(variants)}
self.variant_proc._variant_gens = {"fake_suite": FakeVariantGen(variants)}
# FakeFilter only lets tests passing the predicate to be loaded.
self.fake_filter = FakeFilter(
filter_predicate=(lambda t: t.procid == "to_filter"))
filter_predicate=(lambda t: t.procid == "to_filter"))
# FakeResultObserver to verify that VariantProc calls result_for correctly.
self.fake_result_observer = FakeResultObserver()
......@@ -112,10 +117,10 @@ class TestVariantProcLoading(unittest.TestCase):
def test_filters_first_two_variants(self):
variants = [
FakeTest('to_filter'),
FakeTest('to_filter'),
FakeTest('to_load'),
FakeTest('to_load'),
FakeTest('to_filter'),
FakeTest('to_filter'),
FakeTest('to_load'),
FakeTest('to_load'),
]
expected_load_results = {variants[2]}
......@@ -126,9 +131,9 @@ class TestVariantProcLoading(unittest.TestCase):
def test_stops_loading_after_first_successful_load(self):
variants = [
FakeTest('to_load'),
FakeTest('to_load'),
FakeTest('to_filter'),
FakeTest('to_load'),
FakeTest('to_load'),
FakeTest('to_filter'),
]
expected_load_results = {variants[0]}
......@@ -139,8 +144,8 @@ class TestVariantProcLoading(unittest.TestCase):
def test_return_result_when_out_of_variants(self):
variants = [
FakeTest('to_filter'),
FakeTest('to_load'),
FakeTest('to_filter'),
FakeTest('to_load'),
]
self._simulate_proc(variants)
......@@ -153,9 +158,9 @@ class TestVariantProcLoading(unittest.TestCase):
def test_return_result_after_running_variants(self):
variants = [
FakeTest('to_filter'),
FakeTest('to_load'),
FakeTest('to_load'),
FakeTest('to_filter'),
FakeTest('to_load'),
FakeTest('to_load'),
]
self._simulate_proc(variants)
......@@ -168,5 +173,6 @@ class TestVariantProcLoading(unittest.TestCase):
expected_results = {(self.test, None)}
self.assertSetEqual(expected_results, self.fake_result_observer.results)
if __name__ == '__main__':
unittest.main()
......@@ -122,7 +122,7 @@ def TorqueLintWorker(command):
error_count += 1
sys.stdout.write(out_lines)
if error_count != 0:
sys.stdout.write(
sys.stdout.write(
"warning: formatting and overwriting unformatted Torque files\n")
return error_count
except KeyboardInterrupt:
......@@ -727,16 +727,35 @@ def CheckDeps(workspace):
return subprocess.call([sys.executable, checkdeps_py, workspace]) == 0
def FindTests(workspace):
scripts = []
# TODO(almuthanna): unskip valid tests when they are properly migrated
exclude = [
'tools/clang',
'tools/unittests/v8_presubmit_test.py',
'tools/testrunner/local/pool_test.py',
'tools/testrunner/testproc/sequence_test.py',
'tools/mb/mb_test.py',
'tools/cppgc/gen_cmake_test.py',
'tools/ignition/linux_perf_report_test.py',
'tools/ignition/bytecode_dispatches_report_test.py',
'tools/ignition/linux_perf_bytecode_annotate_test.py',
]
scripts_without_excluded = []
for root, dirs, files in os.walk(join(workspace, 'tools')):
for f in files:
if f.endswith('_test.py'):
fullpath = os.path.join(root, f)
scripts.append(fullpath)
for script in scripts:
if not any(exc_dir in script for exc_dir in exclude):
scripts_without_excluded.append(script)
return scripts_without_excluded
def PyTests(workspace):
result = True
for script in [
join(workspace, 'tools', 'clusterfuzz', 'foozzie', 'v8_foozzie_test.py'),
join(workspace, 'tools', 'release', 'test_scripts.py'),
join(workspace, 'tools', 'unittests', 'predictable_wrapper_test.py'),
join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
]:
for script in FindTests(workspace):
print('Running ' + script)
result &= subprocess.call(
[sys.executable, script], stdout=subprocess.PIPE) == 0
......
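
As a standalone sketch of the discovery logic that the new FindTests/PyTests code above relies on, the snippet below walks a tools/ directory, collects every *_test.py script, and drops anything whose path contains an excluded fragment. The workspace path and the exclusion entries are placeholders, not the commit's exact configuration.

# Minimal sketch of *_test.py discovery with an exclusion list (illustrative).
import os

def find_tests(workspace, exclude=()):
    """Return tools/**/*_test.py scripts whose paths match no exclusion entry."""
    scripts = []
    for root, _dirs, files in os.walk(os.path.join(workspace, 'tools')):
        for f in files:
            if f.endswith('_test.py'):
                scripts.append(os.path.join(root, f))
    # Keep only scripts whose path contains none of the excluded fragments.
    return [s for s in scripts if not any(exc in s for exc in exclude)]

if __name__ == '__main__':
    # Placeholder workspace path; the real presubmit derives it from the V8 checkout.
    for script in find_tests('/path/to/v8', exclude=('tools/clang',)):
        print('Would run', script)
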