Commit 45833d9b authored by Michael Achenbach, committed by Commit Bot

[test] Group test features behind a single GN switch

This will switch on various testing features in sanitizer builds and
in correctness fuzzer builds.

Currently the switch groups enabling the slow-path runtime flag and the
allocation timeout used for atomic GC stress, but more features could be
added in the future.

This will enable the GC fuzzer, ClusterFuzz, and the correctness fuzzer
to use both slow-path and atomic GC stress in release sanitizer builds.

Bug: v8:6972
Change-Id: I5cade68241a8a0711f8a388be7e1543aab0035fa
Reviewed-on: https://chromium-review.googlesource.com/869932
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50656}
parent 41f231a2
@@ -89,8 +89,8 @@ declare_args() {
   # Sets -dV8_CONCURRENT_MARKING
   v8_enable_concurrent_marking = true

-  # Sets -dV8_ENABLE_FORCE_SLOW_PATH
-  v8_enable_force_slow_path = ""
+  # Enables various testing features.
+  v8_enable_test_features = ""

   # Build the snapshot with unwinding information for perf.
   # Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
@@ -153,8 +153,8 @@ if (v8_enable_disassembler == "") {
 if (v8_enable_trace_maps == "") {
   v8_enable_trace_maps = is_debug
 }
-if (v8_enable_force_slow_path == "") {
-  v8_enable_force_slow_path = is_debug
+if (v8_enable_test_features == "") {
+  v8_enable_test_features = is_debug || dcheck_always_on
 }
 if (v8_enable_v8_checks == "") {
   v8_enable_v8_checks = is_debug
@@ -282,7 +282,8 @@ config("features") {
   if (v8_enable_trace_feedback_updates) {
     defines += [ "V8_TRACE_FEEDBACK_UPDATES" ]
   }
-  if (v8_enable_force_slow_path) {
+  if (v8_enable_test_features) {
+    defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ]
     defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ]
   }
   if (v8_enable_v8_checks) {
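The BUILD.gn hunks above are the core of the change: one GN switch now expands to two defines, and it defaults to on in debug builds and dcheck-enabled release builds. As a rough C++-level illustration of the same grouping idea (V8's GN arg adds both defines directly; the umbrella macro below is hypothetical, not a real V8 macro):

// Hypothetical umbrella macro, for illustration only: mimics how the single
// GN arg v8_enable_test_features turns on both individual feature defines.
#if defined(V8_ENABLE_TEST_FEATURES)
#define V8_ENABLE_ALLOCATION_TIMEOUT  // countdown-based atomic GC stress
#define V8_ENABLE_FORCE_SLOW_PATH     // force runtime slow paths in tests
#endif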
@@ -430,7 +430,7 @@
     'gn_release_x64_trybot': [
       'gn', 'release_trybot', 'x64', 'swarming'],
     'gn_release_x64_test_features_trybot': [
-      'gn', 'release_trybot', 'x64', 'swarming', 'v8_enable_force_slow_path'],
+      'gn', 'release_trybot', 'x64', 'swarming', 'v8_enable_test_features'],
     'gn_release_x64_tsan': [
       'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
     'gn_release_x64_tsan_concurrent_marking': [
@@ -558,7 +558,7 @@
     },

     'asan': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       'gn_args': 'is_asan=true',
       'gyp_defines': 'clang=1 asan=1',
     },
@@ -569,14 +569,14 @@
     },

     'cfi': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
                   'use_cfi_recover=false'),
       'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
     },

     'cfi_clusterfuzz': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
                   'use_cfi_recover=true'),
       'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
@@ -653,7 +653,7 @@
     },

     'lsan': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       'gn_args': 'is_lsan=true',
       'gyp_defines': 'lsan=1',
     },
@@ -669,13 +669,13 @@
     },

     'msan': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       'gn_args': ('is_msan=true msan_track_origins=2 '
                   'use_prebuilt_instrumented_libraries=true'),
     },

     'msan_no_origins': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       'gn_args': ('is_msan=true msan_track_origins=0 '
                   'use_prebuilt_instrumented_libraries=true'),
     },
@@ -765,13 +765,13 @@
     },

     'tsan': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       'gn_args': 'is_tsan=true',
       'gyp_defines': 'clang=1 tsan=1',
     },

     'ubsan_vptr': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       # TODO(krasin): Remove is_ubsan_no_recover=true when
       # https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
       # ubsan_vptr instead.
@@ -779,7 +779,7 @@
     },

     'ubsan_vptr_recover': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       # Ubsan vptr with recovery.
       'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=false',
     },
@@ -794,7 +794,7 @@
     },

     'v8_correctness_fuzzer': {
-      'mixins': ['v8_enable_force_slow_path'],
+      'mixins': ['v8_enable_test_features'],
       'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
     },
@@ -808,8 +808,8 @@
     'gyp_defines': 'v8_enable_slow_dchecks=1',
   },

-  'v8_enable_force_slow_path': {
-    'gn_args': 'v8_enable_force_slow_path=true',
+  'v8_enable_test_features': {
+    'gn_args': 'v8_enable_test_features=true',
   },

   'v8_enable_verify_predictable': {
@@ -250,12 +250,14 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
   DCHECK(gc_state_ == NOT_IN_GC);
-#ifdef DEBUG
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
     if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
       return AllocationResult::Retry(space);
     }
   }
+#endif
+#ifdef DEBUG
   isolate_->counters()->objs_since_last_full()->Increment();
   isolate_->counters()->objs_since_last_young()->Increment();
 #endif
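The hunk above moves the allocation-timeout countdown from DEBUG-only builds to any build with the new define. A minimal standalone sketch of the pattern (illustrative names; not V8's actual Heap API):

#include <cstdio>
#include <optional>

// Sketch: every raw allocation decrements a countdown; when it reaches
// zero, the allocator reports failure so the caller triggers a GC and
// retries, in the spirit of AllocationResult::Retry(space) above.
struct SketchHeap {
  int allocation_timeout = 5;  // allocations left before a forced failure

  std::optional<void*> AllocateRaw(int size_in_bytes) {
    if (allocation_timeout-- <= 0) return std::nullopt;  // force a GC
    return ::operator new(size_in_bytes);
  }
};

int main() {
  SketchHeap heap;
  for (int i = 0; i < 7; ++i) {
    auto result = heap.AllocateRaw(16);
    std::printf("allocation %d: %s\n", i, result ? "ok" : "retry after GC");
    if (result) ::operator delete(*result);
  }
}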
@@ -182,9 +182,6 @@ Heap::Heap()
       gc_count_(0),
       mmap_region_base_(0),
       remembered_unmapped_pages_index_(0),
-#ifdef DEBUG
-      allocation_timeout_(0),
-#endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
       inline_allocation_disabled_(false),
       tracer_(nullptr),
@@ -233,7 +230,12 @@ Heap::Heap()
       use_tasks_(true),
       force_oom_(false),
       delay_sweeper_tasks_for_testing_(false),
-      pending_layout_change_object_(nullptr) {
+      pending_layout_change_object_(nullptr)
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+      ,
+      allocation_timeout_(0)
+#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
+{
   // Ensure old_generation_size_ is a multiple of kPageSize.
   DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
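Moving allocation_timeout_ to the end of the initializer list lets the #ifdef wrap both the comma and the initializer, so the constructor stays well-formed whether or not the feature is compiled in. A self-contained sketch of that idiom (FEATURE_X is a placeholder for V8_ENABLE_ALLOCATION_TIMEOUT):

struct Sketch {
  int always_present_;
#ifdef FEATURE_X           // placeholder feature macro
  int feature_counter_;
#endif
  Sketch()
      : always_present_(0)
#ifdef FEATURE_X
        ,                    // the comma lives inside the #ifdef, so the
        feature_counter_(0)  // initializer list parses either way
#endif
  {
  }
};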
@@ -1214,7 +1216,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
   const char* collector_reason = nullptr;
   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
-#ifdef DEBUG
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   // Reset the allocation timeout, but make sure to allow at least a few
   // allocations after a collection. The reason for this is that we have a lot
   // of allocation sequences and we assume that a garbage collection will allow
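The (truncated) comment above explains why the timeout is re-armed with a floor: long allocation sequences assume a collection frees enough room for a few follow-up allocations. A hedged sketch of that reset, where the floor of 6 is an assumption rather than necessarily V8's exact value:

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  // Re-arm the countdown, but guarantee a handful of allocations succeed
  // right after each GC so collections cannot starve post-GC setup code.
  allocation_timeout_ = std::max(6, NextAllocationTimeout());
#endif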
@@ -5513,7 +5515,7 @@ void Heap::DisableInlineAllocation() {
 }

 bool Heap::SetUp() {
-#ifdef DEBUG
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   allocation_timeout_ = NextAllocationTimeout();
 #endif
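NextAllocationTimeout() computes the countdown from the flags checked in AllocateRaw; its exact semantics are not shown in this diff. A plausible standalone sketch (the flag stand-ins and the logic are assumptions):

#include <cstdlib>

// Stand-ins for the V8 flags referenced above; demo values only.
static int FLAG_random_gc_interval = 0;  // --random-gc-interval
static int FLAG_gc_interval = -1;        // --gc-interval (-1 = off)

int NextAllocationTimeout() {
  if (FLAG_random_gc_interval > 0) {
    // Random countdown in [0, FLAG_random_gc_interval].
    return std::rand() % (FLAG_random_gc_interval + 1);
  }
  return FLAG_gc_interval;  // fixed countdown, or negative for "disabled"
}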
@@ -1600,12 +1600,14 @@ class Heap {
   void VerifyRememberedSetFor(HeapObject* object);
 #endif

+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
+#endif
+
 #ifdef DEBUG
   void VerifyCountersAfterSweeping();
   void VerifyCountersBeforeConcurrentSweeping();

-  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-
   void Print();
   void PrintHandles();
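Because the setter is now guarded by V8_ENABLE_ALLOCATION_TIMEOUT rather than DEBUG, release test-feature builds can arm the countdown directly. A hypothetical call site (the heap pointer and the value 50 are illustrative):

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  // Make the 50th allocation from now fail, forcing a GC mid-test and
  // exercising GC-sensitive code paths.
  heap->set_allocation_timeout(50);
#endif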
@@ -2415,13 +2417,6 @@ class Heap {
   int remembered_unmapped_pages_index_;
   Address remembered_unmapped_pages_[kRememberedUnmappedPages];

-#ifdef DEBUG
-  // If the --gc-interval flag is set to a positive value, this
-  // variable holds the value indicating the number of allocations
-  // remain until the next failure and garbage collection.
-  int allocation_timeout_;
-#endif  // DEBUG
-
   // Limit that triggers a global GC on the next (normally caused) GC. This
   // is checked when we have already decided to do a GC to help determine
   // which collector to invoke, before expanding a paged space in the old
@@ -2567,6 +2562,13 @@ class Heap {
   HeapObject* pending_layout_change_object_;

+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+  // If the --gc-interval flag is set to a positive value, this
+  // variable holds the value indicating the number of allocations
+  // remain until the next failure and garbage collection.
+  int allocation_timeout_;
+#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
+
   std::map<HeapObject*, HeapObject*> retainer_;
   std::map<HeapObject*, Root> retaining_root_;

   // If an object is retained by an ephemeron, then the retaining key of the