Commit 9b78e758 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Make creation of NewSpace and NewLargeObjectSpace optional

Neither NewSpace nor NewLargeObjectSpace is used when
FLAG_single_generation is enabled. Until now both spaces were still
created in this mode even though they went unused. This CL makes both
spaces optional, which ensures that we do not inadvertently allocate
objects in them or use them in any other way.

Bug: v8:11644
Change-Id: I52a449c62e9d3df126c95419433d2abbd75539a5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2862768
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74345}
parent 3fa681db
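
The recurring pattern across the call sites in this diff is a null check before touching either space, since Heap::new_space() and Heap::new_lo_space() may now return nullptr under FLAG_single_generation. A minimal illustrative sketch of that pattern (the helper name YoungGenerationSize is hypothetical and not part of this CL; it mirrors the logic added to GCTracer::StartInSafepoint):

size_t YoungGenerationSize(Heap* heap) {
  // Both spaces may be absent in single-generation mode; treat them as empty.
  size_t size = 0;
  if (heap->new_space()) size += heap->new_space()->Size();
  if (heap->new_lo_space()) size += heap->new_lo_space()->SizeOfObjects();
  return size;  // 0 when FLAG_single_generation is enabled.
}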
......@@ -8560,10 +8560,11 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
}
} else {
i::Space* space = heap->space(static_cast<int>(index));
space_statistics->space_size_ = space->CommittedMemory();
space_statistics->space_used_size_ = space->SizeOfObjects();
space_statistics->space_available_size_ = space->Available();
space_statistics->physical_space_size_ = space->CommittedPhysicalMemory();
space_statistics->space_size_ = space ? space->CommittedMemory() : 0;
space_statistics->space_used_size_ = space ? space->SizeOfObjects() : 0;
space_statistics->space_available_size_ = space ? space->Available() : 0;
space_statistics->physical_space_size_ =
space ? space->CommittedPhysicalMemory() : 0;
}
return true;
}
......
......@@ -416,13 +416,13 @@ DEFINE_BOOL_READONLY(enable_unconditional_write_barriers,
"always use full write barriers")
#ifdef V8_ENABLE_SINGLE_GENERATION
#define V8_GENERATION_BOOL true
#define V8_SINGLE_GENERATION_BOOL true
#else
#define V8_GENERATION_BOOL false
#define V8_SINGLE_GENERATION_BOOL false
#endif
DEFINE_BOOL_READONLY(
single_generation, V8_GENERATION_BOOL,
single_generation, V8_SINGLE_GENERATION_BOOL,
"allocate all objects from young generation to old generation")
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
......
......@@ -460,11 +460,23 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
break;
}
objects_processed++;
// The order of the two loads is important.
Address new_space_top = heap_->new_space()->original_top_acquire();
Address new_space_limit = heap_->new_space()->original_limit_relaxed();
Address new_large_object = heap_->new_lo_space()->pending_object();
Address new_space_top = kNullAddress;
Address new_space_limit = kNullAddress;
Address new_large_object = kNullAddress;
if (heap_->new_space()) {
// The order of the two loads is important.
new_space_top = heap_->new_space()->original_top_acquire();
new_space_limit = heap_->new_space()->original_limit_relaxed();
}
if (heap_->new_lo_space()) {
new_large_object = heap_->new_lo_space()->pending_object();
}
Address addr = object.address();
if ((new_space_top <= addr && addr < new_space_limit) ||
addr == new_large_object) {
local_marking_worklists.PushOnHold(object);
......
......@@ -308,8 +308,10 @@ void GCTracer::StartInSafepoint() {
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
current_.young_object_size =
heap_->new_space()->Size() + heap_->new_lo_space()->SizeOfObjects();
size_t new_space_size = (heap_->new_space() ? heap_->new_space()->Size() : 0);
size_t new_lo_space_size =
(heap_->new_lo_space() ? heap_->new_lo_space()->SizeOfObjects() : 0);
current_.young_object_size = new_space_size + new_lo_space_size;
}
void GCTracer::ResetIncrementalMarkingCounters() {
......
......@@ -158,14 +158,6 @@ Address* Heap::OldSpaceAllocationLimitAddress() {
return old_space_->allocation_limit_address();
}
void Heap::UpdateNewSpaceAllocationCounter() {
new_space_allocation_counter_ = NewSpaceAllocationCounter();
}
size_t Heap::NewSpaceAllocationCounter() {
return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}
inline const base::AddressRegion& Heap::code_region() {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return tp_heap_->GetCodeRange();
......@@ -186,7 +178,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
return AllocationResult::Retry();
AllocationSpace space = FLAG_single_generation ? OLD_SPACE : NEW_SPACE;
return AllocationResult::Retry(space);
}
}
#endif
......@@ -292,10 +285,9 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_EQ(gc_state(), NOT_IN_GC);
Heap* heap = isolate()->heap();
if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
allocation == AllocationType::kYoung &&
if (allocation == AllocationType::kYoung &&
alignment == AllocationAlignment::kWordAligned &&
size <= MaxRegularHeapObjectSize(allocation)) {
size <= MaxRegularHeapObjectSize(allocation) && !FLAG_single_generation) {
Address* top = heap->NewSpaceAllocationTopAddress();
Address* limit = heap->NewSpaceAllocationLimitAddress();
if ((*limit - *top >= static_cast<unsigned>(size)) &&
......@@ -400,7 +392,9 @@ void Heap::FinalizeExternalString(String string) {
ext_string.DisposeResource(isolate());
}
Address Heap::NewSpaceTop() { return new_space_->top(); }
Address Heap::NewSpaceTop() {
return new_space_ ? new_space_->top() : kNullAddress;
}
bool Heap::InYoungGeneration(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
......@@ -589,9 +583,14 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
bool Heap::IsPendingAllocation(HeapObject object) {
// TODO(ulan): Optimize this function to perform 3 loads at most.
Address addr = object.address();
Address top = new_space_->original_top_acquire();
Address limit = new_space_->original_limit_relaxed();
if (top <= addr && addr < limit) return true;
Address top, limit;
if (new_space_) {
top = new_space_->original_top_acquire();
limit = new_space_->original_limit_relaxed();
if (top <= addr && addr < limit) return true;
}
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
......@@ -600,7 +599,7 @@ bool Heap::IsPendingAllocation(HeapObject object) {
if (top <= addr && addr < limit) return true;
}
if (addr == lo_space_->pending_object()) return true;
if (addr == new_lo_space_->pending_object()) return true;
if (new_lo_space_ && addr == new_lo_space_->pending_object()) return true;
if (addr == code_lo_space_->pending_object()) return true;
return false;
}
......
......@@ -199,7 +199,7 @@ class StrongRootsEntry {
class AllocationResult {
public:
static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
static inline AllocationResult Retry(AllocationSpace space) {
return AllocationResult(space);
}
......@@ -518,6 +518,9 @@ class Heap {
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
size_t NewSpaceSize();
size_t NewSpaceCapacity();
// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
V8_EXPORT_PRIVATE void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
......@@ -1370,9 +1373,9 @@ class Heap {
survived_since_last_expansion_ += survived;
}
inline void UpdateNewSpaceAllocationCounter();
void UpdateNewSpaceAllocationCounter();
inline size_t NewSpaceAllocationCounter();
V8_EXPORT_PRIVATE size_t NewSpaceAllocationCounter();
// This should be used only for testing.
void set_new_space_allocation_counter(size_t new_value) {
......
......@@ -36,7 +36,7 @@ class EvacuationAllocator {
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
new_space_->MaybeFreeUnusedLab(info);
if (new_space_) new_space_->MaybeFreeUnusedLab(info);
}
inline AllocationResult Allocate(AllocationSpace space, int object_size,
......
......@@ -151,6 +151,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
}
void MarkingVerifier::VerifyMarking(NewSpace* space) {
if (!space) return;
Address end = space->top();
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
......@@ -173,6 +174,7 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
if (!lo_space) return;
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
......@@ -313,6 +315,7 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
}
void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
if (!space) return;
PageRange range(space->first_allocatable_address(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
......@@ -559,6 +562,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
if (!space) return;
for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
......@@ -566,6 +570,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
if (!space) return;
LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
......@@ -874,9 +879,14 @@ void MarkCompactCollector::Prepare() {
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
// All objects are guaranteed to be initialized in atomic pause
heap()->new_lo_space()->ResetPendingObject();
DCHECK_EQ(heap()->new_space()->top(),
heap()->new_space()->original_top_acquire());
if (heap()->new_lo_space()) {
heap()->new_lo_space()->ResetPendingObject();
}
if (heap()->new_space()) {
DCHECK_EQ(heap()->new_space()->top(),
heap()->new_space()->original_top_acquire());
}
}
void MarkCompactCollector::FinishConcurrentMarking() {
......@@ -2862,18 +2872,23 @@ static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
void MarkCompactCollector::EvacuatePrologue() {
// New space.
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
for (Page* p :
PageRange(new_space->first_allocatable_address(), new_space->top())) {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
new_space->ResetLinearAllocationArea();
DCHECK_EQ(new_space->Size(), 0);
if (new_space) {
// Append the list of new space pages to be processed.
for (Page* p :
PageRange(new_space->first_allocatable_address(), new_space->top())) {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
new_space->ResetLinearAllocationArea();
heap()->new_lo_space()->Flip();
heap()->new_lo_space()->ResetPendingObject();
DCHECK_EQ(new_space->Size(), 0);
}
if (heap()->new_lo_space()) {
heap()->new_lo_space()->Flip();
heap()->new_lo_space()->ResetPendingObject();
}
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
......@@ -2884,18 +2899,27 @@ void MarkCompactCollector::EvacuatePrologue() {
void MarkCompactCollector::EvacuateEpilogue() {
aborted_evacuation_candidates_.clear();
// New space.
heap()->new_space()->set_age_mark(heap()->new_space()->top());
DCHECK_IMPLIES(FLAG_always_promote_young_mc,
heap()->new_space()->Size() == 0);
if (heap()->new_space()) {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
DCHECK_IMPLIES(FLAG_always_promote_young_mc,
heap()->new_space()->Size() == 0);
}
// Deallocate unmarked large objects.
heap()->lo_space()->FreeUnmarkedObjects();
heap()->code_lo_space()->FreeUnmarkedObjects();
heap()->new_lo_space()->FreeUnmarkedObjects();
if (heap()->new_lo_space()) {
heap()->new_lo_space()->FreeUnmarkedObjects();
}
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
// Give pages that are queued to be freed back to the OS.
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
#ifdef DEBUG
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
......@@ -3295,19 +3319,21 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
// Promote young generation large objects.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (auto it = heap()->new_lo_space()->begin();
it != heap()->new_lo_space()->end();) {
LargePage* current = *it;
it++;
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (marking_state->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
evacuation_items.emplace_back(ParallelWorkItem{}, current);
if (heap()->new_lo_space()) {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (auto it = heap()->new_lo_space()->begin();
it != heap()->new_lo_space()->end();) {
LargePage* current = *it;
it++;
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (marking_state->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
}
......@@ -3464,7 +3490,7 @@ void MarkCompactCollector::Evacuate() {
UpdatePointersAfterEvacuation();
{
if (heap()->new_space()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->Rebalance()) {
heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
......@@ -3836,6 +3862,8 @@ MarkCompactCollector::CreateRememberedSetUpdatingItem(
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
std::vector<std::unique_ptr<UpdatingItem>>* items) {
if (!heap()->new_space()) return 0;
// Seed to space pages.
const Address space_start = heap()->new_space()->first_allocatable_address();
const Address space_end = heap()->new_space()->top();
......
......@@ -112,7 +112,7 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
Address top = allocation_info_.top();
if (allocation_info_.limit() < top + size_in_bytes) {
return AllocationResult::Retry();
return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj = HeapObject::FromAddress(top);
......@@ -137,7 +137,7 @@ AllocationResult NewSpace::AllocateFastAligned(
if (allocation_info_.limit() - top <
static_cast<uintptr_t>(aligned_size_in_bytes)) {
return AllocationResult::Retry();
return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj = HeapObject::FromAddress(top);
......
......@@ -4,6 +4,7 @@
#include "src/heap/new-spaces.h"
#include "src/common/globals.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
......@@ -630,7 +631,7 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
return AllocationResult::Retry();
return AllocationResult::Retry(NEW_SPACE);
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
......@@ -649,7 +650,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, alignment)) {
return AllocationResult::Retry();
return AllocationResult::Retry(NEW_SPACE);
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
......
......@@ -141,7 +141,8 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + size_in_bytes;
if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
if (new_top > allocation_info_.limit())
return AllocationResult::Retry(NEW_SPACE);
allocation_info_.set_top(new_top);
if (filler_size > 0) {
......
......@@ -392,6 +392,7 @@ UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
}
TEST(ArrayBuffer_ExternalBackingStoreSizeIncreases) {
if (FLAG_single_generation) return;
CcTest::InitializeVM();
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
......@@ -413,6 +414,7 @@ TEST(ArrayBuffer_ExternalBackingStoreSizeIncreases) {
}
TEST(ArrayBuffer_ExternalBackingStoreSizeDecreases) {
if (FLAG_single_generation) return;
FLAG_concurrent_array_buffer_sweeping = false;
CcTest::InitializeVM();
LocalContext env;
......
......@@ -130,6 +130,7 @@ TEST(ExternalString_ExternalBackingStoreSizeIncreasesMarkCompact) {
}
TEST(ExternalString_ExternalBackingStoreSizeIncreasesAfterExternalization) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
LocalContext env;
......
......@@ -365,6 +365,8 @@ TEST(Tagging) {
TEST(GarbageCollection) {
if (FLAG_single_generation) return;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
......@@ -1557,7 +1559,7 @@ TEST(TestInternalWeakLists) {
// Some flags turn Scavenge collections into Mark-sweep collections
// and hence are incompatible with this test case.
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
FLAG_stress_incremental_marking || FLAG_single_generation)
return;
FLAG_retain_maps_for_n_gc = 0;
......@@ -2501,7 +2503,7 @@ TEST(OptimizedPretenuringAllocationFolding) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_opt) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
FLAG_stress_incremental_marking || FLAG_single_generation)
return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
......@@ -2551,7 +2553,7 @@ TEST(OptimizedPretenuringObjectArrayLiterals) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_opt) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking) {
FLAG_stress_incremental_marking || FLAG_single_generation) {
return;
}
v8::HandleScope scope(CcTest::isolate());
......@@ -2632,7 +2634,7 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_opt) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
FLAG_stress_incremental_marking || FLAG_single_generation)
return;
v8::HandleScope scope(CcTest::isolate());
......@@ -2679,7 +2681,7 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_opt) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
FLAG_stress_incremental_marking || FLAG_single_generation)
return;
v8::HandleScope scope(CcTest::isolate());
......@@ -2718,7 +2720,7 @@ TEST(OptimizedPretenuringDoubleArrayLiterals) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_opt) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
FLAG_stress_incremental_marking || FLAG_single_generation)
return;
v8::HandleScope scope(CcTest::isolate());
......@@ -2756,7 +2758,7 @@ TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_opt) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
FLAG_stress_incremental_marking || FLAG_single_generation)
return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
......@@ -2806,7 +2808,7 @@ TEST(OptimizedPretenuringNestedObjectLiterals) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_opt) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
FLAG_stress_incremental_marking || FLAG_single_generation)
return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
......@@ -2856,7 +2858,7 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_opt) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
FLAG_stress_incremental_marking || FLAG_single_generation)
return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
......@@ -6635,7 +6637,7 @@ Isolate* oom_isolate = nullptr;
void OOMCallback(const char* location, bool is_heap_oom) {
Heap* heap = oom_isolate->heap();
size_t kSlack = heap->new_space()->Capacity();
size_t kSlack = heap->new_space() ? heap->new_space()->Capacity() : 0;
CHECK_LE(heap->OldGenerationCapacity(), kHeapLimit + kSlack);
CHECK_LE(heap->memory_allocator()->Size(), heap->MaxReserved() + kSlack);
base::OS::ExitProcess(0);
......@@ -6798,8 +6800,10 @@ size_t NearHeapLimitCallback(void* raw_state, size_t current_heap_limit,
state->oom_triggered = true;
state->old_generation_capacity_at_oom = heap->OldGenerationCapacity();
state->memory_allocator_size_at_oom = heap->memory_allocator()->Size();
state->new_space_capacity_at_oom = heap->new_space()->Capacity();
state->new_lo_space_size_at_oom = heap->new_lo_space()->Size();
state->new_space_capacity_at_oom =
heap->new_space() ? heap->new_space()->Capacity() : 0;
state->new_lo_space_size_at_oom =
heap->new_lo_space() ? heap->new_lo_space()->Size() : 0;
state->current_heap_limit = current_heap_limit;
state->initial_heap_limit = initial_heap_limit;
return initial_heap_limit + 100 * MB;
......@@ -7442,6 +7446,7 @@ TEST(LongTaskStatsFullIncremental) {
}
TEST(LongTaskStatsYoung) {
if (FLAG_single_generation) return;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(CcTest::isolate());
......
......@@ -350,6 +350,8 @@ TEST(EmptyWeakArray) {
}
TEST(WeakArraysBasic) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......@@ -363,7 +365,7 @@ TEST(WeakArraysBasic) {
CHECK(!array->IsFixedArray());
CHECK_EQ(array->length(), length);
if (!FLAG_single_generation) CHECK(Heap::InYoungGeneration(*array));
CHECK(Heap::InYoungGeneration(*array));
for (int i = 0; i < length; ++i) {
HeapObject heap_object;
......@@ -419,6 +421,8 @@ TEST(WeakArraysBasic) {
}
TEST(WeakArrayListBasic) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......
......@@ -388,6 +388,8 @@ TEST(TracedGlobalToUnmodifiedJSApiObjectDiesOnScavenge) {
}
TEST(TracedGlobalToJSApiObjectWithIdentityHashSurvivesScavenge) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
CcTest::InitializeVM();
Isolate* i_isolate = CcTest::i_isolate();
......@@ -441,6 +443,7 @@ TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
}
TEST(TracedGlobalToJSApiObjectWithModifiedMapSurvivesScavenge) {
if (FLAG_single_generation) return;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
LocalContext context;
......@@ -462,6 +465,7 @@ TEST(TracedGlobalToJSApiObjectWithModifiedMapSurvivesScavenge) {
}
TEST(TracedGlobalTOJsApiObjectWithElementsSurvivesScavenge) {
if (FLAG_single_generation) return;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
LocalContext context;
......
......@@ -3919,7 +3919,7 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
i::FLAG_stress_incremental_marking ||
i::FLAG_stress_concurrent_allocation) {
i::FLAG_stress_concurrent_allocation || i::FLAG_single_generation) {
return;
}
......
......@@ -61,7 +61,7 @@ static void SetUpNewSpaceWithPoisonedMementoAtTop() {
TEST(Regress340063) {
CcTest::InitializeVM();
if (!i::FLAG_allocation_site_pretenuring) return;
if (!i::FLAG_allocation_site_pretenuring || FLAG_single_generation) return;
v8::HandleScope scope(CcTest::isolate());
SetUpNewSpaceWithPoisonedMementoAtTop();
......@@ -74,7 +74,7 @@ TEST(Regress340063) {
TEST(Regress470390) {
CcTest::InitializeVM();
if (!i::FLAG_allocation_site_pretenuring) return;
if (!i::FLAG_allocation_site_pretenuring || FLAG_single_generation) return;
v8::HandleScope scope(CcTest::isolate());
SetUpNewSpaceWithPoisonedMementoAtTop();
......@@ -91,7 +91,7 @@ TEST(Regress470390) {
TEST(BadMementoAfterTopForceScavenge) {
CcTest::InitializeVM();
if (!i::FLAG_allocation_site_pretenuring) return;
if (!i::FLAG_allocation_site_pretenuring || FLAG_single_generation) return;
v8::HandleScope scope(CcTest::isolate());
SetUpNewSpaceWithPoisonedMementoAtTop();
......