Commit f6ac5064 authored by Dominik Inführ, committed by V8 LUCI CQ

Reland "[heap] Disable the young generation in shared heaps"

This is a reland of 1186fc50

This reland fixes NewSpaceAllocationTopAddress() and
NewSpaceAllocationLimitAddress() by returning nullptr when no new space
is available. This is safe because those addresses are never used when
the new space is absent.
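
For illustration, a minimal sketch of the guarded-accessor pattern described
above. Address, NewSpace and Heap here are simplified stand-ins, not V8's
actual classes:

#include <cstdint>

using Address = std::uintptr_t;

// Stand-in for a young-generation space exposing its allocation top/limit slots.
class NewSpace {
 public:
  Address* allocation_top_address() { return &top_; }
  Address* allocation_limit_address() { return &limit_; }

 private:
  Address top_ = 0;
  Address limit_ = 0;
};

// Stand-in heap: new_space_ may be null when the young generation is disabled
// (e.g. for a shared heap), so the accessors return nullptr instead of
// dereferencing a null pointer.
class Heap {
 public:
  explicit Heap(NewSpace* new_space) : new_space_(new_space) {}

  Address* NewSpaceAllocationTopAddress() {
    return new_space_ ? new_space_->allocation_top_address() : nullptr;
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_ ? new_space_->allocation_limit_address() : nullptr;
  }

 private:
  NewSpace* new_space_;  // null when there is no young generation
};

Callers that run only in configurations with a young generation never see the
nullptr, which is why returning it is sufficient here.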

We can't make this a build-time flag because we may want to disable the
new space only for the shared heap, while regular heaps keep their young
generation.
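
Sketched below is the runtime gating this refers to: the decision is a plain
boolean computed at heap setup, so within one binary a shared heap can skip
the young generation while regular heaps keep it. HeapOptions and SimpleHeap
are hypothetical names, not V8 API:

#include <memory>

struct YoungGeneration {};  // placeholder for the new space and its helpers

// Hypothetical setup options: both configurations coexist in one binary,
// which is why a compile-time switch would not work.
struct HeapOptions {
  bool single_generation_flag = false;  // stand-in for FLAG_single_generation
  bool is_shared_heap = false;          // stand-in for Heap::IsShared()
};

class SimpleHeap {
 public:
  explicit SimpleHeap(const HeapOptions& options) {
    const bool has_young_gen =
        !options.single_generation_flag && !options.is_shared_heap;
    if (has_young_gen) {
      // Only non-shared, multi-generation heaps get a young generation.
      young_gen_ = std::make_unique<YoungGeneration>();
    }
  }

  bool has_young_generation() const { return young_gen_ != nullptr; }

 private:
  std::unique_ptr<YoungGeneration> young_gen_;
};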

Original change's description:
> [heap] Disable the young generation in shared heaps
>
> A shared heap will not have a young generation in the beginning.
>
> Bug: v8:11708
> Change-Id: I947ddb91a23a72a8cee3aa3e554723dda8146011
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2891569
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#74697}

Bug: v8:11708
Change-Id: I254b919f7076ce624d15c924e63cbde5eb4df749
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2912731
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74735}
parent c55994a0
@@ -143,11 +143,11 @@ PagedSpace* Heap::paged_space(int idx) {
 Space* Heap::space(int idx) { return space_[idx]; }
 Address* Heap::NewSpaceAllocationTopAddress() {
-  return new_space_->allocation_top_address();
+  return new_space_ ? new_space_->allocation_top_address() : nullptr;
 }
 Address* Heap::NewSpaceAllocationLimitAddress() {
-  return new_space_->allocation_limit_address();
+  return new_space_ ? new_space_->allocation_limit_address() : nullptr;
 }
 Address* Heap::OldSpaceAllocationTopAddress() {
...
@@ -449,7 +449,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     return MARK_COMPACTOR;
   }
-  if (FLAG_gc_global || ShouldStressCompaction() || FLAG_single_generation) {
+  if (FLAG_gc_global || ShouldStressCompaction() || !new_space()) {
     *reason = "GC in old space forced by flags";
     return MARK_COMPACTOR;
   }
@@ -2410,7 +2410,8 @@ void Heap::MarkCompact() {
 void Heap::MinorMarkCompact() {
 #ifdef ENABLE_MINOR_MC
-  DCHECK(FLAG_minor_mc && !FLAG_single_generation);
+  DCHECK(FLAG_minor_mc);
+  DCHECK(new_space());
   PauseAllocationObserversScope pause_observers(this);
   SetGCState(MINOR_MARK_COMPACT);
@@ -2517,7 +2518,7 @@ void Heap::EvacuateYoungGeneration() {
 }
 void Heap::Scavenge() {
-  DCHECK(!FLAG_single_generation);
+  DCHECK_NOT_NULL(new_space());
   if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
     tracer()->NotifyYoungGenerationHandling(
@@ -5435,7 +5436,8 @@ class StressConcurrentAllocationObserver : public AllocationObserver {
 void Heap::SetUpSpaces() {
   // Ensure SetUpFromReadOnlySpace has been ran.
   DCHECK_NOT_NULL(read_only_space_);
-  if (!FLAG_single_generation) {
+  const bool has_young_gen = !FLAG_single_generation && !IsShared();
+  if (has_young_gen) {
     space_[NEW_SPACE] = new_space_ =
         new NewSpace(this, memory_allocator_->data_page_allocator(),
                      initial_semispace_size_, max_semi_space_size_);
@@ -5444,7 +5446,7 @@ void Heap::SetUpSpaces() {
   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
   space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
-  if (!FLAG_single_generation) {
+  if (has_young_gen) {
     space_[NEW_LO_SPACE] = new_lo_space_ =
         new NewLargeObjectSpace(this, NewSpaceCapacity());
   }
@@ -5483,7 +5485,7 @@ void Heap::SetUpSpaces() {
   }
 #endif  // ENABLE_MINOR_MC
-  if (!FLAG_single_generation) {
+  if (new_space()) {
     scavenge_job_.reset(new ScavengeJob());
     scavenge_task_observer_.reset(new ScavengeTaskObserver(
         this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
...
@@ -83,6 +83,9 @@ UNINITIALIZED_TEST(SharedCollection) {
   create_params.array_buffer_allocator = allocator.get();
   Isolate* shared_isolate = Isolate::NewShared(create_params);
+  DCHECK_NULL(shared_isolate->heap()->new_space());
+  DCHECK_NULL(shared_isolate->heap()->new_lo_space());
   CcTest::CollectGarbage(OLD_SPACE, shared_isolate);
   Isolate::Delete(shared_isolate);
 }
...