Commit 1c3ac2d9 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Fix data race when promoting objects into shared heap

Each GC thread needs its own instance of ConcurrentAllocator for
allocation, since the LAB is always considered thread-local.

Bug: v8:12582, v8:11708
Change-Id: I39200202ec9fd07fa33b3ababa88e84a1a270778
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3429294
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78917}
parent cbdc8337
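The commit message above is terse, so here is a minimal C++ sketch of the ownership pattern the patch moves to. Everything in it is a made-up stand-in (SimpleLab, GcWorker are not V8 types or APIs): it only illustrates why an unsynchronized bump-pointer LAB must belong to exactly one GC thread, which is what the diff below achieves by giving each Evacuator and Scavenger its own ConcurrentAllocator instead of a raw pointer to a single Heap-owned instance.

```cpp
// Hypothetical sketch, not V8 code. Before this commit, every GC thread
// borrowed the one ConcurrentAllocator owned by Heap, so its bump-pointer
// LAB was mutated by several threads at once. After the commit, each GC
// worker owns its own allocator, so the LAB really is thread-local.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>

// Stand-in for a linear allocation buffer: a plain, unsynchronized bump pointer.
class SimpleLab {
 public:
  explicit SimpleLab(size_t capacity)
      : buffer_(capacity), top_(0), limit_(capacity) {}

  // Unsynchronized on purpose: safe only if exactly one thread uses this LAB.
  void* Allocate(size_t bytes) {
    if (top_ + bytes > limit_) return nullptr;
    void* result = buffer_.data() + top_;
    top_ += bytes;  // racy if two threads shared the same SimpleLab
    return result;
  }

 private:
  std::vector<uint8_t> buffer_;
  size_t top_;
  size_t limit_;
};

// Stand-in for a GC worker (Evacuator/Scavenger in the real code).
class GcWorker {
 public:
  GcWorker() : shared_old_lab_(std::make_unique<SimpleLab>(64 * 1024)) {}

  void PromoteObjects() {
    // Each worker bumps only its own LAB, so the fast path needs no locking;
    // in the real code only refilling a LAB touches shared space state.
    for (int i = 0; i < 1000; i++) shared_old_lab_->Allocate(32);
  }

 private:
  // The fix in one line: the worker owns its allocator instead of borrowing
  // a single heap-wide instance.
  std::unique_ptr<SimpleLab> shared_old_lab_;
};

int main() {
  std::vector<GcWorker> workers(4);
  std::vector<std::thread> threads;
  for (auto& worker : workers) {
    threads.emplace_back([&worker] { worker.PromoteObjects(); });
  }
  for (auto& t : threads) t.join();
  return 0;
}
```

With one allocator per worker the allocation fast path stays lock-free; only LAB refills go through the shared space, which the real code protects with the space mutex (visible in PagedSpace::ExpandBackground further down).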
src/heap/concurrent-allocator.cc
@@ -24,7 +24,7 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
   DCHECK(!FLAG_enable_third_party_heap);
   // TODO(dinfuehr): Add support for allocation observers
 #ifdef DEBUG
-  local_heap_->VerifyCurrent();
+  if (local_heap_) local_heap_->VerifyCurrent();
 #endif
   if (object_size > kMaxLabObjectSize) {
...
@@ -145,7 +145,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
   HeapObject object = HeapObject::FromAddress(result->first);
   LocalAllocationBuffer saved_lab = std::move(lab_);
   lab_ = LocalAllocationBuffer::FromResult(
-      local_heap_->heap(), AllocationResult(object), result->second);
+      space_->heap(), AllocationResult(object), result->second);
   DCHECK(lab_.IsValid());
   if (!lab_.TryMerge(&saved_lab)) {
     saved_lab.CloseAndMakeIterable();
...
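The two hunks above exist because, after this change, GC threads construct a ConcurrentAllocator without a LocalHeap (the CreateSharedOldAllocator helpers added below pass nullptr). A simplified, hypothetical mirror of that constraint follows; Allocator, PagedSpace, LocalHeap and Heap here are stand-ins, not the real V8 classes: any LocalHeap-dependent step has to be either guarded by a null check or rerouted through the space, which always knows its heap.

```cpp
// Hypothetical, simplified mirror of the constraint behind the hunks above.
struct Heap {};

struct LocalHeap {
  Heap* heap() { return &owning_heap_; }
  void VerifyCurrent() { /* would check we run on the owning thread */ }
  Heap owning_heap_;
};

struct PagedSpace {
  explicit PagedSpace(Heap* heap) : heap_(heap) {}
  Heap* heap() { return heap_; }
  Heap* heap_;
};

class Allocator {
 public:
  // local_heap may be null when the allocator is used by a GC thread.
  Allocator(LocalHeap* local_heap, PagedSpace* space)
      : local_heap_(local_heap), space_(space) {}

  void Allocate() {
    // Guarded: only threads that actually have a LocalHeap verify it.
    if (local_heap_) local_heap_->VerifyCurrent();
    // The heap is reached via the space, which is always available.
    Heap* heap = space_->heap();
    (void)heap;
  }

 private:
  LocalHeap* local_heap_;
  PagedSpace* space_;
};

int main() {
  Heap heap;
  PagedSpace shared_old_space(&heap);
  Allocator gc_thread_allocator(/*local_heap=*/nullptr, &shared_old_space);
  gc_thread_allocator.Allocate();  // must not dereference the null LocalHeap
  return 0;
}
```

This is the same shape as the guarded VerifyCurrent() call and the switch from local_heap_->heap() to space_->heap() in the real diff above.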
src/heap/mark-compact.cc
@@ -22,6 +22,7 @@
 #include "src/heap/array-buffer-sweeper.h"
 #include "src/heap/basic-memory-chunk.h"
 #include "src/heap/code-object-registry.h"
+#include "src/heap/concurrent-allocator.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/heap.h"
 #include "src/heap/incremental-marking-inl.h"
@@ -1590,14 +1591,13 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
   }

   EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
+                      ConcurrentAllocator* shared_old_allocator,
                       RecordMigratedSlotVisitor* record_visitor)
       : heap_(heap),
         local_allocator_(local_allocator),
-        record_visitor_(record_visitor) {
-    if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
-      shared_string_table_ = true;
-      shared_old_allocator_ = heap_->shared_old_allocator_.get();
-    }
+        shared_old_allocator_(shared_old_allocator),
+        record_visitor_(record_visitor),
+        shared_string_table_(shared_old_allocator != nullptr) {
     migration_function_ = RawMigrateObject<MigrationMode::kFast>;
   }
@@ -1671,7 +1671,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
   Heap* heap_;
   EvacuationAllocator* local_allocator_;
-  ConcurrentAllocator* shared_old_allocator_ = nullptr;
+  ConcurrentAllocator* shared_old_allocator_;
   RecordMigratedSlotVisitor* record_visitor_;
   std::vector<MigrationObserver*> observers_;
   MigrateFunction migration_function_;
@@ -1682,10 +1682,12 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
  public:
   explicit EvacuateNewSpaceVisitor(
       Heap* heap, EvacuationAllocator* local_allocator,
+      ConcurrentAllocator* shared_old_allocator,
       RecordMigratedSlotVisitor* record_visitor,
       Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
       AlwaysPromoteYoung always_promote_young)
-      : EvacuateVisitorBase(heap, local_allocator, record_visitor),
+      : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
+                            record_visitor),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         promoted_size_(0),
         semispace_copied_size_(0),
@@ -1839,8 +1841,10 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
+                          ConcurrentAllocator* shared_old_allocator,
                           RecordMigratedSlotVisitor* record_visitor)
-      : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
+      : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
+                            record_visitor) {}

   inline bool Visit(HeapObject object, int size) override {
     HeapObject target_object;
@@ -3464,6 +3468,16 @@ void MarkCompactCollector::EvacuateEpilogue() {
 #endif
 }

+namespace {
+ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
+  if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
+    return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+  }
+  return nullptr;
+}
+}  // namespace
+
 class Evacuator : public Malloced {
  public:
   enum EvacuationMode {
@@ -3510,14 +3524,17 @@ class Evacuator : public Malloced {
             AlwaysPromoteYoung always_promote_young)
       : heap_(heap),
         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
-        new_space_visitor_(heap_, local_allocator, record_visitor,
-                           &local_pretenuring_feedback_, always_promote_young),
+        shared_old_allocator_(CreateSharedOldAllocator(heap_)),
+        new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
+                           record_visitor, &local_pretenuring_feedback_,
+                           always_promote_young),
         new_to_new_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
         new_to_old_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
-        old_space_visitor_(heap_, local_allocator, record_visitor),
+        old_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
+                           record_visitor),
         local_allocator_(local_allocator),
         duration_(0.0),
         bytes_compacted_(0) {}
@@ -3556,6 +3573,9 @@ class Evacuator : public Malloced {
   Heap::PretenuringFeedbackMap local_pretenuring_feedback_;

+  // Allocator for the shared heap.
+  std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
+
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
@@ -3601,6 +3621,7 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
 void Evacuator::Finalize() {
   local_allocator_->Finalize();
+  if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
                                        new_to_old_page_visitor_.moved_bytes());
...
src/heap/paged-spaces.cc
@@ -334,7 +334,7 @@ Page* PagedSpace::Expand() {
 }

 base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
-    LocalHeap* local_heap, size_t size_in_bytes) {
+    size_t size_in_bytes) {
   Page* page = AllocatePage();
   if (page == nullptr) return {};
   base::MutexGuard lock(&space_mutex_);
@@ -585,10 +585,11 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
          identity() == MAP_SPACE);
   DCHECK(origin == AllocationOrigin::kRuntime ||
          origin == AllocationOrigin::kGC);
+  DCHECK_IMPLIES(!local_heap, origin == AllocationOrigin::kGC);

   base::Optional<std::pair<Address, size_t>> result =
-      TryAllocationFromFreeListBackground(local_heap, min_size_in_bytes,
-                                          max_size_in_bytes, alignment, origin);
+      TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
+                                          alignment, origin);
   if (result) return result;

   MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -600,7 +601,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
     // Retry the free list allocation.
     result = TryAllocationFromFreeListBackground(
-        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+        min_size_in_bytes, max_size_in_bytes, alignment, origin);
     if (result) return result;

     if (IsSweepingAllowedOnThread(local_heap)) {
@@ -619,8 +620,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
       if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
         result = TryAllocationFromFreeListBackground(
-            local_heap, min_size_in_bytes, max_size_in_bytes, alignment,
-            origin);
+            min_size_in_bytes, max_size_in_bytes, alignment, origin);
         if (result) return result;
       }
     }
@@ -628,7 +628,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
   if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
       heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
-    result = ExpandBackground(local_heap, max_size_in_bytes);
+    result = ExpandBackground(max_size_in_bytes);
     if (result) {
       DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
       return result;
@@ -645,15 +645,14 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
     // Last try to acquire memory from free list.
     return TryAllocationFromFreeListBackground(
-        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+        min_size_in_bytes, max_size_in_bytes, alignment, origin);
   }

   return {};
 }

 base::Optional<std::pair<Address, size_t>>
-PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
-                                                size_t min_size_in_bytes,
+PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                                 size_t max_size_in_bytes,
                                                 AllocationAlignment alignment,
                                                 AllocationOrigin origin) {
@@ -700,7 +699,8 @@ PagedSpace::TryAllocationFromFreeListBackground(
 bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
   // Code space sweeping is only allowed on main thread.
-  return local_heap->is_main_thread() || identity() != CODE_SPACE;
+  return (local_heap && local_heap->is_main_thread()) ||
+         identity() != CODE_SPACE;
 }

 #ifdef DEBUG
...
src/heap/paged-spaces.h
@@ -376,7 +376,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   // a memory area of the given size in it. If successful the method returns
   // the address and size of the area.
   base::Optional<std::pair<Address, size_t>> ExpandBackground(
-      LocalHeap* local_heap, size_t size_in_bytes);
+      size_t size_in_bytes);

   Page* AllocatePage();
@@ -415,8 +415,7 @@ class V8_EXPORT_PRIVATE PagedSpace
                                       AllocationOrigin origin);

   V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
-  TryAllocationFromFreeListBackground(LocalHeap* local_heap,
-                                      size_t min_size_in_bytes,
+  TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                       size_t max_size_in_bytes,
                                       AllocationAlignment alignment,
                                       AllocationOrigin origin);
...
src/heap/scavenger.cc
@@ -7,6 +7,7 @@
 #include "src/handles/global-handles.h"
 #include "src/heap/array-buffer-sweeper.h"
 #include "src/heap/barrier.h"
+#include "src/heap/concurrent-allocator.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/invalidated-slots-inl.h"
@@ -540,6 +541,15 @@ Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
       large_object_promotion_list_local_(
           &promotion_list->large_object_promotion_list_) {}

+namespace {
+ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
+  if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
+    return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+  }
+  return nullptr;
+}
+}  // namespace
+
 Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                      EmptyChunksList* empty_chunks, CopiedList* copied_list,
                      PromotionList* promotion_list,
@@ -554,12 +564,11 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
       copied_size_(0),
       promoted_size_(0),
       allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
-      shared_old_allocator_(heap_->shared_old_allocator_.get()),
+      shared_old_allocator_(CreateSharedOldAllocator(heap_)),
       is_logging_(is_logging),
       is_incremental_marking_(heap->incremental_marking()->IsMarking()),
       is_compacting_(heap->incremental_marking()->IsCompacting()),
-      shared_string_table_(FLAG_shared_string_table &&
-                           (heap->isolate()->shared_isolate() != nullptr)) {}
+      shared_string_table_(shared_old_allocator_.get() != nullptr) {}

 void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
                                                  int size) {
@@ -741,6 +750,7 @@ void Scavenger::Finalize() {
   heap()->IncrementPromotedObjectsSize(promoted_size_);
   collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
   allocator_.Finalize();
+  if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
   empty_chunks_local_.Publish();
   ephemeron_table_list_local_.Publish();
   for (auto it = ephemeron_remembered_set_.begin();
...
src/heap/scavenger.h
@@ -197,7 +197,7 @@ class Scavenger {
   size_t copied_size_;
   size_t promoted_size_;
   EvacuationAllocator allocator_;
-  ConcurrentAllocator* shared_old_allocator_ = nullptr;
+  std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
   SurvivingNewLargeObjectsMap surviving_new_large_objects_;
   EphemeronRememberedSet ephemeron_remembered_set_;
...