Commit 1c3ac2d9 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Fix data race when promoting objects into shared heap

Each GC thread needs their own instance of ConcurrentAllocator for
allocation. The LAB is always considered thread-local.

Bug: v8:12582, v8:11708
Change-Id: I39200202ec9fd07fa33b3ababa88e84a1a270778
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3429294
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78917}
parent cbdc8337
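
For context, a minimal standalone sketch of why a LAB-backed allocator has to be per-thread, as this patch makes it. This is not V8 code: SharedSpace, ToyConcurrentAllocator, and kLabSize are invented for illustration. The point it shows is that the bump-pointer fast path over the linear allocation buffer (LAB) is unsynchronized, so sharing one allocator instance across GC threads races on the LAB pointers, while giving each thread its own instance keeps only the chunk refill synchronized.

#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

// Toy stand-in for a shared old space: hands out chunks of backing store
// in a thread-safe way.
class SharedSpace {
 public:
  explicit SharedSpace(size_t capacity) : buffer_(capacity), top_(0) {}

  // Thread-safe: reserves `size` bytes for a new LAB, or nullptr when full.
  std::byte* ReserveChunk(size_t size) {
    size_t offset = top_.fetch_add(size, std::memory_order_relaxed);
    return offset + size <= buffer_.size() ? buffer_.data() + offset : nullptr;
  }

 private:
  std::vector<std::byte> buffer_;
  std::atomic<size_t> top_;
};

// Toy allocator with a linear allocation buffer (LAB): the bump-pointer
// fast path is unsynchronized, so an instance must be owned by one thread.
class ToyConcurrentAllocator {
 public:
  static constexpr size_t kLabSize = 4 * 1024;

  explicit ToyConcurrentAllocator(SharedSpace* space) : space_(space) {}

  void* Allocate(size_t size) {
    if (size > kLabSize) return nullptr;  // toy: no large-object path
    if (static_cast<size_t>(lab_limit_ - lab_top_) < size) {
      std::byte* chunk = space_->ReserveChunk(kLabSize);  // synchronized
      if (chunk == nullptr) return nullptr;
      lab_top_ = chunk;
      lab_limit_ = chunk + kLabSize;
    }
    void* result = lab_top_;  // unsynchronized bump: single owner only
    lab_top_ += size;
    return result;
  }

 private:
  SharedSpace* space_;
  std::byte* lab_top_ = nullptr;
  std::byte* lab_limit_ = nullptr;
};

int main() {
  SharedSpace shared_old_space(1 << 20);

  // One allocator per GC thread (the pattern the patch moves to); sharing a
  // single instance across these threads would race on lab_top_/lab_limit_.
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; i++) {
    threads.emplace_back([&shared_old_space] {
      ToyConcurrentAllocator allocator(&shared_old_space);  // thread-owned LAB
      for (int j = 0; j < 1000; j++) allocator.Allocate(32);
    });
  }
  for (auto& thread : threads) thread.join();
  return 0;
}
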
@@ -24,7 +24,7 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
   DCHECK(!FLAG_enable_third_party_heap);
   // TODO(dinfuehr): Add support for allocation observers
 #ifdef DEBUG
-  local_heap_->VerifyCurrent();
+  if (local_heap_) local_heap_->VerifyCurrent();
 #endif
   if (object_size > kMaxLabObjectSize) {
@@ -145,7 +145,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
   HeapObject object = HeapObject::FromAddress(result->first);
   LocalAllocationBuffer saved_lab = std::move(lab_);
   lab_ = LocalAllocationBuffer::FromResult(
-      local_heap_->heap(), AllocationResult(object), result->second);
+      space_->heap(), AllocationResult(object), result->second);
   DCHECK(lab_.IsValid());
   if (!lab_.TryMerge(&saved_lab)) {
     saved_lab.CloseAndMakeIterable();
@@ -22,6 +22,7 @@
 #include "src/heap/array-buffer-sweeper.h"
 #include "src/heap/basic-memory-chunk.h"
 #include "src/heap/code-object-registry.h"
+#include "src/heap/concurrent-allocator.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/heap.h"
 #include "src/heap/incremental-marking-inl.h"
@@ -1590,14 +1591,13 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
   }

   EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
+                      ConcurrentAllocator* shared_old_allocator,
                       RecordMigratedSlotVisitor* record_visitor)
       : heap_(heap),
         local_allocator_(local_allocator),
-        record_visitor_(record_visitor) {
-    if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
-      shared_string_table_ = true;
-      shared_old_allocator_ = heap_->shared_old_allocator_.get();
-    }
+        shared_old_allocator_(shared_old_allocator),
+        record_visitor_(record_visitor),
+        shared_string_table_(shared_old_allocator != nullptr) {
     migration_function_ = RawMigrateObject<MigrationMode::kFast>;
   }
@@ -1671,7 +1671,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
   Heap* heap_;
   EvacuationAllocator* local_allocator_;
-  ConcurrentAllocator* shared_old_allocator_ = nullptr;
+  ConcurrentAllocator* shared_old_allocator_;
   RecordMigratedSlotVisitor* record_visitor_;
   std::vector<MigrationObserver*> observers_;
   MigrateFunction migration_function_;
@@ -1682,10 +1682,12 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
  public:
   explicit EvacuateNewSpaceVisitor(
       Heap* heap, EvacuationAllocator* local_allocator,
+      ConcurrentAllocator* shared_old_allocator,
       RecordMigratedSlotVisitor* record_visitor,
       Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
       AlwaysPromoteYoung always_promote_young)
-      : EvacuateVisitorBase(heap, local_allocator, record_visitor),
+      : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
+                            record_visitor),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         promoted_size_(0),
         semispace_copied_size_(0),
@@ -1839,8 +1841,10 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
+                          ConcurrentAllocator* shared_old_allocator,
                           RecordMigratedSlotVisitor* record_visitor)
-      : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
+      : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
+                            record_visitor) {}

   inline bool Visit(HeapObject object, int size) override {
     HeapObject target_object;
@@ -3464,6 +3468,16 @@ void MarkCompactCollector::EvacuateEpilogue() {
 #endif
 }

+namespace {
+
+ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
+  if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
+    return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+  }
+  return nullptr;
+}
+
+}  // namespace

 class Evacuator : public Malloced {
  public:
  enum EvacuationMode {
@@ -3510,14 +3524,17 @@ class Evacuator : public Malloced {
             AlwaysPromoteYoung always_promote_young)
       : heap_(heap),
         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
-        new_space_visitor_(heap_, local_allocator, record_visitor,
-                           &local_pretenuring_feedback_, always_promote_young),
+        shared_old_allocator_(CreateSharedOldAllocator(heap_)),
+        new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
+                           record_visitor, &local_pretenuring_feedback_,
+                           always_promote_young),
         new_to_new_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
         new_to_old_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
-        old_space_visitor_(heap_, local_allocator, record_visitor),
+        old_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
+                           record_visitor),
         local_allocator_(local_allocator),
         duration_(0.0),
         bytes_compacted_(0) {}
@@ -3556,6 +3573,9 @@ class Evacuator : public Malloced {
   Heap::PretenuringFeedbackMap local_pretenuring_feedback_;

+  // Allocator for the shared heap.
+  std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
+
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
@@ -3601,6 +3621,7 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
 void Evacuator::Finalize() {
   local_allocator_->Finalize();
+  if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
                                        new_to_old_page_visitor_.moved_bytes());
@@ -334,7 +334,7 @@ Page* PagedSpace::Expand() {
 }

 base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
-    LocalHeap* local_heap, size_t size_in_bytes) {
+    size_t size_in_bytes) {
   Page* page = AllocatePage();
   if (page == nullptr) return {};
   base::MutexGuard lock(&space_mutex_);
@@ -585,10 +585,11 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
          identity() == MAP_SPACE);
   DCHECK(origin == AllocationOrigin::kRuntime ||
          origin == AllocationOrigin::kGC);
+  DCHECK_IMPLIES(!local_heap, origin == AllocationOrigin::kGC);

   base::Optional<std::pair<Address, size_t>> result =
-      TryAllocationFromFreeListBackground(local_heap, min_size_in_bytes,
-                                          max_size_in_bytes, alignment, origin);
+      TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
+                                          alignment, origin);
   if (result) return result;

   MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -600,7 +601,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
     // Retry the free list allocation.
     result = TryAllocationFromFreeListBackground(
-        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+        min_size_in_bytes, max_size_in_bytes, alignment, origin);
     if (result) return result;

     if (IsSweepingAllowedOnThread(local_heap)) {
@@ -619,8 +620,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
       if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
         result = TryAllocationFromFreeListBackground(
-            local_heap, min_size_in_bytes, max_size_in_bytes, alignment,
-            origin);
+            min_size_in_bytes, max_size_in_bytes, alignment, origin);
         if (result) return result;
       }
     }
@@ -628,7 +628,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
   if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
       heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
-    result = ExpandBackground(local_heap, max_size_in_bytes);
+    result = ExpandBackground(max_size_in_bytes);
     if (result) {
       DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
       return result;
@@ -645,15 +645,14 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
     // Last try to acquire memory from free list.
     return TryAllocationFromFreeListBackground(
-        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+        min_size_in_bytes, max_size_in_bytes, alignment, origin);
   }

   return {};
 }

 base::Optional<std::pair<Address, size_t>>
-PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
-                                                size_t min_size_in_bytes,
+PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                                 size_t max_size_in_bytes,
                                                 AllocationAlignment alignment,
                                                 AllocationOrigin origin) {
@@ -700,7 +699,8 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
 bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
   // Code space sweeping is only allowed on main thread.
-  return local_heap->is_main_thread() || identity() != CODE_SPACE;
+  return (local_heap && local_heap->is_main_thread()) ||
+         identity() != CODE_SPACE;
 }

 #ifdef DEBUG
@@ -376,7 +376,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   // a memory area of the given size in it. If successful the method returns
   // the address and size of the area.
   base::Optional<std::pair<Address, size_t>> ExpandBackground(
-      LocalHeap* local_heap, size_t size_in_bytes);
+      size_t size_in_bytes);

   Page* AllocatePage();
@@ -415,8 +415,7 @@ class V8_EXPORT_PRIVATE PagedSpace
                                       AllocationOrigin origin);

   V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
-  TryAllocationFromFreeListBackground(LocalHeap* local_heap,
-                                      size_t min_size_in_bytes,
+  TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                       size_t max_size_in_bytes,
                                       AllocationAlignment alignment,
                                       AllocationOrigin origin);
@@ -7,6 +7,7 @@
 #include "src/handles/global-handles.h"
 #include "src/heap/array-buffer-sweeper.h"
 #include "src/heap/barrier.h"
+#include "src/heap/concurrent-allocator.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/invalidated-slots-inl.h"
@@ -540,6 +541,15 @@ Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
       large_object_promotion_list_local_(
           &promotion_list->large_object_promotion_list_) {}

+namespace {
+
+ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
+  if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
+    return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+  }
+  return nullptr;
+}
+
+}  // namespace

 Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                      EmptyChunksList* empty_chunks, CopiedList* copied_list,
                      PromotionList* promotion_list,
@@ -554,12 +564,11 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
       copied_size_(0),
       promoted_size_(0),
       allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
-      shared_old_allocator_(heap_->shared_old_allocator_.get()),
+      shared_old_allocator_(CreateSharedOldAllocator(heap_)),
       is_logging_(is_logging),
       is_incremental_marking_(heap->incremental_marking()->IsMarking()),
       is_compacting_(heap->incremental_marking()->IsCompacting()),
-      shared_string_table_(FLAG_shared_string_table &&
-                           (heap->isolate()->shared_isolate() != nullptr)) {}
+      shared_string_table_(shared_old_allocator_.get() != nullptr) {}

 void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
                                                  int size) {
@@ -741,6 +750,7 @@ void Scavenger::Finalize() {
   heap()->IncrementPromotedObjectsSize(promoted_size_);
   collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
   allocator_.Finalize();
+  if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
   empty_chunks_local_.Publish();
   ephemeron_table_list_local_.Publish();
   for (auto it = ephemeron_remembered_set_.begin();
@@ -197,7 +197,7 @@ class Scavenger {
   size_t copied_size_;
   size_t promoted_size_;
   EvacuationAllocator allocator_;
-  ConcurrentAllocator* shared_old_allocator_ = nullptr;
+  std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;

   SurvivingNewLargeObjectsMap surviving_new_large_objects_;
   EphemeronRememberedSet ephemeron_remembered_set_;