Commit 69d507ca authored by Omer Katz, committed by Commit Bot

cppgc: Various marking data races

This resolves several data races identified by the concurrent marking tests.
The fixes include:
(*) Using atomic accesses in several places where fields are read concurrently by the marker.
(*) Synchronizing page state when new pages are created.
(A minimal sketch of the concurrent access pattern is shown below, after the commit metadata.)

Bug: chromium:1056170
Change-Id: I4a32a44b93a6995a11e3cc75c9446fb8860ae780
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2423717
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70287}
parent dae25c02
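The hunks below all follow the same pattern: fields that a concurrent marking thread may read are accessed through relaxed (or acquire/release) atomics instead of plain loads and stores. The following standalone snippet is a minimal sketch of that pattern using made-up names (HeaderLike, encoded_high); it is not cppgc code.

#include <atomic>
#include <cstdint>
#include <thread>

// A field that the mutator writes while a concurrent marker reads it.
// Plain accesses would be a data race (and a TSAN report); tagging both
// sides as relaxed atomics is enough when no ordering is required.
struct HeaderLike {
  std::atomic<uint16_t> encoded_high{0};
};

int main() {
  HeaderLike h;
  std::thread marker([&h] {
    // Marker side: a stale value is acceptable, so relaxed is sufficient.
    (void)h.encoded_high.load(std::memory_order_relaxed);
  });
  // Mutator side: publish the bits with a relaxed store instead of "=".
  h.encoded_high.store(0x0007, std::memory_order_relaxed);
  marker.join();
  return 0;
}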
@@ -152,8 +152,16 @@ HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size);
-encoded_high_ = GCInfoIndexField::encode(gc_info_index);
encoded_low_ = EncodeSize(size);
+// Objects may get published to the marker without any other synchronization
+// (e.g., write barrier) in which case the in-construction bit is read
+// concurrently which requires reading encoded_high_ atomically. It is ok if
+// this write is not observed by the marker, since the sweeper sets the
+// in-construction bit to 0 and we can rely on that to guarantee a correct
+// answer when checking if objects are in-construction.
+v8::base::AsAtomicPtr(&encoded_high_)
+->store(GCInfoIndexField::encode(gc_info_index),
+std::memory_order_relaxed);
DCHECK(IsInConstruction());
#ifdef DEBUG
CheckApiConstants();
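A rough sketch of the AsAtomicPtr idiom the new constructor relies on: a plain member is reinterpreted as std::atomic<T> for a single tagged access. The helper and struct names below (AsAtomicPtrSketch, HeaderLike) are illustrative only and are not V8's actual implementation.

#include <atomic>
#include <cstdint>

// Illustrative stand-in for v8::base::AsAtomicPtr: view a plain field as an
// atomic so that a single access can be tagged with a memory order.
template <typename T>
std::atomic<T>* AsAtomicPtrSketch(T* ptr) {
  static_assert(sizeof(std::atomic<T>) == sizeof(T),
                "assumes std::atomic<T> is layout-compatible with T");
  return reinterpret_cast<std::atomic<T>*>(ptr);
}

struct HeaderLike {
  uint16_t encoded_high_ = 0;  // normally written with plain assignments
};

int main() {
  HeaderLike h;
  // Mirrors the constructor change: store the encoded bits with a relaxed
  // atomic store because the marker may read them concurrently.
  AsAtomicPtrSketch(&h.encoded_high_)->store(42, std::memory_order_relaxed);
  return AsAtomicPtrSketch(&h.encoded_high_)->load(std::memory_order_relaxed);
}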
@@ -235,7 +243,7 @@ bool HeapObjectHeader::IsYoung() const {
template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsFree() const {
-return GetGCInfoIndex() == kFreeListGCInfoIndex;
+return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
}
bool HeapObjectHeader::IsFinalizable() const {
......
@@ -112,6 +112,7 @@ NormalPage* NormalPage::Create(PageBackend* page_backend,
DCHECK_NOT_NULL(space);
void* memory = page_backend->AllocateNormalPageMemory(space->index());
auto* normal_page = new (memory) NormalPage(space->raw_heap()->heap(), space);
+normal_page->SynchronizedStore();
return normal_page;
}
@@ -189,6 +190,7 @@ LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
auto* heap = space->raw_heap()->heap();
void* memory = page_backend->AllocateLargePageMemory(allocation_size);
LargePage* page = new (memory) LargePage(heap, space, size);
+page->SynchronizedStore();
return page;
}
......
@@ -63,8 +63,24 @@ class V8_EXPORT_PRIVATE BasePage {
const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
const void* address) const;
+// SynchronizedLoad and SynchronizedStore are used to sync pages after they
+// are allocated. std::atomic_thread_fence is sufficient in practice but is
+// not recognized by tsan. Atomic load and store of the |type_| field are
+// added for tsan builds.
+void SynchronizedLoad() const {
+#if defined(THREAD_SANITIZER)
+v8::base::AsAtomicPtr(&type_)->load(std::memory_order_acquire);
+#endif
+}
+void SynchronizedStore() {
+std::atomic_thread_fence(std::memory_order_seq_cst);
+#if defined(THREAD_SANITIZER)
+v8::base::AsAtomicPtr(&type_)->store(type_, std::memory_order_release);
+#endif
+}
protected:
-enum class PageType { kNormal, kLarge };
+enum class PageType : uint8_t { kNormal, kLarge };
BasePage(HeapBase*, BaseSpace*, PageType);
private:
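The two methods added above implement a publish/consume handshake for freshly created pages. Reduced to standard C++ (the PageLike name and layout are simplified assumptions), the pattern looks roughly like this; the real code uses v8::base::AsAtomicPtr on the |type_| field and only compiles the atomic accesses under THREAD_SANITIZER.

#include <atomic>
#include <cstdint>

struct PageLike {
  uint8_t type = 0;  // stands in for BasePage::type_

  // Publisher (allocation thread): a full fence makes all preceding plain
  // writes to the page visible. TSAN does not understand fences, so an
  // additional release store of one field is emitted for TSAN builds.
  void SynchronizedStore() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
#if defined(THREAD_SANITIZER)
    reinterpret_cast<std::atomic<uint8_t>*>(&type)->store(
        type, std::memory_order_release);
#endif
  }

  // Consumer (marking thread): pairs with the release store above.
  void SynchronizedLoad() const {
#if defined(THREAD_SANITIZER)
    reinterpret_cast<const std::atomic<uint8_t>*>(&type)->load(
        std::memory_order_acquire);
#endif
  }
};

int main() {
  PageLike page;
  page.SynchronizedStore();  // called by NormalPage/LargePage ::Create
  page.SynchronizedLoad();   // called before reading page fields concurrently
  return 0;
}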
@@ -247,6 +263,13 @@ HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
template <HeapObjectHeader::AccessMode mode>
const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
const void* address) const {
+// This method might be called for |address| found via a Trace method of
+// another object. If |address| is on a newly allocated page, there will
+// be no sync between the page allocation and a concurrent marking thread,
+// resulting in a race with page initialization (specifically with writing
+// the page |type_| field). This can occur when tracing a Member holding a
+// reference to a mixin type.
+SynchronizedLoad();
const HeapObjectHeader* header =
ObjectHeaderFromInnerAddressImpl<mode>(this, address);
DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
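Why the lookup above touches the page header at all: the marker may only have an interior pointer (e.g., a Member pointing at a mixin subobject), so it first maps the address back to its page and then reads page metadata, which is what races with page initialization. A toy illustration of that masking step, with an assumed page size and names (kSketchPageSize, PageFromInnerAddress) that differ from cppgc's real constants:

#include <cstdint>

// Assumed, illustrative page granularity; not cppgc's actual constant.
constexpr uintptr_t kSketchPageSize = 4096;
constexpr uintptr_t kSketchPageBaseMask = ~(kSketchPageSize - 1);

// Map any interior pointer back to the start of its (hypothetical) page,
// where the page header with the |type_| field would live.
inline const void* PageFromInnerAddress(const void* inner) {
  return reinterpret_cast<const void*>(
      reinterpret_cast<uintptr_t>(inner) & kSketchPageBaseMask);
}

int main() {
  alignas(kSketchPageSize) static char fake_page[kSketchPageSize];
  const void* base = PageFromInnerAddress(&fake_page[1234]);
  return base == fake_page ? 0 : 1;
}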
......
@@ -111,7 +111,7 @@ void MarkingState::MarkAndPush(const void* object, TraceDescriptor desc) {
void MarkingState::MarkAndPush(HeapObjectHeader& header, TraceDescriptor desc) {
DCHECK_NOT_NULL(desc.callback);
-if (header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
+if (header.IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
not_fully_constructed_worklist_.Push(&header);
} else if (MarkNoPush(header)) {
marking_worklist_.Push(desc);
@@ -123,7 +123,7 @@ bool MarkingState::MarkNoPush(HeapObjectHeader& header) {
DCHECK_EQ(&heap_, BasePage::FromPayload(&header)->heap());
// Never mark free space objects. This would e.g. hint to marking a promptly
// freed backing store.
-DCHECK(!header.IsFree());
+DCHECK(!header.IsFree<HeapObjectHeader::AccessMode::kAtomic>());
return header.TryMarkAtomic();
}
@@ -182,10 +182,10 @@ void MarkingState::RegisterWeakCallback(WeakCallback callback,
void MarkingState::AccountMarkedBytes(const HeapObjectHeader& header) {
marked_bytes_ +=
-header.IsLargeObject()
+header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
->PayloadSize()
-: header.GetSize();
+: header.GetSize<HeapObjectHeader::AccessMode::kAtomic>();
}
} // namespace internal
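The call sites above switch from the default (non-atomic) instantiation to the atomic one. A simplified, assumed sketch of how such an AccessMode template parameter typically selects between plain and atomic reads (the real HeapObjectHeader accessors are more involved):

#include <atomic>
#include <cstdint>

enum class AccessMode { kNonAtomic, kAtomic };

struct HeaderLike {
  uint16_t encoded = 0;

  template <AccessMode mode = AccessMode::kNonAtomic>
  uint16_t Encoded() const {
    if (mode == AccessMode::kAtomic) {
      // Marker threads must not race with mutator writes to this field.
      return reinterpret_cast<const std::atomic<uint16_t>*>(&encoded)->load(
          std::memory_order_relaxed);
    }
    return encoded;  // plain read; only valid on the owning thread
  }
};

int main() {
  HeaderLike h;
  uint16_t on_mutator = h.Encoded();                      // default mode
  uint16_t on_marker = h.Encoded<AccessMode::kAtomic>();  // atomic mode
  return on_mutator + on_marker;
}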
......
@@ -128,9 +128,11 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
#endif
auto* header = new (raw) HeapObjectHeader(size, gcinfo);
+// The marker needs to find the object start concurrently.
NormalPage::From(BasePage::FromPayload(header))
->object_start_bitmap()
-.SetBit(reinterpret_cast<ConstAddress>(header));
+.SetBit<HeapObjectHeader::AccessMode::kAtomic>(
+reinterpret_cast<ConstAddress>(header));
return header->Payload();
}
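Setting the object-start bit atomically matters because the concurrent marker reads the bitmap while the allocator writes it. A minimal sketch of an atomic bit set (the TinyBitmapSketch layout is assumed; it is not cppgc's ObjectStartBitmap):

#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>

class TinyBitmapSketch {
 public:
  // The read-modify-write must be a single atomic fetch_or; a separate
  // load/OR/store sequence could lose a concurrently set bit and would
  // still race with concurrent readers.
  void SetBitAtomic(size_t index) {
    auto* cell = reinterpret_cast<std::atomic<uint8_t>*>(&bits_[index / 8]);
    cell->fetch_or(static_cast<uint8_t>(1u << (index % 8)),
                   std::memory_order_relaxed);
  }

  bool CheckBit(size_t index) const {
    auto* cell =
        reinterpret_cast<const std::atomic<uint8_t>*>(&bits_[index / 8]);
    return cell->load(std::memory_order_relaxed) & (1u << (index % 8));
  }

 private:
  std::array<uint8_t, 32> bits_{};
};

int main() {
  TinyBitmapSketch bitmap;
  bitmap.SetBitAtomic(42);             // allocation path publishes the start
  return bitmap.CheckBit(42) ? 0 : 1;  // marker observes it without racing
}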
......
@@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
-Address offset_;
+const Address offset_;
// The bitmap contains a bit for every kGranularity-aligned address on
// a NormalPage, i.e., for a page of size kBlinkPageSize.
std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
......