Commit 003473e6 authored by Maciej Goszczycki, committed by Commit Bot

[heap] Change synchronized_heap uses to not assume heap_ is non-null

Read-only heap sharing clears heap_ in read-only memory chunks because
ReadOnlySpace is shared between multiple isolates.

Bug: v8:7464
Change-Id: I821c94303ab3710c279e6c11a8ca8537aac0d0af
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1642809
Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
Reviewed-by: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61987}
parent f4b6f4af
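Background, for readers outside V8: the TSAN-only code touched in this diff implements the standard release/acquire publication handshake. MemoryChunk::Initialize publishes a chunk's fields (including its mark bits) with a release store to heap_, and concurrent readers perform a dummy acquire load of heap_ so that ThreadSanitizer can pair the two accesses and not flag the mark-bit reads as a data race. Below is a minimal, self-contained sketch of that handshake using std::atomic in place of V8's base:: atomics; all names in it are illustrative, not V8's.

    #include <atomic>
    #include <cassert>
    #include <thread>

    struct Heap {};  // stand-in for v8::internal::Heap

    struct Chunk {
      int mark_bit = 0;                  // ordinary field, written before publication
      std::atomic<Heap*> heap{nullptr};  // plays the role of MemoryChunk::heap_
    };

    int main() {
      Chunk chunk;
      Heap heap;

      std::thread initializer([&] {
        chunk.mark_bit = 1;                                  // initialize fields first...
        chunk.heap.store(&heap, std::memory_order_release);  // ...then publish (release store)
      });

      std::thread marker([&] {
        // Dummy acquire load: its value is unused except to synchronize with
        // the release store above, making the mark_bit read below race-free.
        while (chunk.heap.load(std::memory_order_acquire) == nullptr) {
        }
        assert(chunk.mark_bit == 1);
      });

      initializer.join();
      marker.join();
    }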
@@ -121,11 +121,7 @@ class ConcurrentMarkingVisitor final
   void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
                              HeapObject heap_object) {
 #ifdef THREAD_SANITIZER
-    // Perform a dummy acquire load to tell TSAN that there is no data race
-    // in mark-bit initialization. See MemoryChunk::Initialize for the
-    // corresponding release store.
-    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
-    CHECK_NOT_NULL(chunk->synchronized_heap());
+    MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
 #endif
     if (marking_state_.IsBlackOrGrey(heap_object)) {
       // Weak references with live values are directly processed here to
@@ -528,11 +524,7 @@ class ConcurrentMarkingVisitor final
   void MarkObject(HeapObject object) {
 #ifdef THREAD_SANITIZER
-    // Perform a dummy acquire load to tell TSAN that there is no data race
-    // in mark-bit initialization. See MemoryChunk::Initialize for the
-    // corresponding release store.
-    MemoryChunk* chunk = MemoryChunk::FromAddress(object.address());
-    CHECK_NOT_NULL(chunk->synchronized_heap());
+    MemoryChunk::FromHeapObject(object)->SynchronizedHeapLoad();
 #endif
     if (marking_state_.WhiteToGrey(object)) {
       shared_.Push(object);
@@ -97,8 +97,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
   // with page initialization.
   HeapObject heap_object;
   if (object->GetHeapObject(&heap_object)) {
-    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
-    CHECK_NOT_NULL(chunk->synchronized_heap());
+    MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
   }
 #endif
 }
@@ -540,10 +540,13 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
   return AllocatableMemoryInDataPage();
 }

-Heap* MemoryChunk::synchronized_heap() {
-  return reinterpret_cast<Heap*>(
-      base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+#ifdef THREAD_SANITIZER
+void MemoryChunk::SynchronizedHeapLoad() {
+  CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+            reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+        InReadOnlySpace());
 }
+#endif

 void MemoryChunk::InitializationMemoryFence() {
   base::SeqCst_MemoryFence();
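Note the shape of the fix in the hunk above: the acquire load is kept purely for its synchronization effect, but the old CHECK_NOT_NULL is relaxed so that a null heap_ is accepted when the chunk lives in the shared read-only space. A sketch of just that invariant, again in std::atomic terms rather than V8's base:: atomics, with in_read_only_space standing in for the real InReadOnlySpace() chunk query:

    #include <atomic>
    #include <cassert>

    struct Heap {};

    // Sketch of the relaxed invariant: heap_ may be null, but only for
    // chunks in the shared read-only space.
    void SynchronizedHeapLoadSketch(std::atomic<Heap*>& heap_,
                                    bool in_read_only_space) {
      Heap* heap = heap_.load(std::memory_order_acquire);  // dummy acquire load
      assert(heap != nullptr || in_read_only_space);
    }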
@@ -504,7 +504,12 @@ class MemoryChunk {
     return heap_;
   }

-  Heap* synchronized_heap();
+#ifdef THREAD_SANITIZER
+  // Perform a dummy acquire load to tell TSAN that there is no data race in
+  // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+  // release store.
+  void SynchronizedHeapLoad();
+#endif

   template <RememberedSetType type>
   bool ContainsSlots() {