Commit e9af212a authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Check liveness of invalidated objects in UpdateUntypedPointers

This is a follow-up CL to https://crrev.com/c/3623542.

When updating pointers during a full GC, a page might not have been
swept yet. In that case there might be invalid objects in free memory.
Since these objects might be dead, their maps might already have been
reclaimed as well.

The previous CL cached the size of invalid objects in order to avoid
accessing an invalid object's map. However, as soon as a slot lies
within an invalid object, we also need to check whether that slot still
holds a tagged pointer, and that check requires map access. This CL
checks the marking bits of invalid objects: slots in dead invalid
objects are treated as invalid without consulting the map.
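
The idea in a minimal sketch (simplified from the filter code in this
CL; InvalidatedObjectInfo and its fields are the ones introduced below,
the helper itself is illustrative only):

  // A slot inside an invalidated object may only be checked against the
  // object's map if the object is still live (marked black); dead
  // objects may have had their maps reclaimed, so all of their slots
  // are treated as invalid.
  bool IsValidSlotWithin(const InvalidatedObjectInfo& current, Address slot) {
    int offset = static_cast<int>(slot - current.address);
    DCHECK_LT(offset, current.size);     // slot is inside the object
    if (!current.is_live) return false;  // dead: no safe map access
    if (offset == 0) return true;        // map word is always tagged
    HeapObject object = HeapObject::FromAddress(current.address);
    return object.IsValidSlot(object.map(), offset);
  }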

Bug: v8:12578, chromium:1316289
Change-Id: Ie1d736f897a2994dbed7bfb95ed37732cd3b0882
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3596123
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80609}
parent 2cb5a08f
@@ -5,8 +5,10 @@
#ifndef V8_HEAP_INVALIDATED_SLOTS_INL_H_
#define V8_HEAP_INVALIDATED_SLOTS_INL_H_
#include "src/base/logging.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects-inl.h"
#include "src/utils/allocation.h"
@@ -20,23 +22,37 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
DCHECK_LE(last_slot_, slot);
last_slot_ = slot;
#endif
if (slot < invalidated_start_) {
if (slot < current_.address) {
return true;
}
while (slot >= next_invalidated_start_) {
while (slot >= next_.address) {
NextInvalidatedObject();
}
int offset = static_cast<int>(slot - invalidated_start_);
int offset = static_cast<int>(slot - current_.address);
// Unlike other remembered sets, OLD_TO_OLD can have slots in the map word.
DCHECK_GE(offset, 0);
DCHECK_IMPLIES(remembered_set_type_ != OLD_TO_OLD, offset > 0);
if (offset < invalidated_size_) {
#if DEBUG
if (current_.is_live) {
HeapObject object = HeapObject::FromAddress(current_.address);
DCHECK_EQ(object.Size(), current_.size);
}
#endif
if (offset < current_.size) {
// Slots in dead invalid objects are all treated as invalid.
if (!current_.is_live) return false;
// Map word is always a valid tagged reference.
if (offset == 0) return true;
HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
// Check whether object has a tagged field at that particular offset.
HeapObject invalidated_object = HeapObject::FromAddress(current_.address);
DCHECK_IMPLIES(marking_state_, marking_state_->IsBlack(invalidated_object));
DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
}
@@ -46,15 +62,14 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
}
void InvalidatedSlotsFilter::NextInvalidatedObject() {
invalidated_start_ = next_invalidated_start_;
invalidated_size_ = next_invalidated_size_;
current_ = next_;
if (iterator_ == iterator_end_) {
next_invalidated_start_ = sentinel_;
next_invalidated_size_ = 0;
next_ = {sentinel_, 0, false};
} else {
next_invalidated_start_ = iterator_->first.address();
next_invalidated_size_ = iterator_->second;
HeapObject object = iterator_->first;
bool is_live = marking_state_ ? marking_state_->IsBlack(object) : true;
next_ = {object.address(), iterator_->second, is_live};
iterator_++;
}
}
......
@@ -4,6 +4,7 @@
#include "src/heap/invalidated-slots.h"
#include "src/base/logging.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -12,24 +13,28 @@
namespace v8 {
namespace internal {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(
MemoryChunk* chunk, LivenessCheck liveness_check) {
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
OLD_TO_OLD);
OLD_TO_OLD, liveness_check);
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(
MemoryChunk* chunk, LivenessCheck liveness_check) {
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
OLD_TO_NEW);
OLD_TO_NEW, liveness_check);
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToShared(MemoryChunk* chunk) {
return InvalidatedSlotsFilter(
chunk, chunk->invalidated_slots<OLD_TO_SHARED>(), OLD_TO_SHARED);
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToShared(
MemoryChunk* chunk, LivenessCheck liveness_check) {
return InvalidatedSlotsFilter(chunk,
chunk->invalidated_slots<OLD_TO_SHARED>(),
OLD_TO_SHARED, liveness_check);
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
RememberedSetType remembered_set_type) {
RememberedSetType remembered_set_type, LivenessCheck liveness_check) {
USE(remembered_set_type);
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
@@ -37,6 +42,14 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
if (liveness_check == LivenessCheck::kYes) {
marking_state_ =
chunk->heap()->mark_compact_collector()->non_atomic_marking_state();
} else {
DCHECK_EQ(LivenessCheck::kNo, liveness_check);
marking_state_ = nullptr;
}
// Invoke NextInvalidatedObject twice, to initialize
// current_ to the first invalidated object and
// next_ to the second one.
......
@@ -23,6 +23,8 @@ namespace internal {
// change.
using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
class MajorNonAtomicMarkingState;
// This class provides the IsValid predicate that takes into account the
// set of invalidated objects in the given memory chunk.
// The sequence of queried slots must be non-decreasing. This allows fast
@@ -31,24 +33,38 @@ using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
// n is the number of IsValid queries.
class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
public:
static InvalidatedSlotsFilter OldToOld(MemoryChunk* chunk);
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
static InvalidatedSlotsFilter OldToShared(MemoryChunk* chunk);
enum class LivenessCheck {
kYes,
kNo,
};
static InvalidatedSlotsFilter OldToOld(MemoryChunk* chunk,
LivenessCheck liveness_check);
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk,
LivenessCheck liveness_check);
static InvalidatedSlotsFilter OldToShared(MemoryChunk* chunk,
LivenessCheck liveness_check);
inline bool IsValid(Address slot);
private:
struct InvalidatedObjectInfo {
Address address;
int size;
bool is_live;
};
explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
InvalidatedSlots* invalidated_slots,
RememberedSetType remembered_set_type);
RememberedSetType remembered_set_type,
LivenessCheck liveness_check);
InvalidatedSlots::const_iterator iterator_;
InvalidatedSlots::const_iterator iterator_end_;
Address sentinel_;
Address invalidated_start_{kNullAddress};
Address next_invalidated_start_{kNullAddress};
int invalidated_size_{0};
int next_invalidated_size_{0};
InvalidatedObjectInfo current_{kNullAddress, 0, false};
InvalidatedObjectInfo next_{kNullAddress, 0, false};
MajorNonAtomicMarkingState* marking_state_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
......
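For reference, a hedged sketch of a typical call site, assembled from the
uses further down in this diff (LivenessCheck::kNo is the common case; kYes
is only needed on unswept pages during a full GC):

  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
      chunk, InvalidatedSlotsFilter::LivenessCheck::kNo);
  RememberedSet<OLD_TO_NEW>::Iterate(
      chunk,
      [&filter](MaybeObjectSlot slot) {
        // IsValid expects slot addresses in non-decreasing order, which
        // remembered-set iteration provides; invalid slots are removed.
        return filter.IsValid(slot.address()) ? KEEP_SLOT : REMOVE_SLOT;
      },
      SlotSet::FREE_EMPTY_BUCKETS);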
@@ -31,6 +31,7 @@
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
@@ -4496,7 +4497,16 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
// Marking bits are already cleared when the page has been swept. This is
// fine, since in that case the sweeper has already removed dead invalid
// objects as well.
InvalidatedSlotsFilter::LivenessCheck liveness_check =
updating_mode_ == RememberedSetUpdatingMode::ALL &&
!chunk_->SweepingDone()
? InvalidatedSlotsFilter::LivenessCheck::kYes
: InvalidatedSlotsFilter::LivenessCheck::kNo;
InvalidatedSlotsFilter filter =
InvalidatedSlotsFilter::OldToNew(chunk_, liveness_check);
int slots = RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, &filter](MaybeObjectSlot slot) {
@@ -4520,7 +4530,8 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
PtrComprCageBase cage_base = heap_->isolate();
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
@@ -4572,8 +4583,8 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (chunk_->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()) {
// Client GCs need to remove invalidated OLD_TO_SHARED slots.
DCHECK(!heap_->IsShared());
InvalidatedSlotsFilter filter =
InvalidatedSlotsFilter::OldToShared(chunk_);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_SHARED>::Iterate(
chunk_,
[&filter](MaybeObjectSlot slot) {
@@ -4817,7 +4828,8 @@ void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
MemoryChunk* chunk = chunk_iterator.Next();
CodePageMemoryModificationScope unprotect_code_page(chunk);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(chunk);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(
chunk, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_SHARED>::Iterate(
chunk,
[cage_base, &filter](MaybeObjectSlot slot) {
@@ -5797,7 +5809,8 @@ class PageMarkingItem : public ParallelWorkItem {
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, task, &filter](MaybeObjectSlot slot) {
......
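The decision rule above, restated as a hypothetical helper (not part of
this CL): only a full update pass can visit unswept pages, and only on
unswept pages are the marking bits of invalid objects still meaningful:

  InvalidatedSlotsFilter::LivenessCheck LivenessCheckFor(
      MemoryChunk* chunk, RememberedSetUpdatingMode mode) {
    // After sweeping, dead invalid objects are gone and marking bits are
    // cleared, so the liveness check would be wrong as well as pointless.
    return mode == RememberedSetUpdatingMode::ALL && !chunk->SweepingDone()
               ? InvalidatedSlotsFilter::LivenessCheck::kYes
               : InvalidatedSlotsFilter::LivenessCheck::kNo;
  }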
@@ -632,7 +632,8 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
CodePageMemoryModificationScope memory_modification_scope(page);
if (page->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
page, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(
page,
[this, &filter](MaybeObjectSlot slot) {
......
@@ -53,7 +53,8 @@ HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
Heap* heap = CcTest::heap();
std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(
page, InvalidatedSlotsFilter::LivenessCheck::kNo);
for (ByteArray byte_array : byte_arrays) {
Address start = byte_array.address() + ByteArray::kHeaderSize;
Address end = byte_array.address() + byte_array.Size();
@@ -75,7 +76,8 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
byte_array.Size());
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(
page, InvalidatedSlotsFilter::LivenessCheck::kNo);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
@@ -102,7 +104,8 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
byte_array.Size());
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(
page, InvalidatedSlotsFilter::LivenessCheck::kNo);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
@@ -132,7 +135,8 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
Address end = byte_array.address() + byte_array.Size();
heap->RightTrimFixedArray(byte_array, byte_array.length());
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(
page, InvalidatedSlotsFilter::LivenessCheck::kNo);
for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
}
@@ -155,7 +159,8 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
byte_array.Size());
}
// All slots must still be valid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(
page, InvalidatedSlotsFilter::LivenessCheck::kNo);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
@@ -181,7 +186,8 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
byte_array.Size());
}
// All slots must still be invalid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(
page, InvalidatedSlotsFilter::LivenessCheck::kNo);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
......