Commit f7602bb8 authored by Igor Sheludko, committed by Commit Bot

[heap][ptr-compr] Optimize range write barrier

... by combining generational and marking write barriers in one loop.
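
Before this change, Heap::WriteBarrierForRange walked the slot range
once for the generational barrier and IncrementalMarking::RecordWrites
walked it again for the marking barrier, reading each slot value twice.
Now a single templated loop performs both checks, and the set of active
barriers is encoded in a compile-time mode mask so the per-slot loop
carries no runtime mode branches. A minimal standalone sketch of the
dispatch pattern (hypothetical names kModeA/kModeB/ProcessRange, not
V8 code):

  #include <cstdio>

  enum Mode : int { kModeA = 1 << 0, kModeB = 1 << 1 };

  // kMask is a template parameter, so each `if (kMask & ...)` below is
  // folded at compile time; every instantiation contains only the work
  // it actually needs.
  template <int kMask>
  void ProcessRangeImpl(const int* begin, const int* end) {
    for (const int* p = begin; p != end; ++p) {
      if (kMask & kModeA) std::printf("A: %d\n", *p);
      if (kMask & kModeB) std::printf("B: %d\n", *p);
    }
  }

  void ProcessRange(int mask, const int* begin, const int* end) {
    // Translate the runtime mask into a compile-time template argument
    // by enumerating the valid combinations once, outside the loop.
    switch (mask) {
      case 0:
        return;  // Nothing to be done.
      case kModeA:
        return ProcessRangeImpl<kModeA>(begin, end);
      case kModeB:
        return ProcessRangeImpl<kModeB>(begin, end);
      case kModeA | kModeB:
        return ProcessRangeImpl<kModeA | kModeB>(begin, end);
      default:
        return;  // Unreachable for valid masks.
    }
  }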

Bug: v8:7703
Change-Id: I825d530040d3f39143dd2d051dc5a9916e2f2997
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1611541
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61487}
parent a41aaf78
src/heap/heap.cc
@@ -11,6 +11,7 @@
#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
@@ -5839,23 +5840,97 @@ void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
if (!ObjectInYoungGeneration(table) && ObjectInYoungGeneration(key)) {
isolate->heap()->RecordEphemeronKeyWrite(table, key_slot_address);
}
isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(table, key_slot,
maybe_key);
isolate->heap()->incremental_marking()->RecordWrite(table, key_slot,
maybe_key);
}
void Heap::WriteBarrierForRange(HeapObject object, ObjectSlot start_slot,
ObjectSlot end_slot) {
// TODO(ishell): iterate values only once and avoid generic decompression.
if (!InYoungGeneration(object)) {
for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
Object value = *slot;
if (InYoungGeneration(value)) {
store_buffer()->InsertEntry(slot.address());
enum RangeWriteBarrierMode {
kDoGenerational = 1 << 0,
kDoMarking = 1 << 1,
kDoEvacuationSlotRecording = 1 << 2,
};
template <int kModeMask>
void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
ObjectSlot start_slot,
ObjectSlot end_slot) {
// At least one of the generational or marking write barriers must be requested.
STATIC_ASSERT(kModeMask & (kDoGenerational | kDoMarking));
// kDoEvacuationSlotRecording implies kDoMarking.
STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
(kModeMask & kDoMarking));
StoreBuffer* store_buffer = this->store_buffer();
IncrementalMarking* incremental_marking = this->incremental_marking();
MarkCompactCollector* collector = this->mark_compact_collector();
for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
Object value = *slot;
HeapObject value_heap_object;
if (!value.GetHeapObject(&value_heap_object)) continue;
if ((kModeMask & kDoGenerational) &&
Heap::InYoungGeneration(value_heap_object)) {
store_buffer->InsertEntry(slot.address());
}
if ((kModeMask & kDoMarking) &&
incremental_marking->BaseRecordWrite(object, value_heap_object)) {
if (kModeMask & kDoEvacuationSlotRecording) {
collector->RecordSlot(source_page, HeapObjectSlot(slot),
value_heap_object);
}
}
}
}
void Heap::WriteBarrierForRange(HeapObject object, ObjectSlot start_slot,
ObjectSlot end_slot) {
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
base::Flags<RangeWriteBarrierMode> mode;
if (!source_page->InYoungGeneration()) {
mode |= kDoGenerational;
}
if (incremental_marking()->IsMarking()) {
incremental_marking()->RecordWrites(object, start_slot, end_slot);
mode |= kDoMarking;
if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
mode |= kDoEvacuationSlotRecording;
}
}
switch (mode) {
// Nothing to be done.
case 0:
return;
// Generational only.
case kDoGenerational:
return WriteBarrierForRangeImpl<kDoGenerational>(source_page, object,
start_slot, end_slot);
// Marking, no evacuation slot recording.
case kDoMarking:
return WriteBarrierForRangeImpl<kDoMarking>(source_page, object,
start_slot, end_slot);
// Marking with evacuation slot recording.
case kDoMarking | kDoEvacuationSlotRecording:
return WriteBarrierForRangeImpl<kDoMarking | kDoEvacuationSlotRecording>(
source_page, object, start_slot, end_slot);
// Generational and marking, no evacuation slot recording.
case kDoGenerational | kDoMarking:
return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking>(
source_page, object, start_slot, end_slot);
// Generational and marking with evacuation slot recording.
case kDoGenerational | kDoMarking | kDoEvacuationSlotRecording:
return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking |
kDoEvacuationSlotRecording>(
source_page, object, start_slot, end_slot);
default:
UNREACHABLE();
}
}
......
src/heap/heap.h
@@ -1503,6 +1503,13 @@ class Heap {
V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
int size_in_bytes);
// Range write barrier implementation.
template <int kModeMask>
V8_INLINE void WriteBarrierForRangeImpl(MemoryChunk* source_page,
HeapObject object,
ObjectSlot start_slot,
ObjectSlot end_slot);
// Deopts all code that contains allocation instructions which are tenured or
// not tenured. Moreover, it clears the pretenuring allocation site statistics.
void ResetAllAllocationSitesDependentCode(AllocationType allocation);
......
src/heap/incremental-marking-inl.h
@@ -33,24 +33,43 @@ void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
}
}
void IncrementalMarking::RecordWrite(HeapObject obj, ObjectSlot slot,
Object value) {
DCHECK_IMPLIES(slot.address() != kNullAddress, !HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
if (IsMarking() && value->IsHeapObject()) {
RecordWriteSlow(obj, HeapObjectSlot(slot), HeapObject::cast(value));
bool IncrementalMarking::BaseRecordWrite(HeapObject obj, HeapObject value) {
DCHECK(!marking_state()->IsImpossible(value));
DCHECK(!marking_state()->IsImpossible(obj));
// The write barrier stub generated with V8_CONCURRENT_MARKING does not
// check the color of the source object.
const bool need_recording =
V8_CONCURRENT_MARKING_BOOL || marking_state()->IsBlack(obj);
if (need_recording && WhiteToGreyAndPush(value)) {
RestartIfNotMarking();
}
return is_compacting_ && need_recording;
}
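
The old out-of-line BaseRecordWrite (removed from incremental-marking.cc
below) computed need_recording under an #ifdef; the inlined version above
uses V8_CONCURRENT_MARKING_BOOL so that both operands are always compiled
and type-checked. A sketch of the idiom, assuming the macro is defined as
a plain true/false constant mirroring the #ifdef (simplified, not the
exact V8 definition):

  #ifdef V8_CONCURRENT_MARKING
  #define V8_CONCURRENT_MARKING_BOOL true
  #else
  #define V8_CONCURRENT_MARKING_BOOL false
  #endif

  // The disjunction folds to `true` when concurrent marking is
  // configured; otherwise only the source object's color matters.
  bool NeedRecording(bool source_is_black) {
    return V8_CONCURRENT_MARKING_BOOL || source_is_black;
  }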
void IncrementalMarking::RecordMaybeWeakWrite(HeapObject obj,
MaybeObjectSlot slot,
MaybeObject value) {
template <typename TSlot>
void IncrementalMarking::RecordWrite(HeapObject obj, TSlot slot,
typename TSlot::TObject value) {
static_assert(std::is_same<TSlot, ObjectSlot>::value ||
std::is_same<TSlot, MaybeObjectSlot>::value,
"Only ObjectSlot and MaybeObjectSlot are expected here");
DCHECK_NE(slot.address(), kNullAddress);
DCHECK_IMPLIES(!TSlot::kCanBeWeak, !HAS_WEAK_HEAP_OBJECT_TAG((*slot).ptr()));
DCHECK_IMPLIES(!TSlot::kCanBeWeak, !HAS_WEAK_HEAP_OBJECT_TAG(value.ptr()));
// When writing a weak reference, treat it as strong for the purposes of the
// marking barrier.
HeapObject heap_object;
if (IsMarking() && value->GetHeapObject(&heap_object)) {
RecordWriteSlow(obj, HeapObjectSlot(slot), heap_object);
HeapObject value_heap_object;
if (IsMarking() && value.GetHeapObject(&value_heap_object)) {
RecordWriteSlow(obj, HeapObjectSlot(slot), value_heap_object);
}
}
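
The separate strong (RecordWrite) and weak (RecordMaybeWeakWrite) entry
points collapse into one template over the slot type, restricted by a
static_assert. A standalone sketch of the pattern, with hypothetical
StrongSlot/WeakSlot stand-ins for ObjectSlot/MaybeObjectSlot:

  #include <type_traits>

  struct StrongSlot {
    using TObject = long;  // Stand-in for a strong-only tagged value.
    static const bool kCanBeWeak = false;
  };
  struct WeakSlot {
    using TObject = long;  // Stand-in for a possibly-weak tagged value.
    static const bool kCanBeWeak = true;
  };

  template <typename TSlot>
  void RecordWrite(TSlot /*slot*/, typename TSlot::TObject /*value*/) {
    static_assert(std::is_same<TSlot, StrongSlot>::value ||
                      std::is_same<TSlot, WeakSlot>::value,
                  "Only StrongSlot and WeakSlot are expected here");
    // Shared barrier body; TSlot::kCanBeWeak gates weak-only handling
    // at compile time, as the DCHECK_IMPLIES above does in V8.
  }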
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
return true;
}
return false;
}
void IncrementalMarking::RestartIfNotMarking() {
......
src/heap/incremental-marking.cc
@@ -70,29 +70,11 @@ IncrementalMarking::IncrementalMarking(
SetState(STOPPED);
}
bool IncrementalMarking::BaseRecordWrite(HeapObject obj, HeapObject value) {
DCHECK(!marking_state()->IsImpossible(value));
DCHECK(!marking_state()->IsImpossible(obj));
#ifdef V8_CONCURRENT_MARKING
// The write barrier stub generated with V8_CONCURRENT_MARKING does not
// check the color of the source object.
const bool need_recording = true;
#else
const bool need_recording = marking_state()->IsBlack(obj);
#endif
if (need_recording && WhiteToGreyAndPush(value)) {
RestartIfNotMarking();
}
return is_compacting_ && need_recording;
}
void IncrementalMarking::RecordWriteSlow(HeapObject obj, HeapObjectSlot slot,
HeapObject value) {
if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
// The object is not going to be rescanned, so we need to record the slot.
heap_->mark_compact_collector()->RecordSlot(obj, slot,
HeapObject::cast(value));
heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
}
}
@@ -101,8 +83,7 @@ int IncrementalMarking::RecordWriteFromCode(Address raw_obj,
Isolate* isolate) {
HeapObject obj = HeapObject::cast(Object(raw_obj));
MaybeObjectSlot slot(slot_address);
isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
*slot);
isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
// Called by RecordWriteCodeStubAssembler, which doesn't accept void type.
return 0;
}
@@ -116,39 +97,6 @@ void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
}
}
void IncrementalMarking::RecordWrites(HeapObject object, ObjectSlot start_slot,
ObjectSlot end_slot) {
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
if (source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
Object value = *slot;
HeapObject value_heap_object;
if (value.GetHeapObject(&value_heap_object)) {
BaseRecordWrite(object, value_heap_object);
}
}
} else {
MarkCompactCollector* collector = heap_->mark_compact_collector();
for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
Object value = *slot;
HeapObject value_heap_object;
if (value.GetHeapObject(&value_heap_object) &&
BaseRecordWrite(object, value_heap_object)) {
collector->RecordSlot(source_page, HeapObjectSlot(slot),
value_heap_object);
}
}
}
}
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
return true;
}
return false;
}
void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
HeapObject obj) {
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
......
src/heap/incremental-marking.h
@@ -184,7 +184,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool ShouldDoEmbedderStep();
StepResult EmbedderStep(double duration);
inline void RestartIfNotMarking();
V8_INLINE void RestartIfNotMarking();
// {raw_obj} and {slot_address} are raw Address values instead of a
// HeapObject and a MaybeObjectSlot because this is called from
@@ -199,11 +199,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
V8_INLINE bool BaseRecordWrite(HeapObject obj, HeapObject value);
V8_INLINE void RecordWrite(HeapObject obj, ObjectSlot slot, Object value);
V8_INLINE void RecordMaybeWeakWrite(HeapObject obj, MaybeObjectSlot slot,
MaybeObject value);
void RecordWrites(HeapObject object, ObjectSlot start_slot,
ObjectSlot end_slot);
template <typename TSlot>
V8_INLINE void RecordWrite(HeapObject obj, TSlot slot,
typename TSlot::TObject value);
void RevisitObject(HeapObject obj);
// Ensures that all descriptors in range [0, number_of_own_descriptors)
// are visited.
@@ -215,7 +213,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// Returns true if the function succeeds in transitioning the object
// from white to grey.
bool WhiteToGreyAndPush(HeapObject obj);
V8_INLINE bool WhiteToGreyAndPush(HeapObject obj);
// This function is used to color the object black before it undergoes an
// unsafe layout change. This is a part of synchronization protocol with
......