Commit c57e8f14 authored by Igor Sheludko, committed by Commit Bot

[heap] Refactor elements write barrier to range write barrier

... in order to make it optimizable for enabled pointer compression.

Bug: v8:9183
Change-Id: I8b92e48cc43dcc823eefb5a8a4a29de7a8ba0e78
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1609545
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61476}
parent d0e889b0
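
The change replaces the FixedArray-specific elements write barrier with a generic range write barrier over a [start, end) span of object slots. As a minimal caller-side sketch (using only names that appear in the hunks below), the old and new patterns are:

  // Before: macro expanding to a generational pass plus a marking pass.
  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(isolate->heap(), *array, 0, length);

  // After: a single range barrier over the touched slots.
  isolate->heap()->WriteBarrierForRange(
      *array, ObjectSlot(array->RawFieldOfElementAt(0)),
      ObjectSlot(array->RawFieldOfElementAt(length)));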
@@ -178,7 +178,7 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
       (IsObjectElementsKind(from_kind) && IsObjectElementsKind(to_kind))
           ? UPDATE_WRITE_BARRIER
           : SKIP_WRITE_BARRIER;
-  to->CopyElements(isolate->heap(), to_start, from, from_start, copy_size,
+  to->CopyElements(isolate, to_start, from, from_start, copy_size,
                    write_barrier_mode);
 }
@@ -470,32 +470,30 @@ static void TraceTopFrame(Isolate* isolate) {
   JavaScriptFrame::PrintTop(isolate, stdout, false, true);
 }
 
-static void SortIndices(
-    Isolate* isolate, Handle<FixedArray> indices, uint32_t sort_size,
-    WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
+static void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
+                        uint32_t sort_size) {
   // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
   // store operations that are safe for concurrent marking.
   AtomicSlot start(indices->GetFirstElementAddress());
-  std::sort(start, start + sort_size,
-            [isolate](Tagged_t elementA, Tagged_t elementB) {
+  AtomicSlot end(start + sort_size);
+  std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
 #ifdef V8_COMPRESS_POINTERS
-              Object a(DecompressTaggedAny(isolate->isolate_root(), elementA));
-              Object b(DecompressTaggedAny(isolate->isolate_root(), elementB));
+    Object a(DecompressTaggedAny(isolate->isolate_root(), elementA));
+    Object b(DecompressTaggedAny(isolate->isolate_root(), elementB));
 #else
-              Object a(elementA);
-              Object b(elementB);
+    Object a(elementA);
+    Object b(elementB);
 #endif
-              if (a->IsSmi() || !a->IsUndefined(isolate)) {
-                if (!b->IsSmi() && b->IsUndefined(isolate)) {
-                  return true;
-                }
-                return a->Number() < b->Number();
-              }
-              return !b->IsSmi() && b->IsUndefined(isolate);
-            });
-  if (write_barrier_mode != SKIP_WRITE_BARRIER) {
-    FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(isolate->heap(), *indices, 0, sort_size);
-  }
+    if (a->IsSmi() || !a->IsUndefined(isolate)) {
+      if (!b->IsSmi() && b->IsUndefined(isolate)) {
+        return true;
+      }
+      return a->Number() < b->Number();
+    }
+    return !b->IsSmi() && b->IsUndefined(isolate);
+  });
+  isolate->heap()->WriteBarrierForRange(*indices, ObjectSlot(start),
+                                        ObjectSlot(end));
 }
 
 static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
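
The SortIndices hunk is the pointer-compression motivation in miniature: std::sort runs over raw Tagged_t values via AtomicSlot iterators and only decompresses inside the comparator. A standalone toy version of the same idea, with plain C++ types and an assumed base address standing in for the isolate root:

#include <algorithm>
#include <cstdint>
#include <vector>

int main() {
  // Assumed "isolate root"; compressed values are 32-bit offsets from it.
  const uint64_t isolate_root = 0x100000000ull;
  std::vector<uint32_t> slots = {0x30, 0x10, 0x20};
  std::sort(slots.begin(), slots.end(),
            [isolate_root](uint32_t a, uint32_t b) {
              // Decompress on demand: reconstruct full values, then compare.
              return isolate_root + a < isolate_root + b;
            });
  return slots.front() == 0x10 ? 0 : 1;
}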
@@ -2239,13 +2237,13 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
                            Handle<FixedArrayBase> backing_store, int dst_index,
                            int src_index, int len, int hole_start,
                            int hole_end) {
-    Heap* heap = isolate->heap();
     Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
     if (len > JSArray::kMaxCopyElements && dst_index == 0 &&
-        heap->CanMoveObjectStart(*dst_elms)) {
+        isolate->heap()->CanMoveObjectStart(*dst_elms)) {
       // Update all the copies of this backing_store handle.
       *dst_elms.location() =
-          BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index))
+          BackingStore::cast(
+              isolate->heap()->LeftTrimFixedArray(*dst_elms, src_index))
               ->ptr();
       receiver->set_elements(*dst_elms);
       // Adjust the hole offset as the array has been shrunk.
@@ -2254,7 +2252,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
       DCHECK_LE(hole_end, backing_store->length());
     } else if (len != 0) {
       WriteBarrierMode mode = GetWriteBarrierMode(KindTraits::Kind);
-      dst_elms->MoveElements(heap, dst_index, src_index, len, mode);
+      dst_elms->MoveElements(isolate, dst_index, src_index, len, mode);
     }
     if (hole_start != hole_end) {
       dst_elms->FillWithHoles(hole_start, hole_end);
...
@@ -38,12 +38,6 @@ V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForCodeSlow(Code host,
 V8_EXPORT_PRIVATE void Heap_MarkingBarrierForCodeSlow(Code host,
                                                       RelocInfo* rinfo,
                                                       HeapObject object);
-V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForElementsSlow(Heap* heap,
-                                                               FixedArray array,
-                                                               int offset,
-                                                               int length);
-V8_EXPORT_PRIVATE void Heap_MarkingBarrierForElementsSlow(Heap* heap,
-                                                          FixedArray object);
 V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
     Heap* heap, HeapObject host, HeapObject descriptor_array,
     int number_of_own_descriptors);
@@ -183,15 +177,6 @@ inline void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
                                 value_heap_object);
 }
 
-inline void GenerationalBarrierForElements(Heap* heap, FixedArray array,
-                                           int offset, int length) {
-  heap_internals::MemoryChunk* array_chunk =
-      heap_internals::MemoryChunk::FromHeapObject(array);
-  if (array_chunk->InYoungGeneration()) return;
-  Heap_GenerationalBarrierForElementsSlow(heap, array, offset, length);
-}
-
 inline void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
                                        HeapObject object) {
   heap_internals::MemoryChunk* object_chunk =
@@ -216,14 +201,6 @@ inline void MarkingBarrier(HeapObject object, MaybeObjectSlot slot,
                            value_heap_object);
 }
 
-inline void MarkingBarrierForElements(Heap* heap, FixedArray array) {
-  heap_internals::MemoryChunk* object_chunk =
-      heap_internals::MemoryChunk::FromHeapObject(array);
-  if (!object_chunk->IsMarking()) return;
-  Heap_MarkingBarrierForElementsSlow(heap, array);
-}
-
 inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo,
                                   HeapObject object) {
   DCHECK(!HasWeakHeapObjectTag(object));
...
@@ -20,13 +20,6 @@ class EphemeronHashTable;
 // Note: In general it is preferred to use the macros defined in
 // object-macros.h.
 
-// Write barrier for FixedArray elements.
-#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
-  do {                                                                 \
-    GenerationalBarrierForElements(heap, array, start, length);        \
-    MarkingBarrierForElements(heap, array);                            \
-  } while (false)
-
 // Combined write barriers.
 void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value);
 void WriteBarrierForCode(Code host);
@@ -37,14 +30,11 @@ void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
                          MaybeObject value);
 void GenerationalEphemeronKeyBarrier(EphemeronHashTable table, ObjectSlot slot,
                                      Object value);
-void GenerationalBarrierForElements(Heap* heap, FixedArray array, int offset,
-                                    int length);
 void GenerationalBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
 
 // Marking write barrier.
 void MarkingBarrier(HeapObject object, ObjectSlot slot, Object value);
 void MarkingBarrier(HeapObject object, MaybeObjectSlot slot, MaybeObject value);
-void MarkingBarrierForElements(Heap* heap, FixedArray array);
 void MarkingBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
 void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
...
@@ -54,6 +54,7 @@
 #include "src/objects/hash-table-inl.h"
 #include "src/objects/maybe-object.h"
 #include "src/objects/shared-function-info.h"
+#include "src/objects/slots-atomic-inl.h"
 #include "src/objects/slots-inl.h"
 #include "src/regexp/jsregexp.h"
 #include "src/runtime-profiler.h"
@@ -107,15 +108,6 @@ void Heap_MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
   Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
 }
 
-void Heap_GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
-                                             int offset, int length) {
-  Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
-}
-
-void Heap_MarkingBarrierForElementsSlow(Heap* heap, FixedArray array) {
-  Heap::MarkingBarrierForElementsSlow(heap, array);
-}
-
 void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
                                                HeapObject descriptor_array,
                                                int number_of_own_descriptors) {
@@ -1507,70 +1499,73 @@ void Heap::StartIdleIncrementalMarking(
                        gc_callback_flags);
 }
 
-void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
-                        WriteBarrierMode mode) {
-  if (len == 0) return;
-
-  DCHECK_NE(array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
-  ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
-  ObjectSlot src = array->RawFieldOfElementAt(src_index);
+void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
+                     const ObjectSlot src_slot, int len,
+                     WriteBarrierMode mode) {
+  DCHECK_NE(len, 0);
+  DCHECK_NE(dst_object->map(), ReadOnlyRoots(this).fixed_cow_array_map());
+  const ObjectSlot dst_end(dst_slot + len);
+  // Ensure no range overflow.
+  DCHECK(dst_slot < dst_end);
+  DCHECK(src_slot < src_slot + len);
   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
-    if (dst < src) {
-      for (int i = 0; i < len; i++) {
-        dst.Relaxed_Store(src.Relaxed_Load());
+    if (dst_slot < src_slot) {
+      // Copy tagged values forward using relaxed load/stores that do not
+      // involve value decompression.
+      const AtomicSlot atomic_dst_end(dst_end);
+      AtomicSlot dst(dst_slot);
+      AtomicSlot src(src_slot);
+      while (dst < atomic_dst_end) {
+        *dst = *src;
         ++dst;
         ++src;
       }
     } else {
-      // Copy backwards.
-      dst += len - 1;
-      src += len - 1;
-      for (int i = 0; i < len; i++) {
-        dst.Relaxed_Store(src.Relaxed_Load());
+      // Copy tagged values backwards using relaxed load/stores that do not
+      // involve value decompression.
+      const AtomicSlot atomic_dst_begin(dst_slot);
+      AtomicSlot dst(dst_slot + len - 1);
+      AtomicSlot src(src_slot + len - 1);
+      while (dst >= atomic_dst_begin) {
+        *dst = *src;
         --dst;
         --src;
       }
     }
   } else {
-    MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
+    MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
   }
   if (mode == SKIP_WRITE_BARRIER) return;
-  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
+  WriteBarrierForRange(dst_object, dst_slot, dst_end);
 }
 
-void Heap::CopyElements(FixedArray dst_array, FixedArray src_array,
-                        int dst_index, int src_index, int len,
-                        WriteBarrierMode mode) {
-  DCHECK_NE(dst_array, src_array);
-  if (len == 0) return;
-
-  DCHECK_NE(dst_array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
-  ObjectSlot dst = dst_array->RawFieldOfElementAt(dst_index);
-  ObjectSlot src = src_array->RawFieldOfElementAt(src_index);
+void Heap::CopyRange(HeapObject dst_object, const ObjectSlot dst_slot,
+                     const ObjectSlot src_slot, int len,
+                     WriteBarrierMode mode) {
+  DCHECK_NE(len, 0);
+  DCHECK_NE(dst_object->map(), ReadOnlyRoots(this).fixed_cow_array_map());
+  const ObjectSlot dst_end(dst_slot + len);
   // Ensure ranges do not overlap.
-  DCHECK(dst + len <= src || src + len <= dst);
+  DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
-    if (dst < src) {
-      for (int i = 0; i < len; i++) {
-        dst.Relaxed_Store(src.Relaxed_Load());
-        ++dst;
-        ++src;
-      }
-    } else {
-      // Copy backwards.
-      dst += len - 1;
-      src += len - 1;
-      for (int i = 0; i < len; i++) {
-        dst.Relaxed_Store(src.Relaxed_Load());
-        --dst;
-        --src;
-      }
+    // Copy tagged values using relaxed load/stores that do not involve value
+    // decompression.
+    const AtomicSlot atomic_dst_end(dst_end);
+    AtomicSlot dst(dst_slot);
+    AtomicSlot src(src_slot);
+    while (dst < atomic_dst_end) {
+      *dst = *src;
+      ++dst;
+      ++src;
     }
   } else {
-    MemCopy(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
+    MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
   }
   if (mode == SKIP_WRITE_BARRIER) return;
-  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, dst_array, dst_index, len);
+  WriteBarrierForRange(dst_object, dst_slot, dst_end);
 }
 
 #ifdef VERIFY_HEAP
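
Note how Heap::MoveRange keeps memmove semantics under concurrent marking: it copies forward when the destination precedes the source and backward otherwise, so an overlapping source range is fully read before being overwritten. A self-contained illustration of that direction rule (generic C++, not the V8 slot types):

#include <cstdio>

// Copy len words, choosing direction so an overlapping source range is
// read before it is clobbered; this is the rule MoveRange applies.
void MoveWords(unsigned* dst, const unsigned* src, int len) {
  if (dst < src) {
    for (int i = 0; i < len; i++) dst[i] = src[i];       // forward
  } else {
    for (int i = len - 1; i >= 0; i--) dst[i] = src[i];  // backward
  }
}

int main() {
  unsigned a[4] = {1, 2, 3, 4};
  MoveWords(a, a + 1, 3);  // overlapping left shift -> {2, 3, 4, 4}
  printf("%u %u %u %u\n", a[0], a[1], a[2], a[3]);
  return 0;
}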
@@ -5848,12 +5843,19 @@ void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
                                             maybe_key);
 }
 
-void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
-                                              int offset, int length) {
-  for (int i = 0; i < length; i++) {
-    if (!InYoungGeneration(array->get(offset + i))) continue;
-    heap->store_buffer()->InsertEntry(
-        array->RawFieldOfElementAt(offset + i).address());
+void Heap::WriteBarrierForRange(HeapObject object, ObjectSlot start_slot,
+                                ObjectSlot end_slot) {
+  // TODO(ishell): iterate values only once and avoid generic decompression.
+  if (!InYoungGeneration(object)) {
+    for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
+      Object value = *slot;
+      if (InYoungGeneration(value)) {
+        store_buffer()->InsertEntry(slot.address());
+      }
+    }
+  }
+  if (incremental_marking()->IsMarking()) {
+    incremental_marking()->RecordWrites(object, start_slot, end_slot);
   }
 }
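
The new Heap::WriteBarrierForRange combines what the removed per-element helpers did: one generational pass that records old-to-young slots in the store buffer, then one marking pass over the same range. A toy model of that two-phase shape; every type and hook here is a stand-in, not V8 API:

#include <vector>

using Slot = unsigned long;
static std::vector<const Slot*> store_buffer;  // stand-in store buffer
static bool marking_on = false;                // stand-in marking state

bool InYoungGeneration(Slot v) { return (v & 1) != 0; }   // fake predicate
void RecordWritesForMarking(const Slot*, const Slot*) {}  // fake hook

void WriteBarrierForRangeSketch(bool host_is_old, const Slot* start,
                                const Slot* end) {
  // Generational phase: only old hosts can hold old-to-young pointers.
  if (host_is_old) {
    for (const Slot* s = start; s < end; ++s) {
      if (InYoungGeneration(*s)) store_buffer.push_back(s);
    }
  }
  // Marking phase: record the whole range once if marking is active.
  if (marking_on) RecordWritesForMarking(start, end);
}

int main() {
  Slot slots[3] = {2, 3, 5};
  WriteBarrierForRangeSketch(/*host_is_old=*/true, slots, slots + 3);
  return store_buffer.size() == 2 ? 0 : 1;
}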
@@ -5889,10 +5891,6 @@ void Heap::MarkingBarrierSlow(HeapObject object, Address slot,
                               value);
 }
 
-void Heap::MarkingBarrierForElementsSlow(Heap* heap, FixedArray array) {
-  heap->incremental_marking()->RecordWrites(array);
-}
-
 void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
                                      HeapObject object) {
   Heap* heap = Heap::FromWritableHeapObject(host);
...
@@ -351,6 +351,11 @@ class Heap {
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
 
+  // Executes generational and/or marking write barrier for a [start, end)
+  // range of non-weak slots inside |object|.
+  V8_EXPORT_PRIVATE void WriteBarrierForRange(HeapObject object,
+                                              ObjectSlot start, ObjectSlot end);
+
   V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
   V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
                                                         Address slot,
@@ -359,15 +364,11 @@ class Heap {
                                                         Address key_slot);
   V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
       Address raw_object, Address address, Isolate* isolate);
-  V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
-      Heap* heap, FixedArray array, int offset, int length);
   V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
       Code host, RelocInfo* rinfo, HeapObject value);
   V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object,
                                                    Address slot,
                                                    HeapObject value);
-  V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(Heap* heap,
-                                                              FixedArray array);
   V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
                                                           RelocInfo* rinfo,
                                                           HeapObject value);
@@ -389,14 +390,15 @@ class Heap {
   inline Address* OldSpaceAllocationTopAddress();
   inline Address* OldSpaceAllocationLimitAddress();
 
-  // Move len elements within a given array from src_index index to dst_index
-  // index.
-  void MoveElements(FixedArray array, int dst_index, int src_index, int len,
-                    WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+  // Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
+  // The source and destination memory ranges can overlap.
+  void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
+                 ObjectSlot src_slot, int len, WriteBarrierMode mode);
 
-  // Copy len elements from src_index of src array to dst_index of dst array.
-  void CopyElements(FixedArray dst, FixedArray src, int dst_index,
-                    int src_index, int len, WriteBarrierMode mode);
+  // Copy len non-weak tagged elements from src_slot to dst_slot of dst_object.
+  // The source and destination memory ranges must not overlap.
+  void CopyRange(HeapObject dst_object, ObjectSlot dst_slot,
+                 ObjectSlot src_slot, int len, WriteBarrierMode mode);
 
   // Initialize a filler object to keep the ability to iterate over the heap
   // when introducing gaps within pages. If slots could have been recorded in
...
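
These declarations encode the usual memmove/memcpy split: MoveRange tolerates overlapping ranges, CopyRange asserts against them, and both take the destination object only so the write barrier can run over the destination slots. A hypothetical call (array, heap, and len assumed in scope) would look like:

  // Overlapping shift within one array, so MoveRange is required:
  heap->MoveRange(array, ObjectSlot(array->RawFieldOfElementAt(0)),
                  ObjectSlot(array->RawFieldOfElementAt(1)), len,
                  UPDATE_WRITE_BARRIER);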
@@ -70,9 +70,8 @@ IncrementalMarking::IncrementalMarking(
   SetState(STOPPED);
 }
 
-bool IncrementalMarking::BaseRecordWrite(HeapObject obj, Object value) {
-  HeapObject value_heap_obj = HeapObject::cast(value);
-  DCHECK(!marking_state()->IsImpossible(value_heap_obj));
+bool IncrementalMarking::BaseRecordWrite(HeapObject obj, HeapObject value) {
+  DCHECK(!marking_state()->IsImpossible(value));
   DCHECK(!marking_state()->IsImpossible(obj));
 #ifdef V8_CONCURRENT_MARKING
   // The write barrier stub generated with V8_CONCURRENT_MARKING does not
@@ -82,14 +81,14 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject obj, Object value) {
   const bool need_recording = marking_state()->IsBlack(obj);
 #endif
 
-  if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
+  if (need_recording && WhiteToGreyAndPush(value)) {
     RestartIfNotMarking();
   }
   return is_compacting_ && need_recording;
 }
 
 void IncrementalMarking::RecordWriteSlow(HeapObject obj, HeapObjectSlot slot,
-                                         Object value) {
+                                         HeapObject value) {
   if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
     // Object is not going to be rescanned we need to record the slot.
     heap_->mark_compact_collector()->RecordSlot(obj, slot,
@@ -117,25 +116,26 @@ void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
   }
 }
 
-void IncrementalMarking::RecordWrites(FixedArray array) {
-  int length = array->length();
-  MarkCompactCollector* collector = heap_->mark_compact_collector();
-  MemoryChunk* source_page = MemoryChunk::FromHeapObject(array);
+void IncrementalMarking::RecordWrites(HeapObject object, ObjectSlot start_slot,
+                                      ObjectSlot end_slot) {
+  MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
   if (source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
-    for (int i = 0; i < length; i++) {
-      Object value = array->get(i);
-      if (value->IsHeapObject()) {
-        BaseRecordWrite(array, HeapObject::cast(value));
+    for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
+      Object value = *slot;
+      HeapObject value_heap_object;
+      if (value.GetHeapObject(&value_heap_object)) {
+        BaseRecordWrite(object, value_heap_object);
       }
     }
   } else {
-    for (int i = 0; i < length; i++) {
-      Object value = array->get(i);
-      if (value->IsHeapObject() &&
-          BaseRecordWrite(array, HeapObject::cast(value))) {
-        collector->RecordSlot(source_page,
-                              HeapObjectSlot(array->RawFieldOfElementAt(i)),
-                              HeapObject::cast(value));
+    MarkCompactCollector* collector = heap_->mark_compact_collector();
+    for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
+      Object value = *slot;
+      HeapObject value_heap_object;
+      if (value.GetHeapObject(&value_heap_object) &&
+          BaseRecordWrite(object, value_heap_object)) {
+        collector->RecordSlot(source_page, HeapObjectSlot(slot),
+                              value_heap_object);
       }
     }
   }
...
@@ -198,18 +198,19 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   // No slots in white objects should be recorded, as some slots are typed and
   // cannot be interpreted correctly if the underlying object does not survive
   // the incremental cycle (stays white).
-  V8_INLINE bool BaseRecordWrite(HeapObject obj, Object value);
+  V8_INLINE bool BaseRecordWrite(HeapObject obj, HeapObject value);
   V8_INLINE void RecordWrite(HeapObject obj, ObjectSlot slot, Object value);
   V8_INLINE void RecordMaybeWeakWrite(HeapObject obj, MaybeObjectSlot slot,
                                       MaybeObject value);
-  void RecordWrites(FixedArray array);
+  void RecordWrites(HeapObject object, ObjectSlot start_slot,
+                    ObjectSlot end_slot);
   void RevisitObject(HeapObject obj);
   // Ensures that all descriptors int range [0, number_of_own_descripts)
   // are visited.
   void VisitDescriptors(HeapObject host, DescriptorArray array,
                         int number_of_own_descriptors);
-  void RecordWriteSlow(HeapObject obj, HeapObjectSlot slot, Object value);
+  void RecordWriteSlow(HeapObject obj, HeapObjectSlot slot, HeapObject value);
   void RecordWriteIntoCode(Code host, RelocInfo* rinfo, HeapObject value);
 
   // Returns true if the function succeeds in transitioning the object
...
@@ -223,16 +223,23 @@ ObjectSlot FixedArray::RawFieldOfElementAt(int index) {
   return RawField(OffsetOfElementAt(index));
 }
 
-void FixedArray::MoveElements(Heap* heap, int dst_index, int src_index, int len,
-                              WriteBarrierMode mode) {
+void FixedArray::MoveElements(Isolate* isolate, int dst_index, int src_index,
+                              int len, WriteBarrierMode mode) {
+  if (len == 0) return;
   DisallowHeapAllocation no_gc;
-  heap->MoveElements(*this, dst_index, src_index, len, mode);
+  ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
+  ObjectSlot src_slot(RawFieldOfElementAt(src_index));
+  isolate->heap()->MoveRange(*this, dst_slot, src_slot, len, mode);
 }
 
-void FixedArray::CopyElements(Heap* heap, int dst_index, FixedArray src,
+void FixedArray::CopyElements(Isolate* isolate, int dst_index, FixedArray src,
                               int src_index, int len, WriteBarrierMode mode) {
+  if (len == 0) return;
   DisallowHeapAllocation no_gc;
-  heap->CopyElements(*this, src, dst_index, src_index, len, mode);
+  ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
+  ObjectSlot src_slot(src->RawFieldOfElementAt(src_index));
+  isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
 }
 
 // Perform a binary search in a fixed array.
@@ -392,8 +399,9 @@ bool FixedDoubleArray::is_the_hole(int index) {
   return get_representation(index) == kHoleNanInt64;
 }
 
-void FixedDoubleArray::MoveElements(Heap* heap, int dst_index, int src_index,
-                                    int len, WriteBarrierMode mode) {
+void FixedDoubleArray::MoveElements(Isolate* isolate, int dst_index,
+                                    int src_index, int len,
+                                    WriteBarrierMode mode) {
   DCHECK_EQ(SKIP_WRITE_BARRIER, mode);
   double* data_start =
       reinterpret_cast<double*>(FIELD_ADDR(*this, kHeaderSize));
...
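
With this hunk the zero-length early return moves into the FixedArray wrappers, which is what lets Heap::MoveRange/CopyRange assert len != 0, and callers now pass the Isolate rather than the Heap. A hypothetical call site (array and isolate assumed in scope):

  // Shift elements 1..3 one slot left; the ranges overlap, which
  // MoveElements (via Heap::MoveRange) explicitly supports.
  array->MoveElements(isolate, /*dst_index=*/0, /*src_index=*/1, /*len=*/3,
                      UPDATE_WRITE_BARRIER);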
@@ -153,10 +153,10 @@ class FixedArray : public FixedArrayBase {
   // Gives access to raw memory which stores the array's data.
   inline ObjectSlot data_start();
 
-  inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
-                           WriteBarrierMode mode);
+  inline void MoveElements(Isolate* isolate, int dst_index, int src_index,
+                           int len, WriteBarrierMode mode);
 
-  inline void CopyElements(Heap* heap, int dst_index, FixedArray src,
+  inline void CopyElements(Isolate* isolate, int dst_index, FixedArray src,
                            int src_index, int len, WriteBarrierMode mode);
 
   inline void FillWithHoles(int from, int to);
@@ -245,8 +245,8 @@ class FixedDoubleArray : public FixedArrayBase {
     return kHeaderSize + length * kDoubleSize;
   }
 
-  inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
-                           WriteBarrierMode mode);
+  inline void MoveElements(Isolate* isolate, int dst_index, int src_index,
+                           int len, WriteBarrierMode mode);
 
   inline void FillWithHoles(int from, int to);
...
@@ -1605,7 +1605,8 @@ class ThreadImpl {
       int dst = static_cast<int>(StackHeight() - (sp_ - dest));
       int src = static_cast<int>(StackHeight() - arity);
       int len = static_cast<int>(arity);
-      isolate_->heap()->MoveElements(reference_stack(), dst, src, len);
+      reference_stack().MoveElements(isolate_, dst, src, len,
+                                     UPDATE_WRITE_BARRIER);
     }
     sp_ = dest + arity;
   }
...