Commit e7c2a24e authored by Rong Wang, committed by Commit Bot

[heap] Additional V8_DISABLE_WRITE_BARRIERS guards

This CL adds guards needed to disable write barriers that involve
referencing pages via address arithmetic, as required by the
third-party heap implementation.

Change-Id: I1d3f572d48015e5c8cf691b2dc71a32834621c2f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1781008
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63644}
parent faa0b50d
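
The diff below uses two guard styles: a runtime check of FLAG_disable_write_barriers added to each barrier call site, and a compile-time #ifndef V8_DISABLE_WRITE_BARRIERS block that removes the barrier call entirely. The following is a minimal standalone sketch (not V8 code) of those two patterns; Slot, EmitWriteBarrier, DisableWriteBarriersFlag, and the two Store helpers are hypothetical stand-ins used only for illustration.

// Sketch of the two write-barrier guard styles applied in this CL.
// Assumes made-up types; in V8 the corresponding names are
// FLAG_disable_write_barriers, ObjectSlot, WriteBarrierForCode, etc.
#include <cstdio>

// Runtime guard: a flag consulted at every barrier site, as in the
// RelocInfo::set_target_object and Deserializer changes below.
static bool DisableWriteBarriersFlag = true;

struct Slot {
  void* host;   // object containing the slot
  void* value;  // value most recently stored into the slot
};

static void EmitWriteBarrier(const Slot& slot) {
  std::printf("write barrier: host %p -> value %p\n", slot.host, slot.value);
}

static void StoreWithRuntimeGuard(Slot& slot, void* value) {
  slot.value = value;
  if (!DisableWriteBarriersFlag) {  // mirrors `!FLAG_disable_write_barriers`
    EmitWriteBarrier(slot);
  }
}

// Compile-time guard: the barrier call is compiled out entirely, as in the
// Factory, Heap, Map, and HeapObject changes below.
static void StoreWithCompileTimeGuard(Slot& slot, void* value) {
  slot.value = value;
#ifndef V8_DISABLE_WRITE_BARRIERS
  EmitWriteBarrier(slot);
#endif
}

int main() {
  int host = 0, target = 0;
  Slot slot{&host, nullptr};
  StoreWithRuntimeGuard(slot, &target);      // skipped: flag is set above
  StoreWithCompileTimeGuard(slot, &target);  // emitted unless macro is defined
  return 0;
}
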
......@@ -118,7 +118,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
......
......@@ -711,7 +711,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
......
......@@ -103,7 +103,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
......
......@@ -183,7 +183,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
......
......@@ -159,7 +159,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
......
......@@ -144,7 +144,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
......
......@@ -366,7 +366,7 @@ void RelocInfo::set_target_address(Address target,
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
IsCodeTargetMode(rmode_)) {
IsCodeTargetMode(rmode_) && !FLAG_disable_write_barriers) {
Code target_code = Code::GetCodeFromTargetAddress(target);
MarkingBarrierForCode(host(), this, target_code);
}
......
......@@ -150,7 +150,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
......
......@@ -363,7 +363,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
......
......@@ -2703,7 +2703,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
// allocation is on.
heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
// Record all references to embedded objects in the new code object.
#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrierForCode(*new_code);
#endif
}
#ifdef VERIFY_HEAP
......
......@@ -212,6 +212,7 @@ inline void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
inline WriteBarrierMode GetWriteBarrierModeForObject(
HeapObject object, const DisallowHeapAllocation* promise) {
if (FLAG_disable_write_barriers) return SKIP_WRITE_BARRIER;
DCHECK(Heap_PageFlagsAreConsistent(object));
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
......@@ -221,6 +222,9 @@ inline WriteBarrierMode GetWriteBarrierModeForObject(
}
inline bool ObjectInYoungGeneration(Object object) {
// TODO(rong): Fix caller of this function when we deploy
// v8_use_third_party_heap.
if (FLAG_single_generation) return false;
if (object.IsSmi()) return false;
return heap_internals::MemoryChunk::FromHeapObject(HeapObject::cast(object))
->InYoungGeneration();
......
......@@ -5542,6 +5542,7 @@ void Heap::MoveStoreBufferEntriesToRememberedSet() {
}
void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
#ifndef V8_DISABLE_WRITE_BARRIERS
DCHECK(!IsLargeObject(object));
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
......@@ -5549,10 +5550,12 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
store_buffer()->MoveAllEntriesToRememberedSet();
RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
}
#endif
}
#ifdef DEBUG
void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
#ifndef V8_DISABLE_WRITE_BARRIERS
DCHECK(!IsLargeObject(object));
if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
......@@ -5562,10 +5565,12 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
// Old to old slots are filtered with invalidated slots.
CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
#endif
}
#endif
void Heap::ClearRecordedSlotRange(Address start, Address end) {
#ifndef V8_DISABLE_WRITE_BARRIERS
Page* page = Page::FromAddress(start);
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
......@@ -5574,6 +5579,7 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
SlotSet::KEEP_EMPTY_BUCKETS);
}
#endif
}
PagedSpace* PagedSpaceIterator::Next() {
......
......@@ -540,7 +540,9 @@ Code JSFunction::code() const {
void JSFunction::set_code(Code value) {
DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrier(*this, RawField(kCodeOffset), value);
#endif
}
void JSFunction::set_code_no_write_barrier(Code value) {
......
......@@ -675,8 +675,10 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
// barrier.
descriptors.Append(desc);
SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
number_of_own_descriptors + 1);
#endif
}
// Properly mark the map if the {desc} is an "interesting symbol".
if (desc->GetKey()->IsInterestingSymbol()) {
......
......@@ -625,8 +625,10 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
Map current = *this;
#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
to_replace.number_of_descriptors());
#endif
while (current.instance_descriptors(isolate) == to_replace) {
Object next = current.GetBackPointer(isolate);
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
......@@ -1108,8 +1110,10 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrierForDescriptorArray(isolate->heap(), *map, *descriptors,
descriptors->number_of_descriptors());
#endif
Map current = *map;
while (current.instance_descriptors() == *descriptors) {
......@@ -2548,8 +2552,10 @@ void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
set_synchronized_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(number_of_own_descriptors);
#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
number_of_own_descriptors);
#endif
}
// static
......
......@@ -756,11 +756,13 @@ void HeapObject::set_map(Map value) {
#endif
}
set_map_word(MapWord::FromMap(value));
#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
#endif
}
DEF_GETTER(HeapObject, synchronized_map, Map) {
......@@ -774,11 +776,13 @@ void HeapObject::synchronized_set_map(Map value) {
#endif
}
synchronized_set_map_word(MapWord::FromMap(value));
#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
#endif
}
// Unsafe accessor omitting write barrier.
......@@ -793,12 +797,14 @@ void HeapObject::set_map_no_write_barrier(Map value) {
void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
set_map_word(MapWord::FromMap(value));
#ifndef V8_DISABLE_WRITE_BARRIERS
if (mode != SKIP_WRITE_BARRIER) {
DCHECK(!value.is_null());
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
#endif
}
ObjectSlot HeapObject::map_slot() const {
......
......@@ -527,9 +527,10 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
bool write_barrier_needed = (current_object_address != kNullAddress &&
source_space != SnapshotSpace::kNew &&
source_space != SnapshotSpace::kCode);
bool write_barrier_needed =
(current_object_address != kNullAddress &&
source_space != SnapshotSpace::kNew &&
source_space != SnapshotSpace::kCode && !FLAG_disable_write_barriers);
while (current < limit) {
byte data = source_.Get();
switch (data) {
......@@ -846,6 +847,7 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
// Don't update current pointer here as it may be needed for write barrier.
Write(current, heap_object_ref);
if (emit_write_barrier && write_barrier_needed) {
DCHECK_IMPLIES(FLAG_disable_write_barriers, !write_barrier_needed);
HeapObject host_object = HeapObject::FromAddress(current_object_address);
SLOW_DCHECK(isolate->heap()->Contains(host_object));
GenerationalBarrier(host_object, MaybeObjectSlot(current.address()),
......
......@@ -60,7 +60,9 @@ void ObjectDeserializer::FlushICache() {
DCHECK(deserializing_user_code());
for (Code code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrierForCode(code);
#endif
FlushInstructionCache(code.raw_instruction_start(),
code.raw_instruction_size());
}
......