Commit 1744f10a authored by Leszek Swirski, committed by Commit Bot

[GetIsolate] Remove GetHeap in object setters

Remove the GetHeap call in object setters, replacing it with a new
Heap::FromWritableHeapObject method which explicitly checks that the object
is not in RO space; a usage sketch follows the commit header below. The hope
is that this method is used sparingly.

As a drive-by, memoize the heap access in the write barrier macros to avoid
calculating the heap location twice.

Bug: v8:7786
Change-Id: I26d1c41c673eddced17be316da6d51a16ac853e7
Reviewed-on: https://chromium-review.googlesource.com/1114614
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54033}
parent 97f71cdf
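
For context, the pattern this change introduces in setters is roughly the
following illustrative before/after. It is distilled from the diff that
follows, not additional code from the commit; set_example_field and
kExampleOffset are hypothetical names, and the real setters are mostly
generated by the ACCESSORS macros changed further down.

// Before: heap obtained via HeapObject::GetHeap().
void JSObject::set_example_field(Object* value) {
  WRITE_FIELD(this, kExampleOffset, value);
  WRITE_BARRIER(GetHeap(), this, kExampleOffset, value);
}

// After: heap obtained via Heap::FromWritableHeapObject(), which DCHECKs
// that the object is not in a sealed RO_SPACE.
void JSObject::set_example_field(Object* value) {
  WRITE_FIELD(this, kExampleOffset, value);
  WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, kExampleOffset,
                value);
}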
......@@ -392,6 +392,19 @@ bool Heap::InOldSpaceSlow(Address address) {
return old_space_->ContainsSlow(address);
}
// static
Heap* Heap::FromWritableHeapObject(const HeapObject* obj) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
static_cast<ReadOnlySpace*>(chunk->owner())->writable());
Heap* heap = chunk->heap();
SLOW_DCHECK(heap != nullptr);
return heap;
}
bool Heap::ShouldBePromoted(Address old_address) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
......
......@@ -1398,6 +1398,10 @@ class Heap {
inline bool InNewSpaceSlow(Address address);
inline bool InOldSpaceSlow(Address address);
// Find the heap which owns this HeapObject. Should never be called for
// objects in RO space.
static inline Heap* FromWritableHeapObject(const HeapObject* obj);
// ===========================================================================
// Object statistics tracking. ===============================================
// ===========================================================================
......
......@@ -415,7 +415,7 @@ class MemoryChunk {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject* o) {
static MemoryChunk* FromHeapObject(const HeapObject* o) {
return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(o) &
~kAlignmentMask);
}
......@@ -2912,6 +2912,8 @@ class ReadOnlySpace : public PagedSpace {
explicit ReadOnlySpace(Heap* heap);
bool writable() const { return !is_marked_read_only_; }
void ClearStringPaddingIfNeeded();
void MarkAsReadOnly();
......
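
Both MemoryChunk::FromHeapObject and, through it, Heap::FromWritableHeapObject
rely on chunks being aligned: the owning chunk is found by masking the low
bits off the object's address, and the Heap* is then read out of the chunk
header. A rough, self-contained sketch of that arithmetic follows; the
alignment constant and addresses are illustrative, not V8's actual values.

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative alignment; V8 uses MemoryChunk::kAlignment / kAlignmentMask.
  const uint64_t kAlignment = 512 * 1024;
  const uint64_t kAlignmentMask = kAlignment - 1;

  // Hypothetical address of a HeapObject somewhere inside a chunk.
  const uint64_t object_address = 0x200080001234;

  // MemoryChunk::FromHeapObject: mask off the low bits to find the chunk.
  const uint64_t chunk_address = object_address & ~kAlignmentMask;

  // Heap::FromWritableHeapObject then returns chunk->heap(), a field stored
  // in the chunk header, after DCHECKing that the chunk is not in a sealed
  // RO_SPACE.
  std::printf("object %#llx lives in chunk %#llx\n",
              static_cast<unsigned long long>(object_address),
              static_cast<unsigned long long>(chunk_address));
  return 0;
}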
......@@ -1042,14 +1042,15 @@ Map* HeapObject::map() const {
void HeapObject::set_map(Map* value) {
if (value != nullptr) {
#ifdef VERIFY_HEAP
value->GetHeap()->VerifyObjectLayoutChange(this, value);
Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
#endif
}
set_map_word(MapWord::FromMap(value));
if (value != nullptr) {
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
this, nullptr, value);
}
}
......@@ -1061,14 +1062,15 @@ Map* HeapObject::synchronized_map() const {
void HeapObject::synchronized_set_map(Map* value) {
if (value != nullptr) {
#ifdef VERIFY_HEAP
value->GetHeap()->VerifyObjectLayoutChange(this, value);
Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
#endif
}
synchronized_set_map_word(MapWord::FromMap(value));
if (value != nullptr) {
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
this, nullptr, value);
}
}
......@@ -1077,7 +1079,7 @@ void HeapObject::synchronized_set_map(Map* value) {
void HeapObject::set_map_no_write_barrier(Map* value) {
if (value != nullptr) {
#ifdef VERIFY_HEAP
value->GetHeap()->VerifyObjectLayoutChange(this, value);
Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
#endif
}
set_map_word(MapWord::FromMap(value));
......@@ -1089,7 +1091,8 @@ void HeapObject::set_map_after_allocation(Map* value, WriteBarrierMode mode) {
DCHECK_NOT_NULL(value);
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
this, nullptr, value);
}
}
......@@ -1474,8 +1477,8 @@ Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
void WeakCell::clear() {
// Either the garbage collector is clearing the cell or we are simply
// initializing the root empty weak cell.
DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
this == GetHeap()->empty_weak_cell());
DCHECK(Heap::FromWritableHeapObject(this)->gc_state() == Heap::MARK_COMPACT ||
this == Heap::FromWritableHeapObject(this)->empty_weak_cell());
WRITE_FIELD(this, kValueOffset, Smi::kZero);
}
......@@ -1485,7 +1488,7 @@ void WeakCell::initialize(HeapObject* val) {
// We just have to execute the generational barrier here because we never
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
Heap* heap = val->GetHeap();
Heap* heap = Heap::FromWritableHeapObject(this);
WriteBarrierMode mode =
heap->incremental_marking()->marking_state()->IsBlack(this)
? UPDATE_WRITE_BARRIER
......@@ -1753,7 +1756,7 @@ void PropertyArray::set(int index, Object* value) {
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
}
int RegExpMatchInfo::NumberOfCaptureRegisters() {
......@@ -1801,7 +1804,7 @@ void RegExpMatchInfo::SetCapture(int i, int value) {
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
Heap* heap = Heap::FromWritableHeapObject(this);
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
......@@ -1845,7 +1848,8 @@ void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
value, mode);
}
Object** PropertyArray::data_start() {
......@@ -2260,8 +2264,10 @@ int FreeSpace::Size() { return size(); }
FreeSpace* FreeSpace::next() {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == nullptr));
DCHECK(map() == Heap::FromWritableHeapObject(this)->root(
Heap::kFreeSpaceMapRootIndex) ||
(!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
return reinterpret_cast<FreeSpace*>(
Memory::Address_at(address() + kNextOffset));
......@@ -2269,8 +2275,10 @@ FreeSpace* FreeSpace::next() {
void FreeSpace::set_next(FreeSpace* next) {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == nullptr));
DCHECK(map() == Heap::FromWritableHeapObject(this)->root(
Heap::kFreeSpaceMapRootIndex) ||
(!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
base::Relaxed_Store(
reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
......@@ -2279,7 +2287,8 @@ void FreeSpace::set_next(FreeSpace* next) {
FreeSpace* FreeSpace::cast(HeapObject* o) {
SLOW_DCHECK(!o->GetHeap()->deserialization_complete() || o->IsFreeSpace());
SLOW_DCHECK(!Heap::FromWritableHeapObject(o)->deserialization_complete() ||
o->IsFreeSpace());
return reinterpret_cast<FreeSpace*>(o);
}
......@@ -2699,7 +2708,8 @@ void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
Address entry_offset =
kHeaderSize + GetDataEntryOffset(entry, relative_index);
RELAXED_WRITE_FIELD(this, entry_offset, value);
WRITE_BARRIER(GetHeap(), this, static_cast<int>(entry_offset), value);
WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
static_cast<int>(entry_offset), value);
}
ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
......@@ -3147,7 +3157,7 @@ bool AccessorPair::IsJSAccessor(Object* obj) {
template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::ClearEntry(int entry) {
Object* the_hole = this->GetHeap()->the_hole_value();
Object* the_hole = Heap::FromWritableHeapObject(this)->the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
Derived::cast(this)->SetEntry(entry, the_hole, the_hole, details);
}
......
......@@ -10158,6 +10158,9 @@ void FixedArray::Shrink(int new_length) {
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos,
int len) const {
DisallowHeapAllocation no_gc;
// Return early if len == 0 so that we don't try to read the write barrier
// mode off a canonical read-only empty fixed array.
if (len == 0) return;
WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
dest->set(dest_pos+index, get(pos+index), mode);
......
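
The early return matters because GetWriteBarrierMode (changed above) now goes
through Heap::FromWritableHeapObject, which DCHECKs that the receiver is
writable; copying zero elements into the canonical read-only empty fixed array
would otherwise trip that check. The sketch below is illustrative only, not
code from the commit, and source, dest and isolate are placeholders.

// A zero-length copy whose destination is the read-only canonical
// empty_fixed_array now returns before dest->GetWriteBarrierMode(no_gc) is
// ever evaluated, so Heap::FromWritableHeapObject is never asked for the
// heap of an RO_SPACE object.
Handle<FixedArray> dest = isolate->factory()->empty_fixed_array();
source->CopyTo(0, *dest, 0, /*len=*/0);  // safe: early return on len == 0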
......@@ -65,17 +65,18 @@
WRITE_UINT16_FIELD(this, offset, value); \
}
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
type* holder::name() const { \
type* value = type::cast(READ_FIELD(this, offset)); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
WRITE_FIELD(this, offset, value); \
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
type* holder::name() const { \
type* value = type::cast(READ_FIELD(this, offset)); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
WRITE_FIELD(this, offset, value); \
CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
offset, value, mode); \
}
#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
......@@ -83,17 +84,18 @@
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
set_condition) \
MaybeObject* holder::name() const { \
MaybeObject* value = READ_WEAK_FIELD(this, offset); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
WRITE_WEAK_FIELD(this, offset, value); \
CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
set_condition) \
MaybeObject* holder::name() const { \
MaybeObject* value = READ_WEAK_FIELD(this, offset); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
WRITE_WEAK_FIELD(this, offset, value); \
CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
offset, value, mode); \
}
#define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
......@@ -204,35 +206,49 @@
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#define WRITE_BARRIER(heap, object, offset, value) \
heap->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
heap->RecordWrite(object, HeapObject::RawField(object, offset), value);
#define WEAK_WRITE_BARRIER(heap, object, offset, value) \
heap->incremental_marking()->RecordMaybeWeakWrite( \
object, HeapObject::RawMaybeWeakField(object, offset), value); \
heap->RecordWrite(object, HeapObject::RawMaybeWeakField(object, offset), \
value);
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
heap->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
} \
heap->RecordWrite(object, HeapObject::RawField(object, offset), value); \
}
#define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode) \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
heap->incremental_marking()->RecordMaybeWeakWrite( \
object, HeapObject::RawMaybeWeakField(object, offset), value); \
} \
heap->RecordWrite(object, HeapObject::RawMaybeWeakField(object, offset), \
value); \
}
#define WRITE_BARRIER(heap, object, offset, value) \
do { \
Heap* __heap__ = heap; \
__heap__->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
__heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
value); \
} while (false)
#define WEAK_WRITE_BARRIER(heap, object, offset, value) \
do { \
Heap* __heap__ = heap; \
__heap__->incremental_marking()->RecordMaybeWeakWrite( \
object, HeapObject::RawMaybeWeakField(object, offset), value); \
__heap__->RecordWrite( \
object, HeapObject::RawMaybeWeakField(object, offset), value); \
} while (false)
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
do { \
Heap* __heap__ = heap; \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
__heap__->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
} \
__heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
value); \
} \
} while (false)
#define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode) \
do { \
Heap* __heap__ = heap; \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
__heap__->incremental_marking()->RecordMaybeWeakWrite( \
object, HeapObject::RawMaybeWeakField(object, offset), value); \
} \
__heap__->RecordWrite( \
object, HeapObject::RawMaybeWeakField(object, offset), value); \
} \
} while (false)
#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
......
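
The rewritten macros wrap their bodies in do { ... } while (false) so that
each barrier expands to a single statement (safe after an unbraced if/else),
and introduce the __heap__ local so the heap argument, now often a
Heap::FromWritableHeapObject(...) call, is evaluated only once per expansion.
A minimal standalone illustration of that macro pattern, using placeholder
names unrelated to V8:

#include <cstdio>

struct Counter { int hits = 0; };

Counter g_counter;

Counter* LookupCounter() {
  std::puts("lookup");  // printed once per expansion, thanks to the local
  return &g_counter;
}

// Memoize the (possibly expensive) lookup in a local, and wrap the whole body
// in do/while(false) so the macro behaves like one ordinary statement.
#define BUMP_TWICE(get_counter)           \
  do {                                    \
    Counter* counter__ = (get_counter);   \
    counter__->hits++;                    \
    counter__->hits++;                    \
  } while (false)

int main() {
  if (g_counter.hits == 0)  // no braces needed: the macro is one statement
    BUMP_TWICE(LookupCounter());
  std::printf("hits = %d\n", g_counter.hits);  // hits = 2, one "lookup"
  return 0;
}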
......@@ -713,7 +713,7 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
if (scope_info->length() == 0) return -1;
// Inline a GetIsolate-style call here.
// Get the Isolate via the heap.
//
// Ideally we'd pass Isolate* through to this function, however this is mostly
// called from the parser, which is otherwise isolate independent. We can't
......@@ -723,9 +723,7 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
// So, we take the least-ugly approach of manually getting the isolate to be
// able to remove GetIsolate from ScopeInfo in the general case, while
// allowing it in this one particular case.
MemoryChunk* scope_info_chunk = MemoryChunk::FromHeapObject(*scope_info);
DCHECK_NE(scope_info_chunk->owner()->identity(), RO_SPACE);
Isolate* isolate = scope_info_chunk->heap()->isolate();
Isolate* isolate = Heap::FromWritableHeapObject(*scope_info)->isolate();
ContextSlotCache* context_slot_cache = isolate->context_slot_cache();
int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
......