Commit 08a4fe71 authored by Dan Elphick, committed by Commit Bot

[heap] Delete Heap read-only root accessors

This is a clean-up to completely remove the Heap accessors for roots
accessible via ReadOnlyRoots. Where previously ReadOnlyRoots called the
Heap accessor, now it accesses the roots array directly using the root
index (since ReadOnlyRoots is a friend of Heap).

Also clean up several cases where private Heap accessors were still
being used by Heap and its friends.

Bug: v8:7786
Change-Id: Iaca2e17b22822b30d395dec6250a4d5ae496c983
Reviewed-on: https://chromium-review.googlesource.com/1127172
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54297}
parent 8fe6b87f
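
For orientation, the access pattern this CL standardizes on is the C++ friend-class idiom sketched below. This is a minimal, hypothetical stand-in with simplified names (Object, roots_, kUndefinedValueRootIndex), not V8's actual declarations:

// Minimal sketch of the friend-based root access pattern (hypothetical
// stand-ins, not V8's real classes).
class Object {};
class Heap;

class ReadOnlyRoots {
 public:
  explicit ReadOnlyRoots(Heap* heap) : heap_(heap) {}
  inline Object* undefined_value();  // reads Heap's private roots_ directly

 private:
  Heap* heap_;
};

class Heap {
  enum RootListIndex { kUndefinedValueRootIndex, kRootListLength };
  // Friendship lets ReadOnlyRoots reach roots_ and the index enum without
  // Heap exposing a per-root accessor.
  friend class ReadOnlyRoots;
  Object* roots_[kRootListLength];
};

inline Object* ReadOnlyRoots::undefined_value() {
  return heap_->roots_[Heap::kUndefinedValueRootIndex];
}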
@@ -1562,7 +1562,7 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
   new_script->set_column_offset(script->column_offset());
   new_script->set_context_data(script->context_data());
   new_script->set_type(script->type());
-  new_script->set_line_ends(heap->undefined_value());
+  new_script->set_line_ends(ReadOnlyRoots(heap).undefined_value());
   new_script->set_eval_from_shared_or_wrapped_arguments(
       script->eval_from_shared_or_wrapped_arguments());
   new_script->set_shared_function_infos(*empty_weak_fixed_array(),
......
@@ -47,21 +47,9 @@ HeapObject* AllocationResult::ToObjectChecked() {
 #define ROOT_ACCESSOR(type, name, camel_name) \
   type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
-ROOT_LIST(ROOT_ACCESSOR)
+MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
-
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
-  Map* Heap::name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
-STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
-  Map* Heap::name##_map() {                                  \
-    return Map::cast(roots_[k##Name##Size##MapRootIndex]);   \
-  }
-ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
-#undef ALLOCATION_SITE_MAP_ACCESSOR
 
 #define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
   Map* Heap::name##_map() {                               \
     return Map::cast(roots_[k##Name##Size##MapRootIndex]); \
@@ -69,22 +57,6 @@ ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
 DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
 #undef DATA_HANDLER_MAP_ACCESSOR
-
-#define STRING_ACCESSOR(name, str) \
-  String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
-INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) \
-  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
-PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, description) \
-  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
-PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
-WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
 
 #define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
   AccessorInfo* Heap::accessor_name##_accessor() { \
     return AccessorInfo::cast(roots_[k##AccessorName##AccessorRootIndex]); \
@@ -472,7 +444,7 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
   // below (memento_address == top) ensures that this is safe. Mark the word as
   // initialized to silence MemorySanitizer warnings.
   MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
-  if (candidate_map != allocation_memento_map()) {
+  if (candidate_map != ReadOnlyRoots(this).allocation_memento_map()) {
     return nullptr;
   }
@@ -557,7 +529,8 @@ void Heap::ExternalStringTable::AddString(String* string) {
 }
 
 Oddball* Heap::ToBoolean(bool condition) {
-  return condition ? true_value() : false_value();
+  ReadOnlyRoots roots(this);
+  return condition ? roots.true_value() : roots.false_value();
 }
 
 uint32_t Heap::HashSeed() {
......
@@ -931,7 +931,7 @@ void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
 void Heap::InvalidateCodeDeoptimizationData(Code* code) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
   CodePageMemoryModificationScope modification_scope(chunk);
-  code->set_deoptimization_data(empty_fixed_array());
+  code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
 }
 
 void Heap::DeoptMarkedAllocationSites() {
@@ -1538,7 +1538,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                         int len, WriteBarrierMode mode) {
   if (len == 0) return;
 
-  DCHECK(array->map() != fixed_cow_array_map());
+  DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
   Object** dst = array->data_start() + dst_index;
   Object** src = array->data_start() + src_index;
   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
@@ -2897,7 +2897,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
   // In large object space the object's start must coincide with chunk
   // and thus the trick is just not applicable.
   DCHECK(!lo_space()->Contains(object));
-  DCHECK(object->map() != fixed_cow_array_map());
+  DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
 
   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
@@ -2990,7 +2990,7 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
          object->IsWeakFixedArray());
 
   // For now this trick is only applied to objects in new and paged space.
-  DCHECK(object->map() != fixed_cow_array_map());
+  DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
 
   if (bytes_to_trim == 0) {
     DCHECK_EQ(elements_to_trim, 0);
@@ -4531,7 +4531,7 @@ HeapObject* Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
   HeapObject* result;
   AllocationResult alloc = AllocateRaw(size, space, alignment);
   if (alloc.To(&result)) {
-    DCHECK(result != exception());
+    DCHECK(result != ReadOnlyRoots(this).exception());
     return result;
   }
   // Two GCs before panicking. In newspace will almost always succeed.
@@ -4540,7 +4540,7 @@ HeapObject* Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
                        GarbageCollectionReason::kAllocationFailure);
     alloc = AllocateRaw(size, space, alignment);
     if (alloc.To(&result)) {
-      DCHECK(result != exception());
+      DCHECK(result != ReadOnlyRoots(this).exception());
       return result;
     }
   }
@@ -4560,7 +4560,7 @@ HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
     alloc = AllocateRaw(size, space, alignment);
   }
   if (alloc.To(&result)) {
-    DCHECK(result != exception());
+    DCHECK(result != ReadOnlyRoots(this).exception());
     return result;
   }
   // TODO(1181417): Fix this.
@@ -4574,7 +4574,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
   AllocationResult alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
   HeapObject* result;
   if (alloc.To(&result)) {
-    DCHECK(result != exception());
+    DCHECK(result != ReadOnlyRoots(this).exception());
     return result;
   }
   // Two GCs before panicking.
@@ -4583,7 +4583,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
                        GarbageCollectionReason::kAllocationFailure);
     alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
     if (alloc.To(&result)) {
-      DCHECK(result != exception());
+      DCHECK(result != ReadOnlyRoots(this).exception());
       return result;
     }
   }
@@ -4594,7 +4594,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
     alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
   }
   if (alloc.To(&result)) {
-    DCHECK(result != exception());
+    DCHECK(result != ReadOnlyRoots(this).exception());
     return result;
   }
   // TODO(1181417): Fix this.
@@ -4801,8 +4801,8 @@ void Heap::TracePossibleWrapper(JSObject* js_object) {
   DCHECK(js_object->WasConstructedFromApiFunction());
   if (js_object->GetEmbedderFieldCount() >= 2 &&
       js_object->GetEmbedderField(0) &&
-      js_object->GetEmbedderField(0) != undefined_value() &&
-      js_object->GetEmbedderField(1) != undefined_value()) {
+      js_object->GetEmbedderField(0) != ReadOnlyRoots(this).undefined_value() &&
+      js_object->GetEmbedderField(1) != ReadOnlyRoots(this).undefined_value()) {
     DCHECK_EQ(0,
               reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
     local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
@@ -5073,7 +5073,7 @@ void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
     new_length += 2;
   }
   number_of_disposed_maps_ = new_number_of_disposed_maps;
-  HeapObject* undefined = undefined_value();
+  HeapObject* undefined = ReadOnlyRoots(this).undefined_value();
   for (int i = new_length; i < length; i++) {
     retained_maps->Set(i, HeapObjectReference::Strong(undefined));
   }
@@ -5651,7 +5651,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
   //
   // Since this function is used for debugging only, we do not place
   // asserts here, but check everything explicitly.
-  if (obj->map() == one_pointer_filler_map()) return false;
+  if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
   InstanceType type = obj->map()->instance_type();
   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
   AllocationSpace src = chunk->owner()->identity();
@@ -5716,7 +5716,7 @@ Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
 bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
   Map* map = GcSafeMapOfCodeSpaceObject(code);
-  DCHECK(map == code_map());
+  DCHECK(map == ReadOnlyRoots(this).code_map());
   if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
   Address start = code->address();
   Address end = code->address() + code->SizeFromMap(map);
......
@@ -791,39 +791,10 @@ class Heap {
   // ===========================================================================
   // Root set access. ==========================================================
   // ===========================================================================
-  // Heap root getters.
- private:
   friend class ReadOnlyRoots;
-  // RO_SPACE objects should be accessed via ReadOnlyRoots.
-#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
-  STRONG_READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
-
-#define STRING_ACCESSOR(name, str) inline String* name();
-  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) inline Symbol* name();
-  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
-  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
-  WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-  // Utility type maps.
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
-  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
-  inline Map* name##_map();
-  ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
-#undef ALLOCATION_SITE_MAP_ACCESSOR
 
  public:
   // Heap root getters.
 #define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
   MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
......
@@ -38,7 +38,7 @@ void Isolate::set_pending_exception(Object* exception_obj) {
 
 void Isolate::clear_pending_exception() {
   DCHECK(!thread_local_top_.pending_exception_->IsException(this));
-  thread_local_top_.pending_exception_ = heap_.the_hole_value();
+  thread_local_top_.pending_exception_ = ReadOnlyRoots(this).the_hole_value();
 }
@@ -60,7 +60,7 @@ void Isolate::clear_wasm_caught_exception() {
 }
 
 void Isolate::clear_pending_message() {
-  thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
+  thread_local_top_.pending_message_obj_ = ReadOnlyRoots(this).the_hole_value();
 }
@@ -73,13 +73,14 @@ Object* Isolate::scheduled_exception() {
 bool Isolate::has_scheduled_exception() {
   DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
-  return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
+  return thread_local_top_.scheduled_exception_ !=
+         ReadOnlyRoots(this).the_hole_value();
 }
 
 void Isolate::clear_scheduled_exception() {
   DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
-  thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
+  thread_local_top_.scheduled_exception_ = ReadOnlyRoots(this).the_hole_value();
 }
 
 bool Isolate::is_catchable_by_javascript(Object* exception) {
......
@@ -1048,7 +1048,7 @@ Object* Isolate::StackOverflow() {
 
 Object* Isolate::TerminateExecution() {
-  return Throw(heap_.termination_exception(), nullptr);
+  return Throw(ReadOnlyRoots(this).termination_exception(), nullptr);
 }
@@ -1057,12 +1057,12 @@ void Isolate::CancelTerminateExecution() {
     try_catch_handler()->has_terminated_ = false;
   }
   if (has_pending_exception() &&
-      pending_exception() == heap_.termination_exception()) {
+      pending_exception() == ReadOnlyRoots(this).termination_exception()) {
     thread_local_top()->external_caught_exception_ = false;
     clear_pending_exception();
   }
   if (has_scheduled_exception() &&
-      scheduled_exception() == heap_.termination_exception()) {
+      scheduled_exception() == ReadOnlyRoots(this).termination_exception()) {
     thread_local_top()->external_caught_exception_ = false;
     clear_scheduled_exception();
   }
@@ -2022,7 +2022,7 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
   PropagatePendingExceptionToExternalTryCatch();
 
   bool is_termination_exception =
-      pending_exception() == heap_.termination_exception();
+      pending_exception() == ReadOnlyRoots(this).termination_exception();
 
   // Do not reschedule the exception if this is the bottom call.
   bool clear_exception = is_bottom_call;
@@ -3007,7 +3007,7 @@ bool Isolate::Init(StartupDeserializer* des) {
   if (create_heap_objects) {
     // Terminate the partial snapshot cache so we can iterate.
-    partial_snapshot_cache_.push_back(heap_.undefined_value());
+    partial_snapshot_cache_.push_back(ReadOnlyRoots(this).undefined_value());
   }
 
   InitializeThreadLocal();
@@ -3072,7 +3072,8 @@ bool Isolate::Init(StartupDeserializer* des) {
   heap_.SetStackLimits();
 
   // Quiet the heap NaN if needed on target platform.
-  if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
+  if (!create_heap_objects)
+    Assembler::QuietNaN(ReadOnlyRoots(this).nan_value());
 
   if (FLAG_trace_turbo) {
     // Create an empty file.
......
@@ -8,6 +8,7 @@
 #include "src/roots.h"
 
 #include "src/heap/heap-inl.h"
+#include "src/objects/api-callbacks.h"
 
 namespace v8 {
@@ -15,34 +16,46 @@ namespace internal {
 ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate) : heap_(isolate->heap()) {}
 
-#define ROOT_ACCESSOR(type, name, camel_name) \
-  type* ReadOnlyRoots::name() { return heap_->name(); }
+#define ROOT_ACCESSOR(type, name, camel_name)                         \
+  type* ReadOnlyRoots::name() {                                       \
+    return type::cast(heap_->roots_[Heap::k##camel_name##RootIndex]); \
+  }
 STRONG_READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
-#define STRING_ACCESSOR(name, str) \
-  String* ReadOnlyRoots::name() { return heap_->name(); }
+#define STRING_ACCESSOR(name, str)                                \
+  String* ReadOnlyRoots::name() {                                 \
+    return String::cast(heap_->roots_[Heap::k##name##RootIndex]); \
+  }
 INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
 #undef STRING_ACCESSOR
 
-#define SYMBOL_ACCESSOR(name) \
-  Symbol* ReadOnlyRoots::name() { return heap_->name(); }
+#define SYMBOL_ACCESSOR(name)                                     \
+  Symbol* ReadOnlyRoots::name() {                                 \
+    return Symbol::cast(heap_->roots_[Heap::k##name##RootIndex]); \
+  }
 PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
 #undef SYMBOL_ACCESSOR
 
-#define SYMBOL_ACCESSOR(name, description) \
-  Symbol* ReadOnlyRoots::name() { return heap_->name(); }
+#define SYMBOL_ACCESSOR(name, description)                        \
+  Symbol* ReadOnlyRoots::name() {                                 \
+    return Symbol::cast(heap_->roots_[Heap::k##name##RootIndex]); \
+  }
 PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
 WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
 #undef SYMBOL_ACCESSOR
 
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
-  Map* ReadOnlyRoots::name##_map() { return heap_->name##_map(); }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name)                     \
+  Map* ReadOnlyRoots::name##_map() {                              \
+    return Map::cast(heap_->roots_[Heap::k##Name##MapRootIndex]); \
+  }
 STRUCT_LIST(STRUCT_MAP_ACCESSOR)
 #undef STRUCT_MAP_ACCESSOR
 
-#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
-  Map* ReadOnlyRoots::name##_map() { return heap_->name##_map(); }
+#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name)            \
+  Map* ReadOnlyRoots::name##_map() {                                    \
+    return Map::cast(heap_->roots_[Heap::k##Name##Size##MapRootIndex]); \
+  }
 ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
 #undef ALLOCATION_SITE_MAP_ACCESSOR
......
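
For reference, after this change an instantiation such as ROOT_ACCESSOR(Oddball, undefined_value, UndefinedValue) (an entry of that shape appears in V8's root list; shown here purely for illustration) expands to roughly:

// Approximate expansion of the new ROOT_ACCESSOR in roots.cc: the accessor
// casts the raw entry of Heap's private roots_ array instead of delegating
// to a now-deleted Heap getter.
Oddball* ReadOnlyRoots::undefined_value() {
  return Oddball::cast(heap_->roots_[Heap::kUndefinedValueRootIndex]);
}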