Commit 802a86a4 authored by Jakob Gruber's avatar Jakob Gruber Committed by Commit Bot

[heap] Do not visit the dispatch table

Now that builtins are embedded into the binary unconditionally, GC
visitation can be reduced. The interpreter dispatch table points
directly at embedded instruction starts. It is initialized once in
Isolate::Init, and its contents are immutable afterwards.
Visitation by GC is not needed.

Drive-by: Remove outdated comment on IsWasmRuntimeStub.

Bug: v8:7873
Change-Id: I14edc0beebb31c04f1429346b57ade9e8d838670
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1899773
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64814}
parent d9186855
......@@ -136,9 +136,6 @@ class Builtins {
return kAllBuiltinsAreIsolateIndependent;
}
// Wasm runtime stubs are treated specially by wasm. To guarantee reachability
// through near jumps, their code is completely copied into a fresh off-heap
// area.
static bool IsWasmRuntimeStub(int index);
// Initializes the table of builtin entry points based on the current contents
......
......@@ -4312,23 +4312,11 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
isolate_->IterateDeferredHandles(v);
v->Synchronize(VisitorSynchronization::kHandleScope);
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
// Iterate over the builtin code objects in the heap. Note that it is not
// necessary to iterate over code objects on scavenge collections.
if (!isMinorGC) {
IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
// The dispatch table is set up directly from the builtins using
// InitializeDispatchTable so there is no need to iterate to create it.
if (mode != VISIT_FOR_SERIALIZATION) {
// Currently we iterate the dispatch table to update pointers to possibly
// moved Code objects for bytecode handlers.
// TODO(v8:6666): Remove iteration once builtins are embedded (and thus
// immovable) in every build configuration.
isolate_->interpreter()->IterateDispatchTable(v);
v->Synchronize(VisitorSynchronization::kDispatchTable);
}
}
// Iterate over global handles.
......@@ -4403,7 +4391,7 @@ void Heap::IterateBuiltins(RootVisitor* v) {
FullObjectSlot(builtin_address(i)));
}
// The entry table does not need to be updated if all builtins are embedded.
// The entry table doesn't need to be updated since all builtins are embedded.
STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
}
......
......@@ -89,6 +89,7 @@ Code Interpreter::GetBytecodeHandler(Bytecode bytecode,
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale, Code handler) {
DCHECK(handler.is_off_heap_trampoline());
DCHECK(handler.kind() == Code::BYTECODE_HANDLER);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
dispatch_table_[index] = handler.InstructionStart();
......@@ -103,39 +104,6 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
kEntriesPerOperandScale;
}
// GC root visitation for the interpreter's bytecode dispatch table.
// Each table slot holds the instruction-start address of a bytecode
// handler. Handlers that live on the V8 heap may be moved by the GC, so
// their slots are reported to the visitor and written back afterwards
// with the (possibly relocated) entry address.
void Interpreter::IterateDispatchTable(RootVisitor* v) {
if (!isolate_->serializer_enabled() && isolate_->embedded_blob() != nullptr) {
// If we're not generating a snapshot, then every bytecode handler will be
// off-heap, so there's no point iterating over them.
#ifdef DEBUG
// Sanity check in debug builds: every populated slot must indeed point
// off-heap (into the embedded blob), otherwise skipping iteration would
// leave a movable heap pointer un-updated.
for (int i = 0; i < kDispatchTableSize; i++) {
Address code_entry = dispatch_table_[i];
CHECK(code_entry == kNullAddress ||
InstructionStream::PcIsOffHeap(isolate_, code_entry));
}
#endif // DEBUG
return;
}
// Snapshot build (or no embedded blob): handlers may be heap-allocated
// Code objects that the GC can move, so visit every slot.
for (int i = 0; i < kDispatchTableSize; i++) {
Address code_entry = dispatch_table_[i];
// Skip over off-heap bytecode handlers since they will never move.
if (InstructionStream::PcIsOffHeap(isolate_, code_entry)) continue;
// TODO(jkummerow): Would it hurt to simply do:
// if (code_entry == kNullAddress) continue;
// A null entry leaves `code` as the default (empty) Code value below.
Code code;
if (code_entry != kNullAddress) {
code = Code::GetCodeFromTargetAddress(code_entry);
}
Code old_code = code;
// Report the handler as a strong root; the visitor may rewrite `code`
// in place if the underlying Code object was moved.
v->VisitRootPointer(Root::kDispatchTable, nullptr, FullObjectSlot(&code));
// Only write back when the object actually moved, to avoid touching
// slots (and the null-entry case) unnecessarily.
if (code != old_code) {
dispatch_table_[i] = code.entry();
}
}
}
// Returns the interpreter's interrupt budget, taken from the
// FLAG_interrupt_budget runtime flag.
int Interpreter::InterruptBudget() {
const int budget = FLAG_interrupt_budget;
return budget;
}
......
......@@ -66,9 +66,6 @@ class Interpreter {
void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
Code handler);
// GC support.
void IterateDispatchTable(RootVisitor* v);
// Disassembler support.
V8_EXPORT_PRIVATE const char* LookupNameOfBytecodeHandler(const Code code);
......
......@@ -28,7 +28,6 @@ class CodeDataContainer;
V(kDebug, "(Debugger)") \
V(kCompilationCache, "(Compilation cache)") \
V(kHandleScope, "(Handle scope)") \
V(kDispatchTable, "(Dispatch table)") \
V(kBuiltins, "(Builtins)") \
V(kGlobalHandles, "(Global handles)") \
V(kEternalHandles, "(Eternal handles)") \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment