Commit 16191e9a authored by Clemens Hammacher, committed by Commit Bot

[wasm] Add function slots to the far jump table

This adds the --wasm-far-jump-table flag, which enables the extension
of the far jump table with a slot for each wasm function.

R=mstarzinger@chromium.org

Bug: v8:9477
Change-Id: I61cb4592aa8ed75e2772371e9b7dbfdfe9bb0046
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1792907
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63680}
parent 8734a486
@@ -769,6 +769,9 @@ DEFINE_BOOL(wasm_code_gc, true, "enable garbage collection of wasm code")
 DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code")
 DEFINE_BOOL(stress_wasm_code_gc, false,
             "stress test garbage collection of wasm code")
+DEFINE_BOOL(wasm_far_jump_table, false,
+            "use multiple separate code spaces that might require far jumps "
+            "between them")

 // Profiler flags.
 DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
...
@@ -21,7 +21,9 @@ namespace wasm {
 // other purposes:
 // - the far stub table contains one entry per wasm runtime stub (see
 //   {WasmCode::RuntimeStubId}, which jumps to the corresponding embedded
-//   builtin.
+//   builtin, plus (if {FLAG_wasm_far_jump_table} is enabled and not the full
+//   address space can be reached via the jump table) one entry per wasm
+//   function.
 // - the lazy compile table contains one entry per wasm function which jumps to
 //   the common {WasmCompileLazy} builtin and passes the function index that was
 //   invoked.
@@ -85,8 +87,10 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
   // Determine the size of a far jump table containing the given number of
   // slots.
-  static constexpr uint32_t SizeForNumberOfFarJumpSlots(int num_stubs) {
-    return num_stubs * kFarJumpTableSlotSize;
+  static constexpr uint32_t SizeForNumberOfFarJumpSlots(
+      int num_stubs, int num_function_slots) {
+    int num_entries = num_stubs + num_function_slots;
+    return num_entries * kFarJumpTableSlotSize;
   }

   // Translate a slot index to an offset into the lazy compile table.
@@ -115,14 +119,18 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
   }

   static void GenerateFarJumpTable(Address base, Address* stub_targets,
-                                   int num_stubs) {
-    uint32_t table_size = num_stubs * kFarJumpTableSlotSize;
+                                   int num_stubs, int num_function_slots) {
+    uint32_t table_size =
+        SizeForNumberOfFarJumpSlots(num_stubs, num_function_slots);
     // Assume enough space, so the Assembler does not try to grow the buffer.
     JumpTableAssembler jtasm(base, table_size + 256);
     int offset = 0;
-    for (int index = 0; index < num_stubs; ++index) {
+    for (int index = 0; index < num_stubs + num_function_slots; ++index) {
       DCHECK_EQ(offset, FarJumpSlotIndexToOffset(index));
-      jtasm.EmitFarJumpSlot(stub_targets[index]);
+      // Functions slots initially jump to themselves. They are patched before
+      // being used.
+      Address target = index < num_stubs ? stub_targets[index] : base + offset;
+      jtasm.EmitFarJumpSlot(target);
       offset += kFarJumpTableSlotSize;
       DCHECK_EQ(offset, jtasm.pc_offset());
     }
...
@@ -860,9 +860,13 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
   DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]);  // Only called once.
   WasmCodeRefScope code_ref_scope;
   DCHECK_EQ(1, code_space_data_.size());
+  int num_function_slots =
+      kNeedsFarJumpsBetweenCodeSpaces && FLAG_wasm_far_jump_table
+          ? static_cast<int>(module_->num_declared_functions)
+          : 0;
   WasmCode* jump_table = CreateEmptyJumpTableInRegion(
       JumpTableAssembler::SizeForNumberOfFarJumpSlots(
-          WasmCode::kRuntimeStubCount),
+          WasmCode::kRuntimeStubCount, num_function_slots),
       code_space_data_[0].region);
   Address base = jump_table->instruction_start();
   EmbeddedData embedded_data = EmbeddedData::FromBlob();
@@ -880,9 +884,11 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
     runtime_stub_entries_[i] =
         base + JumpTableAssembler::FarJumpSlotIndexToOffset(i);
   }
-  JumpTableAssembler::GenerateFarJumpTable(base, builtin_address,
-                                           WasmCode::kRuntimeStubCount);
+  JumpTableAssembler::GenerateFarJumpTable(
+      base, builtin_address, WasmCode::kRuntimeStubCount, num_function_slots);
   DCHECK_NULL(runtime_stub_table_);
+  // TODO(clemensh): Store this as "far jump table" (instead of "runtime stub
+  // table") per code space.
   runtime_stub_table_ = jump_table;
   DCHECK_NE(kNullAddress, runtime_stub_entries_[0]);
 }
...
@@ -350,9 +350,9 @@ class WasmCodeAllocator {
 class V8_EXPORT_PRIVATE NativeModule final {
  public:
 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
-  static constexpr bool kCanAllocateMoreMemory = false;
+  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
 #else
-  static constexpr bool kCanAllocateMoreMemory = true;
+  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
 #endif

   // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
...
@@ -675,7 +675,8 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
   size_t code_size_estimate =
       wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
   return NewNativeModule(isolate, enabled, code_size_estimate,
-                         wasm::NativeModule::kCanAllocateMoreMemory,
+                         !wasm::NativeModule::kNeedsFarJumpsBetweenCodeSpaces ||
+                             FLAG_wasm_far_jump_table,
                          std::move(module));
 }
...
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment