Commit 3b3f2bbd authored by Michael Starzinger, committed by Commit Bot

[wasm] Introduce jump table index calculation helpers.

This is in preparation for ensuring that jump table slots don't cross
cache line boundaries. It only introduces helper functions for
converting back and forth between "index" and "offset", but makes no
functional changes yet.

R=ahaas@chromium.org
BUG=v8:8018

Change-Id: I6ab525f9b89a6a15414c043a54c9fffb527a1ab6
Reviewed-on: https://chromium-review.googlesource.com/1163517
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54922}
parent a12cbd34
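
For illustration only, here is a minimal standalone sketch of the index/offset conversion this commit encapsulates. The names mirror the new helpers in the diff below; the kJumpTableSlotSize value of 18 is the x64 constant from the patch, and this sketch is not V8 source:

#include <cassert>
#include <cstdint>

// Standalone model of the new helpers (illustrative, not V8 source).
// 18 bytes is the x64 kJumpTableSlotSize from the diff below.
constexpr uint32_t kJumpTableSlotSize = 18;

uint32_t SlotIndexToOffset(uint32_t slot_index) {
  return slot_index * kJumpTableSlotSize;
}

uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
  assert(slot_offset % kJumpTableSlotSize == 0);  // mirrors the DCHECK_EQ
  return slot_offset / kJumpTableSlotSize;
}

int main() {
  // The two conversions are inverses for any valid slot index.
  for (uint32_t i = 0; i < 4; ++i) {
    assert(SlotOffsetToIndex(SlotIndexToOffset(i)) == i);
  }
}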
--- a/src/wasm/jump-table-assembler.h
+++ b/src/wasm/jump-table-assembler.h
@@ -27,6 +27,22 @@ class JumpTableAssembler : public TurboAssembler {
                            reinterpret_cast<void*>(slot_addr), size,
                            CodeObjectRequired::kNo) {}
 
+  // To allow concurrent patching of the jump table entries we need to ensure
+  // that slots do not cross cache-line boundaries. Hence translation between
+  // slot offsets and index is encapsulated in the following methods.
+  static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
+    DCHECK_EQ(0, slot_offset % kJumpTableSlotSize);
+    return slot_offset / kJumpTableSlotSize;
+  }
+  static uint32_t SlotIndexToOffset(uint32_t slot_index) {
+    return slot_index * kJumpTableSlotSize;
+  }
+
+  // Determine the size of a jump table containing the given number of slots.
+  static size_t SizeForNumberOfSlots(uint32_t slot_count) {
+    return slot_count * kJumpTableSlotSize;
+  }
+
 #if V8_TARGET_ARCH_X64
   static constexpr int kJumpTableSlotSize = 18;
 #elif V8_TARGET_ARCH_IA32
@@ -51,22 +67,38 @@ class JumpTableAssembler : public TurboAssembler {
   static constexpr int kJumpTableSlotSize = 1;
 #endif
 
-  void EmitLazyCompileJumpSlot(uint32_t func_index,
-                               Address lazy_compile_target);
-
-  void EmitJumpSlot(Address target);
-
-  void NopBytes(int bytes);
+  static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
+                                      uint32_t func_index,
+                                      Address lazy_compile_target,
+                                      WasmCode::FlushICache flush_i_cache) {
+    Address slot = base + SlotIndexToOffset(slot_index);
+    JumpTableAssembler jtasm(slot);
+    jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
+    jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
+    if (flush_i_cache) {
+      Assembler::FlushICache(slot, kJumpTableSlotSize);
+    }
+  }
 
-  static void PatchJumpTableSlot(Address slot, Address new_target,
+  static void PatchJumpTableSlot(Address base, uint32_t slot_index,
+                                 Address new_target,
                                  WasmCode::FlushICache flush_i_cache) {
-    JumpTableAssembler jsasm(slot);
-    jsasm.EmitJumpSlot(new_target);
-    jsasm.NopBytes(kJumpTableSlotSize - jsasm.pc_offset());
+    Address slot = base + SlotIndexToOffset(slot_index);
+    JumpTableAssembler jtasm(slot);
+    jtasm.EmitJumpSlot(new_target);
+    jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
     if (flush_i_cache) {
      Assembler::FlushICache(slot, kJumpTableSlotSize);
    }
  }
+
+ private:
+  void EmitLazyCompileJumpSlot(uint32_t func_index,
+                               Address lazy_compile_target);
+
+  void EmitJumpSlot(Address target);
+
+  void NopBytes(int bytes);
 };
 
 }  // namespace wasm
...
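
As context for the stated motivation (slots must not cross cache-line boundaries so they can be patched concurrently), a hedged sketch of the underlying problem. kCacheLineSize is a hypothetical stand-in and not a constant in this patch; the actual boundary handling is deferred to a follow-up, as the commit message says:

#include <cstdint>
#include <cstdio>

// Illustrative values only: 18 bytes is the x64 slot size from this patch;
// 64 bytes is a typical cache-line size (not defined anywhere in this patch).
constexpr uint32_t kSlotSize = 18;
constexpr uint32_t kCacheLineSize = 64;

// A slot straddles a cache line if its first and last bytes map to
// different lines. With densely packed 18-byte slots this happens
// regularly, which is why index/offset translation is centralized first.
bool SlotCrossesCacheLine(uint32_t slot_index) {
  uint32_t begin = slot_index * kSlotSize;  // SlotIndexToOffset equivalent
  uint32_t end = begin + kSlotSize - 1;
  return begin / kCacheLineSize != end / kCacheLineSize;
}

int main() {
  for (uint32_t i = 0; i < 8; ++i) {
    std::printf("slot %u crosses a cache line: %s\n", i,
                SlotCrossesCacheLine(i) ? "yes" : "no");
  }
}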
--- a/src/wasm/wasm-code-manager.cc
+++ b/src/wasm/wasm-code-manager.cc
@@ -417,16 +417,11 @@ void NativeModule::SetLazyBuiltin(Handle<Code> code) {
   WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
   // Fill the jump table with jumps to the lazy compile stub.
   Address lazy_compile_target = lazy_builtin->instruction_start();
-  JumpTableAssembler jtasm(
-      jump_table_->instruction_start(),
-      static_cast<int>(jump_table_->instructions().size()) + 256);
   for (uint32_t i = 0; i < num_wasm_functions; ++i) {
-    // Check that the offset in the jump table increases as expected.
-    DCHECK_EQ(i * JumpTableAssembler::kJumpTableSlotSize, jtasm.pc_offset());
-    jtasm.EmitLazyCompileJumpSlot(i + module_->num_imported_functions,
-                                  lazy_compile_target);
-    jtasm.NopBytes((i + 1) * JumpTableAssembler::kJumpTableSlotSize -
-                   jtasm.pc_offset());
+    JumpTableAssembler::EmitLazyCompileJumpSlot(
+        jump_table_->instruction_start(), i,
+        i + module_->num_imported_functions, lazy_compile_target,
+        WasmCode::kNoFlushICache);
   }
   Assembler::FlushICache(jump_table_->instructions().start(),
                          jump_table_->instructions().size());
@@ -600,7 +595,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
   // Only call this if we really need a jump table.
   DCHECK_LT(0, num_wasm_functions);
   OwnedVector<byte> instructions = OwnedVector<byte>::New(
-      num_wasm_functions * JumpTableAssembler::kJumpTableSlotSize);
+      JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
   memset(instructions.start(), 0, instructions.size());
   return AddOwnedCode(Nothing<uint32_t>(),       // index
                       instructions.as_vector(),  // instructions
@@ -619,9 +614,8 @@ void NativeModule::PatchJumpTable(uint32_t func_index, Address target,
                                   WasmCode::FlushICache flush_icache) {
   DCHECK_LE(module_->num_imported_functions, func_index);
   uint32_t slot_idx = func_index - module_->num_imported_functions;
-  Address jump_table_slot = jump_table_->instruction_start() +
-                            slot_idx * JumpTableAssembler::kJumpTableSlotSize;
-  JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, target, flush_icache);
+  JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
+                                         slot_idx, target, flush_icache);
 }
@@ -710,18 +704,17 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
   // Return the jump table slot for that function index.
   DCHECK_NOT_NULL(jump_table_);
   uint32_t slot_idx = func_index - module_->num_imported_functions;
-  DCHECK_LT(slot_idx, jump_table_->instructions().size() /
-                          JumpTableAssembler::kJumpTableSlotSize);
-  return jump_table_->instruction_start() +
-         slot_idx * JumpTableAssembler::kJumpTableSlotSize;
+  uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx);
+  DCHECK_LT(slot_offset, jump_table_->instructions().size());
+  return jump_table_->instruction_start() + slot_offset;
 }
 
 uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
     Address slot_address) const {
   DCHECK(is_jump_table_slot(slot_address));
-  uint32_t offset =
+  uint32_t slot_offset =
       static_cast<uint32_t>(slot_address - jump_table_->instruction_start());
-  uint32_t slot_idx = offset / JumpTableAssembler::kJumpTableSlotSize;
+  uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
   DCHECK_LT(slot_idx, module_->num_declared_functions);
   return module_->num_imported_functions + slot_idx;
 }
@@ -839,8 +832,7 @@ size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
       (sizeof(WasmCode*) * num_wasm_functions /* code table size */) +
       (sizeof(WasmCode) * num_wasm_functions /* code object size */) +
       (kImportSize * module->num_imported_functions /* import size */) +
-      (JumpTableAssembler::kJumpTableSlotSize *
-       num_wasm_functions /* jump table size */);
+      (JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
 
   for (auto& function : module->functions) {
     estimate += kCodeSizeMultiplier * function.code.length();
...
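
The second file relies throughout on the mapping between function indices and slot indices: imported functions get no jump table slot, so a declared function's slot index is its function index shifted down by the import count. A small hedged model of that arithmetic; the module shape values are made up for the example:

#include <cassert>
#include <cstdint>

// Made-up module shape for illustration: 3 imports, 5 declared functions.
constexpr uint32_t kNumImported = 3;
constexpr uint32_t kNumDeclared = 5;

// Mirrors NativeModule::PatchJumpTable: only declared functions have slots.
uint32_t FuncIndexToSlotIndex(uint32_t func_index) {
  assert(func_index >= kNumImported);  // mirrors the DCHECK_LE
  return func_index - kNumImported;
}

// Mirrors NativeModule::GetFunctionIndexFromJumpTableSlot.
uint32_t SlotIndexToFuncIndex(uint32_t slot_index) {
  assert(slot_index < kNumDeclared);  // mirrors the DCHECK_LT
  return kNumImported + slot_index;
}

int main() {
  // Round-trip over every declared function index.
  for (uint32_t f = kNumImported; f < kNumImported + kNumDeclared; ++f) {
    assert(SlotIndexToFuncIndex(FuncIndexToSlotIndex(f)) == f);
  }
}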