Commit 04ce2f09 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Optimize runtime stub slots on x64 and ia32

This reduces the size per runtime stub slot by using the same sequence
we plan to use for far jumps.
Note that alignment is not an issue here, since runtime stub slots are
never patched.

R=mstarzinger@chromium.org

Bug: v8:9477
Change-Id: Ida73896bfc26d01f2a3fbccde785928d1ac92380
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1784291
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63564}
parent 8e317c95
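
As context for the description above and the diff below, here is a minimal standalone sketch (not part of this CL) of the size arithmetic behind the new slot sizes. It assumes the x64 slot is the "movq kScratchRegister, imm64; jmp kScratchRegister" sequence from the diff, with kScratchRegister assumed to be r10 (hence the REX.B byte on the jump), and the ia32 slot a single near "jmp rel32":

  // x64: REX.W prefix (1) + opcode 0xB8|reg (1) + 64-bit immediate (8) = 10 bytes.
  constexpr int kMovImm64Size = 1 + 1 + 8;
  // x64: REX.B prefix (1) + opcode 0xFF (1) + ModRM for /4 with r10 (1) = 3 bytes.
  constexpr int kJmpRegSize = 1 + 1 + 1;
  // ia32: opcode 0xE9 (1) + 32-bit relative displacement (4) = 5 bytes.
  constexpr int kJmpRel32Size = 1 + 4;

  static_assert(kMovImm64Size + kJmpRegSize == 13, "new x64 runtime stub slot size");
  static_assert(kJmpRel32Size == 5, "new ia32 runtime stub slot size");
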
@@ -1777,6 +1777,13 @@ void Assembler::emit_mov(Register dst, Immediate64 value, int size) {
   }
 }
 
+void Assembler::movq_imm64(Register dst, int64_t value) {
+  EnsureSpace ensure_space(this);
+  emit_rex(dst, kInt64Size);
+  emit(0xB8 | dst.low_bits());
+  emitq(static_cast<uint64_t>(value));
+}
+
 void Assembler::movq_heap_number(Register dst, double value) {
   EnsureSpace ensure_space(this);
   emit_rex(dst, kInt64Size);
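
For readers unfamiliar with the encoding emitted above, a hypothetical self-contained helper (not V8 API) producing the same 10-byte "mov r64, imm64" byte pattern could look like this, assuming a little-endian host:

  #include <cstdint>
  #include <cstring>

  // REX.W (plus REX.B for r8..r15) + (0xB8 | low three register bits) +
  // 8-byte little-endian immediate = 10 bytes total.
  int EncodeMovRegImm64(uint8_t* buf, int reg, int64_t value) {
    buf[0] = 0x48 | ((reg >> 3) & 1);  // REX.W, plus REX.B if reg is r8..r15.
    buf[1] = 0xB8 | (reg & 7);
    std::memcpy(buf + 2, &value, sizeof(value));
    return 10;
  }
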
@@ -513,12 +513,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void movq_string(Register dst, const StringConstantBase* str);
 
-  // Loads a 64-bit immediate into a register.
+  // Loads a 64-bit immediate into a register, potentially using the constant
+  // pool.
   void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); }
   void movq(Register dst, uint64_t value) {
     movq(dst, Immediate64(static_cast<int64_t>(value)));
   }
 
+  // Loads a 64-bit immediate into a register without using the constant pool.
+  void movq_imm64(Register dst, int64_t value);
+
   void movsxbl(Register dst, Register src);
   void movsxbl(Register dst, Operand src);
   void movsxbq(Register dst, Register src);
@@ -22,7 +22,9 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
 }
 
 void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
-  JumpToInstructionStream(builtin_target);
+  movq_imm64(kScratchRegister, builtin_target);  // 10 bytes
+  jmp(kScratchRegister);                         // 3 bytes
+  STATIC_ASSERT(kJumpTableStubSlotSize == 13);
 }
 
 void JumpTableAssembler::EmitJumpSlot(Address target) {
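
A likely reason for the register-indirect form on x64 (inferred from the "far jumps" remark in the description, not stated explicitly in the CL): a near "jmp rel32" only reaches targets within roughly +/-2 GB of the jump, which the builtin target is not guaranteed to satisfy. A hypothetical range check illustrating the constraint:

  #include <cstdint>
  #include <limits>

  // True if |target| is reachable by a near jump whose last byte ends at |jump_end|.
  bool FitsInRel32(uint64_t jump_end, uint64_t target) {
    int64_t distance = static_cast<int64_t>(target) - static_cast<int64_t>(jump_end);
    return distance >= std::numeric_limits<int32_t>::min() &&
           distance <= std::numeric_limits<int32_t>::max();
  }
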
@@ -47,7 +49,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
 }
 
 void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
-  JumpToInstructionStream(builtin_target);
+  jmp(builtin_target, RelocInfo::NONE);
 }
 
 void JumpTableAssembler::EmitJumpSlot(Address target) {
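
On ia32 every code address fits in 32 bits, so a single 5-byte near jump suffices. A hypothetical encoder for that slot (assumed layout, not the V8 implementation):

  #include <cstdint>
  #include <cstring>

  // Opcode 0xE9 + 32-bit displacement relative to the end of the instruction.
  int EncodeJmpRel32(uint8_t* buf, uint32_t slot_address, uint32_t target) {
    buf[0] = 0xE9;
    int32_t disp = static_cast<int32_t>(target - (slot_address + 5));
    std::memcpy(buf + 1, &disp, sizeof(disp));
    return 5;
  }
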
@@ -158,12 +158,12 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
   static constexpr int kJumpTableLineSize = 64;
   static constexpr int kJumpTableSlotSize = 5;
   static constexpr int kLazyCompileTableSlotSize = 10;
-  static constexpr int kJumpTableStubSlotSize = 18;
+  static constexpr int kJumpTableStubSlotSize = 13;
 #elif V8_TARGET_ARCH_IA32
   static constexpr int kJumpTableLineSize = 64;
   static constexpr int kJumpTableSlotSize = 5;
   static constexpr int kLazyCompileTableSlotSize = 10;
-  static constexpr int kJumpTableStubSlotSize = 10;
+  static constexpr int kJumpTableStubSlotSize = 5;
 #elif V8_TARGET_ARCH_ARM
   static constexpr int kJumpTableLineSize = 3 * kInstrSize;
   static constexpr int kJumpTableSlotSize = 3 * kInstrSize;