Commit cb631803 authored by Zhao Jiazhong, committed by Commit Bot

[mips] Allow concurrent patching of the jump table.

Bug: v8:8974
Change-Id: Ib1e1c84b79190359d5ad519509b881e93d519604
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1989323
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Auto-Submit: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65697}
parent b05c5896
@@ -4019,6 +4019,25 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
   Call(builtin_index);
 }
 
+void TurboAssembler::PatchAndJump(Address target) {
+  if (kArchVariant != kMips32r6) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    mov(scratch, ra);
+    bal(1);                                  // jump to lw
+    nop();                                   // in the delay slot
+    lw(t9, MemOperand(ra, kInstrSize * 3));  // ra == pc_
+    jr(t9);
+    mov(ra, scratch);  // in delay slot
+    DCHECK_EQ(reinterpret_cast<uint32_t>(pc_) % 8, 0);
+    *reinterpret_cast<uint32_t*>(pc_) = target;
+    pc_ += sizeof(uint32_t);
+  } else {
+    // TODO(mips r6): Implement.
+    UNIMPLEMENTED();
+  }
+}
+
 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
   // This generates the final instruction sequence for calls to C functions
   // once an exit frame has been constructed.
......
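The sequence above is position-independent and its instructions never change after emission: bal(1) deposits the address of the lw into ra, the lw fetches the real jump target from a data word placed three instructions further on (ra + 12), and jr jumps there while the delay-slot mov restores ra. Concurrent patching therefore reduces to rewriting that one aligned data word. A minimal sketch of such a patcher, assuming the slot layout above (PatchJumpSlot and kTargetWordOffset are hypothetical names, not V8 code):

#include <atomic>
#include <cstdint>

// Hypothetical helper (not V8 code): the data word sits behind the six
// emitted instructions -- mov, bal, nop, lw, jr, mov -- i.e. at offset 24.
constexpr uintptr_t kTargetWordOffset = 6 * 4;

void PatchJumpSlot(uintptr_t slot_address, uint32_t new_target) {
  auto* word = reinterpret_cast<std::atomic<uint32_t>*>(slot_address + kTargetWordOffset);
  // PatchAndJump's DCHECK guarantees the word is naturally aligned, so this
  // single store cannot tear: a thread running through the slot observes
  // either the old target or the new one, never a mix.
  word->store(new_target, std::memory_order_relaxed);
}

Because the patch touches data rather than instructions, the update itself needs no instruction-cache flush.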
@@ -198,6 +198,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Jump(Register target, const Operand& offset, COND_ARGS);
   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  // Unlike li, this method saves the target address to memory and then loads
+  // it into a register with lw, so it can be used in the wasm jump table for
+  // concurrent patching.
+  void PatchAndJump(Address target);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(const ExternalReference& reference) override;
   void Call(Register target, int16_t offset = 0, COND_ARGS);
......
@@ -4302,6 +4302,25 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
   Call(builtin_index);
 }
 
+void TurboAssembler::PatchAndJump(Address target) {
+  if (kArchVariant != kMips64r6) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    mov(scratch, ra);
+    bal(1);                                  // jump to ld
+    nop();                                   // in the delay slot
+    ld(t9, MemOperand(ra, kInstrSize * 3));  // ra == pc_
+    jr(t9);
+    mov(ra, scratch);  // in delay slot
+    DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0);
+    *reinterpret_cast<uint64_t*>(pc_) = target;  // pc_ must be 8-byte aligned.
+    pc_ += sizeof(uint64_t);
+  } else {
+    // TODO(mips r6): Implement.
+    UNIMPLEMENTED();
+  }
+}
+
 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
   // This generates the final instruction sequence for calls to C functions
   // once an exit frame has been constructed.
......
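The 64-bit variant works the same way; the only difference is the 8-byte target word, which is why the DCHECK insists on 8-byte alignment of pc_. On MIPS64 a naturally aligned doubleword store is single-copy atomic, while a misaligned one could tear and briefly expose a half-patched address to a concurrently executing thread. A small check of that invariant (CanPatchAtomically is a hypothetical helper, not V8 code):

#include <cstdint>

// Hypothetical check (not V8 code): the target word starts right after the
// six 4-byte instructions, at slot_start + 24. Because 24 % 8 == 0, an
// 8-byte-aligned slot keeps the word naturally aligned, and a naturally
// aligned doubleword store on MIPS64 cannot tear.
bool CanPatchAtomically(uintptr_t slot_start) {
  uintptr_t word_address = slot_start + 6 * 4;
  return word_address % sizeof(uint64_t) == 0;
}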
@@ -222,6 +222,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Jump(Register target, COND_ARGS);
   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  // Unlike li, this method saves the target address to memory and then loads
+  // it into a register with ld, so it can be used in the wasm jump table for
+  // concurrent patching.
+  void PatchAndJump(Address target);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(const ExternalReference& reference) override;
   void Call(Register target, COND_ARGS);
......
@@ -235,7 +235,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
 }
 
 bool JumpTableAssembler::EmitJumpSlot(Address target) {
-  Jump(target, RelocInfo::NONE);
+  PatchAndJump(target);
   return true;
 }
......
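Previously EmitJumpSlot materialized the target with Jump, which on MIPS loads the address via a multi-instruction immediate sequence (lui/ori on mips, longer still on mips64). The immediates live in separate instructions, so a patcher cannot swap the target in one atomic step: a thread racing through the slot can observe the new upper half paired with the old lower half. A small standalone demonstration of that torn-address hazard (hypothetical illustration for the 32-bit case, not V8 code):

#include <cstdint>
#include <cstdio>

// What a lui/ori pair computes: upper 16 bits from lui, lower 16 from ori.
uint32_t Materialize(uint16_t hi, uint16_t lo) {
  return (static_cast<uint32_t>(hi) << 16) | lo;
}

int main() {
  uint32_t old_target = 0x12345678;
  uint32_t new_target = 0xABCD9876;
  // Racy interleaving: the patcher has rewritten the lui immediate but not
  // yet the ori immediate when another thread executes the slot.
  uint32_t torn = Materialize(static_cast<uint16_t>(new_target >> 16),
                              static_cast<uint16_t>(old_target & 0xFFFF));
  std::printf("0x%08X\n", static_cast<unsigned>(torn));  // 0xABCD5678: neither target
}

PatchAndJump avoids this by keeping every instruction in the slot fixed and moving the variable part into a data word that a single aligned store can replace.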
@@ -202,13 +202,13 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
   static constexpr int kFarJumpTableSlotSize = 7 * kInstrSize;
   static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize;
 #elif V8_TARGET_ARCH_MIPS
-  static constexpr int kJumpTableLineSize = 6 * kInstrSize;
-  static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
+  static constexpr int kJumpTableLineSize = 8 * kInstrSize;
+  static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
   static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
   static constexpr int kLazyCompileTableSlotSize = 6 * kInstrSize;
 #elif V8_TARGET_ARCH_MIPS64
   static constexpr int kJumpTableLineSize = 8 * kInstrSize;
-  static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+  static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
   static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
   static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
 #else
......
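The new slot sizes fall straight out of the PatchAndJump layout: six 4-byte instructions followed by the target word (4 bytes on mips, 8 on mips64), padded so every slot, and hence every embedded target word, stays 8-byte aligned; kJumpTableLineSize on mips is raised to 8 * kInstrSize so a line remains a whole multiple of the slot size. A quick sanity check of that arithmetic (hypothetical names, not V8 code):

// Hypothetical sanity check (not V8 code) of the new slot sizes.
constexpr int kInstrSize = 4;       // MIPS instruction width
constexpr int kSequenceInstrs = 6;  // mov, bal, nop, lw/ld, jr, mov

// Slot = instructions + target word, rounded up to 8-byte alignment so the
// next slot's data word stays aligned.
constexpr int SlotSize(int target_bytes) {
  return (kSequenceInstrs * kInstrSize + target_bytes + 7) / 8 * 8;
}

static_assert(SlotSize(4) == 8 * kInstrSize, "mips: 28 bytes pad to 32");
static_assert(SlotSize(8) == 8 * kInstrSize, "mips64: exactly 32 bytes");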
@@ -329,12 +329,6 @@
   'test-run-wasm-atomics64/*': [SKIP],
 }], # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64'
 
-##############################################################################
-['arch == mips64el or arch == mipsel', {
-  # TODO(mips-team): support concurrent patching of jump table.
-  'test-jump-table-assembler/*': [SKIP],
-}], # 'arch == mips64el or arch == mipsel'
-
 ##############################################################################
 ['mips_arch_variant == r6', {
   # For MIPS[64] architecture release 6, fusion multiply-accumulate instructions
......