Commit 22a16bda authored by Clemens Backes, committed by V8 LUCI CQ

[wasm] Fix return value of lazy compile runtime function

The Runtime_WasmCompileLazy function was returning a ptr-sized address,
wrapped in an Object. This worked because no GC is triggered between the
return from the runtime function and the point where we jump to the
returned address.

In a pointer-compressed world though, generated code assumes that all
objects live in the same 4GB heap, so comparisons only compare the lower
32 bits. On a 64-bit system, this can lead to collisions where a
comparison determines that the returned address equals a heap object,
even though the upper 32 bits differ.
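
For illustration, a minimal standalone sketch of such a collision (the two
addresses below are made up; nothing about them matters except that their
lower 32 bits coincide):

  #include <cstdint>

  int main() {
    // Hypothetical 64-bit values: a returned code entry address and the
    // address of a heap object (e.g. the exception sentinel).
    uint64_t returned_entrypoint = 0x0000557700001234;
    uint64_t heap_object_address = 0x00000ebc00001234;
    // A pointer-compressed comparison only considers the lower 32 bits,
    // so this is true although the full 64-bit values differ.
    bool considered_equal = static_cast<uint32_t>(returned_entrypoint) ==
                            static_cast<uint32_t>(heap_object_address);
    return considered_equal ? 1 : 0;  // returns 1: the bogus match
  }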

This happens occasionally in the wild, where the returned function entry
pointer has the same lower half as the exception sentinel value. This
triggers stack unwinding (in the CEntry stub), which then fails (with a
CHECK) because there is no pending exception.

This CL fixes that by returning a Smi instead, holding the offset in the
jump table to which the kWasmCompileLazy builtin should jump. The
builtin then gets the jump table start address from the instance object,
adds the offset that the runtime function returned, and performs the
jump.
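
In pseudo-C++, the new contract between the runtime function and the builtin
looks roughly as follows (a sketch only: FakeInstance and ComputeJumpTarget
are illustrative stand-ins, not V8 types; the real builtin reads the jump
table start from the WasmInstanceObject field at kJumpTableStartOffset):

  #include <cstdint>

  using Address = uintptr_t;

  struct FakeInstance {
    Address jump_table_start;  // loaded from the instance after the call
  };

  // Runtime side (see the runtime-wasm.cc hunk below): return an offset,
  // i.e. a small integer that is always a valid Smi:
  //   return Smi::FromInt(native_module->GetJumpTableOffset(func_index));

  // Builtin side: reconstruct the full jump target from the untagged Smi.
  Address ComputeJumpTarget(const FakeInstance& instance,
                            int jump_table_offset) {
    return instance.jump_table_start + jump_table_offset;
  }

Because a Smi is tagged differently from a heap object pointer, the returned
value can no longer alias the exception sentinel, even when only the lower
32 bits are compared.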

We do not include a regression test because this failure is highly
sporadic and hard to reproduce.

R=jkummerow@chromium.org

Bug: chromium:1311960
Change-Id: I5a72daf78905904f8ae8ade8630793c42e223984
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3663093
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80729}
parent 99642831
@@ -2666,8 +2666,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in a register by the jump table trampoline.
   // Convert to Smi for the runtime call.
-  __ SmiTag(kWasmCompileLazyFuncIndexRegister,
-            kWasmCompileLazyFuncIndexRegister);
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2697,22 +2696,34 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ stm(db_w, sp, gp_regs);
     __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
 
-    // Pass instance and function index as explicit arguments to the runtime
+    // Push the Wasm instance for loading the jump table address after the
+    // runtime call.
+    __ push(kWasmInstanceRegister);
+
+    // Push the Wasm instance again as an explicit argument to the runtime
     // function.
     __ push(kWasmInstanceRegister);
+    // Push the function index as second argument.
     __ push(kWasmCompileLazyFuncIndexRegister);
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(cp, Smi::zero());
     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
-    // The entrypoint address is the return value.
-    __ mov(r8, kReturnRegister0);
+
+    // The runtime function returns the jump table slot offset as a Smi. Use
+    // that to compute the jump target in r8.
+    __ pop(kWasmInstanceRegister);
+    __ ldr(r8, MemOperand(
+                   kWasmInstanceRegister,
+                   WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
+    __ add(r8, r8, Operand::SmiUntag(kReturnRegister0));
+    // r8 now holds the jump table slot where we want to jump to in the end.
 
     // Restore registers.
     __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
     __ ldm(ia_w, sp, gp_regs);
   }
 
-  // Finally, jump to the entrypoint.
+  // Finally, jump to the jump table slot for the function.
   __ Jump(r8);
 }
@@ -3056,41 +3056,50 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // Sign extend and convert to Smi for the runtime call.
   __ sxtw(kWasmCompileLazyFuncIndexRegister,
           kWasmCompileLazyFuncIndexRegister.W());
-  __ SmiTag(kWasmCompileLazyFuncIndexRegister,
-            kWasmCompileLazyFuncIndexRegister);
-  UseScratchRegisterScope temps(masm);
-  {
-    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
-    // Save all parameter registers (see wasm-linkage.h). They might be
-    // overwritten in the runtime call below. We don't have any callee-saved
-    // registers in wasm, so no need to store anything else.
-    RegList gp_regs;
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+
+  // Compute register lists for parameters to be saved. We save all parameter
+  // registers (see wasm-linkage.h). They might be overwritten in the runtime
+  // call below. We don't have any callee-saved registers in wasm, so no need to
+  // store anything else.
+  constexpr RegList kSavedGpRegs = ([]() constexpr {
+    RegList saved_gp_regs;
     for (Register gp_param_reg : wasm::kGpParamRegisters) {
-      gp_regs.set(gp_param_reg);
+      saved_gp_regs.set(gp_param_reg);
     }
     // Also push x1, because we must push multiples of 16 bytes (see
     // {TurboAssembler::PushCPURegList}.
-    CHECK_EQ(1, gp_regs.Count() % 2);
-    gp_regs.set(x1);
-    CHECK_EQ(0, gp_regs.Count() % 2);
+    saved_gp_regs.set(x1);
+    // All set registers were unique.
+    CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1);
+    // We push a multiple of 16 bytes.
+    CHECK_EQ(0, saved_gp_regs.Count() % 2);
+    // The Wasm instance must be part of the saved registers.
+    CHECK(saved_gp_regs.has(kWasmInstanceRegister));
+    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+             saved_gp_regs.Count());
+    return saved_gp_regs;
+  })();
 
-    DoubleRegList fp_regs;
+  constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
+    DoubleRegList saved_fp_regs;
     for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
-      fp_regs.set(fp_param_reg);
+      saved_fp_regs.set(fp_param_reg);
     }
-    CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1);
-    CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
-    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
-             gp_regs.Count());
+    CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
-             fp_regs.Count());
+             saved_fp_regs.Count());
+    return saved_fp_regs;
+  })();
 
-    __ PushXRegList(gp_regs);
-    __ PushQRegList(fp_regs);
+  {
+    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
+    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+
+    // Save registers that we need to keep alive across the runtime call.
+    __ PushXRegList(kSavedGpRegs);
+    __ PushQRegList(kSavedFpRegs);
 
     // Pass instance and function index as explicit arguments to the runtime
     // function.
@@ -3100,17 +3109,23 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ Mov(cp, Smi::zero());
     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
-    // Exclude x17 from the scope, there are hardcoded uses of it below.
-    temps.Exclude(x17);
-    // The entrypoint address is the return value.
-    __ Mov(x17, kReturnRegister0);
+
+    // Untag the returned Smi into x17, for later use.
+    static_assert(!kSavedGpRegs.has(x17));
+    __ SmiUntag(x17, kReturnRegister0);
 
     // Restore registers.
-    __ PopQRegList(fp_regs);
-    __ PopXRegList(gp_regs);
+    __ PopQRegList(kSavedFpRegs);
+    __ PopXRegList(kSavedGpRegs);
   }
 
-  // Finally, jump to the entrypoint.
+  // The runtime function returned the jump table slot offset as a Smi (now in
+  // x17). Use that to compute the jump target.
+  static_assert(!kSavedGpRegs.has(x18));
+  __ ldr(x18, MemOperand(
+                  kWasmInstanceRegister,
+                  WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
+  __ add(x17, x18, Operand(x17));
+
+  // Finally, jump to the jump table slot for the function.
   __ Jump(x17);
 }
@@ -2933,20 +2933,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
       offset += kSimd128Size;
     }
 
-    // Push the Wasm instance as an explicit argument to WasmCompileLazy.
+    // Push the Wasm instance for loading the jump table address after the
+    // runtime call.
+    __ Push(kWasmInstanceRegister);
+
+    // Push the Wasm instance again as an explicit argument to the runtime
+    // function.
     __ Push(kWasmInstanceRegister);
     // Push the function index as second argument.
     __ Push(kWasmCompileLazyFuncIndexRegister);
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
     {
       // At this point, ebx has been spilled to the stack but is not yet
       // overwritten with another value. We can still use it as kRootRegister.
       __ CallRuntime(Runtime::kWasmCompileLazy, 2);
     }
-    // The entrypoint address is the return value.
-    __ mov(edi, kReturnRegister0);
+
+    // The runtime function returns the jump table slot offset as a Smi. Use
+    // that to compute the jump target in edi.
+    __ Pop(kWasmInstanceRegister);
+    __ mov(edi, MemOperand(kWasmInstanceRegister,
+                           WasmInstanceObject::kJumpTableStartOffset -
+                               kHeapObjectTag));
+    __ SmiUntag(kReturnRegister0);
+    __ add(edi, kReturnRegister0);
+    // edi now holds the jump table slot where we want to jump to in the end.
 
     // Restore registers.
     for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
@@ -2959,7 +2967,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
       __ Pop(reg);
     }
   }
-  // Finally, jump to the entrypoint.
+
+  // Finally, jump to the jump table slot for the function.
   __ jmp(edi);
 }
@@ -2816,6 +2816,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   __ Pop(r15);
   // Convert to Smi for the runtime call.
   __ SmiTag(r15);
+
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2839,7 +2840,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
       offset += kSimd128Size;
     }
 
-    // Push the Wasm instance as an explicit argument to WasmCompileLazy.
+    // Push the Wasm instance for loading the jump table address after the
+    // runtime call.
+    __ Push(kWasmInstanceRegister);
+
+    // Push the Wasm instance again as an explicit argument to the runtime
+    // function.
     __ Push(kWasmInstanceRegister);
     // Push the function index as second argument.
     __ Push(r15);
@@ -2847,8 +2853,15 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
-    // The entrypoint address is the return value.
-    __ movq(r15, kReturnRegister0);
+
+    // The runtime function returns the jump table slot offset as a Smi. Use
+    // that to compute the jump target in r15.
+    __ Pop(kWasmInstanceRegister);
+    __ movq(r15, MemOperand(kWasmInstanceRegister,
+                            wasm::ObjectAccess::ToTagged(
+                                WasmInstanceObject::kJumpTableStartOffset)));
+    __ SmiUntag(kReturnRegister0);
+    __ addq(r15, kReturnRegister0);
+    // r15 now holds the jump table slot where we want to jump to in the end.
 
     // Restore registers.
     for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
@@ -2861,7 +2874,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
       __ Pop(reg);
     }
   }
-  // Finally, jump to the entrypoint.
+
+  // Finally, jump to the jump table slot for the function.
  __ jmp(r15);
 }
@@ -237,14 +237,11 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
           isolate, instance->module_object().native_module(), func_index);
     }
     DCHECK(isolate->has_pending_exception());
-    return ReadOnlyRoots(isolate).exception();
+    return ReadOnlyRoots{isolate}.exception();
   }
 
-  Address entrypoint =
-      instance->module_object().native_module()->GetCallTargetForFunction(
-          func_index);
-  return Object(entrypoint);
+  auto* native_module = instance->module_object().native_module();
+  return Smi::FromInt(native_module->GetJumpTableOffset(func_index));
 }
 
 namespace {
@@ -143,6 +143,8 @@ constexpr DoubleRegister kFpReturnRegisters[] = {};
 // The parameter index where the instance parameter should be placed in wasm
 // call descriptors. This is used by the Int64Lowering::LowerNode method.
 constexpr int kWasmInstanceParameterIndex = 0;
+static_assert(kWasmInstanceRegister ==
+              kGpParamRegisters[kWasmInstanceParameterIndex]);
 
 class LinkageAllocator {
  public: