Commit c2d46fe9 authored by Thibaud Michaud, committed by V8 LUCI CQ

[wasm] Keep call_indirect index on the stack

When a call_indirect fails because of a signature mismatch or a null
target, the value stack generated for debugging no longer contains the
target index, which makes it hard for users to understand the error.

Keep the index on the stack, and ensure that it is not modified until we
generate the debug info. Previously, the index was shifted in place to
compute various offsets. Instead, use scaled loads to compute the offset
directly in the load instruction.
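
The idea in one self-contained C++ sketch (a model of the addressing math, not V8 code): scaling the index in place destroys the value users need to see, while folding the scale into the load's addressing mode leaves it intact.

#include <cassert>
#include <cstdint>

// Old scheme: shift the index register in place, then use it unscaled.
uint64_t old_scheme(uint64_t base, uint64_t& index, unsigned size_log_2) {
  index <<= size_log_2;  // clobbers the index
  return base + index;
}

// New scheme: scaled addressing, e.g. [base + (index << shift)].
uint64_t new_scheme(uint64_t base, uint64_t index, unsigned size_log_2) {
  return base + (index << size_log_2);  // index is left unmodified
}

int main() {
  uint64_t base = 0x1000, index = 3;
  uint64_t idx_copy = index;
  assert(old_scheme(base, idx_copy, 2) == new_scheme(base, index, 2));
  assert(index == 3);      // still holds the call_indirect index for debugging
  assert(idx_copy == 12);  // the old scheme destroyed it
  return 0;
}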

R=clemensb@chromium.org

Bug: chromium:1350384
Change-Id: Iad5359ec80deef25a69ac119119a0b5ca559a336
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3854309
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82780}
parent e89d0061
@@ -8,6 +8,7 @@
 #include "src/base/platform/wrappers.h"
 #include "src/base/v8-fallthrough.h"
 #include "src/codegen/arm/register-arm.h"
+#include "src/common/globals.h"
 #include "src/heap/memory-chunk.h"
 #include "src/wasm/baseline/liftoff-assembler.h"
 #include "src/wasm/baseline/liftoff-register.h"
@@ -90,11 +91,17 @@ inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
 inline MemOperand GetMemOp(LiftoffAssembler* assm,
                            UseScratchRegisterScope* temps, Register addr,
-                           Register offset, int32_t offset_imm) {
+                           Register offset, int32_t offset_imm,
+                           unsigned shift_amount = 0) {
   if (offset != no_reg) {
-    if (offset_imm == 0) return MemOperand(addr, offset);
+    if (offset_imm == 0) return MemOperand(addr, offset, LSL, shift_amount);
     Register tmp = temps->Acquire();
-    assm->add(tmp, offset, Operand(offset_imm));
+    if (shift_amount == 0) {
+      assm->add(tmp, offset, Operand(offset_imm));
+    } else {
+      assm->lsl(tmp, offset, Operand(shift_amount));
+      assm->add(tmp, tmp, Operand(offset_imm));
+    }
     return MemOperand(addr, tmp);
   }
   return MemOperand(addr, offset_imm);
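A hedged model (standalone C++, not V8 code) of the ARM path above: a register offset can be shifted for free via LSL in the addressing mode, but 32-bit ARM has no [base + (reg << n) + imm] mode, so a nonzero immediate forces the shifted offset to be materialized in a scratch register first.

#include <cassert>
#include <cstdint>

uint32_t arm_effective_address(uint32_t addr, uint32_t offset,
                               int32_t offset_imm, unsigned shift_amount) {
  if (offset_imm == 0) {
    return addr + (offset << shift_amount);  // MemOperand(addr, offset, LSL, n)
  }
  // tmp = (offset << shift) + imm, then plain MemOperand(addr, tmp).
  uint32_t tmp = shift_amount == 0 ? offset : (offset << shift_amount);
  tmp += static_cast<uint32_t>(offset_imm);
  return addr + tmp;
}

int main() {
  assert(arm_effective_address(0x1000, 5, 0, 2) == 0x1000 + (5u << 2));
  assert(arm_effective_address(0x1000, 5, 8, 2) == 0x1000 + (5u << 2) + 8);
  return 0;
}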
@@ -644,12 +651,17 @@ namespace liftoff {
 inline void LoadInternal(LiftoffAssembler* lasm, LiftoffRegister dst,
                          Register src_addr, Register offset_reg,
                          int32_t offset_imm, LoadType type,
-                         uint32_t* protected_load_pc = nullptr) {
+                         uint32_t* protected_load_pc = nullptr,
+                         bool needs_shift = false) {
+  unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
   DCHECK_IMPLIES(type.value_type() == kWasmI64, dst.is_gp_pair());
   UseScratchRegisterScope temps(lasm);
   if (type.value() == LoadType::kF64Load ||
       type.value() == LoadType::kF32Load ||
       type.value() == LoadType::kS128Load) {
+    // Remove the DCHECK and implement scaled offsets for these types if needed.
+    // For now this path is never used.
+    DCHECK(!needs_shift);
     Register actual_src_addr = liftoff::CalculateActualAddress(
         lasm, &temps, src_addr, offset_reg, offset_imm);
     if (type.value() == LoadType::kF64Load) {
@@ -671,8 +683,8 @@ inline void LoadInternal(LiftoffAssembler* lasm, LiftoffRegister dst,
                  NeonMemOperand(actual_src_addr));
     }
   } else {
-    MemOperand src_op =
-        liftoff::GetMemOp(lasm, &temps, src_addr, offset_reg, offset_imm);
+    MemOperand src_op = liftoff::GetMemOp(lasm, &temps, src_addr, offset_reg,
+                                          offset_imm, shift_amount);
     if (protected_load_pc) *protected_load_pc = __ pc_offset();
     switch (type.value()) {
       case LoadType::kI32Load8U:
@@ -737,10 +749,10 @@ inline void LoadInternal(LiftoffAssembler* lasm, LiftoffRegister dst,
 void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
                                          Register offset_reg,
-                                         int32_t offset_imm) {
+                                         int32_t offset_imm, bool needs_shift) {
   static_assert(kTaggedSize == kInt32Size);
   liftoff::LoadInternal(this, LiftoffRegister(dst), src_addr, offset_reg,
-                        offset_imm, LoadType::kI32Load);
+                        offset_imm, LoadType::kI32Load, nullptr, needs_shift);
 }
 
 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -793,12 +805,13 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, uint32_t* protected_load_pc,
-                            bool /* is_load_mem */, bool /* i64_offset */) {
+                            bool /* is_load_mem */, bool /* i64_offset */,
+                            bool needs_shift) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   liftoff::LoadInternal(this, dst, src_addr, offset_reg,
                         static_cast<int32_t>(offset_imm), type,
-                        protected_load_pc);
+                        protected_load_pc, needs_shift);
 }
 
 void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
...
@@ -131,15 +131,16 @@ template <typename T>
 inline MemOperand GetMemOp(LiftoffAssembler* assm,
                            UseScratchRegisterScope* temps, Register addr,
                            Register offset, T offset_imm,
-                           bool i64_offset = false) {
+                           bool i64_offset = false, unsigned shift_amount = 0) {
   if (!offset.is_valid()) return MemOperand(addr.X(), offset_imm);
   Register effective_addr = addr.X();
   if (offset_imm) {
     effective_addr = temps->AcquireX();
     assm->Add(effective_addr, addr.X(), offset_imm);
   }
-  return i64_offset ? MemOperand(effective_addr, offset.X())
-                    : MemOperand(effective_addr, offset.W(), UXTW);
+  return i64_offset
+             ? MemOperand(effective_addr, offset.X(), LSL, shift_amount)
+             : MemOperand(effective_addr, offset.W(), UXTW, shift_amount);
 }
 
 // Compute the effective address (sum of |addr|, |offset| (if given) and
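On ARM64 both forms fit a single addressing mode. A hedged standalone model (not V8 code) of the operand semantics above: a 32-bit offset register is zero-extended (UXTW) before the shift, a 64-bit one is shifted directly (LSL).

#include <cassert>
#include <cstdint>

uint64_t arm64_effective_address(uint64_t base, uint64_t offset,
                                 bool i64_offset, unsigned shift_amount) {
  uint64_t ext = i64_offset ? offset                   // LSL on the X register
                            : (offset & 0xFFFFFFFFu);  // UXTW on the W register
  return base + (ext << shift_amount);
}

int main() {
  // A 32-bit offset with garbage in the high bits is cleaned by UXTW.
  uint64_t dirty = 0xDEADBEEF00000007ull;
  assert(arm64_effective_address(0x1000, dirty, false, 3) ==
         0x1000 + (7u << 3));
  assert(arm64_effective_address(0x1000, 7, true, 3) == 0x1000 + (7u << 3));
  return 0;
}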
@@ -470,10 +471,11 @@ void LiftoffAssembler::ResetOSRTarget() {}
 void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
                                          Register offset_reg,
-                                         int32_t offset_imm) {
+                                         int32_t offset_imm, bool needs_shift) {
   UseScratchRegisterScope temps(this);
-  MemOperand src_op =
-      liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+  unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
+  MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
+                                        offset_imm, false, shift_amount);
   LoadTaggedPointerField(dst, src_op);
 }
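A quick standalone check (not V8 code) of the shift chosen above: tagged fields are 4 bytes with pointer compression and 8 bytes without, so the index must be scaled by 2 or 3 bits respectively.

#include <cassert>
#include <initializer_list>

int main() {
  for (bool compress_pointers : {true, false}) {
    unsigned tagged_size = compress_pointers ? 4 : 8;
    unsigned shift_amount = compress_pointers ? 2 : 3;
    assert((1u << shift_amount) == tagged_size);
  }
  return 0;
}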
@@ -527,10 +529,12 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, uint32_t* protected_load_pc,
-                            bool /* is_load_mem */, bool i64_offset) {
+                            bool /* is_load_mem */, bool i64_offset,
+                            bool needs_shift) {
   UseScratchRegisterScope temps(this);
+  unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
   MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
-                                        offset_imm, i64_offset);
+                                        offset_imm, i64_offset, shift_amount);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
     case LoadType::kI32Load8U:
...
@@ -385,11 +385,12 @@ void LiftoffAssembler::ResetOSRTarget() {}
 void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
                                          Register offset_reg,
-                                         int32_t offset_imm) {
+                                         int32_t offset_imm, bool needs_shift) {
   DCHECK_GE(offset_imm, 0);
   static_assert(kTaggedSize == kInt32Size);
   Load(LiftoffRegister(dst), src_addr, offset_reg,
-       static_cast<uint32_t>(offset_imm), LoadType::kI32Load);
+       static_cast<uint32_t>(offset_imm), LoadType::kI32Load, nullptr, false,
+       false, needs_shift);
 }
 
 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -434,13 +435,17 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, uint32_t* protected_load_pc,
-                            bool /* is_load_mem */, bool i64_offset) {
+                            bool /* is_load_mem */, bool i64_offset,
+                            bool needs_shift) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());
-  Operand src_op = offset_reg == no_reg
-                       ? Operand(src_addr, offset_imm)
-                       : Operand(src_addr, offset_reg, times_1, offset_imm);
+  static_assert(times_4 == 2);
+  ScaleFactor scale_factor =
+      !needs_shift ? times_1 : static_cast<ScaleFactor>(type.size_log_2());
+  Operand src_op = offset_reg == no_reg ? Operand(src_addr, offset_imm)
+                                        : Operand(src_addr, offset_reg,
+                                                  scale_factor, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
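A standalone sketch of the invariant behind `static_assert(times_4 == 2)`: V8's x86 ScaleFactor enumerators equal the log2 of the scale, which is what makes the direct cast from `size_log_2()` valid. The enum is redeclared here for the demo; the values mirror V8's.

#include <cassert>

enum ScaleFactor { times_1 = 0, times_2 = 1, times_4 = 2, times_8 = 3 };

int main() {
  // A 4-byte load has size_log_2() == 2, which is exactly times_4.
  int size_log_2 = 2;
  ScaleFactor scale = static_cast<ScaleFactor>(size_log_2);
  assert(scale == times_4);
  // Scaled operand math: base + (index << scale) + displacement.
  unsigned base = 0x100, index = 3, disp = 8;
  assert(base + (index << scale) + disp == 0x100 + 12 + 8);
  return 0;
}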
...
@@ -774,7 +774,8 @@ class LiftoffAssembler : public TurboAssembler {
   inline void SpillInstance(Register instance);
   inline void ResetOSRTarget();
   inline void LoadTaggedPointer(Register dst, Register src_addr,
-                                Register offset_reg, int32_t offset_imm);
+                                Register offset_reg, int32_t offset_imm,
+                                bool offset_reg_needs_shift = false);
   inline void LoadFullPointer(Register dst, Register src_addr,
                               int32_t offset_imm);
   enum SkipWriteBarrier : bool {
@@ -808,7 +809,8 @@ class LiftoffAssembler : public TurboAssembler {
   inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                    uintptr_t offset_imm, LoadType type,
                    uint32_t* protected_load_pc = nullptr,
-                   bool is_load_mem = false, bool i64_offset = false);
+                   bool is_load_mem = false, bool i64_offset = false,
+                   bool needs_shift = false);
   inline void Store(Register dst_addr, Register offset_reg,
                     uintptr_t offset_imm, LiftoffRegister src, StoreType type,
                     LiftoffRegList pinned,
...
@@ -7133,8 +7133,7 @@ class LiftoffCompiler {
       if (!CheckSupportedType(decoder, ret, "return")) return;
     }
 
-    // Pop the index. We'll modify the register's contents later.
-    Register index = __ PopToModifiableRegister().gp();
+    Register index = __ PeekToRegister(0, {}).gp();
 
     LiftoffRegList pinned{index};
     // Get all temporary registers unconditionally up front.
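This is the heart of the fix: peeking instead of popping keeps the index slot on Liftoff's value stack, so debug side-table entries generated later still describe it. A minimal model (not the Liftoff API) of the difference:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> value_stack = {47, 5};  // {kConst, index}

  // Old behavior: pop the index before generating debug info.
  std::vector<int> popped = value_stack;
  popped.pop_back();
  assert(popped.size() == 1);  // the index is gone from any later snapshot

  // New behavior: peek at depth 0; the index stays visible.
  int index = value_stack.back();
  assert(index == 5 && value_stack.size() == 2);
  return 0;
}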
@@ -7186,10 +7185,9 @@ class LiftoffCompiler {
                                     WasmIndirectFunctionTable::kSigIdsOffset),
                 kPointerLoadType);
     }
-    // Shift {index} by 2 (multiply by 4) to represent kInt32Size items.
     static_assert((1 << 2) == kInt32Size);
-    __ emit_i32_shli(index, index, 2);
-    __ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load);
+    __ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
+            nullptr, false, false, true);
 
     // Compare against expected signature.
     if (v8_flags.wasm_type_canonicalization) {
@@ -7207,19 +7205,14 @@ class LiftoffCompiler {
     Label* sig_mismatch_label =
         AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch);
+    __ DropValues(1);
     {
       FREEZE_STATE(trapping);
       __ emit_cond_jump(kUnequal, sig_mismatch_label, kPointerKind, scratch,
                         tmp_const, trapping);
     }
-    // At this point {index} has already been multiplied by 4.
     CODE_COMMENT("Execute indirect call");
-    if (kTaggedSize != kInt32Size) {
-      DCHECK_EQ(kTaggedSize, kInt32Size * 2);
-      // Multiply {index} by another 2 to represent kTaggedSize items.
-      __ emit_i32_add(index, index, index);
-    }
     // At this point {index} has already been multiplied by kTaggedSize.
 
     // Load the instance from {instance->ift_instances[key]}
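Note the ordering of `__ DropValues(1)`: it runs after the out-of-line traps have registered their debug snapshots (which therefore still include the index) but before the fall-through call path, which no longer needs the slot. A standalone model (not V8 code) of that ordering:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> value_stack = {47, 5};  // {kConst, index}

  // AddOutOfLineTrap: the trap's debug snapshot still sees the index.
  std::vector<int> trap_snapshot = value_stack;
  assert(trap_snapshot.size() == 2);

  // __ DropValues(1): the call itself proceeds without the index slot.
  value_stack.pop_back();
  assert(value_stack.size() == 1);
  return 0;
}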
@@ -7231,14 +7224,8 @@ class LiftoffCompiler {
           wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset));
     }
     __ LoadTaggedPointer(tmp_const, table, index,
-                         ObjectAccess::ElementOffsetInTaggedFixedArray(0));
-    if (kTaggedSize != kSystemPointerSize) {
-      DCHECK_EQ(kSystemPointerSize, kTaggedSize * 2);
-      // Multiply {index} by another 2 to represent kSystemPointerSize items.
-      __ emit_i32_add(index, index, index);
-    }
-    // At this point {index} has already been multiplied by kSystemPointerSize.
+                         ObjectAccess::ElementOffsetInTaggedFixedArray(0),
+                         true);
 
     Register* explicit_instance = &tmp_const;
@@ -7252,7 +7239,8 @@ class LiftoffCompiler {
                                     WasmIndirectFunctionTable::kTargetsOffset),
                 kPointerLoadType);
     }
-    __ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType);
+    __ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
+            nullptr, false, false, true);
 
     auto call_descriptor =
         compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
...
@@ -81,18 +81,19 @@ constexpr Operand kInstanceOperand = GetStackSlot(kInstanceOffset);
 constexpr Operand kOSRTargetSlot = GetStackSlot(kOSRTargetOffset);
 
 inline Operand GetMemOp(LiftoffAssembler* assm, Register addr,
-                        Register offset_reg, uintptr_t offset_imm) {
+                        Register offset_reg, uintptr_t offset_imm,
+                        ScaleFactor scale_factor = times_1) {
   if (is_uint31(offset_imm)) {
     int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
     return offset_reg == no_reg
                ? Operand(addr, offset_imm32)
-               : Operand(addr, offset_reg, times_1, offset_imm32);
+               : Operand(addr, offset_reg, scale_factor, offset_imm32);
   }
   // Offset immediate does not fit in 31 bits.
   Register scratch = kScratchRegister;
   assm->TurboAssembler::Move(scratch, offset_imm);
   if (offset_reg != no_reg) assm->addq(scratch, offset_reg);
-  return Operand(addr, scratch, times_1, 0);
+  return Operand(addr, scratch, scale_factor, 0);
 }
 
 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
@@ -381,11 +382,15 @@ void LiftoffAssembler::ResetOSRTarget() {
 void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
                                          Register offset_reg,
-                                         int32_t offset_imm) {
+                                         int32_t offset_imm, bool needs_shift) {
   DCHECK_GE(offset_imm, 0);
   if (offset_reg != no_reg) AssertZeroExtended(offset_reg);
-  Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg,
-                                     static_cast<uint32_t>(offset_imm));
+  ScaleFactor scale_factor = !needs_shift           ? times_1
+                             : COMPRESS_POINTERS_BOOL ? times_4
+                                                      : times_8;
+  Operand src_op =
+      liftoff::GetMemOp(this, src_addr, offset_reg,
+                        static_cast<uint32_t>(offset_imm), scale_factor);
   LoadTaggedPointerField(dst, src_op);
 }
@@ -440,9 +445,14 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, uint32_t* protected_load_pc,
-                            bool /* is_load_mem */, bool i64_offset) {
+                            bool /* is_load_mem */, bool i64_offset,
+                            bool needs_shift) {
   if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
-  Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+  static_assert(times_4 == 2);
+  ScaleFactor scale_factor =
+      needs_shift ? static_cast<ScaleFactor>(type.size_log_2()) : times_1;
+  Operand src_op =
+      liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, scale_factor);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
     case LoadType::kI32Load8U:
...
@@ -355,8 +355,9 @@ TEST(Liftoff_debug_side_table_indirect_call) {
   constexpr int kConst = 47;
   auto debug_side_table = env.GenerateDebugSideTable(
       {kWasmI32}, {kWasmI32},
-      {WASM_I32_ADD(WASM_CALL_INDIRECT(0, WASM_I32V_1(47), WASM_LOCAL_GET(0)),
-                    WASM_LOCAL_GET(0))});
+      {WASM_I32_ADD(
+          WASM_CALL_INDIRECT(0, WASM_I32V_1(kConst), WASM_LOCAL_GET(0)),
+          WASM_LOCAL_GET(0))});
   CheckDebugSideTable(
       {
           // function entry, local in register.
@@ -365,10 +366,11 @@ TEST(Liftoff_debug_side_table_indirect_call) {
           {1, {Stack(0, kWasmI32)}},
           // OOL stack check, local still spilled.
          {1, {}},
-          // OOL trap (invalid index), local still spilled, stack has {kConst}.
-          {2, {Constant(1, kWasmI32, kConst)}},
+          // OOL trap (invalid index), local still spilled, stack has {kConst,
+          // kStack}.
+          {3, {Constant(1, kWasmI32, kConst), Stack(2, kWasmI32)}},
           // OOL trap (sig mismatch), stack unmodified.
-          {2, {}},
+          {3, {}},
      },
      debug_side_table.get());
 }
...