Commit 96a0677a authored by Clemens Backes, committed by Commit Bot

[Liftoff] Use ValueKind instead of ValueType

The precise type is only used for validation. For code generation,
knowing the kind is more than enough. Hence, only store and pass the
ValueKind in Liftoff, and not the full ValueType.

R=manoskouk@chromium.org

Bug: v8:11477
Change-Id: Ia42c0fa419f75b508bd2f210c767b631e93d3398
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2707170
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72997}
parent 23fa9ffd
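
Background for the mechanical renames below, as a minimal sketch (simplified and partly assumed; the real declarations live in src/wasm/value-type.h and carry more state, e.g. heap-type information and nullability for reference types):

// Sketch only, not the actual V8 declarations. ValueKind is the plain tag
// that code generation needs (register class, slot size, spill/fill width);
// ValueType wraps a kind plus validation-only detail. The field name
// heap_type_ below is a placeholder for illustration.
enum ValueKind : uint8_t {
  kStmt, kBottom, kI32, kI64, kF32, kF64, kS128, kRef, kOptRef, kRtt
};

class ValueType {
 public:
  constexpr ValueKind kind() const { return kind_; }

 private:
  ValueKind kind_;
  uint32_t heap_type_;  // Needed for validating kRef/kOptRef/kRtt only.
};

// Helpers that used to be ValueType methods become free functions over
// ValueKind, which is what the call sites in this diff switch to:
constexpr bool is_reference_type(ValueKind kind) {
  return kind == kRef || kind == kOptRef || kind == kRtt;
}

With that split, a signature like Fill(LiftoffRegister, int, ValueType) can shrink to Fill(LiftoffRegister, int, ValueKind), and a constant like kWasmI64 becomes the bare kind kI64 at every Liftoff call site.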
@@ -293,14 +293,14 @@ inline void F64x2Compare(LiftoffAssembler* assm, LiftoffRegister dst,
 }
 
 inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
-                  ValueType type) {
+                  ValueKind kind) {
 #ifdef DEBUG
   // The {str} instruction needs a temp register when the immediate in the
   // provided MemOperand does not fit into 12 bits. This happens for large stack
   // frames. This DCHECK checks that the temp register is available when needed.
   DCHECK(UseScratchRegisterScope{assm}.CanAcquire());
 #endif
-  switch (type.kind()) {
+  switch (kind) {
     case kI32:
     case kOptRef:
     case kRef:
@@ -334,8 +334,8 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
 }
 
 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
-                 ValueType type) {
-  switch (type.kind()) {
+                 ValueKind kind) {
+  switch (kind) {
     case kI32:
     case kOptRef:
     case kRef:
@@ -534,17 +534,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
   return liftoff::kInstanceOffset;
 }
 
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
-  switch (type.kind()) {
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+  switch (kind) {
     case kS128:
-      return type.element_size_bytes();
+      return element_size_bytes(kind);
     default:
       return kStackSlotSize;
   }
 }
 
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
-  return (type.kind() == kS128 || type.is_reference_type());
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+  return kind == kS128 || is_reference_type(kind);
 }
 
 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -1081,7 +1081,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
   }
   ParallelRegisterMove(
-      {{dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}});
+      {{dst, LiftoffRegister::ForPair(dst_low, dst_high), kI64}});
 }
 
 void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
@@ -1203,11 +1203,10 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
   __ ParallelRegisterMove(
       {{LiftoffRegister::ForPair(new_value_low, new_value_high), new_value,
-        kWasmI64},
-       {LiftoffRegister::ForPair(expected_low, expected_high), expected,
-        kWasmI64},
-       {dst_addr, dst_addr_reg, kWasmI32},
-       {offset, offset_reg != no_reg ? offset_reg : offset, kWasmI32}});
+        kI64},
+       {LiftoffRegister::ForPair(expected_low, expected_high), expected, kI64},
+       {dst_addr, dst_addr_reg, kI32},
+       {offset, offset_reg != no_reg ? offset_reg : offset, kI32}});
 
   {
     UseScratchRegisterScope temps(lasm);
@@ -1235,7 +1234,7 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
   __ bind(&done);
   __ ParallelRegisterMove(
-      {{result, LiftoffRegister::ForPair(result_low, result_high), kWasmI64}});
+      {{result, LiftoffRegister::ForPair(result_low, result_high), kI64}});
 }
 
 #undef __
 }  // namespace liftoff
@@ -1346,52 +1345,52 @@ void LiftoffAssembler::AtomicFence() { dmb(ISH); }
 
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
-                                           ValueType type) {
+                                           ValueKind kind) {
   MemOperand src(fp, (caller_slot_idx + 1) * kSystemPointerSize);
-  liftoff::Load(this, dst, src, type);
+  liftoff::Load(this, dst, src, kind);
 }
 
 void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
                                             uint32_t caller_slot_idx,
-                                            ValueType type) {
+                                            ValueKind kind) {
   MemOperand dst(fp, (caller_slot_idx + 1) * kSystemPointerSize);
-  liftoff::Store(this, src, dst, type);
+  liftoff::Store(this, src, dst, kind);
 }
 
 void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
-                                           ValueType type) {
+                                           ValueKind kind) {
   MemOperand src(sp, offset);
-  liftoff::Load(this, dst, src, type);
+  liftoff::Load(this, dst, src, kind);
 }
 
 void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
-                                      ValueType type) {
+                                      ValueKind kind) {
   DCHECK_NE(dst_offset, src_offset);
-  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
-  Fill(reg, src_offset, type);
-  Spill(dst_offset, reg, type);
+  LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+  Fill(reg, src_offset, kind);
+  Spill(dst_offset, reg, kind);
 }
 
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
   DCHECK_NE(dst, src);
-  DCHECK(type == kWasmI32 || type.is_reference_type());
+  DCHECK(kind == kI32 || is_reference_type(kind));
   TurboAssembler::Move(dst, src);
 }
 
 void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
-                            ValueType type) {
+                            ValueKind kind) {
   DCHECK_NE(dst, src);
-  if (type == kWasmF32) {
+  if (kind == kF32) {
     vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
-  } else if (type == kWasmF64) {
+  } else if (kind == kF64) {
     vmov(dst, src);
   } else {
-    DCHECK_EQ(kWasmS128, type);
+    DCHECK_EQ(kS128, kind);
     vmov(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
   }
 }
 
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
   // The {str} instruction needs a temp register when the immediate in the
   // provided MemOperand does not fit into 12 bits. This happens for large stack
   // frames. This DCHECK checks that the temp register is available when needed.
@@ -1399,7 +1398,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   DCHECK_LT(0, offset);
   RecordUsedSpillOffset(offset);
   MemOperand dst(fp, -offset);
-  liftoff::Store(this, reg, dst, type);
+  liftoff::Store(this, reg, dst, kind);
 }
 
 void LiftoffAssembler::Spill(int offset, WasmValue value) {
@@ -1434,8 +1433,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
   }
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
-  liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type);
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+  liftoff::Load(this, reg, liftoff::GetStackSlot(offset), kind);
 }
 
 void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
@@ -2186,16 +2185,16 @@ void LiftoffAssembler::emit_jump(Label* label) { b(label); }
 void LiftoffAssembler::emit_jump(Register target) { bx(target); }
 
 void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
-                                      Label* label, ValueType type,
+                                      Label* label, ValueKind kind,
                                       Register lhs, Register rhs) {
   Condition cond = liftoff::ToCondition(liftoff_cond);
 
   if (rhs == no_reg) {
-    DCHECK_EQ(type, kWasmI32);
+    DCHECK_EQ(kind, kI32);
     cmp(lhs, Operand(0));
   } else {
-    DCHECK(type == kWasmI32 ||
-           (type.is_reference_type() &&
+    DCHECK(kind == kI32 ||
+           (is_reference_type(kind) &&
             (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
     cmp(lhs, rhs);
   }
@@ -2304,7 +2303,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
 bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
                                    LiftoffRegister true_value,
                                    LiftoffRegister false_value,
-                                   ValueType type) {
+                                   ValueKind kind) {
   return false;
 }
@@ -4114,10 +4113,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   Ret();
 }
 
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
                              const LiftoffRegister* args,
                              const LiftoffRegister* rets,
-                             ValueType out_argument_type, int stack_bytes,
+                             ValueKind out_argument_kind, int stack_bytes,
                              ExternalReference ext_ref) {
   // Arguments are passed by pushing them all to the stack and then passing
   // a pointer to them.
@@ -4126,8 +4125,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
   AllocateStackSpace(stack_bytes);
 
   int arg_bytes = 0;
-  for (ValueType param_type : sig->parameters()) {
-    switch (param_type.kind()) {
+  for (ValueKind param_kind : sig->parameters()) {
+    switch (param_kind) {
       case kI32:
        str(args->gp(), MemOperand(sp, arg_bytes));
        break;
@@ -4150,7 +4149,7 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
         UNREACHABLE();
     }
     args++;
-    arg_bytes += param_type.element_size_bytes();
+    arg_bytes += element_size_bytes(param_kind);
   }
   DCHECK_LE(arg_bytes, stack_bytes);
@@ -4174,8 +4173,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
   }
 
   // Load potential output value from the buffer on the stack.
-  if (out_argument_type != kWasmStmt) {
-    switch (out_argument_type.kind()) {
+  if (out_argument_kind != kStmt) {
+    switch (out_argument_kind) {
       case kI32:
        ldr(result_reg->gp(), MemOperand(sp));
        break;
@@ -4208,7 +4207,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
   Jump(addr, RelocInfo::WASM_CALL);
 }
 
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
                                     compiler::CallDescriptor* call_descriptor,
                                     Register target) {
   DCHECK(target != no_reg);
@@ -4240,7 +4239,7 @@ void LiftoffStackSlots::Construct() {
     const LiftoffAssembler::VarState& src = slot.src_;
     switch (src.loc()) {
       case LiftoffAssembler::VarState::kStack: {
-        switch (src.type().kind()) {
+        switch (src.kind()) {
           // i32 and i64 can be treated as similar cases, i64 being previously
          // split into two i32 registers
          case kI32:
@@ -4276,7 +4275,7 @@ void LiftoffStackSlots::Construct() {
           break;
         }
       case LiftoffAssembler::VarState::kRegister:
-        switch (src.type().kind()) {
+        switch (src.kind()) {
           case kI64: {
            LiftoffRegister reg =
                slot.half_ == kLowWord ? src.reg().low() : src.reg().high();
@@ -4301,7 +4300,7 @@ void LiftoffStackSlots::Construct() {
         }
         break;
       case LiftoffAssembler::VarState::kIntConst: {
-        DCHECK(src.type() == kWasmI32 || src.type() == kWasmI64);
+        DCHECK(src.kind() == kI32 || src.kind() == kI64);
         UseScratchRegisterScope temps(asm_);
         Register scratch = temps.Acquire();
         // The high word is the sign extension of the low word.
...
@@ -72,8 +72,8 @@ inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
 
 inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
 
-inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
-  switch (type.kind()) {
+inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueKind kind) {
+  switch (kind) {
     case kI32:
       return reg.gp().W();
     case kI64:
@@ -104,8 +104,8 @@ inline CPURegList PadVRegList(RegList list) {
 }
 
 inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
-                                 ValueType type) {
-  switch (type.kind()) {
+                                 ValueKind kind) {
+  switch (kind) {
     case kI32:
       return temps->AcquireW();
     case kI64:
@@ -357,19 +357,19 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
   return liftoff::kInstanceOffset;
 }
 
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
   // TODO(zhin): Unaligned access typically take additional cycles, we should do
   // some performance testing to see how big an effect it will take.
-  switch (type.kind()) {
+  switch (kind) {
     case kS128:
-      return type.element_size_bytes();
+      return element_size_bytes(kind);
     default:
       return kStackSlotSize;
   }
 }
 
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
-  return type.kind() == kS128 || type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+  return kind == kS128 || is_reference_type(kind);
 }
 
 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -840,56 +840,56 @@ void LiftoffAssembler::AtomicFence() { Dmb(InnerShareable, BarrierAll); }
 
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
-                                           ValueType type) {
+                                           ValueKind kind) {
   int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
-  Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
+  Ldr(liftoff::GetRegFromType(dst, kind), MemOperand(fp, offset));
 }
 
 void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
                                             uint32_t caller_slot_idx,
-                                            ValueType type) {
+                                            ValueKind kind) {
   int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
-  Str(liftoff::GetRegFromType(src, type), MemOperand(fp, offset));
+  Str(liftoff::GetRegFromType(src, kind), MemOperand(fp, offset));
 }
 
 void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
-                                           ValueType type) {
-  Ldr(liftoff::GetRegFromType(dst, type), MemOperand(sp, offset));
+                                           ValueKind kind) {
+  Ldr(liftoff::GetRegFromType(dst, kind), MemOperand(sp, offset));
 }
 
 void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
-                                      ValueType type) {
+                                      ValueKind kind) {
   UseScratchRegisterScope temps(this);
-  CPURegister scratch = liftoff::AcquireByType(&temps, type);
+  CPURegister scratch = liftoff::AcquireByType(&temps, kind);
   Ldr(scratch, liftoff::GetStackSlot(src_offset));
   Str(scratch, liftoff::GetStackSlot(dst_offset));
 }
 
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
-  if (type == kWasmI32) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+  if (kind == kI32) {
     Mov(dst.W(), src.W());
   } else {
-    DCHECK(kWasmI64 == type || type.is_reference_type());
+    DCHECK(kI64 == kind || is_reference_type(kind));
     Mov(dst.X(), src.X());
   }
 }
 
 void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
-                            ValueType type) {
-  if (type == kWasmF32) {
+                            ValueKind kind) {
+  if (kind == kF32) {
     Fmov(dst.S(), src.S());
-  } else if (type == kWasmF64) {
+  } else if (kind == kF64) {
     Fmov(dst.D(), src.D());
   } else {
-    DCHECK_EQ(kWasmS128, type);
+    DCHECK_EQ(kS128, kind);
     Mov(dst.Q(), src.Q());
   }
 }
 
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
-  Str(liftoff::GetRegFromType(reg, type), dst);
+  Str(liftoff::GetRegFromType(reg, kind), dst);
 }
 
 void LiftoffAssembler::Spill(int offset, WasmValue value) {
@@ -921,9 +921,9 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
   Str(src, dst);
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
   MemOperand src = liftoff::GetStackSlot(offset);
-  Ldr(liftoff::GetRegFromType(reg, type), src);
+  Ldr(liftoff::GetRegFromType(reg, kind), src);
 }
 
 void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
@@ -1506,10 +1506,10 @@ void LiftoffAssembler::emit_jump(Label* label) { B(label); }
 void LiftoffAssembler::emit_jump(Register target) { Br(target); }
 
 void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
-                                      Label* label, ValueType type,
+                                      Label* label, ValueKind kind,
                                       Register lhs, Register rhs) {
   Condition cond = liftoff::ToCondition(liftoff_cond);
-  switch (type.kind()) {
+  switch (kind) {
     case kI32:
       if (rhs.is_valid()) {
         Cmp(lhs.W(), rhs.W());
@@ -1598,7 +1598,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
 bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
                                    LiftoffRegister true_value,
                                    LiftoffRegister false_value,
-                                   ValueType type) {
+                                   ValueKind kind) {
   return false;
 }
@@ -3124,10 +3124,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   Ret();
 }
 
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
                              const LiftoffRegister* args,
                              const LiftoffRegister* rets,
-                             ValueType out_argument_type, int stack_bytes,
+                             ValueKind out_argument_kind, int stack_bytes,
                              ExternalReference ext_ref) {
   // The stack pointer is required to be quadword aligned.
   int total_size = RoundUp(stack_bytes, kQuadWordSizeInBytes);
@@ -3135,9 +3135,9 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
   Claim(total_size, 1);
 
   int arg_bytes = 0;
-  for (ValueType param_type : sig->parameters()) {
-    Poke(liftoff::GetRegFromType(*args++, param_type), arg_bytes);
-    arg_bytes += param_type.element_size_bytes();
+  for (ValueKind param_kind : sig->parameters()) {
+    Poke(liftoff::GetRegFromType(*args++, param_kind), arg_bytes);
+    arg_bytes += element_size_bytes(param_kind);
   }
   DCHECK_LE(arg_bytes, stack_bytes);
@@ -3160,8 +3160,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
   }
 
   // Load potential output value from the buffer on the stack.
-  if (out_argument_type != kWasmStmt) {
-    Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_type), 0);
+  if (out_argument_kind != kStmt) {
+    Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_kind), 0);
   }
 
   Drop(total_size, 1);
@@ -3175,7 +3175,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
   Jump(addr, RelocInfo::WASM_CALL);
 }
 
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
                                     compiler::CallDescriptor* call_descriptor,
                                     Register target) {
   // For Arm64, we have more cache registers than wasm parameters. That means
@@ -3217,34 +3217,34 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
 void LiftoffStackSlots::Construct() {
   size_t num_slots = 0;
   for (auto& slot : slots_) {
-    num_slots += slot.src_.type() == kWasmS128 ? 2 : 1;
+    num_slots += slot.src_.kind() == kS128 ? 2 : 1;
   }
   // The stack pointer is required to be quadword aligned.
   asm_->Claim(RoundUp(num_slots, 2));
   size_t poke_offset = num_slots * kXRegSize;
   for (auto& slot : slots_) {
-    poke_offset -= slot.src_.type() == kWasmS128 ? kXRegSize * 2 : kXRegSize;
+    poke_offset -= slot.src_.kind() == kS128 ? kXRegSize * 2 : kXRegSize;
     switch (slot.src_.loc()) {
       case LiftoffAssembler::VarState::kStack: {
         UseScratchRegisterScope temps(asm_);
-        CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
+        CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.kind());
         asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
         asm_->Poke(scratch, poke_offset);
         break;
       }
       case LiftoffAssembler::VarState::kRegister:
-        asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.type()),
+        asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.kind()),
                    poke_offset);
         break;
       case LiftoffAssembler::VarState::kIntConst:
-        DCHECK(slot.src_.type() == kWasmI32 || slot.src_.type() == kWasmI64);
+        DCHECK(slot.src_.kind() == kI32 || slot.src_.kind() == kI64);
         if (slot.src_.i32_const() == 0) {
-          Register zero_reg = slot.src_.type() == kWasmI32 ? wzr : xzr;
+          Register zero_reg = slot.src_.kind() == kI32 ? wzr : xzr;
          asm_->Poke(zero_reg, poke_offset);
        } else {
          UseScratchRegisterScope temps(asm_);
-          Register scratch = slot.src_.type() == kWasmI32 ? temps.AcquireW()
-                                                          : temps.AcquireX();
+          Register scratch =
+              slot.src_.kind() == kI32 ? temps.AcquireW() : temps.AcquireX();
          asm_->Mov(scratch, int64_t{slot.src_.i32_const()});
          asm_->Poke(scratch, poke_offset);
        }
...
@@ -65,9 +65,9 @@ static constexpr LiftoffRegList kByteRegs =
     LiftoffRegList::FromBits<Register::ListOf(eax, ecx, edx)>();
 
 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
-                 int32_t offset, ValueType type) {
+                 int32_t offset, ValueKind kind) {
   Operand src(base, offset);
-  switch (type.kind()) {
+  switch (kind) {
     case kI32:
     case kOptRef:
     case kRef:
@@ -94,9 +94,9 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
 }
 
 inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
-                  LiftoffRegister src, ValueType type) {
+                  LiftoffRegister src, ValueKind kind) {
   Operand dst(base, offset);
-  switch (type.kind()) {
+  switch (kind) {
     case kI32:
       assm->mov(dst, src.gp());
       break;
@@ -118,8 +118,8 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
   }
 }
 
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
-  switch (type.kind()) {
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+  switch (kind) {
     case kI32:
     case kRef:
     case kOptRef:
@@ -261,13 +261,13 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
   return liftoff::kInstanceOffset;
 }
 
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
-  return type.is_reference_type() ? kSystemPointerSize
-                                  : type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+  return is_reference_type(kind) ? kSystemPointerSize
+                                 : element_size_bytes(kind);
 }
 
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
-  return type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+  return is_reference_type(kind);
 }
 
 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -828,7 +828,7 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
   __ SpillRegisters(old_hi, old_lo, new_hi, base, offset);
   __ ParallelRegisterMove(
       {{LiftoffRegister::ForPair(base, offset),
-        LiftoffRegister::ForPair(dst_addr, offset_reg), kWasmI64}});
+        LiftoffRegister::ForPair(dst_addr, offset_reg), kI64}});
 
   Operand dst_op_lo = Operand(base, offset, times_1, offset_imm);
   Operand dst_op_hi = Operand(base, offset, times_1, offset_imm + 4);
@@ -877,7 +877,7 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
   // Move the result into the correct registers.
   __ ParallelRegisterMove(
-      {{result, LiftoffRegister::ForPair(old_lo, old_hi), kWasmI64}});
+      {{result, LiftoffRegister::ForPair(old_lo, old_hi), kI64}});
 }
 
 #undef __
@@ -1065,9 +1065,9 @@ void LiftoffAssembler::AtomicCompareExchange(
   // Move all other values into the right register.
   ParallelRegisterMove(
-      {{LiftoffRegister(address), LiftoffRegister(dst_addr), kWasmI32},
-       {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kWasmI64},
-       {LiftoffRegister(new_hi), new_value.high(), kWasmI32}});
+      {{LiftoffRegister(address), LiftoffRegister(dst_addr), kI32},
+       {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kI64},
+       {LiftoffRegister(new_hi), new_value.high(), kI32}});
 
   Operand dst_op = Operand(address, offset_imm);
@@ -1079,33 +1079,33 @@ void LiftoffAssembler::AtomicCompareExchange(
   // Move the result into the correct registers.
   ParallelRegisterMove(
-      {{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kWasmI64}});
+      {{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kI64}});
 }
 
 void LiftoffAssembler::AtomicFence() { mfence(); }
 
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
-                                           ValueType type) {
+                                           ValueKind kind) {
   liftoff::Load(this, dst, ebp, kSystemPointerSize * (caller_slot_idx + 1),
-                type);
+                kind);
 }
 
 void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset,
-                                           ValueType type) {
-  liftoff::Load(this, reg, esp, offset, type);
+                                           ValueKind kind) {
+  liftoff::Load(this, reg, esp, offset, kind);
 }
 
 void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
                                             uint32_t caller_slot_idx,
-                                            ValueType type) {
+                                            ValueKind kind) {
   liftoff::Store(this, ebp, kSystemPointerSize * (caller_slot_idx + 1), src,
-                 type);
+                 kind);
 }
 
 void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
-                                      ValueType type) {
-  if (needs_gp_reg_pair(type)) {
+                                      ValueKind kind) {
+  if (needs_gp_reg_pair(kind)) {
     liftoff::MoveStackValue(this,
                             liftoff::GetHalfStackSlot(src_offset, kLowWord),
                             liftoff::GetHalfStackSlot(dst_offset, kLowWord));
@@ -1118,29 +1118,29 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
   }
 }
 
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
   DCHECK_NE(dst, src);
-  DCHECK(kWasmI32 == type || type.is_reference_type());
+  DCHECK(kI32 == kind || is_reference_type(kind));
   mov(dst, src);
 }
 
 void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
-                            ValueType type) {
+                            ValueKind kind) {
   DCHECK_NE(dst, src);
-  if (type == kWasmF32) {
+  if (kind == kF32) {
     movss(dst, src);
-  } else if (type == kWasmF64) {
+  } else if (kind == kF64) {
     movsd(dst, src);
   } else {
-    DCHECK_EQ(kWasmS128, type);
+    DCHECK_EQ(kS128, kind);
     Movaps(dst, src);
   }
 }
 
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
   RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
-  switch (type.kind()) {
+  switch (kind) {
     case kI32:
     case kOptRef:
     case kRef:
@@ -1186,8 +1186,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
   }
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
-  liftoff::Load(this, reg, ebp, -offset, type);
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+  liftoff::Load(this, reg, ebp, -offset, kind);
 }
 
 void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
@@ -1517,7 +1517,7 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
   // If necessary, move result into the right registers.
   LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
-  if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+  if (tmp_result != dst) assm->Move(dst, tmp_result, kI64);
 }
 
 template <void (Assembler::*op)(Register, const Immediate&),
@@ -1576,9 +1576,8 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
   SpillRegisters(dst_hi, dst_lo, lhs_hi, rhs_lo);
 
   // Move lhs and rhs into the respective registers.
-  ParallelRegisterMove(
-      {{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kWasmI64},
-       {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kWasmI64}});
+  ParallelRegisterMove({{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kI64},
+                        {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kI64}});
 
   // First mul: lhs_hi' = lhs_hi * rhs_lo.
   imul(lhs_hi, rhs_lo);
@@ -1593,7 +1592,7 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
   // Finally, move back the temporary result to the actual dst register pair.
   LiftoffRegister dst_tmp = LiftoffRegister::ForPair(dst_lo, dst_hi);
-  if (dst != dst_tmp) Move(dst, dst_tmp, kWasmI64);
+  if (dst != dst_tmp) Move(dst, dst_tmp, kI64);
 }
 
 bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1660,11 +1659,11 @@ inline void Emit64BitShiftOperation(
       (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
        pinned.has(LiftoffRegister(ecx)))) {
     ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
-    reg_moves.emplace_back(ecx_replace, ecx, kWasmI32);
+    reg_moves.emplace_back(ecx_replace, ecx, kI32);
   }
 
-  reg_moves.emplace_back(dst, src, kWasmI64);
-  reg_moves.emplace_back(ecx, amount, kWasmI32);
+  reg_moves.emplace_back(dst, src, kI64);
+  reg_moves.emplace_back(ecx, amount, kI32);
   assm->ParallelRegisterMove(VectorOf(reg_moves));
 
   // Do the actual shift.
@@ -1689,7 +1688,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
     if (amount != 32) shl(dst.high_gp(), amount - 32);
     xor_(dst.low_gp(), dst.low_gp());
   } else {
-    if (dst != src) Move(dst, src, kWasmI64);
+    if (dst != src) Move(dst, src, kI64);
     ShlPair(dst.high_gp(), dst.low_gp(), amount);
   }
 }
@@ -1709,7 +1708,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
     if (amount != 32) sar(dst.low_gp(), amount - 32);
     sar(dst.high_gp(), 31);
   } else {
-    if (dst != src) Move(dst, src, kWasmI64);
+    if (dst != src) Move(dst, src, kI64);
     SarPair(dst.high_gp(), dst.low_gp(), amount);
   }
 }
@@ -1727,7 +1726,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
     if (amount != 32) shr(dst.low_gp(), amount - 32);
     xor_(dst.high_gp(), dst.high_gp());
   } else {
-    if (dst != src) Move(dst, src, kWasmI64);
+    if (dst != src) Move(dst, src, kI64);
     ShrPair(dst.high_gp(), dst.low_gp(), amount);
   }
 }
@@ -2402,11 +2401,11 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
 void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
 
 void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
-                                      Label* label, ValueType type,
+                                      Label* label, ValueKind kind,
                                       Register lhs, Register rhs) {
   Condition cond = liftoff::ToCondition(liftoff_cond);
   if (rhs != no_reg) {
-    switch (type.kind()) {
+    switch (kind) {
       case kRef:
       case kOptRef:
       case kRtt:
@@ -2420,7 +2419,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
         UNREACHABLE();
     }
   } else {
-    DCHECK_EQ(type, kWasmI32);
+    DCHECK_EQ(kind, kI32);
     test(lhs, lhs);
   }
@@ -2572,7 +2571,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
 bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
                                    LiftoffRegister true_value,
                                    LiftoffRegister false_value,
-                                   ValueType type) {
+                                   ValueKind kind) {
   return false;
 }
@@ -4880,17 +4879,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
 }
 
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
                              const LiftoffRegister* args,
                              const LiftoffRegister* rets,
-                             ValueType out_argument_type, int stack_bytes,
+                             ValueKind out_argument_kind, int stack_bytes,
                              ExternalReference ext_ref) {
   AllocateStackSpace(stack_bytes);
 
   int arg_bytes = 0;
-  for (ValueType param_type : sig->parameters()) {
-    liftoff::Store(this, esp, arg_bytes, *args++, param_type);
-    arg_bytes += param_type.element_size_bytes();
+  for (ValueKind param_kind : sig->parameters()) {
+    liftoff::Store(this, esp, arg_bytes, *args++, param_kind);
+    arg_bytes += element_size_bytes(param_kind);
   }
   DCHECK_LE(arg_bytes, stack_bytes);
@@ -4919,8 +4918,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
   }
 
   // Load potential output value from the buffer on the stack.
-  if (out_argument_type != kWasmStmt) {
-    liftoff::Load(this, *next_result_reg, esp, 0, out_argument_type);
+  if (out_argument_kind != kStmt) {
+    liftoff::Load(this, *next_result_reg, esp, 0, out_argument_kind);
   }
 
   add(esp, Immediate(stack_bytes));
@@ -4934,7 +4933,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
   jmp(addr, RelocInfo::WASM_CALL);
 }
 
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
                                     compiler::CallDescriptor* call_descriptor,
                                     Register target) {
   // Since we have more cache registers than parameter registers, the
@@ -4980,26 +4979,26 @@ void LiftoffStackSlots::Construct() {
       case LiftoffAssembler::VarState::kStack:
         // The combination of AllocateStackSpace and 2 movdqu is usually smaller
        // in code size than doing 4 pushes.
-        if (src.type() == kWasmS128) {
+        if (src.kind() == kS128) {
          asm_->AllocateStackSpace(sizeof(double) * 2);
          asm_->movdqu(liftoff::kScratchDoubleReg,
                       liftoff::GetStackSlot(slot.src_offset_));
          asm_->movdqu(Operand(esp, 0), liftoff::kScratchDoubleReg);
          break;
        }
-        if (src.type() == kWasmF64) {
+        if (src.kind() == kF64) {
          DCHECK_EQ(kLowWord, slot.half_);
          asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
        }
        asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
        break;
      case LiftoffAssembler::VarState::kRegister:
-        if (src.type() == kWasmI64) {
+        if (src.kind() == kI64) {
          liftoff::push(
              asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
-              kWasmI32);
+              kI32);
        } else {
-          liftoff::push(asm_, src.reg(), src.type());
+          liftoff::push(asm_, src.reg(), src.kind());
        }
        break;
      case LiftoffAssembler::VarState::kIntConst:
...
...@@ -23,17 +23,18 @@ namespace internal { ...@@ -23,17 +23,18 @@ namespace internal {
namespace wasm { namespace wasm {
using VarState = LiftoffAssembler::VarState; using VarState = LiftoffAssembler::VarState;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
constexpr ValueType LiftoffAssembler::kWasmIntPtr; constexpr ValueKind LiftoffAssembler::kIntPtr;
namespace { namespace {
class StackTransferRecipe { class StackTransferRecipe {
struct RegisterMove { struct RegisterMove {
LiftoffRegister src; LiftoffRegister src;
ValueType type; ValueKind kind;
constexpr RegisterMove(LiftoffRegister src, ValueType type) constexpr RegisterMove(LiftoffRegister src, ValueKind kind)
: src(src), type(type) {} : src(src), kind(kind) {}
}; };
struct RegisterLoad { struct RegisterLoad {
...@@ -45,35 +46,34 @@ class StackTransferRecipe { ...@@ -45,35 +46,34 @@ class StackTransferRecipe {
kHighHalfStack // fill a register from the high half of a stack slot. kHighHalfStack // fill a register from the high half of a stack slot.
}; };
LoadKind kind; LoadKind load_kind;
ValueType type; ValueKind kind;
int32_t value; // i32 constant value or stack offset, depending on kind. int32_t value; // i32 constant value or stack offset, depending on kind.
// Named constructors. // Named constructors.
static RegisterLoad Const(WasmValue constant) { static RegisterLoad Const(WasmValue constant) {
if (constant.type() == kWasmI32) { if (constant.type().kind() == kI32) {
return {kConstant, kWasmI32, constant.to_i32()}; return {kConstant, kI32, constant.to_i32()};
} }
DCHECK_EQ(kWasmI64, constant.type()); DCHECK_EQ(kI64, constant.type().kind());
int32_t i32_const = static_cast<int32_t>(constant.to_i64()); int32_t i32_const = static_cast<int32_t>(constant.to_i64());
DCHECK_EQ(constant.to_i64(), i32_const); DCHECK_EQ(constant.to_i64(), i32_const);
return {kConstant, kWasmI64, i32_const}; return {kConstant, kI64, i32_const};
} }
static RegisterLoad Stack(int32_t offset, ValueType type) { static RegisterLoad Stack(int32_t offset, ValueKind kind) {
return {kStack, type, offset}; return {kStack, kind, offset};
} }
static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) { static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) {
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32, return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kI32, offset};
offset};
} }
static RegisterLoad Nop() { static RegisterLoad Nop() {
// ValueType does not matter. // ValueKind does not matter.
return {kNop, kWasmI32, 0}; return {kNop, kI32, 0};
} }
private: private:
RegisterLoad(LoadKind kind, ValueType type, int32_t value) RegisterLoad(LoadKind load_kind, ValueKind kind, int32_t value)
: kind(kind), type(type), value(value) {} : load_kind(load_kind), kind(kind), value(value) {}
}; };
public: public:
...@@ -92,12 +92,12 @@ class StackTransferRecipe { ...@@ -92,12 +92,12 @@ class StackTransferRecipe {
} }
#if DEBUG #if DEBUG
bool CheckCompatibleStackSlotTypes(ValueType dst, ValueType src) { bool CheckCompatibleStackSlotTypes(ValueKind dst, ValueKind src) {
if (dst.is_object_reference_type()) { if (is_object_reference_type(dst)) {
// Since Liftoff doesn't do accurate type tracking (e.g. on loop back // Since Liftoff doesn't do accurate type tracking (e.g. on loop back
// edges), we only care that pointer types stay amongst pointer types. // edges), we only care that pointer types stay amongst pointer types.
// It's fine if ref/optref overwrite each other. // It's fine if ref/optref overwrite each other.
DCHECK(src.is_object_reference_type()); DCHECK(is_object_reference_type(src));
} else { } else {
// All other types (primitive numbers, RTTs, bottom/stmt) must be equal. // All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
DCHECK_EQ(dst, src); DCHECK_EQ(dst, src);
...@@ -107,7 +107,7 @@ class StackTransferRecipe { ...@@ -107,7 +107,7 @@ class StackTransferRecipe {
#endif #endif
V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) { V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
DCHECK(CheckCompatibleStackSlotTypes(dst.type(), src.type())); DCHECK(CheckCompatibleStackSlotTypes(dst.kind(), src.kind()));
if (dst.is_reg()) { if (dst.is_reg()) {
LoadIntoRegister(dst.reg(), src, src.offset()); LoadIntoRegister(dst.reg(), src, src.offset());
return; return;
...@@ -120,11 +120,11 @@ class StackTransferRecipe { ...@@ -120,11 +120,11 @@ class StackTransferRecipe {
switch (src.loc()) { switch (src.loc()) {
case VarState::kStack: case VarState::kStack:
if (src.offset() != dst.offset()) { if (src.offset() != dst.offset()) {
asm_->MoveStackValue(dst.offset(), src.offset(), src.type()); asm_->MoveStackValue(dst.offset(), src.offset(), src.kind());
} }
break; break;
case VarState::kRegister: case VarState::kRegister:
asm_->Spill(dst.offset(), src.reg(), src.type()); asm_->Spill(dst.offset(), src.reg(), src.kind());
break; break;
case VarState::kIntConst: case VarState::kIntConst:
asm_->Spill(dst.offset(), src.constant()); asm_->Spill(dst.offset(), src.constant());
...@@ -137,11 +137,11 @@ class StackTransferRecipe { ...@@ -137,11 +137,11 @@ class StackTransferRecipe {
uint32_t src_offset) { uint32_t src_offset) {
switch (src.loc()) { switch (src.loc()) {
case VarState::kStack: case VarState::kStack:
LoadStackSlot(dst, src_offset, src.type()); LoadStackSlot(dst, src_offset, src.kind());
break; break;
case VarState::kRegister: case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class()); DCHECK_EQ(dst.reg_class(), src.reg_class());
if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type()); if (dst != src.reg()) MoveRegister(dst, src.reg(), src.kind());
break; break;
case VarState::kIntConst: case VarState::kIntConst:
LoadConstant(dst, src.constant()); LoadConstant(dst, src.constant());
...@@ -155,7 +155,7 @@ class StackTransferRecipe { ...@@ -155,7 +155,7 @@ class StackTransferRecipe {
// Use CHECK such that the remaining code is statically dead if // Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false. // {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair); CHECK(kNeedI64RegPair);
DCHECK_EQ(kWasmI64, src.type()); DCHECK_EQ(kI64, src.kind());
switch (src.loc()) { switch (src.loc()) {
case VarState::kStack: case VarState::kStack:
LoadI64HalfStackSlot(dst, offset, half); LoadI64HalfStackSlot(dst, offset, half);
...@@ -163,7 +163,7 @@ class StackTransferRecipe { ...@@ -163,7 +163,7 @@ class StackTransferRecipe {
case VarState::kRegister: { case VarState::kRegister: {
LiftoffRegister src_half = LiftoffRegister src_half =
half == kLowWord ? src.reg().low() : src.reg().high(); half == kLowWord ? src.reg().low() : src.reg().high();
if (dst != src_half) MoveRegister(dst, src_half, kWasmI32); if (dst != src_half) MoveRegister(dst, src_half, kI32);
break; break;
} }
case VarState::kIntConst: case VarState::kIntConst:
...@@ -175,45 +175,44 @@ class StackTransferRecipe { ...@@ -175,45 +175,44 @@ class StackTransferRecipe {
} }
} }
void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) { void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class()); DCHECK_EQ(dst.reg_class(), src.reg_class());
DCHECK_EQ(reg_class_for(type), src.reg_class()); DCHECK_EQ(reg_class_for(kind), src.reg_class());
if (src.is_gp_pair()) { if (src.is_gp_pair()) {
DCHECK_EQ(kWasmI64, type); DCHECK_EQ(kI64, kind);
if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32); if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kI32);
if (dst.high() != src.high()) if (dst.high() != src.high()) MoveRegister(dst.high(), src.high(), kI32);
MoveRegister(dst.high(), src.high(), kWasmI32);
return; return;
} }
if (src.is_fp_pair()) { if (src.is_fp_pair()) {
DCHECK_EQ(kWasmS128, type); DCHECK_EQ(kS128, kind);
if (dst.low() != src.low()) { if (dst.low() != src.low()) {
MoveRegister(dst.low(), src.low(), kWasmF64); MoveRegister(dst.low(), src.low(), kF64);
MoveRegister(dst.high(), src.high(), kWasmF64); MoveRegister(dst.high(), src.high(), kF64);
} }
return; return;
} }
if (move_dst_regs_.has(dst)) { if (move_dst_regs_.has(dst)) {
DCHECK_EQ(register_move(dst)->src, src); DCHECK_EQ(register_move(dst)->src, src);
// Non-fp registers can only occur with the exact same type. // Non-fp registers can only occur with the exact same kind.
DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->type == type); DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->kind == kind);
// It can happen that one fp register holds both the f32 zero and the f64 // It can happen that one fp register holds both the f32 zero and the f64
// zero, as the initial value for local variables. Move the value as f64 // zero, as the initial value for local variables. Move the value as f64
// in that case. // in that case.
if (type == kWasmF64) register_move(dst)->type = kWasmF64; if (kind == kF64) register_move(dst)->kind = kF64;
return; return;
} }
move_dst_regs_.set(dst); move_dst_regs_.set(dst);
++*src_reg_use_count(src); ++*src_reg_use_count(src);
*register_move(dst) = {src, type}; *register_move(dst) = {src, kind};
} }
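MoveRegister decomposes an i64 gp-pair move into two kI32 half moves and an s128 fp-pair move into two kF64 moves, skipping halves already in place. A standalone model of the gp-pair case; note the real code records the half moves through the recipe so overlapping pairs are ordered safely, while this sketch executes them eagerly (PairReg and MoveGp are illustrative names):

#include <cstdio>

struct PairReg { int low, high; };  // gp register pair holding an i64

void MoveGp(int dst, int src) { std::printf("mov r%d <- r%d\n", dst, src); }

void MovePair(PairReg dst, PairReg src) {
  // One kI32 move per half, skipping halves that already match.
  if (dst.low != src.low) MoveGp(dst.low, src.low);
  if (dst.high != src.high) MoveGp(dst.high, src.high);
}

int main() {
  MovePair({0, 1}, {2, 1});  // only the low half needs a move
}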
void LoadConstant(LiftoffRegister dst, WasmValue value) { void LoadConstant(LiftoffRegister dst, WasmValue value) {
DCHECK(!load_dst_regs_.has(dst)); DCHECK(!load_dst_regs_.has(dst));
load_dst_regs_.set(dst); load_dst_regs_.set(dst);
if (dst.is_gp_pair()) { if (dst.is_gp_pair()) {
DCHECK_EQ(kWasmI64, value.type()); DCHECK_EQ(kI64, value.type().kind());
int64_t i64 = value.to_i64(); int64_t i64 = value.to_i64();
*register_load(dst.low()) = *register_load(dst.low()) =
RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64))); RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64)));
...@@ -225,7 +224,7 @@ class StackTransferRecipe { ...@@ -225,7 +224,7 @@ class StackTransferRecipe {
} }
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_offset, void LoadStackSlot(LiftoffRegister dst, uint32_t stack_offset,
ValueType type) { ValueKind kind) {
if (load_dst_regs_.has(dst)) { if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack // It can happen that we spilled the same register to different stack
// slots, and then we reload them later into the same dst register. // slots, and then we reload them later into the same dst register.
...@@ -234,20 +233,20 @@ class StackTransferRecipe { ...@@ -234,20 +233,20 @@ class StackTransferRecipe {
} }
load_dst_regs_.set(dst); load_dst_regs_.set(dst);
if (dst.is_gp_pair()) { if (dst.is_gp_pair()) {
DCHECK_EQ(kWasmI64, type); DCHECK_EQ(kI64, kind);
*register_load(dst.low()) = *register_load(dst.low()) =
RegisterLoad::HalfStack(stack_offset, kLowWord); RegisterLoad::HalfStack(stack_offset, kLowWord);
*register_load(dst.high()) = *register_load(dst.high()) =
RegisterLoad::HalfStack(stack_offset, kHighWord); RegisterLoad::HalfStack(stack_offset, kHighWord);
} else if (dst.is_fp_pair()) { } else if (dst.is_fp_pair()) {
DCHECK_EQ(kWasmS128, type); DCHECK_EQ(kS128, kind);
// Only need register_load for low_gp since we load 128 bits in one go. // Only need register_load for low_gp since we load 128 bits in one go.
// Both low and high need to be set in load_dst_regs_ but when iterating // Both low and high need to be set in load_dst_regs_ but when iterating
// over it, both low and high will be cleared, so we won't load twice. // over it, both low and high will be cleared, so we won't load twice.
*register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type); *register_load(dst.low()) = RegisterLoad::Stack(stack_offset, kind);
*register_load(dst.high()) = RegisterLoad::Nop(); *register_load(dst.high()) = RegisterLoad::Nop();
} else { } else {
*register_load(dst) = RegisterLoad::Stack(stack_offset, type); *register_load(dst) = RegisterLoad::Stack(stack_offset, kind);
} }
} }
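The fp-pair bookkeeping above is easy to model standalone: both halves are marked as pending destinations, but only the low register carries a real load; the high half is a nop, so one pass over the pending set fills the 128-bit value exactly once. A sketch under those assumptions (the containers stand in for LiftoffRegList and the per-register load array):

#include <cstdio>
#include <map>
#include <set>

enum class LoadKind { kNop, kStack };
struct Load { LoadKind load_kind; int offset; };

int main() {
  std::set<int> load_dst_regs;        // models the pending-register list
  std::map<int, Load> register_load;  // models the per-register load slots

  // An s128 value lives in the fp pair {d0, d1}: one real load on the low
  // register, a nop on the high one, both marked pending so neither is
  // scheduled twice.
  load_dst_regs.insert(0);
  load_dst_regs.insert(1);
  register_load[0] = {LoadKind::kStack, 16};
  register_load[1] = {LoadKind::kNop, 0};

  for (int reg : load_dst_regs) {
    if (register_load[reg].load_kind == LoadKind::kNop) continue;
    std::printf("fill d%d:d%d from offset %d (128 bits in one go)\n", reg,
                reg + 1, register_load[reg].offset);
  }
}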
...@@ -295,7 +294,7 @@ class StackTransferRecipe { ...@@ -295,7 +294,7 @@ class StackTransferRecipe {
void ExecuteMove(LiftoffRegister dst) { void ExecuteMove(LiftoffRegister dst) {
RegisterMove* move = register_move(dst); RegisterMove* move = register_move(dst);
DCHECK_EQ(0, *src_reg_use_count(dst)); DCHECK_EQ(0, *src_reg_use_count(dst));
asm_->Move(dst, move->src, move->type); asm_->Move(dst, move->src, move->kind);
ClearExecutedMove(dst); ClearExecutedMove(dst);
} }
...@@ -329,11 +328,11 @@ class StackTransferRecipe { ...@@ -329,11 +328,11 @@ class StackTransferRecipe {
// TODO(clemensb): Use an unused register if available. // TODO(clemensb): Use an unused register if available.
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet(); LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst); RegisterMove* move = register_move(dst);
last_spill_offset += LiftoffAssembler::SlotSizeForType(move->type); last_spill_offset += LiftoffAssembler::SlotSizeForType(move->kind);
LiftoffRegister spill_reg = move->src; LiftoffRegister spill_reg = move->src;
asm_->Spill(last_spill_offset, spill_reg, move->type); asm_->Spill(last_spill_offset, spill_reg, move->kind);
// Remember to reload into the destination register later. // Remember to reload into the destination register later.
LoadStackSlot(dst, last_spill_offset, move->type); LoadStackSlot(dst, last_spill_offset, move->kind);
ClearExecutedMove(dst); ClearExecutedMove(dst);
} }
} }
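The spill above is the classic way to break a parallel-move cycle: when every remaining destination is also a pending source, spill one source to a fresh stack slot, finish the now-acyclic moves, and reload later. A self-contained model of that algorithm (not the V8 API; registers are plain ints and slots grow by 8 bytes):

#include <cstdio>
#include <map>

int main() {
  // Pending parallel moves, dst <- src; r0 <- r1 and r1 <- r0 form a cycle.
  std::map<int, int> moves = {{0, 1}, {1, 0}};
  std::map<int, int> src_use_count;
  for (auto& m : moves) ++src_use_count[m.second];

  int spill_offset = 0;
  std::map<int, int> reload_from;  // dst -> spill offset to fill from later

  while (!moves.empty()) {
    bool progress = false;
    for (auto it = moves.begin(); it != moves.end();) {
      if (src_use_count[it->first] == 0) {  // dst is not a pending source
        std::printf("mov r%d <- r%d\n", it->first, it->second);
        --src_use_count[it->second];
        it = moves.erase(it);
        progress = true;
      } else {
        ++it;
      }
    }
    if (!progress) {  // only cycles remain: break one by spilling a source
      int dst = moves.begin()->first, src = moves.begin()->second;
      spill_offset += 8;
      std::printf("spill r%d to [sp+%d]\n", src, spill_offset);
      --src_use_count[src];
      reload_from[dst] = spill_offset;
      moves.erase(moves.begin());
    }
  }
  for (auto& r : reload_from)
    std::printf("fill r%d from [sp+%d]\n", r.first, r.second);
}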
...@@ -341,20 +340,20 @@ class StackTransferRecipe { ...@@ -341,20 +340,20 @@ class StackTransferRecipe {
void ExecuteLoads() { void ExecuteLoads() {
for (LiftoffRegister dst : load_dst_regs_) { for (LiftoffRegister dst : load_dst_regs_) {
RegisterLoad* load = register_load(dst); RegisterLoad* load = register_load(dst);
switch (load->kind) { switch (load->load_kind) {
case RegisterLoad::kNop: case RegisterLoad::kNop:
break; break;
case RegisterLoad::kConstant: case RegisterLoad::kConstant:
asm_->LoadConstant(dst, load->type == kWasmI64 asm_->LoadConstant(dst, load->kind == kI64
? WasmValue(int64_t{load->value}) ? WasmValue(int64_t{load->value})
: WasmValue(int32_t{load->value})); : WasmValue(int32_t{load->value}));
break; break;
case RegisterLoad::kStack: case RegisterLoad::kStack:
if (kNeedS128RegPair && load->type == kWasmS128) { if (kNeedS128RegPair && load->kind == kS128) {
asm_->Fill(LiftoffRegister::ForFpPair(dst.fp()), load->value, asm_->Fill(LiftoffRegister::ForFpPair(dst.fp()), load->value,
load->type); load->kind);
} else { } else {
asm_->Fill(dst, load->value, load->type); asm_->Fill(dst, load->value, load->kind);
} }
break; break;
case RegisterLoad::kLowHalfStack: case RegisterLoad::kLowHalfStack:
...@@ -431,18 +430,18 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state, ...@@ -431,18 +430,18 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
reg = register_reuse_map.Lookup(source->reg()); reg = register_reuse_map.Lookup(source->reg());
} }
// Third try: Use any free register. // Third try: Use any free register.
RegClass rc = reg_class_for(source->type()); RegClass rc = reg_class_for(source->kind());
if (!reg && state->has_unused_register(rc, used_regs)) { if (!reg && state->has_unused_register(rc, used_regs)) {
reg = state->unused_register(rc, used_regs); reg = state->unused_register(rc, used_regs);
} }
if (!reg) { if (!reg) {
// No free register; make this a stack slot. // No free register; make this a stack slot.
*target = VarState(source->type(), source->offset()); *target = VarState(source->kind(), source->offset());
continue; continue;
} }
if (reuse_registers) register_reuse_map.Add(source->reg(), *reg); if (reuse_registers) register_reuse_map.Add(source->reg(), *reg);
state->inc_used(*reg); state->inc_used(*reg);
*target = VarState(source->type(), *reg, source->offset()); *target = VarState(source->kind(), *reg, source->offset());
} }
} }
...@@ -534,7 +533,7 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode( ...@@ -534,7 +533,7 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
ZoneVector<int>* slots, LiftoffRegList* spills, ZoneVector<int>* slots, LiftoffRegList* spills,
SpillLocation spill_location) { SpillLocation spill_location) {
for (const auto& slot : stack_state) { for (const auto& slot : stack_state) {
if (!slot.type().is_reference_type()) continue; if (!is_reference_type(slot.kind())) continue;
if (spill_location == SpillLocation::kTopOfStack && slot.is_reg()) { if (spill_location == SpillLocation::kTopOfStack && slot.is_reg()) {
// Registers get spilled just before the call to the runtime. In {spills} // Registers get spilled just before the call to the runtime. In {spills}
...@@ -553,7 +552,7 @@ void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) { ...@@ -553,7 +552,7 @@ void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
for (const auto& slot : stack_state) { for (const auto& slot : stack_state) {
DCHECK(!slot.is_reg()); DCHECK(!slot.is_reg());
if (slot.type().is_reference_type()) { if (is_reference_type(slot.kind())) {
safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot)); safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
} }
} }
...@@ -591,12 +590,12 @@ LiftoffAssembler::~LiftoffAssembler() { ...@@ -591,12 +590,12 @@ LiftoffAssembler::~LiftoffAssembler() {
LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot, LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
LiftoffRegList pinned) { LiftoffRegList pinned) {
if (slot.is_reg()) return slot.reg(); if (slot.is_reg()) return slot.reg();
LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.type()), pinned); LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.kind()), pinned);
if (slot.is_const()) { if (slot.is_const()) {
LoadConstant(reg, slot.constant()); LoadConstant(reg, slot.constant());
} else { } else {
DCHECK(slot.is_stack()); DCHECK(slot.is_stack());
Fill(reg, slot.offset(), slot.type()); Fill(reg, slot.offset(), slot.kind());
} }
return reg; return reg;
} }
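LoadToRegister is the three-way materialization at the heart of the value stack: a register slot is used as-is, a constant is rematerialized, and a stack slot is filled from its spill offset. A compact standalone model of that dispatch (Slot, Loc, Materialize are illustrative stand-ins for VarState):

#include <cstdint>
#include <cstdio>

enum class Loc { kStack, kRegister, kIntConst };

struct Slot {
  Loc loc;
  int reg;            // valid if loc == kRegister
  int offset;         // valid if loc == kStack
  int32_t i32_const;  // valid if loc == kIntConst
};

int Materialize(const Slot& slot, int scratch_reg) {
  switch (slot.loc) {
    case Loc::kRegister:
      return slot.reg;  // already in a register, nothing to emit
    case Loc::kIntConst:
      std::printf("mov r%d, #%d\n", scratch_reg, slot.i32_const);
      return scratch_reg;
    case Loc::kStack:
      std::printf("ldr r%d, [sp+%d]\n", scratch_reg, slot.offset);
      return scratch_reg;
  }
  return scratch_reg;  // unreachable
}

int main() {
  Materialize({Loc::kIntConst, 0, 0, 42}, 3);
  Materialize({Loc::kStack, 0, 16, 0}, 3);
}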
...@@ -647,7 +646,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) { ...@@ -647,7 +646,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
for (int i = 0; i < num; ++i) { for (int i = 0; i < num; ++i) {
VarState& slot = cache_state_.stack_state.end()[-1 - i]; VarState& slot = cache_state_.stack_state.end()[-1 - i];
if (slot.is_stack()) continue; if (slot.is_stack()) continue;
RegClass rc = reg_class_for(slot.type()); RegClass rc = reg_class_for(slot.kind());
if (slot.is_reg()) { if (slot.is_reg()) {
if (cache_state_.get_use_count(slot.reg()) > 1) { if (cache_state_.get_use_count(slot.reg()) > 1) {
// If the register is used more than once, we cannot use it for the // If the register is used more than once, we cannot use it for the
...@@ -655,7 +654,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) { ...@@ -655,7 +654,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
LiftoffRegList pinned; LiftoffRegList pinned;
pinned.set(slot.reg()); pinned.set(slot.reg());
LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned); LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
Move(dst_reg, slot.reg(), slot.type()); Move(dst_reg, slot.reg(), slot.kind());
cache_state_.dec_used(slot.reg()); cache_state_.dec_used(slot.reg());
cache_state_.inc_used(dst_reg); cache_state_.inc_used(dst_reg);
slot.MakeRegister(dst_reg); slot.MakeRegister(dst_reg);
...@@ -677,7 +676,7 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) { ...@@ -677,7 +676,7 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) {
VectorOf(stack_base, num_locals())}) { VectorOf(stack_base, num_locals())}) {
for (VarState& slot : slots) { for (VarState& slot : slots) {
if (!slot.is_const()) continue; if (!slot.is_const()) continue;
RegClass rc = reg_class_for(slot.type()); RegClass rc = reg_class_for(slot.kind());
if (cache_state_.has_unused_register(rc)) { if (cache_state_.has_unused_register(rc)) {
LiftoffRegister reg = cache_state_.unused_register(rc); LiftoffRegister reg = cache_state_.unused_register(rc);
LoadConstant(reg, slot.constant()); LoadConstant(reg, slot.constant());
...@@ -744,7 +743,7 @@ void LiftoffAssembler::Spill(VarState* slot) { ...@@ -744,7 +743,7 @@ void LiftoffAssembler::Spill(VarState* slot) {
case VarState::kStack: case VarState::kStack:
return; return;
case VarState::kRegister: case VarState::kRegister:
Spill(slot->offset(), slot->reg(), slot->type()); Spill(slot->offset(), slot->reg(), slot->kind());
cache_state_.dec_used(slot->reg()); cache_state_.dec_used(slot->reg());
break; break;
case VarState::kIntConst: case VarState::kIntConst:
...@@ -764,7 +763,7 @@ void LiftoffAssembler::SpillAllRegisters() { ...@@ -764,7 +763,7 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) { for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i]; auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue; if (!slot.is_reg()) continue;
Spill(slot.offset(), slot.reg(), slot.type()); Spill(slot.offset(), slot.reg(), slot.kind());
slot.MakeStack(); slot.MakeStack();
} }
cache_state_.ClearCachedInstanceRegister(); cache_state_.ClearCachedInstanceRegister();
...@@ -786,7 +785,7 @@ void LiftoffAssembler::ClearRegister( ...@@ -786,7 +785,7 @@ void LiftoffAssembler::ClearRegister(
if (reg != *use) continue; if (reg != *use) continue;
if (replacement == no_reg) { if (replacement == no_reg) {
replacement = GetUnusedRegister(kGpReg, pinned).gp(); replacement = GetUnusedRegister(kGpReg, pinned).gp();
Move(replacement, reg, LiftoffAssembler::kWasmIntPtr); Move(replacement, reg, LiftoffAssembler::kIntPtr);
} }
// We cannot leave this loop early. There may be multiple uses of {reg}. // We cannot leave this loop early. There may be multiple uses of {reg}.
*use = replacement; *use = replacement;
...@@ -794,7 +793,7 @@ void LiftoffAssembler::ClearRegister( ...@@ -794,7 +793,7 @@ void LiftoffAssembler::ClearRegister(
} }
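ClearRegister redirects every cached use of a register that is about to be clobbered to a single replacement, moving the value only once; as the comment notes, the loop must visit all uses. A standalone model of that pattern, with int* standing in for the cached register fields:

#include <cstdio>
#include <vector>

// {reg} is about to be clobbered: redirect every cached use to one
// replacement register, emitting the move only on the first hit.
void ClearRegister(int reg, std::vector<int*> possible_uses,
                   int replacement_reg) {
  int replacement = -1;
  for (int* use : possible_uses) {
    if (*use != reg) continue;
    if (replacement == -1) {
      replacement = replacement_reg;
      std::printf("mov r%d <- r%d\n", replacement, reg);
    }
    *use = replacement;  // keep going: there may be several uses of reg
  }
}

int main() {
  int a = 5, b = 7, c = 5;
  ClearRegister(5, {&a, &b, &c}, 9);
  std::printf("%d %d %d\n", a, b, c);  // 9 7 9
}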
namespace { namespace {
void PrepareStackTransfers(const FunctionSig* sig, void PrepareStackTransfers(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor, compiler::CallDescriptor* call_descriptor,
const VarState* slots, const VarState* slots,
LiftoffStackSlots* stack_slots, LiftoffStackSlots* stack_slots,
...@@ -807,8 +806,8 @@ void PrepareStackTransfers(const FunctionSig* sig, ...@@ -807,8 +806,8 @@ void PrepareStackTransfers(const FunctionSig* sig,
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count()); uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
for (uint32_t i = num_params; i > 0; --i) { for (uint32_t i = num_params; i > 0; --i) {
const uint32_t param = i - 1; const uint32_t param = i - 1;
ValueType type = sig->GetParam(param); ValueKind kind = sig->GetParam(param);
const bool is_gp_pair = kNeedI64RegPair && type == kWasmI64; const bool is_gp_pair = kNeedI64RegPair && kind == kI64;
const int num_lowered_params = is_gp_pair ? 2 : 1; const int num_lowered_params = is_gp_pair ? 2 : 1;
const VarState& slot = slots[param]; const VarState& slot = slots[param];
const uint32_t stack_offset = slot.offset(); const uint32_t stack_offset = slot.offset();
...@@ -822,10 +821,10 @@ void PrepareStackTransfers(const FunctionSig* sig, ...@@ -822,10 +821,10 @@ void PrepareStackTransfers(const FunctionSig* sig,
call_descriptor->GetInputLocation(call_desc_input_idx); call_descriptor->GetInputLocation(call_desc_input_idx);
if (loc.IsRegister()) { if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister()); DCHECK(!loc.IsAnyRegister());
RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type); RegClass rc = is_gp_pair ? kGpReg : reg_class_for(kind);
int reg_code = loc.AsRegister(); int reg_code = loc.AsRegister();
LiftoffRegister reg = LiftoffRegister reg =
LiftoffRegister::from_external_code(rc, type, reg_code); LiftoffRegister::from_external_code(rc, kind, reg_code);
param_regs->set(reg); param_regs->set(reg);
if (is_gp_pair) { if (is_gp_pair) {
stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset, stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
...@@ -844,7 +843,7 @@ void PrepareStackTransfers(const FunctionSig* sig, ...@@ -844,7 +843,7 @@ void PrepareStackTransfers(const FunctionSig* sig,
} // namespace } // namespace
void LiftoffAssembler::PrepareBuiltinCall( void LiftoffAssembler::PrepareBuiltinCall(
const FunctionSig* sig, compiler::CallDescriptor* call_descriptor, const ValueKindSig* sig, compiler::CallDescriptor* call_descriptor,
std::initializer_list<VarState> params) { std::initializer_list<VarState> params) {
LiftoffStackSlots stack_slots(this); LiftoffStackSlots stack_slots(this);
StackTransferRecipe stack_transfers(this); StackTransferRecipe stack_transfers(this);
...@@ -863,7 +862,7 @@ void LiftoffAssembler::PrepareBuiltinCall( ...@@ -863,7 +862,7 @@ void LiftoffAssembler::PrepareBuiltinCall(
cache_state_.reset_used_registers(); cache_state_.reset_used_registers();
} }
void LiftoffAssembler::PrepareCall(const FunctionSig* sig, void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor, compiler::CallDescriptor* call_descriptor,
Register* target, Register* target,
Register* target_instance) { Register* target_instance) {
...@@ -878,7 +877,7 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig, ...@@ -878,7 +877,7 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
!cache_state_.used_registers.is_empty(); !cache_state_.used_registers.is_empty();
--it) { --it) {
if (!it->is_reg()) continue; if (!it->is_reg()) continue;
Spill(it->offset(), it->reg(), it->type()); Spill(it->offset(), it->reg(), it->kind());
cache_state_.dec_used(it->reg()); cache_state_.dec_used(it->reg());
it->MakeStack(); it->MakeStack();
} }
...@@ -895,8 +894,7 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig, ...@@ -895,8 +894,7 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
param_regs.set(instance_reg); param_regs.set(instance_reg);
if (target_instance && *target_instance != instance_reg) { if (target_instance && *target_instance != instance_reg) {
stack_transfers.MoveRegister(LiftoffRegister(instance_reg), stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
LiftoffRegister(*target_instance), LiftoffRegister(*target_instance), kIntPtr);
kWasmIntPtr);
} }
if (num_params) { if (num_params) {
...@@ -914,10 +912,10 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig, ...@@ -914,10 +912,10 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
if (!free_regs.is_empty()) { if (!free_regs.is_empty()) {
LiftoffRegister new_target = free_regs.GetFirstRegSet(); LiftoffRegister new_target = free_regs.GetFirstRegSet();
stack_transfers.MoveRegister(new_target, LiftoffRegister(*target), stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
kWasmIntPtr); kIntPtr);
*target = new_target.gp(); *target = new_target.gp();
} else { } else {
stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr, stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kIntPtr,
LiftoffRegister(*target), 0)); LiftoffRegister(*target), 0));
*target = no_reg; *target = no_reg;
} }
...@@ -939,15 +937,15 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig, ...@@ -939,15 +937,15 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
} }
} }
void LiftoffAssembler::FinishCall(const FunctionSig* sig, void LiftoffAssembler::FinishCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor) { compiler::CallDescriptor* call_descriptor) {
int call_desc_return_idx = 0; int call_desc_return_idx = 0;
for (ValueType return_type : sig->returns()) { for (ValueKind return_kind : sig->returns()) {
DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount()); DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
const bool needs_gp_pair = needs_gp_reg_pair(return_type); const bool needs_gp_pair = needs_gp_reg_pair(return_kind);
const int num_lowered_params = 1 + needs_gp_pair; const int num_lowered_params = 1 + needs_gp_pair;
const ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type; const ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
const RegClass rc = reg_class_for(lowered_type); const RegClass rc = reg_class_for(lowered_kind);
// Initialize to anything, will be set in the loop and used afterwards. // Initialize to anything, will be set in the loop and used afterwards.
LiftoffRegister reg_pair[2] = {kGpCacheRegList.GetFirstRegSet(), LiftoffRegister reg_pair[2] = {kGpCacheRegList.GetFirstRegSet(),
kGpCacheRegList.GetFirstRegSet()}; kGpCacheRegList.GetFirstRegSet()};
...@@ -958,7 +956,7 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig, ...@@ -958,7 +956,7 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
if (loc.IsRegister()) { if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister()); DCHECK(!loc.IsAnyRegister());
reg_pair[pair_idx] = LiftoffRegister::from_external_code( reg_pair[pair_idx] = LiftoffRegister::from_external_code(
rc, lowered_type, loc.AsRegister()); rc, lowered_kind, loc.AsRegister());
} else { } else {
DCHECK(loc.IsCallerFrameSlot()); DCHECK(loc.IsCallerFrameSlot());
reg_pair[pair_idx] = GetUnusedRegister(rc, pinned); reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
...@@ -966,16 +964,16 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig, ...@@ -966,16 +964,16 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
int offset = call_descriptor->GetOffsetToReturns(); int offset = call_descriptor->GetOffsetToReturns();
int return_slot = -loc.GetLocation() - offset - 1; int return_slot = -loc.GetLocation() - offset - 1;
LoadReturnStackSlot(reg_pair[pair_idx], LoadReturnStackSlot(reg_pair[pair_idx],
return_slot * kSystemPointerSize, lowered_type); return_slot * kSystemPointerSize, lowered_kind);
} }
if (pair_idx == 0) { if (pair_idx == 0) {
pinned.set(reg_pair[0]); pinned.set(reg_pair[0]);
} }
} }
if (num_lowered_params == 1) { if (num_lowered_params == 1) {
PushRegister(return_type, reg_pair[0]); PushRegister(return_kind, reg_pair[0]);
} else { } else {
PushRegister(return_type, LiftoffRegister::ForPair(reg_pair[0].gp(), PushRegister(return_kind, LiftoffRegister::ForPair(reg_pair[0].gp(),
reg_pair[1].gp())); reg_pair[1].gp()));
} }
} }
...@@ -984,21 +982,21 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig, ...@@ -984,21 +982,21 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
} }
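The return loop above lowers each i64 return to two i32 locations on 32-bit targets and reassembles the halves into a pair before pushing. A sketch of just the lowering arithmetic, under the assumption that pairs are needed exactly when pointers are 4 bytes:

#include <cstdio>

enum ValueKind { kI32, kI64, kF32 };
constexpr bool kNeedI64RegPair = sizeof(void*) == 4;

int main() {
  ValueKind returns[] = {kI64, kF32};
  int call_desc_return_idx = 0;
  for (ValueKind ret : returns) {
    bool needs_gp_pair = kNeedI64RegPair && ret == kI64;
    int num_lowered = 1 + needs_gp_pair;  // an i64 becomes two i32 halves
    ValueKind lowered = needs_gp_pair ? kI32 : ret;
    std::printf("return %d -> %d location(s) of kind %d\n",
                call_desc_return_idx, num_lowered, (int)lowered);
    call_desc_return_idx += num_lowered;
  }
}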
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
ValueType type) { ValueKind kind) {
DCHECK_EQ(dst.reg_class(), src.reg_class()); DCHECK_EQ(dst.reg_class(), src.reg_class());
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (kNeedI64RegPair && dst.is_gp_pair()) { if (kNeedI64RegPair && dst.is_gp_pair()) {
// Use the {StackTransferRecipe} to move pairs, as the registers in the // Use the {StackTransferRecipe} to move pairs, as the registers in the
// pairs might overlap. // pairs might overlap.
StackTransferRecipe(this).MoveRegister(dst, src, type); StackTransferRecipe(this).MoveRegister(dst, src, kind);
} else if (kNeedS128RegPair && dst.is_fp_pair()) { } else if (kNeedS128RegPair && dst.is_fp_pair()) {
// Calling low_fp is fine, Move will automatically check the type and // Calling low_fp is fine, Move will automatically check the kind and
// convert this FP to its SIMD register, and use a SIMD move. // convert this FP to its SIMD register, and use a SIMD move.
Move(dst.low_fp(), src.low_fp(), type); Move(dst.low_fp(), src.low_fp(), kind);
} else if (dst.is_gp()) { } else if (dst.is_gp()) {
Move(dst.gp(), src.gp(), type); Move(dst.gp(), src.gp(), kind);
} else { } else {
Move(dst.fp(), src.fp(), type); Move(dst.fp(), src.fp(), kind);
} }
} }
...@@ -1007,7 +1005,7 @@ void LiftoffAssembler::ParallelRegisterMove( ...@@ -1007,7 +1005,7 @@ void LiftoffAssembler::ParallelRegisterMove(
StackTransferRecipe stack_transfers(this); StackTransferRecipe stack_transfers(this);
for (auto tuple : tuples) { for (auto tuple : tuples) {
if (tuple.dst == tuple.src) continue; if (tuple.dst == tuple.src) continue;
stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.type); stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.kind);
} }
} }
...@@ -1015,19 +1013,19 @@ void LiftoffAssembler::MoveToReturnLocations( ...@@ -1015,19 +1013,19 @@ void LiftoffAssembler::MoveToReturnLocations(
const FunctionSig* sig, compiler::CallDescriptor* descriptor) { const FunctionSig* sig, compiler::CallDescriptor* descriptor) {
StackTransferRecipe stack_transfers(this); StackTransferRecipe stack_transfers(this);
if (sig->return_count() == 1) { if (sig->return_count() == 1) {
ValueType return_type = sig->GetReturn(0); ValueKind return_kind = sig->GetReturn(0).kind();
// Defaults to a gp reg, will be set below if return type is not gp. // Defaults to a gp reg, will be set below if return kind is not gp.
LiftoffRegister return_reg = LiftoffRegister(kGpReturnRegisters[0]); LiftoffRegister return_reg = LiftoffRegister(kGpReturnRegisters[0]);
if (needs_gp_reg_pair(return_type)) { if (needs_gp_reg_pair(return_kind)) {
return_reg = LiftoffRegister::ForPair(kGpReturnRegisters[0], return_reg = LiftoffRegister::ForPair(kGpReturnRegisters[0],
kGpReturnRegisters[1]); kGpReturnRegisters[1]);
} else if (needs_fp_reg_pair(return_type)) { } else if (needs_fp_reg_pair(return_kind)) {
return_reg = LiftoffRegister::ForFpPair(kFpReturnRegisters[0]); return_reg = LiftoffRegister::ForFpPair(kFpReturnRegisters[0]);
} else if (reg_class_for(return_type) == kFpReg) { } else if (reg_class_for(return_kind) == kFpReg) {
return_reg = LiftoffRegister(kFpReturnRegisters[0]); return_reg = LiftoffRegister(kFpReturnRegisters[0]);
} else { } else {
DCHECK_EQ(kGpReg, reg_class_for(return_type)); DCHECK_EQ(kGpReg, reg_class_for(return_kind));
} }
stack_transfers.LoadIntoRegister(return_reg, stack_transfers.LoadIntoRegister(return_reg,
cache_state_.stack_state.back(), cache_state_.stack_state.back(),
...@@ -1042,8 +1040,8 @@ void LiftoffAssembler::MoveToReturnLocations( ...@@ -1042,8 +1040,8 @@ void LiftoffAssembler::MoveToReturnLocations(
// Fill return frame slots first to ensure that all potential spills happen // Fill return frame slots first to ensure that all potential spills happen
// before we prepare the stack transfers. // before we prepare the stack transfers.
for (size_t i = 0; i < sig->return_count(); ++i) { for (size_t i = 0; i < sig->return_count(); ++i) {
ValueType return_type = sig->GetReturn(i); ValueKind return_kind = sig->GetReturn(i).kind();
bool needs_gp_pair = needs_gp_reg_pair(return_type); bool needs_gp_pair = needs_gp_reg_pair(return_kind);
int num_lowered_params = 1 + needs_gp_pair; int num_lowered_params = 1 + needs_gp_pair;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) { for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
compiler::LinkageLocation loc = compiler::LinkageLocation loc =
...@@ -1054,16 +1052,16 @@ void LiftoffAssembler::MoveToReturnLocations( ...@@ -1054,16 +1052,16 @@ void LiftoffAssembler::MoveToReturnLocations(
LiftoffRegister reg = needs_gp_pair LiftoffRegister reg = needs_gp_pair
? LoadI64HalfIntoRegister(slot, half) ? LoadI64HalfIntoRegister(slot, half)
: LoadToRegister(slot, {}); : LoadToRegister(slot, {});
ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type; ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_type); StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_kind);
} }
} }
} }
// Prepare and execute stack transfers. // Prepare and execute stack transfers.
call_desc_return_idx = 0; call_desc_return_idx = 0;
for (size_t i = 0; i < sig->return_count(); ++i) { for (size_t i = 0; i < sig->return_count(); ++i) {
ValueType return_type = sig->GetReturn(i); ValueKind return_kind = sig->GetReturn(i).kind();
bool needs_gp_pair = needs_gp_reg_pair(return_type); bool needs_gp_pair = needs_gp_reg_pair(return_kind);
int num_lowered_params = 1 + needs_gp_pair; int num_lowered_params = 1 + needs_gp_pair;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) { for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
RegPairHalf half = pair_idx == 0 ? kLowWord : kHighWord; RegPairHalf half = pair_idx == 0 ? kLowWord : kHighWord;
...@@ -1072,10 +1070,10 @@ void LiftoffAssembler::MoveToReturnLocations( ...@@ -1072,10 +1070,10 @@ void LiftoffAssembler::MoveToReturnLocations(
if (loc.IsRegister()) { if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister()); DCHECK(!loc.IsAnyRegister());
int reg_code = loc.AsRegister(); int reg_code = loc.AsRegister();
ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type; ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
RegClass rc = reg_class_for(lowered_type); RegClass rc = reg_class_for(lowered_kind);
LiftoffRegister reg = LiftoffRegister reg =
LiftoffRegister::from_external_code(rc, return_type, reg_code); LiftoffRegister::from_external_code(rc, return_kind, reg_code);
VarState& slot = slots[i]; VarState& slot = slots[i];
if (needs_gp_pair) { if (needs_gp_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, slot.offset(), stack_transfers.LoadI64HalfIntoRegister(reg, slot, slot.offset(),
...@@ -1194,7 +1192,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) { ...@@ -1194,7 +1192,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
cache_state_.last_spilled_regs.set(slot->reg().low()); cache_state_.last_spilled_regs.set(slot->reg().low());
cache_state_.last_spilled_regs.set(slot->reg().high()); cache_state_.last_spilled_regs.set(slot->reg().high());
} }
Spill(slot->offset(), slot->reg(), slot->type()); Spill(slot->offset(), slot->reg(), slot->kind());
slot->MakeStack(); slot->MakeStack();
if (--remaining_uses == 0) break; if (--remaining_uses == 0) break;
} }
...@@ -1206,14 +1204,14 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) { ...@@ -1206,14 +1204,14 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
DCHECK_EQ(0, num_locals_); // only call this once. DCHECK_EQ(0, num_locals_); // only call this once.
num_locals_ = num_locals; num_locals_ = num_locals;
if (num_locals > kInlineLocalTypes) { if (num_locals > kInlineLocalTypes) {
more_local_types_ = reinterpret_cast<ValueType*>( more_local_types_ = reinterpret_cast<ValueKind*>(
base::Malloc(num_locals * sizeof(ValueType))); base::Malloc(num_locals * sizeof(ValueKind)));
DCHECK_NOT_NULL(more_local_types_); DCHECK_NOT_NULL(more_local_types_);
} }
} }
std::ostream& operator<<(std::ostream& os, VarState slot) { std::ostream& operator<<(std::ostream& os, VarState slot) {
os << slot.type().name() << ":"; os << name(slot.kind()) << ":";
switch (slot.loc()) { switch (slot.loc()) {
case VarState::kStack: case VarState::kStack:
return os << "s"; return os << "s";
...@@ -73,25 +73,26 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -73,25 +73,26 @@ class LiftoffAssembler : public TurboAssembler {
// Each slot in our stack frame currently has exactly 8 bytes. // Each slot in our stack frame currently has exactly 8 bytes.
static constexpr int kStackSlotSize = 8; static constexpr int kStackSlotSize = 8;
static constexpr ValueType kWasmIntPtr = static constexpr ValueKind kIntPtr = kSystemPointerSize == 8 ? kI64 : kI32;
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
using ValueKindSig = Signature<ValueKind>;
class VarState { class VarState {
public: public:
enum Location : uint8_t { kStack, kRegister, kIntConst }; enum Location : uint8_t { kStack, kRegister, kIntConst };
explicit VarState(ValueType type, int offset) explicit VarState(ValueKind kind, int offset)
: loc_(kStack), type_(type), spill_offset_(offset) {} : loc_(kStack), kind_(kind), spill_offset_(offset) {}
explicit VarState(ValueType type, LiftoffRegister r, int offset) explicit VarState(ValueKind kind, LiftoffRegister r, int offset)
: loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) { : loc_(kRegister), kind_(kind), reg_(r), spill_offset_(offset) {
DCHECK_EQ(r.reg_class(), reg_class_for(type)); DCHECK_EQ(r.reg_class(), reg_class_for(kind));
} }
explicit VarState(ValueType type, int32_t i32_const, int offset) explicit VarState(ValueKind kind, int32_t i32_const, int offset)
: loc_(kIntConst), : loc_(kIntConst),
type_(type), kind_(kind),
i32_const_(i32_const), i32_const_(i32_const),
spill_offset_(offset) { spill_offset_(offset) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64); DCHECK(kind_ == kI32 || kind_ == kI64);
} }
bool is_stack() const { return loc_ == kStack; } bool is_stack() const { return loc_ == kStack; }
...@@ -100,7 +101,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -100,7 +101,7 @@ class LiftoffAssembler : public TurboAssembler {
bool is_reg() const { return loc_ == kRegister; } bool is_reg() const { return loc_ == kRegister; }
bool is_const() const { return loc_ == kIntConst; } bool is_const() const { return loc_ == kIntConst; }
ValueType type() const { return type_; } ValueKind kind() const { return kind_; }
Location loc() const { return loc_; } Location loc() const { return loc_; }
...@@ -109,9 +110,9 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -109,9 +110,9 @@ class LiftoffAssembler : public TurboAssembler {
return i32_const_; return i32_const_;
} }
WasmValue constant() const { WasmValue constant() const {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64); DCHECK(kind_ == kI32 || kind_ == kI64);
DCHECK_EQ(loc_, kIntConst); DCHECK_EQ(loc_, kIntConst);
return type_ == kWasmI32 ? WasmValue(i32_const_) return kind_ == kI32 ? WasmValue(i32_const_)
: WasmValue(int64_t{i32_const_}); : WasmValue(int64_t{i32_const_});
} }
...@@ -133,7 +134,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -133,7 +134,7 @@ class LiftoffAssembler : public TurboAssembler {
} }
void MakeConstant(int32_t i32_const) { void MakeConstant(int32_t i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64); DCHECK(kind_ == kI32 || kind_ == kI64);
loc_ = kIntConst; loc_ = kIntConst;
i32_const_ = i32_const; i32_const_ = i32_const;
} }
...@@ -142,7 +143,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -142,7 +143,7 @@ class LiftoffAssembler : public TurboAssembler {
// from different stack states. // from different stack states.
void Copy(VarState src) { void Copy(VarState src) {
loc_ = src.loc(); loc_ = src.loc();
type_ = src.type(); kind_ = src.kind();
if (loc_ == kRegister) { if (loc_ == kRegister) {
reg_ = src.reg(); reg_ = src.reg();
} else if (loc_ == kIntConst) { } else if (loc_ == kIntConst) {
...@@ -154,7 +155,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -154,7 +155,7 @@ class LiftoffAssembler : public TurboAssembler {
Location loc_; Location loc_;
// TODO(wasm): This is redundant, the decoder already knows the type of each // TODO(wasm): This is redundant, the decoder already knows the type of each
// stack value. Try to collapse. // stack value. Try to collapse.
ValueType type_; ValueKind kind_;
union { union {
LiftoffRegister reg_; // used if loc_ == kRegister LiftoffRegister reg_; // used if loc_ == kRegister
...@@ -388,13 +389,13 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -388,13 +389,13 @@ class LiftoffAssembler : public TurboAssembler {
// Use this to pop a value into a register that has no other uses, so it // Use this to pop a value into a register that has no other uses, so it
// can be modified. // can be modified.
LiftoffRegister PopToModifiableRegister(LiftoffRegList pinned = {}) { LiftoffRegister PopToModifiableRegister(LiftoffRegList pinned = {}) {
ValueType type = cache_state_.stack_state.back().type(); ValueKind kind = cache_state_.stack_state.back().kind();
LiftoffRegister reg = PopToRegister(pinned); LiftoffRegister reg = PopToRegister(pinned);
if (cache_state()->is_free(reg)) return reg; if (cache_state()->is_free(reg)) return reg;
pinned.set(reg); pinned.set(reg);
LiftoffRegister new_reg = GetUnusedRegister(reg.reg_class(), pinned); LiftoffRegister new_reg = GetUnusedRegister(reg.reg_class(), pinned);
Move(new_reg, reg, type); Move(new_reg, reg, kind);
return new_reg; return new_reg;
} }
...@@ -413,10 +414,10 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -413,10 +414,10 @@ class LiftoffAssembler : public TurboAssembler {
// stack, so that we can merge different values on the back-edge. // stack, so that we can merge different values on the back-edge.
void PrepareLoopArgs(int num); void PrepareLoopArgs(int num);
int NextSpillOffset(ValueType type) { int NextSpillOffset(ValueKind kind) {
int offset = TopSpillOffset() + SlotSizeForType(type); int offset = TopSpillOffset() + SlotSizeForType(kind);
if (NeedsAlignment(type)) { if (NeedsAlignment(kind)) {
offset = RoundUp(offset, SlotSizeForType(type)); offset = RoundUp(offset, SlotSizeForType(kind));
} }
return offset; return offset;
} }
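A worked example of this computation: regular slots are 8 bytes, while an s128 slot is 16 bytes and must be 16-byte aligned, so the offset is rounded up after adding the slot size. A minimal sketch of the same arithmetic (the real NeedsAlignment also covers reference kinds; this covers only kS128):

#include <cstdio>

enum ValueKind { kI32, kI64, kS128 };

int SlotSize(ValueKind kind) { return kind == kS128 ? 16 : 8; }
bool NeedsAlign(ValueKind kind) { return kind == kS128; }
int RoundUp(int x, int m) { return (x + m - 1) / m * m; }

int NextSpillOffset(int top_offset, ValueKind kind) {
  int offset = top_offset + SlotSize(kind);
  if (NeedsAlign(kind)) offset = RoundUp(offset, SlotSize(kind));
  return offset;
}

int main() {
  // With the top slot at offset 24: an i64 lands at 32, an s128 at 48
  // (24 + 16 = 40, rounded up to the next multiple of 16).
  std::printf("%d %d\n", NextSpillOffset(24, kI64), NextSpillOffset(24, kS128));
}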
...@@ -427,25 +428,25 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -427,25 +428,25 @@ class LiftoffAssembler : public TurboAssembler {
: cache_state_.stack_state.back().offset(); : cache_state_.stack_state.back().offset();
} }
void PushRegister(ValueType type, LiftoffRegister reg) { void PushRegister(ValueKind kind, LiftoffRegister reg) {
DCHECK_EQ(reg_class_for(type), reg.reg_class()); DCHECK_EQ(reg_class_for(kind), reg.reg_class());
cache_state_.inc_used(reg); cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type)); cache_state_.stack_state.emplace_back(kind, reg, NextSpillOffset(kind));
} }
void PushConstant(ValueType type, int32_t i32_const) { void PushConstant(ValueKind kind, int32_t i32_const) {
DCHECK(type == kWasmI32 || type == kWasmI64); DCHECK(kind == kI32 || kind == kI64);
cache_state_.stack_state.emplace_back(type, i32_const, cache_state_.stack_state.emplace_back(kind, i32_const,
NextSpillOffset(type)); NextSpillOffset(kind));
} }
void PushStack(ValueType type) { void PushStack(ValueKind kind) {
cache_state_.stack_state.emplace_back(type, NextSpillOffset(type)); cache_state_.stack_state.emplace_back(kind, NextSpillOffset(kind));
} }
void SpillRegister(LiftoffRegister); void SpillRegister(LiftoffRegister);
uint32_t GetNumUses(LiftoffRegister reg) { uint32_t GetNumUses(LiftoffRegister reg) const {
return cache_state_.get_use_count(reg); return cache_state_.get_use_count(reg);
} }
...@@ -535,32 +536,32 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -535,32 +536,32 @@ class LiftoffAssembler : public TurboAssembler {
} }
// Load parameters into the right registers / stack slots for the call. // Load parameters into the right registers / stack slots for the call.
void PrepareBuiltinCall(const FunctionSig* sig, void PrepareBuiltinCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor, compiler::CallDescriptor* call_descriptor,
std::initializer_list<VarState> params); std::initializer_list<VarState> params);
// Load parameters into the right registers / stack slots for the call. // Load parameters into the right registers / stack slots for the call.
// Move {*target} into another register if needed and update {*target} to that // Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack. // register, or {no_reg} if target was spilled to the stack.
void PrepareCall(const FunctionSig*, compiler::CallDescriptor*, void PrepareCall(const ValueKindSig*, compiler::CallDescriptor*,
Register* target = nullptr, Register* target = nullptr,
Register* target_instance = nullptr); Register* target_instance = nullptr);
// Process return values of the call. // Process return values of the call.
void FinishCall(const FunctionSig*, compiler::CallDescriptor*); void FinishCall(const ValueKindSig*, compiler::CallDescriptor*);
// Move {src} into {dst}. {src} and {dst} must be different. // Move {src} into {dst}. {src} and {dst} must be different.
void Move(LiftoffRegister dst, LiftoffRegister src, ValueType); void Move(LiftoffRegister dst, LiftoffRegister src, ValueKind);
// Parallel register move: For a list of tuples <dst, src, type>, move the // Parallel register move: For a list of tuples <dst, src, kind>, move the
// {src} register of type {type} into {dst}. If {src} equals {dst}, ignore // {src} register of kind {kind} into {dst}. If {src} equals {dst}, ignore
// that tuple. // that tuple.
struct ParallelRegisterMoveTuple { struct ParallelRegisterMoveTuple {
LiftoffRegister dst; LiftoffRegister dst;
LiftoffRegister src; LiftoffRegister src;
ValueType type; ValueKind kind;
template <typename Dst, typename Src> template <typename Dst, typename Src>
ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type) ParallelRegisterMoveTuple(Dst dst, Src src, ValueKind kind)
: dst(dst), src(src), type(type) {} : dst(dst), src(src), kind(kind) {}
}; };
void ParallelRegisterMove(Vector<const ParallelRegisterMoveTuple>); void ParallelRegisterMove(Vector<const ParallelRegisterMoveTuple>);
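The tuple list makes the no-op case part of the contract: dst == src tuples are dropped, and the rest go through the transfer recipe so no source is clobbered before it is read. A standalone model of that contract (MoveTuple and Reg are illustrative stand-ins):

#include <cstdio>
#include <vector>

enum ValueKind { kI32, kI64 };
struct Reg {
  int code;
  bool operator==(const Reg& o) const { return code == o.code; }
};

struct MoveTuple { Reg dst, src; ValueKind kind; };

void ParallelRegisterMove(const std::vector<MoveTuple>& tuples) {
  for (const MoveTuple& t : tuples) {
    if (t.dst == t.src) continue;  // ignore moves that are already in place
    std::printf("record move r%d <- r%d (kind %d)\n", t.dst.code, t.src.code,
                (int)t.kind);
  }
}

int main() {
  ParallelRegisterMove({{{0}, {0}, kI32},   // ignored: dst == src
                        {{1}, {2}, kI64}});
}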
...@@ -594,8 +595,8 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -594,8 +595,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void FinishCode(); inline void FinishCode();
inline void AbortCompilation(); inline void AbortCompilation();
inline static constexpr int StaticStackFrameSize(); inline static constexpr int StaticStackFrameSize();
inline static int SlotSizeForType(ValueType type); inline static int SlotSizeForType(ValueKind kind);
inline static bool NeedsAlignment(ValueType type); inline static bool NeedsAlignment(ValueKind kind);
inline void LoadConstant(LiftoffRegister, WasmValue, inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE); RelocInfo::Mode rmode = RelocInfo::NONE);
...@@ -685,19 +686,19 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -685,19 +686,19 @@ class LiftoffAssembler : public TurboAssembler {
inline void AtomicFence(); inline void AtomicFence();
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType); ValueKind);
inline void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, inline void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType); ValueKind);
inline void LoadReturnStackSlot(LiftoffRegister, int offset, ValueType); inline void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset, inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType); ValueKind);
inline void Move(Register dst, Register src, ValueType); inline void Move(Register dst, Register src, ValueKind);
inline void Move(DoubleRegister dst, DoubleRegister src, ValueType); inline void Move(DoubleRegister dst, DoubleRegister src, ValueKind);
inline void Spill(int offset, LiftoffRegister, ValueType); inline void Spill(int offset, LiftoffRegister, ValueKind);
inline void Spill(int offset, WasmValue); inline void Spill(int offset, WasmValue);
inline void Fill(LiftoffRegister, int offset, ValueType); inline void Fill(LiftoffRegister, int offset, ValueKind);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e. // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value. // 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, int offset, RegPairHalf); inline void FillI64Half(Register, int offset, RegPairHalf);
...@@ -840,7 +841,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -840,7 +841,7 @@ class LiftoffAssembler : public TurboAssembler {
emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst), emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
LiftoffRegister(src)); LiftoffRegister(src));
} else if (dst != src) { } else if (dst != src) {
Move(dst, src, kWasmI32); Move(dst, src, kI32);
} }
} }
...@@ -906,7 +907,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -906,7 +907,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_jump(Label*); inline void emit_jump(Label*);
inline void emit_jump(Register); inline void emit_jump(Register);
inline void emit_cond_jump(LiftoffCondition, Label*, ValueType value, inline void emit_cond_jump(LiftoffCondition, Label*, ValueKind value,
Register lhs, Register rhs = no_reg); Register lhs, Register rhs = no_reg);
inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label, inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
Register lhs, int imm); Register lhs, int imm);
...@@ -926,7 +927,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -926,7 +927,7 @@ class LiftoffAssembler : public TurboAssembler {
// should be emitted instead. // should be emitted instead.
inline bool emit_select(LiftoffRegister dst, Register condition, inline bool emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value, LiftoffRegister true_value,
LiftoffRegister false_value, ValueType type); LiftoffRegister false_value, ValueKind kind);
enum SmiCheckMode { kJumpOnSmi, kJumpOnNotSmi }; enum SmiCheckMode { kJumpOnSmi, kJumpOnNotSmi };
inline void emit_smi_check(Register obj, Label* target, SmiCheckMode mode); inline void emit_smi_check(Register obj, Label* target, SmiCheckMode mode);
...@@ -1367,18 +1368,18 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -1367,18 +1368,18 @@ class LiftoffAssembler : public TurboAssembler {
inline void DropStackSlotsAndRet(uint32_t num_stack_slots); inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
// Execute a C call. Arguments are pushed to the stack and a pointer to this // Execute a C call. Arguments are pushed to the stack and a pointer to this
// region is passed to the C function. If {out_argument_type != kWasmStmt}, // region is passed to the C function. If {out_argument_kind != kStmt},
// this is the return value of the C function, stored in {rets[0]}. Further // this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored // outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers. // in the remaining {rets} registers.
inline void CallC(const FunctionSig* sig, const LiftoffRegister* args, inline void CallC(const ValueKindSig* sig, const LiftoffRegister* args,
const LiftoffRegister* rets, ValueType out_argument_type, const LiftoffRegister* rets, ValueKind out_argument_kind,
int stack_bytes, ExternalReference ext_ref); int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr); inline void CallNativeWasmCode(Address addr);
inline void TailCallNativeWasmCode(Address addr); inline void TailCallNativeWasmCode(Address addr);
// Indirect call: If {target == no_reg}, then pop the target from the stack. // Indirect call: If {target == no_reg}, then pop the target from the stack.
inline void CallIndirect(const FunctionSig* sig, inline void CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor, compiler::CallDescriptor* call_descriptor,
Register target); Register target);
inline void TailCallIndirect(Register target); inline void TailCallIndirect(Register target);
...@@ -1399,17 +1400,17 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -1399,17 +1400,17 @@ class LiftoffAssembler : public TurboAssembler {
int GetTotalFrameSize() const { return max_used_spill_offset_; } int GetTotalFrameSize() const { return max_used_spill_offset_; }
ValueType local_type(uint32_t index) { ValueKind local_type(uint32_t index) {
DCHECK_GT(num_locals_, index); DCHECK_GT(num_locals_, index);
ValueType* locals = ValueKind* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_; num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
return locals[index]; return locals[index];
} }
void set_local_type(uint32_t index, ValueType type) { void set_local_type(uint32_t index, ValueKind kind) {
ValueType* locals = ValueKind* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_; num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
locals[index] = type; locals[index] = kind;
} }
CacheState* cache_state() { return &cache_state_; } CacheState* cache_state() { return &cache_state_; }
...@@ -1431,13 +1432,13 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -1431,13 +1432,13 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half); LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
uint32_t num_locals_ = 0; uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8; static constexpr uint32_t kInlineLocalTypes = 16;
union { union {
ValueType local_types_[kInlineLocalTypes]; ValueKind local_types_[kInlineLocalTypes];
ValueType* more_local_types_; ValueKind* more_local_types_;
}; };
static_assert(sizeof(ValueType) == 4, static_assert(sizeof(ValueKind) == 1,
"Reconsider this inlining if ValueType gets bigger"); "Reconsider this inlining if ValueKind gets bigger");
CacheState cache_state_; CacheState cache_state_;
// The maximum spill offset for slots in the value stack. // The maximum spill offset for slots in the value stack.
int max_used_spill_offset_ = StaticStackFrameSize(); int max_used_spill_offset_ = StaticStackFrameSize();
...@@ -1483,7 +1484,7 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm, ...@@ -1483,7 +1484,7 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp(); assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), rhs.low_gp()); (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
(assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp()); (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
assm->Move(dst.low_gp(), tmp, kWasmI32); assm->Move(dst.low_gp(), tmp, kI32);
} }
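The temp register above matters because dst.low may alias one of the source high registers; computing the low half into a temp and writing it last keeps the high-half inputs intact. A value-level sketch of such an "independent half" i64 operation (in C++ the aliasing hazard disappears with value semantics, so the temp here only models the emission order):

#include <cstdint>
#include <cstdio>

struct I64Pair { uint32_t low, high; };  // i64 held in two 32-bit registers

// Each output half depends only on the matching input halves
// (e.g. bitwise and/or/xor), so the halves can be computed separately.
I64Pair And64(I64Pair lhs, I64Pair rhs) {
  uint32_t tmp = lhs.low & rhs.low;  // low half into a temp first
  I64Pair dst;
  dst.high = lhs.high & rhs.high;
  dst.low = tmp;
  return dst;
}

int main() {
  I64Pair r = And64({0xff00ff00u, 0x0000ffffu}, {0x0f0f0f0fu, 0x12345678u});
  std::printf("%08x%08x\n", r.high, r.low);  // 000056780f000f00
}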
template <void (LiftoffAssembler::*op)(Register, Register, int32_t)> template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
...@@ -1511,7 +1512,7 @@ void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm, ...@@ -1511,7 +1512,7 @@ void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp(); assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), low_word); (assm->*op)(tmp, lhs.low_gp(), low_word);
(assm->*op)(dst.high_gp(), lhs.high_gp(), high_word); (assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
assm->Move(dst.low_gp(), tmp, kWasmI32); assm->Move(dst.low_gp(), tmp, kI32);
} }
} // namespace liftoff } // namespace liftoff
...@@ -86,13 +86,12 @@ struct assert_field_size { ...@@ -86,13 +86,12 @@ struct assert_field_size {
constexpr LoadType::LoadTypeValue kPointerLoadType = constexpr LoadType::LoadTypeValue kPointerLoadType =
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load; kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
constexpr ValueType kPointerValueType = constexpr ValueKind kPointerValueType = kSystemPointerSize == 8 ? kI64 : kI32;
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS) #if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
constexpr ValueType kSmiValueType = kWasmI32; constexpr ValueKind kSmiValueType = kI32;
#else #else
constexpr ValueType kSmiValueType = kWasmI64; constexpr ValueKind kSmiValueType = kI64;
#endif #endif
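These constants pick the integral kind matching the platform word size and the Smi width. A compile-time sketch of the same selection, with the pointer-compression macro modeled as a plain constant (an assumption of the sketch, not the real build flag):

#include <cstdio>

enum ValueKind { kI32, kI64 };

constexpr int kSystemPointerSize = sizeof(void*);
constexpr ValueKind kPointerKind = kSystemPointerSize == 8 ? kI64 : kI32;

// With pointer compression, Smis are 32 bits even on 64-bit targets.
constexpr bool kCompressPointers = false;  // stands in for the macro check
constexpr ValueKind kSmiKind =
    (kSystemPointerSize == 4 || kCompressPointers) ? kI32 : kI64;

int main() {
  std::printf("pointer kind %d, smi kind %d\n", (int)kPointerKind,
              (int)kSmiKind);
}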
#if V8_TARGET_ARCH_ARM64 #if V8_TARGET_ARCH_ARM64
...@@ -271,23 +270,23 @@ class DebugSideTableBuilder { ...@@ -271,23 +270,23 @@ class DebugSideTableBuilder {
for (const auto& slot : stack_state) { for (const auto& slot : stack_state) {
Value new_value; Value new_value;
new_value.index = index; new_value.index = index;
new_value.type = slot.type(); new_value.kind = slot.kind();
switch (slot.loc()) { switch (slot.loc()) {
case kIntConst: case kIntConst:
new_value.kind = Entry::kConstant; new_value.storage = Entry::kConstant;
new_value.i32_const = slot.i32_const(); new_value.i32_const = slot.i32_const();
break; break;
case kRegister: case kRegister:
DCHECK_NE(kDidSpill, assume_spilling); DCHECK_NE(kDidSpill, assume_spilling);
if (assume_spilling == kAllowRegisters) { if (assume_spilling == kAllowRegisters) {
new_value.kind = Entry::kRegister; new_value.storage = Entry::kRegister;
new_value.reg_code = slot.reg().liftoff_code(); new_value.reg_code = slot.reg().liftoff_code();
break; break;
} }
DCHECK_EQ(kAssumeSpilling, assume_spilling); DCHECK_EQ(kAssumeSpilling, assume_spilling);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case kStack: case kStack:
new_value.kind = Entry::kStack; new_value.storage = Entry::kStack;
new_value.stack_offset = slot.offset(); new_value.stack_offset = slot.offset();
break; break;
} }
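After this rename the entry carries two orthogonal fields: the wasm value kind of the slot, and where the value lives (constant, register, or stack); distinct names avoid the earlier kind/kind clash. A model of the resulting shape, with illustrative field names:

#include <cstdint>

enum class ValueKind : uint8_t { kI32, kI64, kF32, kF64 };
enum class Storage : uint8_t { kConstant, kRegister, kStack };

// "kind" says what the value is; "storage" says where to find it.
struct Value {
  int index;
  ValueKind kind;
  Storage storage;
  union {
    int32_t i32_const;  // storage == kConstant
    int reg_code;       // storage == kRegister
    int stack_offset;   // storage == kStack
  };
};

int main() {
  Value v{};
  v.index = 0;
  v.kind = ValueKind::kI32;
  v.storage = Storage::kConstant;
  v.i32_const = 42;
}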
...@@ -405,6 +404,7 @@ class LiftoffCompiler { ...@@ -405,6 +404,7 @@ class LiftoffCompiler {
}; };
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>; using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
// For debugging, we need to spill registers before a trap or a stack check to // For debugging, we need to spill registers before a trap or a stack check to
// be able to inspect them. // be able to inspect them.
...@@ -412,7 +412,7 @@ class LiftoffCompiler { ...@@ -412,7 +412,7 @@ class LiftoffCompiler {
struct Entry { struct Entry {
int offset; int offset;
LiftoffRegister reg; LiftoffRegister reg;
ValueType type; ValueKind kind;
}; };
ZoneVector<Entry> entries; ZoneVector<Entry> entries;
...@@ -541,10 +541,10 @@ class LiftoffCompiler { ...@@ -541,10 +541,10 @@ class LiftoffCompiler {
return true; return true;
} }
bool CheckSupportedType(FullDecoder* decoder, ValueType type, bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
const char* context) { const char* context) {
LiftoffBailoutReason bailout_reason = kOtherReason; LiftoffBailoutReason bailout_reason = kOtherReason;
switch (type.kind()) { switch (kind) {
case kI32: case kI32:
case kI64: case kI64:
case kF32: case kF32:
...@@ -568,7 +568,7 @@ class LiftoffCompiler { ...@@ -568,7 +568,7 @@ class LiftoffCompiler {
UNREACHABLE(); UNREACHABLE();
} }
EmbeddedVector<char, 128> buffer; EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s %s", type.name().c_str(), context); SNPrintF(buffer, "%s %s", name(kind), context);
unsupported(decoder, bailout_reason, buffer.begin()); unsupported(decoder, bailout_reason, buffer.begin());
return false; return false;
} }
...@@ -599,27 +599,27 @@ class LiftoffCompiler { ...@@ -599,27 +599,27 @@ class LiftoffCompiler {
int num_locals = decoder->num_locals(); int num_locals = decoder->num_locals();
__ set_num_locals(num_locals); __ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) { for (int i = 0; i < num_locals; ++i) {
ValueType type = decoder->local_type(i); ValueKind kind = decoder->local_type(i).kind();
__ set_local_type(i, type); __ set_local_type(i, kind);
} }
} }
// Returns the number of inputs processed (1 or 2). // Returns the number of inputs processed (1 or 2).
uint32_t ProcessParameter(ValueType type, uint32_t input_idx) { uint32_t ProcessParameter(ValueKind kind, uint32_t input_idx) {
const bool needs_pair = needs_gp_reg_pair(type); const bool needs_pair = needs_gp_reg_pair(kind);
const ValueType reg_type = needs_pair ? kWasmI32 : type; const ValueKind reg_kind = needs_pair ? kI32 : kind;
const RegClass rc = reg_class_for(reg_type); const RegClass rc = reg_class_for(reg_kind);
auto LoadToReg = [this, reg_type, rc](compiler::LinkageLocation location, auto LoadToReg = [this, reg_kind, rc](compiler::LinkageLocation location,
LiftoffRegList pinned) { LiftoffRegList pinned) {
if (location.IsRegister()) { if (location.IsRegister()) {
DCHECK(!location.IsAnyRegister()); DCHECK(!location.IsAnyRegister());
return LiftoffRegister::from_external_code(rc, reg_type, return LiftoffRegister::from_external_code(rc, reg_kind,
location.AsRegister()); location.AsRegister());
} }
DCHECK(location.IsCallerFrameSlot()); DCHECK(location.IsCallerFrameSlot());
LiftoffRegister reg = __ GetUnusedRegister(rc, pinned); LiftoffRegister reg = __ GetUnusedRegister(rc, pinned);
__ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_type); __ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
return reg; return reg;
}; };
...@@ -631,7 +631,7 @@ class LiftoffCompiler { ...@@ -631,7 +631,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(reg)); LiftoffRegList::ForRegs(reg));
reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp()); reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp());
} }
__ PushRegister(type, reg); __ PushRegister(kind, reg);
return needs_pair ? 2 : 1; return needs_pair ? 2 : 1;
} }
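On a 32-bit target an i64 parameter arrives as two lowered i32 locations, each fetched from a register or a caller frame slot and re-joined into a pair before pushing. A sketch of the counting logic that drives the caller's input index (pair fetching and pushing are omitted):

#include <cstdio>

enum ValueKind { kI32, kI64, kF64 };
constexpr bool kNeedI64RegPair = sizeof(void*) == 4;

// Mirrors ProcessParameter's return value: how many lowered locations the
// parameter consumed (2 for an i64 split into halves, else 1).
int ProcessParameter(ValueKind kind, int input_idx) {
  bool needs_pair = kNeedI64RegPair && kind == kI64;
  int lowered = needs_pair ? 2 : 1;
  std::printf("param at input %d uses %d location(s)\n", input_idx, lowered);
  return lowered;
}

int main() {
  ValueKind params[] = {kI32, kI64, kF64};
  int input_idx = 0;
  for (ValueKind kind : params) input_idx += ProcessParameter(kind, input_idx);
}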
...@@ -680,8 +680,8 @@ class LiftoffCompiler { ...@@ -680,8 +680,8 @@ class LiftoffCompiler {
// because other types cannot be initialized to constants. // because other types cannot be initialized to constants.
for (uint32_t param_idx = num_params; param_idx < __ num_locals(); for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) { ++param_idx) {
ValueType type = decoder->local_type(param_idx); ValueKind kind = __ local_type(param_idx);
if (type != kWasmI32 && type != kWasmI64) return true; if (kind != kI32 && kind != kI64) return true;
} }
return false; return false;
} }
...@@ -749,16 +749,16 @@ class LiftoffCompiler { ...@@ -749,16 +749,16 @@ class LiftoffCompiler {
if (SpillLocalsInitially(decoder, num_params)) { if (SpillLocalsInitially(decoder, num_params)) {
for (uint32_t param_idx = num_params; param_idx < __ num_locals(); for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) { ++param_idx) {
ValueType type = decoder->local_type(param_idx); ValueKind kind = __ local_type(param_idx);
__ PushStack(type); __ PushStack(kind);
} }
int spill_size = __ TopSpillOffset() - params_size; int spill_size = __ TopSpillOffset() - params_size;
__ FillStackSlotsWithZero(params_size, spill_size); __ FillStackSlotsWithZero(params_size, spill_size);
} else { } else {
for (uint32_t param_idx = num_params; param_idx < __ num_locals(); for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) { ++param_idx) {
ValueType type = decoder->local_type(param_idx); ValueKind kind = __ local_type(param_idx);
__ PushConstant(type, int32_t{0}); __ PushConstant(kind, int32_t{0});
} }
} }
...@@ -767,14 +767,14 @@ class LiftoffCompiler { ...@@ -767,14 +767,14 @@ class LiftoffCompiler {
Register null_ref_reg = no_reg; Register null_ref_reg = no_reg;
for (uint32_t local_index = num_params; local_index < __ num_locals(); for (uint32_t local_index = num_params; local_index < __ num_locals();
++local_index) { ++local_index) {
ValueType type = decoder->local_type(local_index); ValueKind kind = __ local_type(local_index);
if (type.is_reference_type()) { if (is_reference_type(kind)) {
if (null_ref_reg == no_reg) { if (null_ref_reg == no_reg) {
null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp(); null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
LoadNullValue(null_ref_reg, {}); LoadNullValue(null_ref_reg, {});
} }
__ Spill(__ cache_state()->stack_state[local_index].offset(), __ Spill(__ cache_state()->stack_state[local_index].offset(),
LiftoffRegister(null_ref_reg), type); LiftoffRegister(null_ref_reg), kind);
} }
} }
} }
...@@ -821,8 +821,7 @@ class LiftoffCompiler { ...@@ -821,8 +821,7 @@ class LiftoffCompiler {
__ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(), __ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
new_number_of_calls.gp()); new_number_of_calls.gp());
// Unary "unequal" means "different from zero". // Unary "unequal" means "different from zero".
__ emit_cond_jump(kUnequal, &no_tierup, kWasmI32, old_number_of_calls.gp()); __ emit_cond_jump(kUnequal, &no_tierup, kI32, old_number_of_calls.gp());
TierUpFunction(decoder); TierUpFunction(decoder);
// After the runtime call, the instance cache register is clobbered (we // After the runtime call, the instance cache register is clobbered (we
// reset it already in {SpillAllRegisters} above, but then we still access // reset it already in {SpillAllRegisters} above, but then we still access
...@@ -868,7 +867,7 @@ class LiftoffCompiler { ...@@ -868,7 +867,7 @@ class LiftoffCompiler {
__ PushRegisters(ool->regs_to_save); __ PushRegisters(ool->regs_to_save);
} else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) { } else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
for (auto& entry : ool->spilled_registers->entries) { for (auto& entry : ool->spilled_registers->entries) {
__ Spill(entry.offset, entry.reg, entry.type); __ Spill(entry.offset, entry.reg, entry.kind);
} }
} }
...@@ -906,7 +905,7 @@ class LiftoffCompiler { ...@@ -906,7 +905,7 @@ class LiftoffCompiler {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) { if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
DCHECK(for_debugging_); DCHECK(for_debugging_);
for (auto& entry : ool->spilled_registers->entries) { for (auto& entry : ool->spilled_registers->entries) {
__ Fill(entry.reg, entry.offset, entry.type); __ Fill(entry.reg, entry.offset, entry.kind);
} }
} }
__ emit_jump(ool->continuation.get()); __ emit_jump(ool->continuation.get());
...@@ -978,12 +977,12 @@ class LiftoffCompiler { ...@@ -978,12 +977,12 @@ class LiftoffCompiler {
{}); {});
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {}); __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
// Unary "unequal" means "not equals zero". // Unary "unequal" means "not equals zero".
__ emit_cond_jump(kUnequal, &do_break, kWasmI32, flag); __ emit_cond_jump(kUnequal, &do_break, kI32, flag);
// Check if we should stop on "script entry". // Check if we should stop on "script entry".
LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {}); LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
// Unary "equal" means "equals zero". // Unary "equal" means "equals zero".
__ emit_cond_jump(kEqual, &no_break, kWasmI32, flag); __ emit_cond_jump(kEqual, &no_break, kI32, flag);
__ bind(&do_break); __ bind(&do_break);
EmitBreakpoint(decoder); EmitBreakpoint(decoder);
...@@ -1083,8 +1082,7 @@ class LiftoffCompiler { ...@@ -1083,8 +1082,7 @@ class LiftoffCompiler {
// Test the condition, jump to else if zero. // Test the condition, jump to else if zero.
Register value = __ PopToRegister().gp(); Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32, value); __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
// Store the state (after popping the value) for executing the else branch. // Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state()); if_block->else_state->state.Split(*__ cache_state());
...@@ -1153,8 +1151,8 @@ class LiftoffCompiler { ...@@ -1153,8 +1151,8 @@ class LiftoffCompiler {
void EndControl(FullDecoder* decoder, Control* c) {} void EndControl(FullDecoder* decoder, Control* c) {}
void GenerateCCall(const LiftoffRegister* result_regs, const FunctionSig* sig, void GenerateCCall(const LiftoffRegister* result_regs, const ValueKindSig* sig,
ValueType out_argument_type, ValueKind out_argument_kind,
const LiftoffRegister* arg_regs, const LiftoffRegister* arg_regs,
ExternalReference ext_ref) { ExternalReference ext_ref) {
// Before making a call, spill all cache registers. // Before making a call, spill all cache registers.
...@@ -1162,14 +1160,13 @@ class LiftoffCompiler { ...@@ -1162,14 +1160,13 @@ class LiftoffCompiler {
// Store arguments on our stack, then align the stack for calling to C. // Store arguments on our stack, then align the stack for calling to C.
int param_bytes = 0; int param_bytes = 0;
for (ValueType param_type : sig->parameters()) { for (ValueKind param_kind : sig->parameters()) {
param_bytes += param_type.element_size_bytes(); param_bytes += element_size_bytes(param_kind);
} }
int out_arg_bytes = out_argument_type == kWasmStmt ? 0 : out_argument_type.element_size_bytes(); int out_arg_bytes = out_argument_kind == kStmt ? 0 : element_size_bytes(out_argument_kind);
int stack_bytes = std::max(param_bytes, out_arg_bytes); int stack_bytes = std::max(param_bytes, out_arg_bytes);
__ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes, __ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes,
ext_ref); ext_ref);
} }
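ValueKindSig, which this patch threads through all signature-passing helpers, presumably instantiates the same generic signature template over ValueKind that FunctionSig instantiates over ValueType. A minimal stand-in covering the call sites in this file (shape assumed, returns-then-params storage convention assumed):

#include <cstddef>

template <typename T>
class Signature {
 public:
  Signature(size_t return_count, size_t parameter_count, const T* reps)
      : return_count_(return_count),
        parameter_count_(parameter_count),
        reps_(reps) {}

  size_t return_count() const { return return_count_; }
  size_t parameter_count() const { return parameter_count_; }
  T GetReturn(size_t index = 0) const { return reps_[index]; }
  T GetParam(size_t index) const { return reps_[return_count_ + index]; }

  // Range used by the parameter loop in GenerateCCall above.
  struct Range {
    const T* first;
    const T* last;
    const T* begin() const { return first; }
    const T* end() const { return last; }
  };
  Range parameters() const {
    return {reps_ + return_count_,
            reps_ + return_count_ + parameter_count_};
  }

 private:
  size_t return_count_;
  size_t parameter_count_;
  const T* reps_;  // returns first, then parameters
};

using ValueKindSig = Signature<ValueKind>;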
...@@ -1219,38 +1216,38 @@ class LiftoffCompiler { ...@@ -1219,38 +1216,38 @@ class LiftoffCompiler {
CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...); CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
} }
template <ValueKind src_type, ValueKind result_type, class EmitFn> template <ValueKind src_kind, ValueKind result_kind, class EmitFn>
void EmitUnOp(EmitFn fn) { void EmitUnOp(EmitFn fn) {
constexpr RegClass src_rc = reg_class_for(src_type); constexpr RegClass src_rc = reg_class_for(src_kind);
constexpr RegClass result_rc = reg_class_for(result_type); constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src = __ PopToRegister(); LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src}, {}) ? __ GetUnusedRegister(result_rc, {src}, {})
: __ GetUnusedRegister(result_rc, {}); : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src); CallEmitFn(fn, dst, src);
__ PushRegister(ValueType::Primitive(result_type), dst); __ PushRegister(result_kind, dst);
} }
template <ValueKind type> template <ValueKind kind>
void EmitFloatUnOpWithCFallback( void EmitFloatUnOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister), bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
ExternalReference (*fallback_fn)()) { ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) { auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return; if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn(); ExternalReference ext_ref = fallback_fn();
ValueType sig_reps[] = {ValueType::Primitive(type)}; ValueKind sig_reps[] = {kind};
FunctionSig sig(0, 1, sig_reps); ValueKindSig sig(0, 1, sig_reps);
GenerateCCall(&dst, &sig, ValueType::Primitive(type), &src, ext_ref); GenerateCCall(&dst, &sig, kind, &src, ext_ref);
}; };
EmitUnOp<type, type>(emit_with_c_fallback); EmitUnOp<kind, kind>(emit_with_c_fallback);
} }
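Usage illustration: assuming an invocation such as CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Ceil, F32, f32_ceil) in the UnOp switch below, the renamed macro hand-expands to roughly this case (identifiers taken from the macro body; the invocation itself is an assumption here):

case kExprF32Ceil:
  return EmitFloatUnOpWithCFallback<kF32>(&LiftoffAssembler::emit_f32_ceil,
                                          &ExternalReference::wasm_f32_ceil);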
enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false }; enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
template <ValueKind dst_type, ValueKind src_type, template <ValueKind dst_type, ValueKind src_kind,
TypeConversionTrapping can_trap> TypeConversionTrapping can_trap>
void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(), void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
WasmCodePosition trap_position) { WasmCodePosition trap_position) {
static constexpr RegClass src_rc = reg_class_for(src_type); static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass dst_rc = reg_class_for(dst_type); static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister(); LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == dst_rc LiftoffRegister dst = src_rc == dst_rc
...@@ -1266,22 +1263,20 @@ class LiftoffCompiler { ...@@ -1266,22 +1263,20 @@ class LiftoffCompiler {
ExternalReference ext_ref = fallback_fn(); ExternalReference ext_ref = fallback_fn();
if (can_trap) { if (can_trap) {
// External references for potentially trapping conversions return int. // External references for potentially trapping conversions return int.
ValueType sig_reps[] = {kWasmI32, ValueType::Primitive(src_type)}; ValueKind sig_reps[] = {kI32, src_kind};
FunctionSig sig(1, 1, sig_reps); ValueKindSig sig(1, 1, sig_reps);
LiftoffRegister ret_reg = LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)); __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst}; LiftoffRegister dst_regs[] = {ret_reg, dst};
GenerateCCall(dst_regs, &sig, ValueType::Primitive(dst_type), &src, ext_ref); GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
__ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp()); __ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
} else { } else {
ValueType sig_reps[] = {ValueType::Primitive(src_type)}; ValueKind sig_reps[] = {src_kind};
FunctionSig sig(0, 1, sig_reps); ValueKindSig sig(0, 1, sig_reps);
GenerateCCall(&dst, &sig, ValueType::Primitive(dst_type), &src, ext_ref); GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
} }
} }
__ PushRegister(ValueType::Primitive(dst_type), dst); __ PushRegister(dst_type, dst);
} }
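The trapping branch above encodes a C-side contract: the fallback reads its operand from a stack buffer, writes the converted result back into the same buffer, and returns a nonzero i32 on success, so the kEqual jump fires exactly when the conversion is unrepresentable. A hedged sketch with an invented name, not V8's actual wrapper:

#include <cstdint>
#include <cstring>

// Hypothetical f64 -> i64 conversion following that contract.
int32_t float64_to_int64_sketch(double* buffer) {
  double input = *buffer;
  // NaN fails both comparisons and therefore also takes the trap path.
  if (!(input >= -9223372036854775808.0 && input < 9223372036854775808.0)) {
    return 0;  // unrepresentable: caller jumps to the trap label
  }
  int64_t result = static_cast<int64_t>(input);
  std::memcpy(buffer, &result, sizeof(result));  // result via out-argument
  return 1;
}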
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value, void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
...@@ -1292,16 +1287,16 @@ class LiftoffCompiler { ...@@ -1292,16 +1287,16 @@ class LiftoffCompiler {
#define CASE_I64_UNOP(opcode, fn) \ #define CASE_I64_UNOP(opcode, fn) \
case kExpr##opcode: \ case kExpr##opcode: \
return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn); return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP(opcode, type, fn) \ #define CASE_FLOAT_UNOP(opcode, kind, fn) \
case kExpr##opcode: \ case kExpr##opcode: \
return EmitUnOp<k##type, k##type>(&LiftoffAssembler::emit_##fn); return EmitUnOp<k##kind, k##kind>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \ #define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, kind, fn) \
case kExpr##opcode: \ case kExpr##opcode: \
return EmitFloatUnOpWithCFallback<k##type>(&LiftoffAssembler::emit_##fn, \ return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn); &ExternalReference::wasm_##fn);
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \ #define CASE_TYPE_CONVERSION(opcode, dst_type, src_kind, ext_ref, can_trap) \
case kExpr##opcode: \ case kExpr##opcode: \
return EmitTypeConversion<k##dst_type, k##src_type, can_trap>( \ return EmitTypeConversion<k##dst_type, k##src_kind, can_trap>( \
kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0); kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0);
switch (opcode) { switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz) CASE_I32_UNOP(I32Clz, i32_clz)
...@@ -1390,9 +1385,9 @@ class LiftoffCompiler { ...@@ -1390,9 +1385,9 @@ class LiftoffCompiler {
return EmitUnOp<kI32, kI32>( return EmitUnOp<kI32, kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) { [=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i32_popcnt(dst.gp(), src.gp())) return; if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32}; ValueKind sig_i_i_reps[] = {kI32, kI32};
FunctionSig sig_i_i(1, 1, sig_i_i_reps); ValueKindSig sig_i_i(1, 1, sig_i_i_reps);
GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src, GenerateCCall(&dst, &sig_i_i, kStmt, &src,
ExternalReference::wasm_word32_popcnt()); ExternalReference::wasm_word32_popcnt());
}); });
case kExprI64Popcnt: case kExprI64Popcnt:
...@@ -1400,10 +1395,10 @@ class LiftoffCompiler { ...@@ -1400,10 +1395,10 @@ class LiftoffCompiler {
[=](LiftoffRegister dst, LiftoffRegister src) { [=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i64_popcnt(dst, src)) return; if (__ emit_i64_popcnt(dst, src)) return;
// The c function returns i32. We will zero-extend later. // The c function returns i32. We will zero-extend later.
ValueType sig_i_l_reps[] = {kWasmI32, kWasmI64}; ValueKind sig_i_l_reps[] = {kI32, kI64};
FunctionSig sig_i_l(1, 1, sig_i_l_reps); ValueKindSig sig_i_l(1, 1, sig_i_l_reps);
LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst; LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst;
GenerateCCall(&c_call_dst, &sig_i_l, kWasmStmt, &src, GenerateCCall(&c_call_dst, &sig_i_l, kStmt, &src,
ExternalReference::wasm_word64_popcnt()); ExternalReference::wasm_word64_popcnt());
// Now zero-extend the result to i64. // Now zero-extend the result to i64.
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst, __ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
...@@ -1422,7 +1417,7 @@ class LiftoffCompiler { ...@@ -1422,7 +1417,7 @@ class LiftoffCompiler {
// of the comparison. // of the comparison.
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {}); LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
__ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null); __ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
__ PushRegister(kWasmI32, dst); __ PushRegister(kI32, dst);
return; return;
} }
default: default:
...@@ -1435,11 +1430,11 @@ class LiftoffCompiler { ...@@ -1435,11 +1430,11 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION #undef CASE_TYPE_CONVERSION
} }
template <ValueKind src_type, ValueKind result_type, typename EmitFn, template <ValueKind src_kind, ValueKind result_kind, typename EmitFn,
typename EmitFnImm> typename EmitFnImm>
void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) { void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
static constexpr RegClass src_rc = reg_class_for(src_type); static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_type); static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back(); LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
// Check if the RHS is an immediate. // Check if the RHS is an immediate.
...@@ -1456,18 +1451,18 @@ class LiftoffCompiler { ...@@ -1456,18 +1451,18 @@ class LiftoffCompiler {
: __ GetUnusedRegister(result_rc, pinned); : __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm); CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType::Primitive(result_type), dst); __ PushRegister(result_kind, dst);
} else { } else {
// The RHS was not an immediate. // The RHS was not an immediate.
EmitBinOp<src_type, result_type>(fn); EmitBinOp<src_kind, result_kind>(fn);
} }
} }
template <ValueKind src_type, ValueKind result_type, template <ValueKind src_kind, ValueKind result_kind,
bool swap_lhs_rhs = false, typename EmitFn> bool swap_lhs_rhs = false, typename EmitFn>
void EmitBinOp(EmitFn fn) { void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type); static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_type); static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister rhs = __ PopToRegister(); LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)); LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc LiftoffRegister dst = src_rc == result_rc
...@@ -1477,7 +1472,7 @@ class LiftoffCompiler { ...@@ -1477,7 +1472,7 @@ class LiftoffCompiler {
if (swap_lhs_rhs) std::swap(lhs, rhs); if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs); CallEmitFn(fn, dst, lhs, rhs);
__ PushRegister(ValueType::Primitive(result_type), dst); __ PushRegister(result_kind, dst);
} }
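For orientation, a typical integer case (mirroring kExprI32Add in the switch below) threads a register-register emitter and an immediate emitter through EmitBinOpImm, so a constant RHS folds straight into the instruction; illustrative, assuming these emitter names:

case kExprI32Add:
  return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_add,
                                  &LiftoffAssembler::emit_i32_addi);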
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs, void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
...@@ -1491,16 +1486,15 @@ class LiftoffCompiler { ...@@ -1491,16 +1486,15 @@ class LiftoffCompiler {
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret)); __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
LiftoffRegister arg_regs[] = {lhs, rhs}; LiftoffRegister arg_regs[] = {lhs, rhs};
LiftoffRegister result_regs[] = {ret, dst}; LiftoffRegister result_regs[] = {ret, dst};
ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64}; ValueKind sig_kinds[] = {kI32, kI64, kI64};
// <i64, i64> -> i32 (with i64 output argument) // <i64, i64> -> i32 (with i64 output argument)
FunctionSig sig(1, 2, sig_types); ValueKindSig sig(1, 2, sig_kinds);
GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref); GenerateCCall(result_regs, &sig, kI64, arg_regs, ext_ref);
__ LoadConstant(tmp, WasmValue(int32_t{0})); __ LoadConstant(tmp, WasmValue(int32_t{0}));
__ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp()); __ emit_cond_jump(kEqual, trap_by_zero, kI32, ret.gp(), tmp.gp());
if (trap_unrepresentable) { if (trap_unrepresentable) {
__ LoadConstant(tmp, WasmValue(int32_t{-1})); __ LoadConstant(tmp, WasmValue(int32_t{-1}));
__ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(), tmp.gp()); __ emit_cond_jump(kEqual, trap_unrepresentable, kI32, ret.gp(), tmp.gp());
} }
} }
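The helper above assumes a small status protocol for the <i64, i64> -> i32 C calls: the i64 result travels through the out-argument while the i32 return value distinguishes success from the two trap cases. A hedged model of the C side (invented name):

#include <cstdint>
#include <limits>

// Returns 1 on success, 0 for divide-by-zero, -1 for the single
// unrepresentable signed case (INT64_MIN / -1). Operands and result
// share the i64 out-argument buffer.
int32_t int64_div_sketch(int64_t* buffer) {
  int64_t lhs = buffer[0];
  int64_t rhs = buffer[1];
  if (rhs == 0) return 0;
  if (lhs == std::numeric_limits<int64_t>::min() && rhs == -1) return -1;
  buffer[0] = lhs / rhs;
  return 1;
}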
...@@ -1527,17 +1521,17 @@ class LiftoffCompiler { ...@@ -1527,17 +1521,17 @@ class LiftoffCompiler {
amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \ amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \
}, \ }, \
&LiftoffAssembler::emit_##fn##i); &LiftoffAssembler::emit_##fn##i);
#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \ #define CASE_CCALL_BINOP(opcode, kind, ext_ref_fn) \
case kExpr##opcode: \ case kExpr##opcode: \
return EmitBinOp<k##type, k##type>( \ return EmitBinOp<k##kind, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
LiftoffRegister args[] = {lhs, rhs}; \ LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(); \ auto ext_ref = ExternalReference::ext_ref_fn(); \
ValueType sig_reps[] = {kWasm##type, kWasm##type, kWasm##type}; \ ValueKind sig_reps[] = {k##kind, k##kind, k##kind}; \
const bool out_via_stack = kWasm##type == kWasmI64; \ const bool out_via_stack = k##kind == kI64; \
FunctionSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \ ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \
ValueType out_arg_type = out_via_stack ? kWasmI64 : kWasmStmt; \ ValueKind out_arg_kind = out_via_stack ? kI64 : kStmt; \
GenerateCCall(&dst, &sig, out_arg_type, args, ext_ref); \ GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref); \
}); });
switch (opcode) { switch (opcode) {
case kExprI32Add: case kExprI32Add:
...@@ -1806,7 +1800,7 @@ class LiftoffCompiler { ...@@ -1806,7 +1800,7 @@ class LiftoffCompiler {
} }
void I32Const(FullDecoder* decoder, Value* result, int32_t value) { void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
__ PushConstant(kWasmI32, value); __ PushConstant(kI32, value);
} }
void I64Const(FullDecoder* decoder, Value* result, int64_t value) { void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
...@@ -1816,24 +1810,24 @@ class LiftoffCompiler { ...@@ -1816,24 +1810,24 @@ class LiftoffCompiler {
// a register immediately. // a register immediately.
int32_t value_i32 = static_cast<int32_t>(value); int32_t value_i32 = static_cast<int32_t>(value);
if (value_i32 == value) { if (value_i32 == value) {
__ PushConstant(kWasmI64, value_i32); __ PushConstant(kI64, value_i32);
} else { } else {
LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {}); LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kI64), {});
__ LoadConstant(reg, WasmValue(value)); __ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmI64, reg); __ PushRegister(kI64, reg);
} }
} }
void F32Const(FullDecoder* decoder, Value* result, float value) { void F32Const(FullDecoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {}); LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value)); __ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg); __ PushRegister(kF32, reg);
} }
void F64Const(FullDecoder* decoder, Value* result, double value) { void F64Const(FullDecoder* decoder, Value* result, double value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {}); LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value)); __ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg); __ PushRegister(kF64, reg);
} }
void RefNull(FullDecoder* decoder, ValueType type, Value*) { void RefNull(FullDecoder* decoder, ValueType type, Value*) {
...@@ -1843,34 +1837,29 @@ class LiftoffCompiler { ...@@ -1843,34 +1837,29 @@ class LiftoffCompiler {
} }
LiftoffRegister null = __ GetUnusedRegister(kGpReg, {}); LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
LoadNullValue(null.gp(), {}); LoadNullValue(null.gp(), {});
__ PushRegister(type, null); __ PushRegister(type.kind(), null);
} }
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) { void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
WasmCode::RuntimeStubId target = WasmCode::kWasmRefFunc; WasmCode::RuntimeStubId target = WasmCode::kWasmRefFunc;
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(compilation_zone_); GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(compilation_zone_);
HeapType heap_type(decoder->enabled_.has_typed_funcref() ? decoder->module_->functions[function_index].sig_index : HeapType::kFunc);
ValueType func_type = ValueType::Ref(heap_type, kNonNullable);
ValueType sig_reps[] = {func_type, kWasmI32}; ValueKind sig_reps[] = {kRef, kI32};
FunctionSig sig(1, 1, sig_reps); ValueKindSig sig(1, 1, sig_reps);
LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {}); LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(func_index_reg, WasmValue(function_index)); __ LoadConstant(func_index_reg, WasmValue(function_index));
LiftoffAssembler::VarState func_index_var(kWasmI32, func_index_reg, 0); LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {func_index_var}); __ PrepareBuiltinCall(&sig, call_descriptor, {func_index_var});
__ CallRuntimeStub(target); __ CallRuntimeStub(target);
DefineSafepoint(); DefineSafepoint();
__ PushRegister(func_type, LiftoffRegister(kReturnRegister0)); __ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
} }
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) { void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned)); LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type); MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
__ PushRegister(ValueType::Ref(arg.type.heap_type(), kNonNullable), obj); __ PushRegister(kRef, obj);
} }
void Drop(FullDecoder* decoder) { __ DropValues(1); } void Drop(FullDecoder* decoder) { __ DropValues(1); }
...@@ -1888,11 +1877,11 @@ class LiftoffCompiler { ...@@ -1888,11 +1877,11 @@ class LiftoffCompiler {
// are not handled yet. // are not handled yet.
size_t num_returns = decoder->sig_->return_count(); size_t num_returns = decoder->sig_->return_count();
if (num_returns == 1) { if (num_returns == 1) {
ValueType return_type = decoder->sig_->GetReturn(0); ValueKind return_kind = decoder->sig_->GetReturn(0).kind();
LiftoffRegister return_reg = LiftoffRegister return_reg =
__ LoadToRegister(__ cache_state()->stack_state.back(), pinned); __ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
__ Store(info.gp(), no_reg, 0, return_reg, __ Store(info.gp(), no_reg, 0, return_reg,
StoreType::ForValueType(return_type), pinned); StoreType::ForValueKind(return_kind), pinned);
} }
// Put the parameter in its place. // Put the parameter in its place.
WasmTraceExitDescriptor descriptor; WasmTraceExitDescriptor descriptor;
...@@ -1900,7 +1889,7 @@ class LiftoffCompiler { ...@@ -1900,7 +1889,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0); Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) { if (info.gp() != param_reg) {
__ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr); __ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
} }
source_position_table_builder_.AddPosition( source_position_table_builder_.AddPosition(
...@@ -1925,7 +1914,7 @@ class LiftoffCompiler { ...@@ -1925,7 +1914,7 @@ class LiftoffCompiler {
const LocalIndexImmediate<validate>& imm) { const LocalIndexImmediate<validate>& imm) {
auto local_slot = __ cache_state()->stack_state[imm.index]; auto local_slot = __ cache_state()->stack_state[imm.index];
__ cache_state()->stack_state.emplace_back( __ cache_state()->stack_state.emplace_back(
local_slot.type(), __ NextSpillOffset(local_slot.type())); local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
auto* slot = &__ cache_state()->stack_state.back(); auto* slot = &__ cache_state()->stack_state.back();
if (local_slot.is_reg()) { if (local_slot.is_reg()) {
__ cache_state()->inc_used(local_slot.reg()); __ cache_state()->inc_used(local_slot.reg());
...@@ -1934,11 +1923,11 @@ class LiftoffCompiler { ...@@ -1934,11 +1923,11 @@ class LiftoffCompiler {
slot->MakeConstant(local_slot.i32_const()); slot->MakeConstant(local_slot.i32_const());
} else { } else {
DCHECK(local_slot.is_stack()); DCHECK(local_slot.is_stack());
auto rc = reg_class_for(local_slot.type()); auto rc = reg_class_for(local_slot.kind());
LiftoffRegister reg = __ GetUnusedRegister(rc, {}); LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ cache_state()->inc_used(reg); __ cache_state()->inc_used(reg);
slot->MakeRegister(reg); slot->MakeRegister(reg);
__ Fill(reg, local_slot.offset(), local_slot.type()); __ Fill(reg, local_slot.offset(), local_slot.kind());
} }
} }
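LocalGet above touches stack slots only through their kind, offset, and location. After this change a VarState slot can be modeled roughly as below (simplified stand-in, not the actual LiftoffAssembler::VarState):

#include <cstdint>

struct VarStateSketch {
  enum Location { kStack, kRegister, kIntConst };
  ValueKind kind;          // previously a full ValueType
  Location loc = kStack;
  int offset = 0;          // spill-slot offset, always assigned
  int reg_code = -1;       // meaningful only when loc == kRegister
  int32_t i32_const = 0;   // meaningful only when loc == kIntConst
};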
...@@ -1946,21 +1935,21 @@ class LiftoffCompiler { ...@@ -1946,21 +1935,21 @@ class LiftoffCompiler {
uint32_t local_index) { uint32_t local_index) {
auto& state = *__ cache_state(); auto& state = *__ cache_state();
auto& src_slot = state.stack_state.back(); auto& src_slot = state.stack_state.back();
ValueType type = dst_slot->type(); ValueKind kind = dst_slot->kind();
if (dst_slot->is_reg()) { if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg(); LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) { if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot->reg(), src_slot.offset(), type); __ Fill(dst_slot->reg(), src_slot.offset(), kind);
return; return;
} }
state.dec_used(slot_reg); state.dec_used(slot_reg);
dst_slot->MakeStack(); dst_slot->MakeStack();
} }
DCHECK_EQ(type, __ local_type(local_index)); DCHECK_EQ(kind, __ local_type(local_index));
RegClass rc = reg_class_for(type); RegClass rc = reg_class_for(kind);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {}); LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), type); __ Fill(dst_reg, src_slot.offset(), kind);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset()); *dst_slot = LiftoffAssembler::VarState(kind, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg); __ cache_state()->inc_used(dst_reg);
} }
...@@ -2055,11 +2044,12 @@ class LiftoffCompiler { ...@@ -2055,11 +2044,12 @@ class LiftoffCompiler {
void GlobalGet(FullDecoder* decoder, Value* result, void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) { const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index]; const auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
if (!CheckSupportedType(decoder, global->type, "global")) { if (!CheckSupportedType(decoder, kind, "global")) {
return; return;
} }
if (global->type.is_reference_type()) { if (is_reference_type(kind)) {
if (global->mutability && global->imported) { if (global->mutability && global->imported) {
LiftoffRegList pinned; LiftoffRegList pinned;
Register base = no_reg; Register base = no_reg;
...@@ -2067,7 +2057,7 @@ class LiftoffCompiler { ...@@ -2067,7 +2057,7 @@ class LiftoffCompiler {
GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned, GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
&base, &offset); &base, &offset);
__ LoadTaggedPointer(base, base, offset, 0, pinned); __ LoadTaggedPointer(base, base, offset, 0, pinned);
__ PushRegister(global->type, LiftoffRegister(base)); __ PushRegister(kind, LiftoffRegister(base));
return; return;
} }
...@@ -2081,27 +2071,28 @@ class LiftoffCompiler { ...@@ -2081,27 +2071,28 @@ class LiftoffCompiler {
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray( wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
imm.global->offset), imm.global->offset),
pinned); pinned);
__ PushRegister(global->type, LiftoffRegister(value)); __ PushRegister(kind, LiftoffRegister(value));
return; return;
} }
LiftoffRegList pinned; LiftoffRegList pinned;
uint32_t offset = 0; uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset); Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister value = LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned)); pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned));
LoadType type = LoadType::ForValueType(global->type); LoadType type = LoadType::ForValueKind(kind);
__ Load(value, addr, no_reg, offset, type, pinned, nullptr, true); __ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
__ PushRegister(global->type, value); __ PushRegister(kind, value);
} }
void GlobalSet(FullDecoder* decoder, const Value& value, void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) { const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index]; auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
if (!CheckSupportedType(decoder, global->type, "global")) { if (!CheckSupportedType(decoder, kind, "global")) {
return; return;
} }
if (global->type.is_reference_type()) { if (is_reference_type(kind)) {
if (global->mutability && global->imported) { if (global->mutability && global->imported) {
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
...@@ -2129,7 +2120,7 @@ class LiftoffCompiler { ...@@ -2129,7 +2120,7 @@ class LiftoffCompiler {
uint32_t offset = 0; uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset); Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned)); LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueType(global->type); StoreType type = StoreType::ForValueKind(kind);
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true); __ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
} }
...@@ -2149,9 +2140,9 @@ class LiftoffCompiler { ...@@ -2149,9 +2140,9 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableGetDescriptor>(compilation_zone_); GetBuiltinCallDescriptor<WasmTableGetDescriptor>(compilation_zone_);
ValueType result_type = env_->module->tables[imm.index].type; ValueKind result_kind = env_->module->tables[imm.index].type.kind();
ValueType sig_reps[] = {result_type, kWasmI32, kWasmI32}; ValueKind sig_reps[] = {result_kind, kI32, kI32};
FunctionSig sig(1, 2, sig_reps); ValueKindSig sig(1, 2, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index}); __ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index});
__ CallRuntimeStub(target); __ CallRuntimeStub(target);
...@@ -2162,7 +2153,7 @@ class LiftoffCompiler { ...@@ -2162,7 +2153,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill); RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ PushRegister(result_type, LiftoffRegister(kReturnRegister0)); __ PushRegister(result_kind, LiftoffRegister(kReturnRegister0));
} }
void TableSet(FullDecoder* decoder, const Value&, const Value&, void TableSet(FullDecoder* decoder, const Value&, const Value&,
...@@ -2182,9 +2173,9 @@ class LiftoffCompiler { ...@@ -2182,9 +2173,9 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableSetDescriptor>(compilation_zone_); GetBuiltinCallDescriptor<WasmTableSetDescriptor>(compilation_zone_);
ValueKind table_kind = env_->module->tables[imm.index].type.kind();
ValueType sig_reps[] = {kWasmI32, kWasmI32, env_->module->tables[imm.index].type}; ValueKind sig_reps[] = {kI32, kI32, table_kind};
FunctionSig sig(0, 3, sig_reps); ValueKindSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index, value}); __ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index, value});
__ CallRuntimeStub(target); __ CallRuntimeStub(target);
...@@ -2211,25 +2202,25 @@ class LiftoffCompiler { ...@@ -2211,25 +2202,25 @@ class LiftoffCompiler {
const Value& tval, Value* result) { const Value& tval, Value* result) {
LiftoffRegList pinned; LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp(); Register condition = pinned.set(__ PopToRegister()).gp();
ValueType type = __ cache_state()->stack_state.end()[-1].type(); ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type()); DCHECK_EQ(kind, __ cache_state()->stack_state.end()[-2].kind());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned)); LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned); LiftoffRegister true_value = __ PopToRegister(pinned);
LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(), LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
{true_value, false_value}, {}); {true_value, false_value}, {});
if (!__ emit_select(dst, condition, true_value, false_value, type)) { if (!__ emit_select(dst, condition, true_value, false_value, kind)) {
// Emit generic code (using branches) instead. // Emit generic code (using branches) instead.
Label cont; Label cont;
Label case_false; Label case_false;
__ emit_cond_jump(kEqual, &case_false, kWasmI32, condition); __ emit_cond_jump(kEqual, &case_false, kI32, condition);
if (dst != true_value) __ Move(dst, true_value, type); if (dst != true_value) __ Move(dst, true_value, kind);
__ emit_jump(&cont); __ emit_jump(&cont);
__ bind(&case_false); __ bind(&case_false);
if (dst != false_value) __ Move(dst, false_value, type); if (dst != false_value) __ Move(dst, false_value, kind);
__ bind(&cont); __ bind(&cont);
} }
__ PushRegister(type, dst); __ PushRegister(kind, dst);
} }
void BrImpl(Control* target) { void BrImpl(Control* target) {
...@@ -2264,17 +2255,17 @@ class LiftoffCompiler { ...@@ -2264,17 +2255,17 @@ class LiftoffCompiler {
if (!has_outstanding_op()) { if (!has_outstanding_op()) {
// Unary "equal" means "equals zero". // Unary "equal" means "equals zero".
__ emit_cond_jump(kEqual, &cont_false, kWasmI32, value); __ emit_cond_jump(kEqual, &cont_false, kI32, value);
} else if (outstanding_op_ == kExprI32Eqz) { } else if (outstanding_op_ == kExprI32Eqz) {
// Unary "unequal" means "not equals zero". // Unary "unequal" means "not equals zero".
__ emit_cond_jump(kUnequal, &cont_false, kWasmI32, value); __ emit_cond_jump(kUnequal, &cont_false, kI32, value);
outstanding_op_ = kNoOutstandingOp; outstanding_op_ = kNoOutstandingOp;
} else { } else {
// Otherwise, it's an i32 compare opcode. // Otherwise, it's an i32 compare opcode.
LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_)); LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
Register rhs = value; Register rhs = value;
Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp(); Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
__ emit_cond_jump(cond, &cont_false, kWasmI32, lhs, rhs); __ emit_cond_jump(cond, &cont_false, kI32, lhs, rhs);
outstanding_op_ = kNoOutstandingOp; outstanding_op_ = kNoOutstandingOp;
} }
...@@ -2312,7 +2303,7 @@ class LiftoffCompiler { ...@@ -2312,7 +2303,7 @@ class LiftoffCompiler {
uint32_t split = min + (max - min) / 2; uint32_t split = min + (max - min) / 2;
Label upper_half; Label upper_half;
__ LoadConstant(tmp, WasmValue(split)); __ LoadConstant(tmp, WasmValue(split));
__ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(), __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kI32, value.gp(),
tmp.gp()); tmp.gp());
// Emit br table for lower half: // Emit br table for lower half:
GenerateBrTable(decoder, tmp, value, min, split, table_iterator, GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
...@@ -2336,8 +2327,8 @@ class LiftoffCompiler { ...@@ -2336,8 +2327,8 @@ class LiftoffCompiler {
LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned); LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
__ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count})); __ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
Label case_default; Label case_default;
__ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32, value.gp(), tmp.gp()); __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kI32, value.gp(), tmp.gp());
GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator, GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
&br_targets); &br_targets);
...@@ -2375,7 +2366,7 @@ class LiftoffCompiler { ...@@ -2375,7 +2366,7 @@ class LiftoffCompiler {
auto& slot = __ cache_state()->stack_state[i]; auto& slot = __ cache_state()->stack_state[i];
if (!slot.is_reg()) continue; if (!slot.is_reg()) continue;
spilled->entries.push_back(SpilledRegistersForInspection::Entry{ spilled->entries.push_back(SpilledRegistersForInspection::Entry{
slot.offset(), slot.reg(), slot.type()}); slot.offset(), slot.reg(), slot.kind()});
__ RecordUsedSpillOffset(slot.offset()); __ RecordUsedSpillOffset(slot.offset());
} }
return spilled; return spilled;
...@@ -2455,7 +2446,7 @@ class LiftoffCompiler { ...@@ -2455,7 +2446,7 @@ class LiftoffCompiler {
} else if (kSystemPointerSize == kInt32Size) { } else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size); DCHECK_GE(kMaxUInt32, env_->max_memory_size);
// Unary "unequal" means "not equals zero". // Unary "unequal" means "not equals zero".
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, index.high_gp()); __ emit_cond_jump(kUnequal, trap_label, kI32, index.high_gp());
} }
uintptr_t end_offset = offset + access_size - 1u; uintptr_t end_offset = offset + access_size - 1u;
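Why access_size - 1u: the check wants the index of the last byte touched. An 8-byte load at static offset 16 covers bytes [index + 16, index + 23], so end_offset is 23. A self-contained model of the predicate the emitted code implements (illustrative; the generated code spreads it over constants and registers):

#include <cstdint>

bool AccessInBounds(uintptr_t index, uintptr_t offset, uintptr_t access_size,
                    uintptr_t mem_size) {
  uintptr_t end_offset = offset + access_size - 1u;
  // If even index 0 would run past the end, fail before the subtraction
  // below can wrap around.
  if (end_offset >= mem_size) return false;
  return index <= mem_size - 1u - end_offset;
}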
...@@ -2503,12 +2494,12 @@ class LiftoffCompiler { ...@@ -2503,12 +2494,12 @@ class LiftoffCompiler {
// {emit_cond_jump} to use the "test" instruction without the "and" here. // {emit_cond_jump} to use the "test" instruction without the "and" here.
// Then we can also avoid using the temp register here. // Then we can also avoid using the temp register here.
__ emit_i32_andi(address, index, align_mask); __ emit_i32_andi(address, index, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address); __ emit_cond_jump(kUnequal, trap_label, kI32, address);
} else { } else {
// For alignment checks we only look at the lower 32-bits in {offset}. // For alignment checks we only look at the lower 32-bits in {offset}.
__ emit_i32_addi(address, index, static_cast<uint32_t>(offset)); __ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
__ emit_i32_andi(address, address, align_mask); __ emit_i32_andi(address, address, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address); __ emit_cond_jump(kUnequal, trap_label, kI32, address);
} }
} }
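Both branches above compute the same predicate; the first one presumably runs when {offset} is itself aligned, since then (index + offset) & align_mask equals index & align_mask and the add can be skipped. In plain form, with align_mask = access_size - 1 for power-of-two access sizes:

#include <cstdint>

bool IsMisaligned(uint32_t index, uint32_t offset, uint32_t align_mask) {
  // e.g. a 4-byte atomic access has align_mask == 3: any effective address
  // with one of the low two bits set must trap.
  return ((index + offset) & align_mask) != 0;
}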
...@@ -2558,7 +2549,7 @@ class LiftoffCompiler { ...@@ -2558,7 +2549,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0); Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) { if (info.gp() != param_reg) {
__ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr); __ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
} }
source_position_table_builder_.AddPosition(__ pc_offset(), source_position_table_builder_.AddPosition(__ pc_offset(),
...@@ -2601,8 +2592,8 @@ class LiftoffCompiler { ...@@ -2601,8 +2592,8 @@ class LiftoffCompiler {
void LoadMem(FullDecoder* decoder, LoadType type, void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm, const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) { const Value& index_val, Value* result) {
ValueType value_type = type.value_type(); ValueKind kind = type.value_type().kind();
if (!CheckSupportedType(decoder, value_type, "load")) return; if (!CheckSupportedType(decoder, kind, "load")) return;
LiftoffRegister full_index = __ PopToRegister(); LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset, Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDontForceCheck); full_index, {}, kDontForceCheck);
...@@ -2614,7 +2605,7 @@ class LiftoffCompiler { ...@@ -2614,7 +2605,7 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("load from memory"); DEBUG_CODE_COMMENT("load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned); LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
RegClass rc = reg_class_for(value_type); RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned)); LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0; uint32_t protected_load_pc = 0;
__ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true); __ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
...@@ -2623,7 +2614,7 @@ class LiftoffCompiler { ...@@ -2623,7 +2614,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc); protected_load_pc);
} }
__ PushRegister(value_type, value); __ PushRegister(kind, value);
if (FLAG_trace_wasm_memory) { if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index, TraceMemoryOperation(false, type.mem_type().representation(), index,
...@@ -2637,7 +2628,7 @@ class LiftoffCompiler { ...@@ -2637,7 +2628,7 @@ class LiftoffCompiler {
const Value& index_val, Value* result) { const Value& index_val, Value* result) {
// LoadTransform requires SIMD support, so check for it here. If // LoadTransform requires SIMD support, so check for it here. If
// unsupported, bailout and let TurboFan lower the code. // unsupported, bailout and let TurboFan lower the code.
if (!CheckSupportedType(decoder, kWasmS128, "LoadTransform")) { if (!CheckSupportedType(decoder, kS128, "LoadTransform")) {
return; return;
} }
...@@ -2667,7 +2658,7 @@ class LiftoffCompiler { ...@@ -2667,7 +2658,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc); protected_load_pc);
} }
__ PushRegister(ValueType::Primitive(kS128), value); __ PushRegister(kS128, value);
if (FLAG_trace_wasm_memory) { if (FLAG_trace_wasm_memory) {
// Again load extend is different. // Again load extend is different.
...@@ -2682,7 +2673,7 @@ class LiftoffCompiler { ...@@ -2682,7 +2673,7 @@ class LiftoffCompiler {
void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value, void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value,
const Value& _index, const MemoryAccessImmediate<validate>& imm, const Value& _index, const MemoryAccessImmediate<validate>& imm,
const uint8_t laneidx, Value* _result) { const uint8_t laneidx, Value* _result) {
if (!CheckSupportedType(decoder, kWasmS128, "LoadLane")) { if (!CheckSupportedType(decoder, kS128, "LoadLane")) {
return; return;
} }
...@@ -2710,7 +2701,7 @@ class LiftoffCompiler { ...@@ -2710,7 +2701,7 @@ class LiftoffCompiler {
protected_load_pc); protected_load_pc);
} }
__ PushRegister(ValueType::Primitive(kS128), result); __ PushRegister(kS128, result);
if (FLAG_trace_wasm_memory) { if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index, TraceMemoryOperation(false, type.mem_type().representation(), index,
...@@ -2721,8 +2712,8 @@ class LiftoffCompiler { ...@@ -2721,8 +2712,8 @@ class LiftoffCompiler {
void StoreMem(FullDecoder* decoder, StoreType type, void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) { const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type(); ValueKind kind = type.value_type().kind();
if (!CheckSupportedType(decoder, value_type, "store")) return; if (!CheckSupportedType(decoder, kind, "store")) return;
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister()); LiftoffRegister value = pinned.set(__ PopToRegister());
LiftoffRegister full_index = __ PopToRegister(pinned); LiftoffRegister full_index = __ PopToRegister(pinned);
...@@ -2755,7 +2746,7 @@ class LiftoffCompiler { ...@@ -2755,7 +2746,7 @@ class LiftoffCompiler {
void StoreLane(FullDecoder* decoder, StoreType type, void StoreLane(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const MemoryAccessImmediate<validate>& imm,
const Value& _index, const Value& _value, const uint8_t lane) { const Value& _index, const Value& _value, const uint8_t lane) {
if (!CheckSupportedType(decoder, kWasmS128, "StoreLane")) return; if (!CheckSupportedType(decoder, kS128, "StoreLane")) return;
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister()); LiftoffRegister value = pinned.set(__ PopToRegister());
LiftoffRegister full_index = __ PopToRegister(pinned); LiftoffRegister full_index = __ PopToRegister(pinned);
...@@ -2794,7 +2785,7 @@ class LiftoffCompiler { ...@@ -2794,7 +2785,7 @@ class LiftoffCompiler {
__ LoadConstant(high_word, WasmValue{uint32_t{0}}); __ LoadConstant(high_word, WasmValue{uint32_t{0}});
result = LiftoffRegister::ForPair(mem_size, high_word.gp()); result = LiftoffRegister::ForPair(mem_size, high_word.gp());
} }
__ PushRegister(env_->module->is_memory64 ? kWasmI64 : kWasmI32, result); __ PushRegister(env_->module->is_memory64 ? kI64 : kI32, result);
} }
void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) { void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) {
...@@ -2812,20 +2803,20 @@ class LiftoffCompiler { ...@@ -2812,20 +2803,20 @@ class LiftoffCompiler {
WasmMemoryGrowDescriptor descriptor; WasmMemoryGrowDescriptor descriptor;
DCHECK_EQ(0, descriptor.GetStackParameterCount()); DCHECK_EQ(0, descriptor.GetStackParameterCount());
DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
DCHECK_EQ(kWasmI32.machine_type(), descriptor.GetParameterType(0)); DCHECK_EQ(machine_type(kI32), descriptor.GetParameterType(0));
Register param_reg = descriptor.GetRegisterParameter(0); Register param_reg = descriptor.GetRegisterParameter(0);
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32); if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kI32);
__ CallRuntimeStub(WasmCode::kWasmMemoryGrow); __ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
DefineSafepoint(); DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill); RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
if (kReturnRegister0 != result.gp()) { if (kReturnRegister0 != result.gp()) {
__ Move(result.gp(), kReturnRegister0, kWasmI32); __ Move(result.gp(), kReturnRegister0, kI32);
} }
__ PushRegister(kWasmI32, result); __ PushRegister(kI32, result);
} }
void RegisterDebugSideTableEntry( void RegisterDebugSideTableEntry(
...@@ -2894,18 +2885,18 @@ class LiftoffCompiler { ...@@ -2894,18 +2885,18 @@ class LiftoffCompiler {
LiftoffRegister ref = pinned.set(__ PopToRegister(pinned)); LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
Register null = __ GetUnusedRegister(kGpReg, pinned).gp(); Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
LoadNullValue(null, pinned); LoadNullValue(null, pinned);
__ emit_cond_jump(kUnequal, &cont_false, ref_object.type, ref.gp(), null); __ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(), null);
BrOrRet(decoder, depth); BrOrRet(decoder, depth);
__ bind(&cont_false); __ bind(&cont_false);
__ PushRegister(ValueType::Ref(ref_object.type.heap_type(), kNonNullable), ref); __ PushRegister(kRef, ref);
} }
template <ValueKind src_type, ValueKind result_type, typename EmitFn> template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitTerOp(EmitFn fn) { void EmitTerOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type); static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_type); static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src3 = __ PopToRegister(); LiftoffRegister src3 = __ PopToRegister();
LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3)); LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3));
LiftoffRegister src1 = LiftoffRegister src1 =
...@@ -2918,7 +2909,7 @@ class LiftoffCompiler { ...@@ -2918,7 +2909,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src1, src2)) LiftoffRegList::ForRegs(src1, src2))
: __ GetUnusedRegister(result_rc, {}); : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3); CallEmitFn(fn, dst, src1, src2, src3);
__ PushRegister(ValueType::Primitive(result_type), dst); __ PushRegister(result_kind, dst);
} }
template <typename EmitFn, typename EmitFnImm> template <typename EmitFn, typename EmitFnImm>
...@@ -2935,30 +2926,30 @@ class LiftoffCompiler { ...@@ -2935,30 +2926,30 @@ class LiftoffCompiler {
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {}); LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fnImm, dst, operand, imm); CallEmitFn(fnImm, dst, operand, imm);
__ PushRegister(kWasmS128, dst); __ PushRegister(kS128, dst);
} else { } else {
LiftoffRegister count = __ PopToRegister(); LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister(); LiftoffRegister operand = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {}); LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fn, dst, operand, count); CallEmitFn(fn, dst, operand, count);
__ PushRegister(kWasmS128, dst); __ PushRegister(kS128, dst);
} }
} }
void EmitSimdFloatRoundingOpWithCFallback( void EmitSimdFloatRoundingOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister), bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
ExternalReference (*ext_ref)()) { ExternalReference (*ext_ref)()) {
static constexpr RegClass rc = reg_class_for(kWasmS128); static constexpr RegClass rc = reg_class_for(kS128);
LiftoffRegister src = __ PopToRegister(); LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {}); LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {});
if (!(asm_.*emit_fn)(dst, src)) { if (!(asm_.*emit_fn)(dst, src)) {
// Return v128 via stack for ARM. // Return v128 via stack for ARM.
ValueType sig_v_s_reps[] = {kWasmS128}; ValueKind sig_v_s_reps[] = {kS128};
FunctionSig sig_v_s(0, 1, sig_v_s_reps); ValueKindSig sig_v_s(0, 1, sig_v_s_reps);
GenerateCCall(&dst, &sig_v_s, kWasmS128, &src, ext_ref()); GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
} }
__ PushRegister(kWasmS128, dst); __ PushRegister(kS128, dst);
} }
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args, void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
...@@ -3438,17 +3429,17 @@ class LiftoffCompiler { ...@@ -3438,17 +3429,17 @@ class LiftoffCompiler {
} }
} }
template <ValueKind src_type, ValueKind result_type, typename EmitFn> template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitSimdExtractLaneOp(EmitFn fn, void EmitSimdExtractLaneOp(EmitFn fn,
const SimdLaneImmediate<validate>& imm) { const SimdLaneImmediate<validate>& imm) {
static constexpr RegClass src_rc = reg_class_for(src_type); static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_type); static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister lhs = __ PopToRegister(); LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs}, {}) ? __ GetUnusedRegister(result_rc, {lhs}, {})
: __ GetUnusedRegister(result_rc, {}); : __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane); fn(dst, lhs, imm.lane);
__ PushRegister(ValueType::Primitive(result_type), dst); __ PushRegister(result_kind, dst);
} }
template <ValueKind src2_type, typename EmitFn> template <ValueKind src2_type, typename EmitFn>
@@ -3474,7 +3465,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src2)) LiftoffRegList::ForRegs(src2))
: __ GetUnusedRegister(result_rc, {src1}, {}); : __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane); fn(dst, src1, src2, imm.lane);
__ PushRegister(kWasmS128, dst); __ PushRegister(kS128, dst);
} }
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode, void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
@@ -3484,9 +3475,9 @@ class LiftoffCompiler {
return unsupported(decoder, kSimd, "simd"); return unsupported(decoder, kSimd, "simd");
} }
switch (opcode) { switch (opcode) {
#define CASE_SIMD_EXTRACT_LANE_OP(opcode, type, fn) \ #define CASE_SIMD_EXTRACT_LANE_OP(opcode, kind, fn) \
case wasm::kExpr##opcode: \ case wasm::kExpr##opcode: \
EmitSimdExtractLaneOp<kS128, k##type>( \ EmitSimdExtractLaneOp<kS128, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \ [=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \
__ emit_##fn(dst, lhs, imm_lane_idx); \ __ emit_##fn(dst, lhs, imm_lane_idx); \
}, \ }, \
@@ -3501,9 +3492,9 @@ class LiftoffCompiler {
CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane) CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane)
CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane) CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane)
#undef CASE_SIMD_EXTRACT_LANE_OP #undef CASE_SIMD_EXTRACT_LANE_OP
#define CASE_SIMD_REPLACE_LANE_OP(opcode, type, fn) \ #define CASE_SIMD_REPLACE_LANE_OP(opcode, kind, fn) \
case wasm::kExpr##opcode: \ case wasm::kExpr##opcode: \
EmitSimdReplaceLaneOp<k##type>( \ EmitSimdReplaceLaneOp<k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \ [=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
uint8_t imm_lane_idx) { \ uint8_t imm_lane_idx) { \
__ emit_##fn(dst, src1, src2, imm_lane_idx); \ __ emit_##fn(dst, src1, src2, imm_lane_idx); \
@@ -3541,7 +3532,7 @@ class LiftoffCompiler {
} else { } else {
__ LiftoffAssembler::emit_s128_const(dst, imm.value); __ LiftoffAssembler::emit_s128_const(dst, imm.value);
} }
__ PushRegister(kWasmS128, dst); __ PushRegister(kS128, dst);
} }
void Simd8x16ShuffleOp(FullDecoder* decoder, void Simd8x16ShuffleOp(FullDecoder* decoder,
@@ -3566,7 +3557,7 @@ class LiftoffCompiler {
std::swap(lhs, rhs); std::swap(lhs, rhs);
} }
__ LiftoffAssembler::emit_i8x16_shuffle(dst, lhs, rhs, shuffle, is_swizzle); __ LiftoffAssembler::emit_i8x16_shuffle(dst, lhs, rhs, shuffle, is_swizzle);
__ PushRegister(kWasmS128, dst); __ PushRegister(kS128, dst);
} }
void ToSmi(Register reg) { void ToSmi(Register reg) {
@@ -3624,9 +3615,9 @@ class LiftoffCompiler {
GetBuiltinCallDescriptor<WasmAllocateFixedArrayDescriptor>( GetBuiltinCallDescriptor<WasmAllocateFixedArrayDescriptor>(
compilation_zone_); compilation_zone_);
ValueType create_values_sig_reps[] = {kPointerValueType, ValueKind create_values_sig_reps[] = {kPointerValueType,
LiftoffAssembler::kWasmIntPtr}; LiftoffAssembler::kIntPtr};
FunctionSig create_values_sig(1, 1, create_values_sig_reps); ValueKindSig create_values_sig(1, 1, create_values_sig_reps);
__ PrepareBuiltinCall( __ PrepareBuiltinCall(
&create_values_sig, create_values_descriptor, &create_values_sig, create_values_descriptor,
@@ -3673,8 +3664,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* throw_descriptor = compiler::CallDescriptor* throw_descriptor =
GetBuiltinCallDescriptor<WasmThrowDescriptor>(compilation_zone_); GetBuiltinCallDescriptor<WasmThrowDescriptor>(compilation_zone_);
ValueType throw_sig_reps[] = {kPointerValueType, kPointerValueType}; ValueKind throw_sig_reps[] = {kPointerValueType, kPointerValueType};
FunctionSig throw_sig(0, 2, throw_sig_reps); ValueKindSig throw_sig(0, 2, throw_sig_reps);
__ PrepareBuiltinCall( __ PrepareBuiltinCall(
&throw_sig, throw_descriptor, &throw_sig, throw_descriptor,
...@@ -3717,7 +3708,7 @@ class LiftoffCompiler { ...@@ -3717,7 +3708,7 @@ class LiftoffCompiler {
void AtomicLoadMem(FullDecoder* decoder, LoadType type, void AtomicLoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm) { const MemoryAccessImmediate<validate>& imm) {
ValueType value_type = type.value_type(); ValueKind kind = type.value_type().kind();
LiftoffRegister full_index = __ PopToRegister(); LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset, Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck); full_index, {}, kDoForceCheck);
@@ -3730,10 +3721,10 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("atomic load from memory"); DEBUG_CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned); LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
RegClass rc = reg_class_for(value_type); RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned)); LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
__ AtomicLoad(value, addr, index, offset, type, pinned); __ AtomicLoad(value, addr, index, offset, type, pinned);
__ PushRegister(value_type, value); __ PushRegister(kind, value);
if (FLAG_trace_wasm_memory) { if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index, TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -3747,7 +3738,7 @@ class LiftoffCompiler {
uintptr_t, LiftoffRegister, uintptr_t, LiftoffRegister,
LiftoffRegister, LiftoffRegister,
StoreType)) { StoreType)) {
ValueType result_type = type.value_type(); ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister()); LiftoffRegister value = pinned.set(__ PopToRegister());
#ifdef V8_TARGET_ARCH_IA32 #ifdef V8_TARGET_ARCH_IA32
@@ -3758,7 +3749,7 @@ class LiftoffCompiler {
LiftoffRegister result = value; LiftoffRegister result = value;
if (__ cache_state()->is_used(value)) { if (__ cache_state()->is_used(value)) {
result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned)); result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
__ Move(result, value, result_type); __ Move(result, value, result_kind);
pinned.clear(value); pinned.clear(value);
value = result; value = result;
} }
@@ -3780,7 +3771,7 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned); LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
(asm_.*emit_fn)(addr, index, offset, value, result, type); (asm_.*emit_fn)(addr, index, offset, value, result, type);
__ PushRegister(result_type, result); __ PushRegister(result_kind, result);
} }
void AtomicCompareExchange(FullDecoder* decoder, StoreType type, void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
@@ -3816,10 +3807,10 @@ class LiftoffCompiler {
// assembler now. // assembler now.
__ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result, __ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
type); type);
__ PushRegister(type.value_type(), result); __ PushRegister(type.value_type().kind(), result);
return; return;
#else #else
ValueType result_type = type.value_type(); ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister new_value = pinned.set(__ PopToRegister()); LiftoffRegister new_value = pinned.set(__ PopToRegister());
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned)); LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
@@ -3835,11 +3826,11 @@ class LiftoffCompiler {
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned); LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegister result = LiftoffRegister result =
pinned.set(__ GetUnusedRegister(reg_class_for(result_type), pinned)); pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
__ AtomicCompareExchange(addr, index, offset, expected, new_value, result, __ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
type); type);
__ PushRegister(result_type, result); __ PushRegister(result_kind, result);
#endif #endif
} }
@@ -3855,15 +3846,15 @@ class LiftoffCompiler {
StubCallMode::kCallWasmRuntimeStub); // stub call mode StubCallMode::kCallWasmRuntimeStub); // stub call mode
} }
void AtomicWait(FullDecoder* decoder, ValueType type, void AtomicWait(FullDecoder* decoder, ValueKind kind,
const MemoryAccessImmediate<validate>& imm) { const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(2, {}); LiftoffRegister full_index = __ PeekToRegister(2, {});
Register index_reg = Register index_reg =
BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset, BoundsCheckMem(decoder, element_size_bytes(kind), imm.offset,
full_index, {}, kDoForceCheck); full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return; if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg); LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
AlignmentCheckMem(decoder, type.element_size_bytes(), imm.offset, index_reg, AlignmentCheckMem(decoder, element_size_bytes(kind), imm.offset, index_reg,
pinned); pinned);
uintptr_t offset = imm.offset; uintptr_t offset = imm.offset;
@@ -3890,7 +3881,7 @@ class LiftoffCompiler {
WasmCode::RuntimeStubId target; WasmCode::RuntimeStubId target;
compiler::CallDescriptor* call_descriptor; compiler::CallDescriptor* call_descriptor;
if (type == kWasmI32) { if (kind == kI32) {
if (kNeedI64RegPair) { if (kNeedI64RegPair) {
target = WasmCode::kWasmI32AtomicWait32; target = WasmCode::kWasmI32AtomicWait32;
call_descriptor = call_descriptor =
@@ -3916,8 +3907,8 @@ class LiftoffCompiler {
} }
} }
ValueType sig_reps[] = {kPointerValueType, type, kWasmI64}; ValueKind sig_reps[] = {kPointerValueType, kind, kI64};
FunctionSig sig(0, 3, sig_reps); ValueKindSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, __ PrepareBuiltinCall(&sig, call_descriptor,
{index, expected_value, timeout}); {index, expected_value, timeout});
@@ -3928,19 +3919,17 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill); RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0)); __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
} }
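
The AlignmentCheckMem call above enforces the wasm rule that atomic accesses must be naturally aligned. Since atomic access sizes are powers of two, such a check can reduce to a mask test on the effective address; a sketch under that assumption, with the trap modeled as an exception:

#include <cstdint>
#include <stdexcept>

void AlignmentCheckMem(uint64_t index, uint64_t offset, uint32_t access_size) {
  // access_size is 1, 2, 4 or 8, so misalignment shows up in the low bits.
  if (((index + offset) & (access_size - 1)) != 0) {
    throw std::runtime_error("unaligned atomic access");  // trap stand-in
  }
}
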
void AtomicNotify(FullDecoder* decoder, void AtomicNotify(FullDecoder* decoder,
const MemoryAccessImmediate<validate>& imm) { const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(1, {}); LiftoffRegister full_index = __ PeekToRegister(1, {});
Register index_reg = Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
full_index, {}, kDoForceCheck); full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return; if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg); LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
AlignmentCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset, AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
index_reg, pinned);
uintptr_t offset = imm.offset; uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned); index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
@@ -3954,8 +3943,8 @@ class LiftoffCompiler {
__ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset); __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
} }
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32}; ValueKind sig_reps[] = {kI32, kPointerValueType, kI32};
FunctionSig sig(1, 2, sig_reps); ValueKindSig sig(1, 2, sig_reps);
auto call_descriptor = auto call_descriptor =
GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_); GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
@@ -3971,7 +3960,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill); RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0)); __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
} }
#define ATOMIC_STORE_LIST(V) \ #define ATOMIC_STORE_LIST(V) \
@@ -4081,10 +4070,10 @@ class LiftoffCompiler {
#undef ATOMIC_COMPARE_EXCHANGE_OP #undef ATOMIC_COMPARE_EXCHANGE_OP
case kExprI32AtomicWait: case kExprI32AtomicWait:
AtomicWait(decoder, kWasmI32, imm); AtomicWait(decoder, kI32, imm);
break; break;
case kExprI64AtomicWait: case kExprI64AtomicWait:
AtomicWait(decoder, kWasmI64, imm); AtomicWait(decoder, kI64, imm);
break; break;
case kExprAtomicNotify: case kExprAtomicNotify:
AtomicNotify(decoder, imm); AtomicNotify(decoder, imm);
@@ -4117,18 +4106,17 @@ class LiftoffCompiler {
__ LoadConstant(segment_index, WasmValue(imm.data_segment_index)); __ LoadConstant(segment_index, WasmValue(imm.data_segment_index));
ExternalReference ext_ref = ExternalReference::wasm_memory_init(); ExternalReference ext_ref = ExternalReference::wasm_memory_init();
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32, kI32};
kWasmI32, kWasmI32, kWasmI32}; ValueKindSig sig(1, 5, sig_reps);
FunctionSig sig(1, 5, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
segment_index, size}; segment_index, size};
// We don't need the instance anymore after the call. We can use the // We don't need the instance anymore after the call. We can use the
// register for the result. // register for the result.
LiftoffRegister result(instance); LiftoffRegister result(instance);
GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref); GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap( Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds); decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp()); __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
} }
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) { void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
@@ -4142,8 +4130,7 @@ class LiftoffCompiler {
LiftoffRegister seg_index = LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)); pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Scale the seg_index for the array access. // Scale the seg_index for the array access.
__ LoadConstant(seg_index, __ LoadConstant(seg_index, WasmValue(imm.index << element_size_log2(kI32)));
WasmValue(imm.index << kWasmI32.element_size_log2()));
// Set the length of the segment to '0' to drop it. // Set the length of the segment to '0' to drop it.
LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -4162,17 +4149,16 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance); __ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_copy(); ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32, ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
kWasmI32}; ValueKindSig sig(1, 4, sig_reps);
FunctionSig sig(1, 4, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size}; LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
// We don't need the instance anymore after the call. We can use the // We don't need the instance anymore after the call. We can use the
// register for the result. // register for the result.
LiftoffRegister result(instance); LiftoffRegister result(instance);
GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref); GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap( Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds); decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp()); __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
} }
void MemoryFill(FullDecoder* decoder, void MemoryFill(FullDecoder* decoder,
@@ -4185,23 +4171,22 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance); __ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_fill(); ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32, ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
kWasmI32}; ValueKindSig sig(1, 4, sig_reps);
FunctionSig sig(1, 4, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size}; LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
// We don't need the instance anymore after the call. We can use the // We don't need the instance anymore after the call. We can use the
// register for the result. // register for the result.
LiftoffRegister result(instance); LiftoffRegister result(instance);
GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref); GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap( Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds); decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp()); __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
} }
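
MemoryInit, MemoryCopy and MemoryFill all share one shape: spill the arguments, call a C helper through GenerateCCall, and treat a zero return value as an out-of-bounds trap. A self-contained model of that contract (wasm_memory_copy_impl here is a stand-in, not the real external reference):

#include <cstdint>
#include <cstring>
#include <stdexcept>

// The C helper does the bounds check itself and signals failure with 0,
// mirroring the result.gp() == 0 comparison above.
int wasm_memory_copy_impl(uint8_t* mem, uint32_t mem_size, uint32_t dst,
                          uint32_t src, uint32_t size) {
  if (uint64_t{dst} + size > mem_size || uint64_t{src} + size > mem_size) {
    return 0;  // out of bounds
  }
  std::memmove(mem + dst, mem + src, size);
  return 1;
}

void MemoryCopy(uint8_t* mem, uint32_t mem_size, uint32_t dst, uint32_t src,
                uint32_t size) {
  if (wasm_memory_copy_impl(mem, mem_size, dst, src, size) == 0) {
    throw std::runtime_error("memory access out of bounds");  // trap label
  }
}
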
void LoadSmi(LiftoffRegister reg, int value) { void LoadSmi(LiftoffRegister reg, int value) {
Address smi_value = Smi::FromInt(value).ptr(); Address smi_value = Smi::FromInt(value).ptr();
using smi_type = using smi_type =
std::conditional_t<kSmiValueType == kWasmI32, int32_t, int64_t>; std::conditional_t<kSmiValueType == kI32, int32_t, int64_t>;
__ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)}); __ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)});
} }
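
LoadSmi materializes Smi::FromInt(value).ptr() as a plain integer constant, 32 or 64 bits wide depending on kSmiValueType. The bit layout relied on is the standard Smi encoding: tag bit 0 is zero and the payload sits above it, shifted by 32 on 64-bit targets with 32-bit Smis and by 1 with 31-bit Smis. A worked sketch of both encodings, assuming those layouts:

#include <cstdint>
#include <iostream>

// 64-bit target, 32-bit Smi payload: the value lives in the upper word.
uint64_t SmiFromInt64(int32_t v) {
  return static_cast<uint64_t>(static_cast<uint32_t>(v)) << 32;
}

// 31-bit Smi payload (e.g. with pointer compression): value << 1, tag bit 0.
uint32_t SmiFromInt32(int32_t v) { return static_cast<uint32_t>(v) << 1; }

int main() {
  std::cout << std::hex << SmiFromInt64(5) << " " << SmiFromInt32(5) << "\n";
  // prints: 500000000 a
}
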
@@ -4229,9 +4214,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableInitDescriptor>(compilation_zone_); GetBuiltinCallDescriptor<WasmTableInitDescriptor>(compilation_zone_);
ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32, kSmiValueType, ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
kSmiValueType}; ValueKindSig sig(0, 5, sig_reps);
FunctionSig sig(0, 5, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, __ PrepareBuiltinCall(&sig, call_descriptor,
{dst, src, size, table_index, segment_index}); {dst, src, size, table_index, segment_index});
@@ -4287,9 +4271,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(compilation_zone_); GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(compilation_zone_);
ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32, kSmiValueType, ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
kSmiValueType}; ValueKindSig sig(0, 5, sig_reps);
FunctionSig sig(0, 5, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, __ PrepareBuiltinCall(&sig, call_descriptor,
{dst, src, size, table_dst_index, table_src_index}); {dst, src, size, table_dst_index, table_src_index});
@@ -4320,13 +4303,12 @@ class LiftoffCompiler {
void StructNew(FullDecoder* decoder, void StructNew(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm, const Value& rtt, const StructIndexImmediate<validate>& imm, const Value& rtt,
bool initial_values_on_stack) { bool initial_values_on_stack) {
ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt; WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>( GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
compilation_zone_); compilation_zone_);
ValueType sig_reps[] = {struct_value_type, rtt.type}; ValueKind sig_reps[] = {kRef, rtt.type.kind()};
FunctionSig sig(1, 1, sig_reps); ValueKindSig sig(1, 1, sig_reps);
LiftoffAssembler::VarState rtt_value = LiftoffAssembler::VarState rtt_value =
__ cache_state()->stack_state.end()[-1]; __ cache_state()->stack_state.end()[-1];
__ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value}); __ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});
@@ -4340,19 +4322,19 @@ class LiftoffCompiler {
for (uint32_t i = imm.struct_type->field_count(); i > 0;) { for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
i--; i--;
int offset = StructFieldOffset(imm.struct_type, i); int offset = StructFieldOffset(imm.struct_type, i);
ValueType field_type = imm.struct_type->field(i); ValueKind field_kind = imm.struct_type->field(i).kind();
LiftoffRegister value = initial_values_on_stack LiftoffRegister value = initial_values_on_stack
? pinned.set(__ PopToRegister(pinned)) ? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister( : pinned.set(__ GetUnusedRegister(
reg_class_for(field_type), pinned)); reg_class_for(field_kind), pinned));
if (!initial_values_on_stack) { if (!initial_values_on_stack) {
if (!CheckSupportedType(decoder, field_type, "default value")) return; if (!CheckSupportedType(decoder, field_kind, "default value")) return;
SetDefaultValue(value, field_type, pinned); SetDefaultValue(value, field_kind, pinned);
} }
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type); StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
pinned.clear(value); pinned.clear(value);
} }
__ PushRegister(struct_value_type, obj); __ PushRegister(kRef, obj);
} }
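
The field-initialization loop above runs the index downwards because the initial values were pushed onto the wasm value stack left to right: popping returns the last field's value first. A minimal model of that ordering:

#include <cstddef>
#include <cstdint>
#include <stack>
#include <vector>

void InitFieldsFromStack(std::vector<uint64_t>* fields,
                         std::stack<uint64_t>* value_stack) {
  for (size_t i = fields->size(); i > 0;) {
    --i;  // last field first, matching the pop order
    (*fields)[i] = value_stack->top();  // PopToRegister stand-in
    value_stack->pop();
  }
}
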
void StructNewWithRtt(FullDecoder* decoder, void StructNewWithRtt(FullDecoder* decoder,
@@ -4371,34 +4353,34 @@ class LiftoffCompiler {
const FieldIndexImmediate<validate>& field, bool is_signed, const FieldIndexImmediate<validate>& field, bool is_signed,
Value* result) { Value* result) {
const StructType* struct_type = field.struct_index.struct_type; const StructType* struct_type = field.struct_index.struct_type;
ValueType field_type = struct_type->field(field.index); ValueKind field_kind = struct_type->field(field.index).kind();
if (!CheckSupportedType(decoder, field_type, "field load")) return; if (!CheckSupportedType(decoder, field_kind, "field load")) return;
int offset = StructFieldOffset(struct_type, field.index); int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned)); LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type); MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
LiftoffRegister value = LiftoffRegister value =
__ GetUnusedRegister(reg_class_for(field_type), pinned); __ GetUnusedRegister(reg_class_for(field_kind), pinned);
LoadObjectField(value, obj.gp(), no_reg, offset, field_type, is_signed, LoadObjectField(value, obj.gp(), no_reg, offset, field_kind, is_signed,
pinned); pinned);
__ PushRegister(field_type.Unpacked(), value); __ PushRegister(unpacked(field_kind), value);
} }
void StructSet(FullDecoder* decoder, const Value& struct_obj, void StructSet(FullDecoder* decoder, const Value& struct_obj,
const FieldIndexImmediate<validate>& field, const FieldIndexImmediate<validate>& field,
const Value& field_value) { const Value& field_value) {
const StructType* struct_type = field.struct_index.struct_type; const StructType* struct_type = field.struct_index.struct_type;
ValueType field_type = struct_type->field(field.index); ValueKind field_kind = struct_type->field(field.index).kind();
int offset = StructFieldOffset(struct_type, field.index); int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned)); LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type); MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type); StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
} }
void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm, void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
ValueType rtt_type, bool initial_value_on_stack) { ValueKind rtt_type, bool initial_value_on_stack) {
// Max length check. // Max length check.
{ {
LiftoffRegister length = LiftoffRegister length =
@@ -4408,24 +4390,23 @@ class LiftoffCompiler {
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(), __ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
static_cast<int>(wasm::kV8MaxWasmArrayLength)); static_cast<int>(wasm::kV8MaxWasmArrayLength));
} }
ValueType array_value_type = ValueType::Ref(imm.index, kNonNullable); ValueKind elem_kind = imm.array_type->element_type().kind();
ValueType elem_type = imm.array_type->element_type(); int elem_size = element_size_bytes(elem_kind);
int elem_size = elem_type.element_size_bytes();
// Allocate the array. // Allocate the array.
{ {
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateArrayWithRtt; WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateArrayWithRtt;
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateArrayWithRttDescriptor>( GetBuiltinCallDescriptor<WasmAllocateArrayWithRttDescriptor>(
compilation_zone_); compilation_zone_);
ValueType sig_reps[] = {array_value_type, rtt_type, kWasmI32, kWasmI32}; ValueKind sig_reps[] = {kRef, rtt_type, kI32, kI32};
FunctionSig sig(1, 3, sig_reps); ValueKindSig sig(1, 3, sig_reps);
LiftoffAssembler::VarState rtt_var = LiftoffAssembler::VarState rtt_var =
__ cache_state()->stack_state.end()[-1]; __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState length_var = LiftoffAssembler::VarState length_var =
__ cache_state()->stack_state.end()[-2]; __ cache_state()->stack_state.end()[-2];
LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {}); LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(elem_size_reg, WasmValue(elem_size)); __ LoadConstant(elem_size_reg, WasmValue(elem_size));
LiftoffAssembler::VarState elem_size_var(kWasmI32, elem_size_reg, 0); LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, __ PrepareBuiltinCall(&sig, call_descriptor,
{rtt_var, length_var, elem_size_var}); {rtt_var, length_var, elem_size_var});
__ CallRuntimeStub(target); __ CallRuntimeStub(target);
@@ -4440,10 +4421,10 @@ class LiftoffCompiler {
LiftoffRegister value = initial_value_on_stack LiftoffRegister value = initial_value_on_stack
? pinned.set(__ PopToRegister(pinned)) ? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister( : pinned.set(__ GetUnusedRegister(
reg_class_for(elem_type), pinned)); reg_class_for(elem_kind), pinned));
if (!initial_value_on_stack) { if (!initial_value_on_stack) {
if (!CheckSupportedType(decoder, elem_type, "default value")) return; if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
SetDefaultValue(value, elem_type, pinned); SetDefaultValue(value, elem_kind, pinned);
} }
// Initialize the array's elements. // Initialize the array's elements.
@@ -4452,34 +4433,34 @@ class LiftoffCompiler {
offset, offset,
WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize))); WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
LiftoffRegister end_offset = length; LiftoffRegister end_offset = length;
if (elem_type.element_size_log2() != 0) { if (element_size_log2(elem_kind) != 0) {
__ emit_i32_shli(end_offset.gp(), length.gp(), __ emit_i32_shli(end_offset.gp(), length.gp(),
elem_type.element_size_log2()); element_size_log2(elem_kind));
} }
__ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp()); __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
Label loop, done; Label loop, done;
__ bind(&loop); __ bind(&loop);
__ emit_cond_jump(kUnsignedGreaterEqual, &done, kWasmI32, offset.gp(), __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
end_offset.gp()); end_offset.gp());
StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_type); StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
__ emit_i32_addi(offset.gp(), offset.gp(), elem_size); __ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
__ emit_jump(&loop); __ emit_jump(&loop);
__ bind(&done); __ bind(&done);
__ PushRegister(array_value_type, obj); __ PushRegister(kRef, obj);
} }
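
The initialization loop in ArrayNew walks byte offsets rather than element indices: it starts at the tagged header size, computes the end as header + (length << element_size_log2), and advances by the element size. A scalar model of that arithmetic (the header size and little-endian stores are illustrative assumptions):

#include <cstdint>
#include <cstring>
#include <vector>

constexpr uint32_t kHeaderSize = 16;  // stand-in for WasmArray::kHeaderSize

void InitArrayElements(std::vector<uint8_t>* obj, uint32_t length,
                       uint32_t elem_size_log2, uint64_t value) {
  const uint32_t elem_size = 1u << elem_size_log2;
  uint32_t offset = kHeaderSize;
  const uint32_t end_offset = (length << elem_size_log2) + offset;
  while (offset < end_offset) {  // the kUnsignedGreaterEqual exit test above
    std::memcpy(obj->data() + offset, &value, elem_size);  // StoreObjectField
    offset += elem_size;  // emit_i32_addi(offset, offset, elem_size)
  }
}
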
void ArrayNewWithRtt(FullDecoder* decoder, void ArrayNewWithRtt(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm, const ArrayIndexImmediate<validate>& imm,
const Value& length_value, const Value& initial_value, const Value& length_value, const Value& initial_value,
const Value& rtt, Value* result) { const Value& rtt, Value* result) {
ArrayNew(decoder, imm, rtt.type, true); ArrayNew(decoder, imm, rtt.type.kind(), true);
} }
void ArrayNewDefault(FullDecoder* decoder, void ArrayNewDefault(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm, const ArrayIndexImmediate<validate>& imm,
const Value& length, const Value& rtt, Value* result) { const Value& length, const Value& rtt, Value* result) {
ArrayNew(decoder, imm, rtt.type, false); ArrayNew(decoder, imm, rtt.type.kind(), false);
} }
void ArrayGet(FullDecoder* decoder, const Value& array_obj, void ArrayGet(FullDecoder* decoder, const Value& array_obj,
@@ -4490,17 +4471,17 @@ class LiftoffCompiler {
LiftoffRegister array = pinned.set(__ PopToRegister(pinned)); LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type); MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheck(decoder, array, index, pinned); BoundsCheck(decoder, array, index, pinned);
ValueType elem_type = imm.array_type->element_type(); ValueKind elem_kind = imm.array_type->element_type().kind();
if (!CheckSupportedType(decoder, elem_type, "array load")) return; if (!CheckSupportedType(decoder, elem_kind, "array load")) return;
int elem_size_shift = elem_type.element_size_log2(); int elem_size_shift = element_size_log2(elem_kind);
if (elem_size_shift != 0) { if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift); __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
} }
LiftoffRegister value = __ GetUnusedRegister(kGpReg, {array}, pinned); LiftoffRegister value = __ GetUnusedRegister(kGpReg, {array}, pinned);
LoadObjectField(value, array.gp(), index.gp(), LoadObjectField(value, array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize), wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
elem_type, is_signed, pinned); elem_kind, is_signed, pinned);
__ PushRegister(elem_type.Unpacked(), value); __ PushRegister(unpacked(elem_kind), value);
} }
void ArraySet(FullDecoder* decoder, const Value& array_obj, void ArraySet(FullDecoder* decoder, const Value& array_obj,
@@ -4512,14 +4493,14 @@ class LiftoffCompiler {
LiftoffRegister array = pinned.set(__ PopToRegister(pinned)); LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type); MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheck(decoder, array, index, pinned); BoundsCheck(decoder, array, index, pinned);
ValueType elem_type = imm.array_type->element_type(); ValueKind elem_kind = imm.array_type->element_type().kind();
int elem_size_shift = elem_type.element_size_log2(); int elem_size_shift = element_size_log2(elem_kind);
if (elem_size_shift != 0) { if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift); __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
} }
StoreObjectField(array.gp(), index.gp(), StoreObjectField(array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize), wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
value, pinned, elem_type); value, pinned, elem_kind);
} }
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) { void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
@@ -4528,9 +4509,8 @@ class LiftoffCompiler {
MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type); MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type);
LiftoffRegister len = __ GetUnusedRegister(kGpReg, pinned); LiftoffRegister len = __ GetUnusedRegister(kGpReg, pinned);
int kLengthOffset = wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset); int kLengthOffset = wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kWasmI32, false, LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kI32, false, pinned);
pinned); __ PushRegister(kI32, len);
__ PushRegister(kWasmI32, len);
} }
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation. // 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
@@ -4546,7 +4526,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits()); DCHECK(SmiValuesAre32Bits());
__ emit_i64_shli(dst, src, kI31To32BitSmiShift); __ emit_i64_shli(dst, src, kI31To32BitSmiShift);
} }
__ PushRegister(kWasmI31Ref, dst); __ PushRegister(kRef, dst);
} }
void I31GetS(FullDecoder* decoder, const Value& input, Value* result) { void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
@@ -4558,7 +4538,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits()); DCHECK(SmiValuesAre32Bits());
__ emit_i64_sari(dst, src, kI31To32BitSmiShift); __ emit_i64_sari(dst, src, kI31To32BitSmiShift);
} }
__ PushRegister(kWasmI32, dst); __ PushRegister(kI32, dst);
} }
void I31GetU(FullDecoder* decoder, const Value& input, Value* result) { void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
@@ -4570,7 +4550,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits()); DCHECK(SmiValuesAre32Bits());
__ emit_i64_shri(dst, src, kI31To32BitSmiShift); __ emit_i64_shri(dst, src, kI31To32BitSmiShift);
} }
__ PushRegister(kWasmI32, dst); __ PushRegister(kI32, dst);
} }
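
The three i31 operations above are pure shifts once the bit budget in the comment is accepted: 1 tag bit + 31 Smi payload bits + 1 truncated high bit gives a shift of 33 on targets where SmiValuesAre32Bits. Worked arithmetic (assuming arithmetic right shift for signed values, which mainstream compilers provide):

#include <cstdint>
#include <iostream>

constexpr int kI31To32BitSmiShift = 33;  // 1 (tag) + 31 (payload) + 1 (truncation)

uint64_t I31New(int32_t v) {  // emit_i64_shli
  return static_cast<uint64_t>(static_cast<int64_t>(v)) << kI31To32BitSmiShift;
}
int32_t I31GetS(uint64_t smi) {  // emit_i64_sari: sign-extends bit 30
  return static_cast<int32_t>(static_cast<int64_t>(smi) >> kI31To32BitSmiShift);
}
uint32_t I31GetU(uint64_t smi) {  // emit_i64_shri: zero-extends
  return static_cast<uint32_t>(smi >> kI31To32BitSmiShift);
}

int main() {
  uint64_t smi = I31New(-1);  // -1 truncated to 31 bits
  std::cout << I31GetS(smi) << " " << I31GetU(smi) << "\n";
  // prints: -1 2147483647
}
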
void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) { void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
@@ -4579,24 +4559,23 @@ class LiftoffCompiler {
__ LoadTaggedPointer( __ LoadTaggedPointer(
rtt.gp(), rtt.gp(), no_reg, rtt.gp(), rtt.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index), {}); wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index), {});
__ PushRegister(ValueType::Rtt(type_index, 1), rtt); __ PushRegister(kRttWithDepth, rtt);
} }
void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent, void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
Value* result) { Value* result) {
ValueType parent_value_type = parent.type; ValueKind parent_value_kind = parent.type.kind();
ValueType rtt_value_type = ValueKind rtt_value_type = kRttWithDepth;
ValueType::Rtt(type_index, parent_value_type.depth() + 1);
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt; WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_); GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
ValueType sig_reps[] = {rtt_value_type, kWasmI32, parent_value_type}; ValueKind sig_reps[] = {rtt_value_type, kI32, parent_value_kind};
FunctionSig sig(1, 2, sig_reps); ValueKindSig sig(1, 2, sig_reps);
LiftoffAssembler::VarState parent_var = LiftoffAssembler::VarState parent_var =
__ cache_state()->stack_state.end()[-1]; __ cache_state()->stack_state.end()[-1];
LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {}); LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(type_reg, WasmValue(type_index)); __ LoadConstant(type_reg, WasmValue(type_index));
LiftoffAssembler::VarState type_var(kWasmI32, type_reg, 0); LiftoffAssembler::VarState type_var(kI32, type_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var}); __ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
__ CallRuntimeStub(target); __ CallRuntimeStub(target);
DefineSafepoint(); DefineSafepoint();
@@ -4630,8 +4609,8 @@ class LiftoffCompiler {
LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
if (obj.type.is_nullable()) { if (obj.type.is_nullable()) {
LoadNullValue(tmp1.gp(), pinned); LoadNullValue(tmp1.gp(), pinned);
__ emit_cond_jump(kEqual, null_succeeds ? &match : no_match, obj.type, __ emit_cond_jump(kEqual, null_succeeds ? &match : no_match,
obj_reg.gp(), tmp1.gp()); obj.type.kind(), obj_reg.gp(), tmp1.gp());
} }
// Perform a regular type check. Check for exact match first. // Perform a regular type check. Check for exact match first.
@@ -4641,7 +4620,8 @@ class LiftoffCompiler {
if (decoder->module_->has_signature(rtt.type.ref_index())) { if (decoder->module_->has_signature(rtt.type.ref_index())) {
// Function case: currently, the only way for a function to match an rtt // Function case: currently, the only way for a function to match an rtt
// is if its map is equal to that rtt. // is if its map is equal to that rtt.
__ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp()); __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
rtt_reg.gp());
__ bind(&match); __ bind(&match);
return obj_reg; return obj_reg;
} }
@@ -4650,7 +4630,7 @@ class LiftoffCompiler {
// Check for rtt equality, and if not, check if the rtt is a struct/array // Check for rtt equality, and if not, check if the rtt is a struct/array
// rtt. // rtt.
__ emit_cond_jump(kEqual, &match, rtt.type, tmp1.gp(), rtt_reg.gp()); __ emit_cond_jump(kEqual, &match, rtt.type.kind(), tmp1.gp(), rtt_reg.gp());
// Constant-time subtyping check: load exactly one candidate RTT from the // Constant-time subtyping check: load exactly one candidate RTT from the
// supertypes list. // supertypes list.
@@ -4674,7 +4654,8 @@ class LiftoffCompiler {
tmp1.gp(), tmp1.gp(), no_reg, tmp1.gp(), tmp1.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()), wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
pinned); pinned);
__ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp()); __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
rtt_reg.gp());
} else { } else {
// Preserve {obj_reg} across the call. // Preserve {obj_reg} across the call.
LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg); LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg);
@@ -4683,15 +4664,15 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmSubtypeCheckDescriptor>( GetBuiltinCallDescriptor<WasmSubtypeCheckDescriptor>(
compilation_zone_); compilation_zone_);
ValueType sig_reps[] = {kWasmI32, kWasmAnyRef, rtt.type}; ValueKind sig_reps[] = {kI32, kOptRef, rtt.type.kind()};
FunctionSig sig(1, 2, sig_reps); ValueKindSig sig(1, 2, sig_reps);
LiftoffAssembler::VarState rtt_state(kPointerValueType, rtt_reg, 0); LiftoffAssembler::VarState rtt_state(kPointerValueType, rtt_reg, 0);
LiftoffAssembler::VarState tmp1_state(kPointerValueType, tmp1, 0); LiftoffAssembler::VarState tmp1_state(kPointerValueType, tmp1, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {tmp1_state, rtt_state}); __ PrepareBuiltinCall(&sig, call_descriptor, {tmp1_state, rtt_state});
__ CallRuntimeStub(target); __ CallRuntimeStub(target);
DefineSafepoint(); DefineSafepoint();
__ PopRegisters(saved_regs); __ PopRegisters(saved_regs);
__ Move(tmp1.gp(), kReturnRegister0, kWasmI32); __ Move(tmp1.gp(), kReturnRegister0, kI32);
__ emit_i32_cond_jumpi(kEqual, no_match, tmp1.gp(), 0); __ emit_i32_cond_jumpi(kEqual, no_match, tmp1.gp(), 0);
} }
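
The key property exploited by the fast path above: an rtt whose static depth is d can only appear, if the types are related at all, at index d of an object's supertypes list, so the subtype check is one indexed load plus one comparison, never a loop. A container-level sketch of that idea (the explicit length guard is part of the sketch):

#include <cstdint>
#include <vector>

struct Rtt {
  const void* map;
  uint32_t depth;  // statically known to the compiler above
};

bool SubtypeCheck(const void* object_map,
                  const std::vector<const void*>& supertypes, const Rtt& rtt) {
  if (object_map == rtt.map) return true;  // exact match, checked first
  // A related rtt of depth d sits exactly at index d: one load, one compare.
  if (rtt.depth >= supertypes.size()) return false;  // guard for the sketch
  return supertypes[rtt.depth] == rtt.map;
}
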
@@ -4716,7 +4697,7 @@ class LiftoffCompiler {
__ bind(&return_false); __ bind(&return_false);
__ LoadConstant(result, WasmValue(0)); __ LoadConstant(result, WasmValue(0));
__ bind(&done); __ bind(&done);
__ PushRegister(kWasmI32, result); __ PushRegister(kI32, result);
} }
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt, void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
@@ -4725,8 +4706,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapIllegalCast); WasmCode::kThrowWasmTrapIllegalCast);
LiftoffRegister obj_reg = LiftoffRegister obj_reg =
SubtypeCheck(decoder, obj, rtt, trap_label, kNullSucceeds); SubtypeCheck(decoder, obj, rtt, trap_label, kNullSucceeds);
__ PushRegister( __ PushRegister(obj.type.kind(), obj_reg);
ValueType::Ref(rtt.type.ref_index(), obj.type.nullability()), obj_reg);
} }
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt, void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
@@ -4742,17 +4722,13 @@ class LiftoffCompiler {
LiftoffRegister obj_reg = LiftoffRegister obj_reg =
SubtypeCheck(decoder, obj, rtt, &cont_false, kNullFails); SubtypeCheck(decoder, obj, rtt, &cont_false, kNullFails);
__ PushRegister( __ PushRegister(rtt.type.is_bottom() ? kBottom : obj.type.kind(), obj_reg);
rtt.type.is_bottom()
? kWasmBottom
: ValueType::Ref(rtt.type.ref_index(), obj.type.nullability()),
obj_reg);
BrOrRet(decoder, depth); BrOrRet(decoder, depth);
__ bind(&cont_false); __ bind(&cont_false);
// Drop the branch's value, restore original value. // Drop the branch's value, restore original value.
Drop(decoder); Drop(decoder);
__ PushRegister(obj.type, obj_reg); __ PushRegister(obj.type.kind(), obj_reg);
} }
// Abstract type checkers. They all return the object register and fall // Abstract type checkers. They all return the object register and fall
@@ -4771,7 +4747,7 @@ class LiftoffCompiler {
if (obj.type.is_nullable()) { if (obj.type.is_nullable()) {
LoadNullValue(tmp1.gp(), pinned); LoadNullValue(tmp1.gp(), pinned);
__ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp()); __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
} }
__ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi); __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
@@ -4796,7 +4772,7 @@ class LiftoffCompiler {
if (obj.type.is_nullable()) { if (obj.type.is_nullable()) {
LoadNullValue(tmp1.gp(), pinned); LoadNullValue(tmp1.gp(), pinned);
__ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp()); __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
} }
__ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi); __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
@@ -4841,7 +4817,7 @@ class LiftoffCompiler {
__ bind(&no_match); __ bind(&no_match);
__ LoadConstant(result, WasmValue(0)); __ LoadConstant(result, WasmValue(0));
__ bind(&done); __ bind(&done);
__ PushRegister(kWasmI32, result); __ PushRegister(kI32, result);
} }
void RefIsData(FullDecoder* /* decoder */, const Value& object, void RefIsData(FullDecoder* /* decoder */, const Value& object,
@@ -4861,36 +4837,33 @@ class LiftoffCompiler {
template <TypeChecker type_checker> template <TypeChecker type_checker>
void AbstractTypeCast(const Value& object, FullDecoder* decoder, void AbstractTypeCast(const Value& object, FullDecoder* decoder,
ValueType result_type) { ValueKind result_kind) {
Label* trap_label = AddOutOfLineTrap(decoder->position(), Label* trap_label = AddOutOfLineTrap(decoder->position(),
WasmCode::kThrowWasmTrapIllegalCast); WasmCode::kThrowWasmTrapIllegalCast);
Label match; Label match;
LiftoffRegister obj_reg = LiftoffRegister obj_reg =
(this->*type_checker)(object, trap_label, {}, no_reg); (this->*type_checker)(object, trap_label, {}, no_reg);
__ bind(&match); __ bind(&match);
__ PushRegister(result_type, obj_reg); __ PushRegister(result_kind, obj_reg);
} }
void RefAsData(FullDecoder* decoder, const Value& object, void RefAsData(FullDecoder* decoder, const Value& object,
Value* /* result */) { Value* /* result */) {
return AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder, return AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder, kRef);
kWasmDataRef);
} }
void RefAsFunc(FullDecoder* decoder, const Value& object, void RefAsFunc(FullDecoder* decoder, const Value& object,
Value* /* result */) { Value* /* result */) {
return AbstractTypeCast<&LiftoffCompiler::FuncCheck>( return AbstractTypeCast<&LiftoffCompiler::FuncCheck>(object, decoder, kRef);
object, decoder, ValueType::Ref(HeapType::kFunc, kNonNullable));
} }
void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) { void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef);
kWasmI31Ref);
} }
template <TypeChecker type_checker> template <TypeChecker type_checker>
void BrOnAbstractType(const Value& object, FullDecoder* decoder, void BrOnAbstractType(const Value& object, FullDecoder* decoder,
uint32_t br_depth, ValueType result_type) { uint32_t br_depth, ValueKind result_kind) {
// Before branching, materialize all constants. This avoids repeatedly // Before branching, materialize all constants. This avoids repeatedly
// materializing them for each conditional branch. // materializing them for each conditional branch.
if (br_depth != decoder->control_depth() - 1) { if (br_depth != decoder->control_depth() - 1) {
@@ -4903,32 +4876,31 @@ class LiftoffCompiler {
(this->*type_checker)(object, &no_match, {}, no_reg); (this->*type_checker)(object, &no_match, {}, no_reg);
__ bind(&match); __ bind(&match);
__ PushRegister(result_type, obj_reg); __ PushRegister(result_kind, obj_reg);
BrOrRet(decoder, br_depth); BrOrRet(decoder, br_depth);
__ bind(&no_match); __ bind(&no_match);
// Drop the branch's value, restore original value. // Drop the branch's value, restore original value.
Drop(decoder); Drop(decoder);
__ PushRegister(object.type, obj_reg); __ PushRegister(object.type.kind(), obj_reg);
} }
void BrOnData(FullDecoder* decoder, const Value& object, void BrOnData(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) { Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::DataCheck>( return BrOnAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
object, decoder, br_depth, kWasmDataRef); br_depth, kRef);
} }
void BrOnFunc(FullDecoder* decoder, const Value& object, void BrOnFunc(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) { Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::FuncCheck>( return BrOnAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder,
object, decoder, br_depth, br_depth, kRef);
ValueType::Ref(HeapType::kFunc, kNonNullable));
} }
void BrOnI31(FullDecoder* decoder, const Value& object, void BrOnI31(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) { Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder, return BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder,
br_depth, kWasmI31Ref); br_depth, kRef);
} }
void Forward(FullDecoder* decoder, const Value& from, Value* to) { void Forward(FullDecoder* decoder, const Value& from, Value* to) {
@@ -4936,10 +4908,20 @@ class LiftoffCompiler {
} }
private: private:
ValueKindSig* MakeKindSig(Zone* zone, const FunctionSig* sig) {
ValueKind* reps =
zone->NewArray<ValueKind>(sig->parameter_count() + sig->return_count());
ValueKind* ptr = reps;
for (ValueType type : sig->all()) *ptr++ = type.kind();
return zone->New<ValueKindSig>(sig->return_count(), sig->parameter_count(),
reps);
}
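
MakeKindSig is the bridge this CL needs at call sites: descriptors and validation still speak ValueType, while Liftoff's register handling only needs the kind. V8's Signature<T> lays out returns first, then parameters, in one flat array, which is why a single pass over sig->all() suffices. A freestanding model of the same mapping (minimal re-implementations for illustration, not the V8 classes):

#include <cstddef>
#include <vector>

enum class ValueKind { kI32, kI64, kF32, kF64, kRef };
struct ValueType {
  ValueKind kind_;
  ValueKind kind() const { return kind_; }
};

template <typename T>
struct Signature {
  size_t return_count;
  size_t parameter_count;
  std::vector<T> reps;  // returns first, then parameters
};

Signature<ValueKind> MakeKindSig(const Signature<ValueType>& sig) {
  Signature<ValueKind> out{sig.return_count, sig.parameter_count, {}};
  out.reps.reserve(sig.reps.size());
  for (ValueType type : sig.reps) out.reps.push_back(type.kind());
  return out;
}
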
void CallDirect(FullDecoder* decoder, void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm, const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[], CallKind call_kind) { const Value args[], Value returns[], CallKind call_kind) {
for (ValueType ret : imm.sig->returns()) { ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return; if (!CheckSupportedType(decoder, ret, "return")) return;
} }
@@ -4969,7 +4951,7 @@ class LiftoffCompiler {
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned); ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
Register* explicit_instance = &imported_function_ref; Register* explicit_instance = &imported_function_ref;
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance); __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
if (call_kind == kReturnCall) { if (call_kind == kReturnCall) {
__ PrepareTailCall( __ PrepareTailCall(
static_cast<int>(call_descriptor->StackParameterCount()), static_cast<int>(call_descriptor->StackParameterCount()),
@@ -4979,11 +4961,11 @@ class LiftoffCompiler {
} else { } else {
source_position_table_builder_.AddPosition( source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true); __ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(imm.sig, call_descriptor, target); __ CallIndirect(sig, call_descriptor, target);
} }
} else { } else {
// A direct call within this module just gets the current instance. // A direct call within this module just gets the current instance.
__ PrepareCall(imm.sig, call_descriptor); __ PrepareCall(sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation. // Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index); Address addr = static_cast<Address>(imm.index);
if (call_kind == kReturnCall) { if (call_kind == kReturnCall) {
@@ -5003,16 +4985,17 @@ class LiftoffCompiler {
DefineSafepoint(); DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill); RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ FinishCall(imm.sig, call_descriptor); __ FinishCall(sig, call_descriptor);
} }
void CallIndirect(FullDecoder* decoder, const Value& index_val, void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm, const CallIndirectImmediate<validate>& imm,
CallKind call_kind) { CallKind call_kind) {
ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
if (imm.table_index != 0) { if (imm.table_index != 0) {
return unsupported(decoder, kRefTypes, "table index != 0"); return unsupported(decoder, kRefTypes, "table index != 0");
} }
for (ValueType ret : imm.sig->returns()) { for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return; if (!CheckSupportedType(decoder, ret, "return")) return;
} }
@@ -5038,8 +5021,8 @@ class LiftoffCompiler {
// {instance->indirect_function_table_size}. // {instance->indirect_function_table_size}.
LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size, LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
pinned); pinned);
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32, __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
index, tmp_const); tmp_const);
// Mask the index to prevent SSCA. // Mask the index to prevent SSCA.
if (FLAG_untrusted_code_mitigations) { if (FLAG_untrusted_code_mitigations) {
@@ -5078,8 +5061,8 @@ class LiftoffCompiler {
Label* sig_mismatch_label = AddOutOfLineTrap( Label* sig_mismatch_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch); decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
__ emit_cond_jump(kUnequal, sig_mismatch_label, __ emit_cond_jump(kUnequal, sig_mismatch_label, LiftoffAssembler::kIntPtr,
LiftoffAssembler::kWasmIntPtr, scratch, tmp_const); scratch, tmp_const);
// At this point {index} has already been multiplied by 4. // At this point {index} has already been multiplied by 4.
DEBUG_CODE_COMMENT("Execute indirect call"); DEBUG_CODE_COMMENT("Execute indirect call");
...@@ -5117,7 +5100,7 @@ class LiftoffCompiler { ...@@ -5117,7 +5100,7 @@ class LiftoffCompiler {
GetLoweredCallDescriptor(compilation_zone_, call_descriptor); GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
Register target = scratch; Register target = scratch;
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance); __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
if (call_kind == kReturnCall) { if (call_kind == kReturnCall) {
__ PrepareTailCall( __ PrepareTailCall(
static_cast<int>(call_descriptor->StackParameterCount()), static_cast<int>(call_descriptor->StackParameterCount()),
...@@ -5127,22 +5110,23 @@ class LiftoffCompiler { ...@@ -5127,22 +5110,23 @@ class LiftoffCompiler {
} else { } else {
source_position_table_builder_.AddPosition( source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true); __ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(imm.sig, call_descriptor, target); __ CallIndirect(sig, call_descriptor, target);
} }
DefineSafepoint(); DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill); RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ FinishCall(imm.sig, call_descriptor); __ FinishCall(sig, call_descriptor);
} }
void CallRef(FullDecoder* decoder, ValueType func_ref_type, void CallRef(FullDecoder* decoder, ValueType func_ref_type,
const FunctionSig* sig, CallKind call_kind) { const FunctionSig* type_sig, CallKind call_kind) {
for (ValueType ret : sig->returns()) { ValueKindSig* sig = MakeKindSig(compilation_zone_, type_sig);
for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return; if (!CheckSupportedType(decoder, ret, "return")) return;
} }
compiler::CallDescriptor* call_descriptor = compiler::CallDescriptor* call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, sig); compiler::GetWasmCallDescriptor(compilation_zone_, type_sig);
call_descriptor = call_descriptor =
GetLoweredCallDescriptor(compilation_zone_, call_descriptor); GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
...@@ -5211,7 +5195,7 @@ class LiftoffCompiler { ...@@ -5211,7 +5195,7 @@ class LiftoffCompiler {
imported_function_refs.gp(), pinned); imported_function_refs.gp(), pinned);
Label imported; Label imported;
__ emit_cond_jump(kSignedLessThan, &imported, kWasmI32, func_index.gp(), __ emit_cond_jump(kSignedLessThan, &imported, kI32, func_index.gp(),
imported_functions_num.gp()); imported_functions_num.gp());
{ {
...@@ -5293,19 +5277,19 @@ class LiftoffCompiler { ...@@ -5293,19 +5277,19 @@ class LiftoffCompiler {
compiler::CallDescriptor* builtin_call_descriptor = compiler::CallDescriptor* builtin_call_descriptor =
GetBuiltinCallDescriptor<WasmAllocatePairDescriptor>( GetBuiltinCallDescriptor<WasmAllocatePairDescriptor>(
compilation_zone_); compilation_zone_);
ValueType sig_reps[] = {kWasmAnyRef, kWasmAnyRef, kWasmAnyRef}; ValueKind sig_reps[] = {kOptRef, kOptRef, kOptRef};
FunctionSig builtin_sig(1, 2, sig_reps); ValueKindSig builtin_sig(1, 2, sig_reps);
LiftoffRegister current_instance = instance; LiftoffRegister current_instance = instance;
__ FillInstanceInto(current_instance.gp()); __ FillInstanceInto(current_instance.gp());
LiftoffAssembler::VarState instance_var(kWasmAnyRef, current_instance, 0); LiftoffAssembler::VarState instance_var(kOptRef, current_instance, 0);
LiftoffAssembler::VarState callable_var(kWasmFuncRef, callable, 0); LiftoffAssembler::VarState callable_var(kOptRef, callable, 0);
__ PrepareBuiltinCall(&builtin_sig, builtin_call_descriptor, __ PrepareBuiltinCall(&builtin_sig, builtin_call_descriptor,
{instance_var, callable_var}); {instance_var, callable_var});
__ CallRuntimeStub(builtin); __ CallRuntimeStub(builtin);
DefineSafepoint(); DefineSafepoint();
if (instance.gp() != kReturnRegister0) { if (instance.gp() != kReturnRegister0) {
__ Move(instance.gp(), kReturnRegister0, LiftoffAssembler::kWasmIntPtr); __ Move(instance.gp(), kReturnRegister0, LiftoffAssembler::kIntPtr);
} }
// Restore {func_data}, which we saved across the call. // Restore {func_data}, which we saved across the call.
...@@ -5357,7 +5341,7 @@ class LiftoffCompiler { ...@@ -5357,7 +5341,7 @@ class LiftoffCompiler {
decoder->position(), WasmCode::kThrowWasmTrapNullDereference); decoder->position(), WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned); LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned); LoadNullValue(null.gp(), pinned);
__ emit_cond_jump(LiftoffCondition::kEqual, trap_label, type, object, __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kOptRef, object,
null.gp()); null.gp());
} }
...@@ -5370,8 +5354,8 @@ class LiftoffCompiler { ...@@ -5370,8 +5354,8 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset); wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
__ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load, __ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load,
pinned); pinned);
__ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label, __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label, kI32,
kWasmI32, index.gp(), length.gp()); index.gp(), length.gp());
} }
int StructFieldOffset(const StructType* struct_type, int field_index) { int StructFieldOffset(const StructType* struct_type, int field_index) {
...@@ -5380,33 +5364,33 @@ class LiftoffCompiler { ...@@ -5380,33 +5364,33 @@ class LiftoffCompiler {
} }
void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg, void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg,
int offset, ValueType type, bool is_signed, int offset, ValueKind kind, bool is_signed,
LiftoffRegList pinned) { LiftoffRegList pinned) {
if (type.is_reference_type()) { if (is_reference_type(kind)) {
__ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned); __ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
} else { } else {
// Primitive type. // Primitive kind.
LoadType load_type = LoadType::ForValueType(type, is_signed); LoadType load_type = LoadType::ForValueKind(kind, is_signed);
__ Load(dst, src, offset_reg, offset, load_type, pinned); __ Load(dst, src, offset_reg, offset, load_type, pinned);
} }
} }
void StoreObjectField(Register obj, Register offset_reg, int offset, void StoreObjectField(Register obj, Register offset_reg, int offset,
LiftoffRegister value, LiftoffRegList pinned, LiftoffRegister value, LiftoffRegList pinned,
ValueType type) { ValueKind kind) {
if (type.is_reference_type()) { if (is_reference_type(kind)) {
__ StoreTaggedPointer(obj, offset_reg, offset, value, pinned); __ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
} else { } else {
// Primitive type. // Primitive kind.
StoreType store_type = StoreType::ForValueType(type); StoreType store_type = StoreType::ForValueKind(kind);
__ Store(obj, offset_reg, offset, value, store_type, pinned); __ Store(obj, offset_reg, offset, value, store_type, pinned);
} }
} }
void SetDefaultValue(LiftoffRegister reg, ValueType type, void SetDefaultValue(LiftoffRegister reg, ValueKind kind,
LiftoffRegList pinned) { LiftoffRegList pinned) {
DCHECK(type.is_defaultable()); DCHECK(is_defaultable(kind));
switch (type.kind()) { switch (kind) {
case kI8: case kI8:
case kI16: case kI16:
case kI32: case kI32:
......
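Editor's note: the call paths above (direct call, CallIndirect, CallRef) now build a ValueKindSig once per call site via MakeKindSig and thread it through PrepareCall/CallIndirect/FinishCall, so code generation never touches full ValueTypes again. Below is a minimal standalone sketch of that signature-narrowing step; ValueType, Signature, and make_kind_sig here are simplified stand-ins for illustration, not the V8 API.

// Standalone sketch: collapse a signature over full value types into a
// signature over value kinds, keeping only what code generation needs.
// All types below are simplified stand-ins for the V8 originals.
#include <cassert>
#include <cstdint>
#include <vector>

enum ValueKind : uint8_t { kI32, kI64, kF32, kF64, kRef, kOptRef };

struct ValueType {      // full type: kind plus validation-only payload
  ValueKind kind;
  uint32_t heap_index;  // only meaningful for reference types
};

template <typename T>
struct Signature {
  std::vector<T> returns;
  std::vector<T> params;
};

Signature<ValueKind> make_kind_sig(const Signature<ValueType>& sig) {
  Signature<ValueKind> out;
  for (const ValueType& t : sig.returns) out.returns.push_back(t.kind);
  for (const ValueType& t : sig.params) out.params.push_back(t.kind);
  return out;
}

int main() {
  Signature<ValueType> sig{{{kI32, 0}}, {{kRef, 7}, {kI64, 0}}};
  Signature<ValueKind> kinds = make_kind_sig(sig);
  assert(kinds.returns[0] == kI32 && kinds.params[0] == kRef);
}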
...@@ -45,12 +45,12 @@ static_assert(kNeedS128RegPair == (kFpRegPair != kNoReg), ...@@ -45,12 +45,12 @@ static_assert(kNeedS128RegPair == (kFpRegPair != kNoReg),
enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 }; enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 };
static inline constexpr bool needs_gp_reg_pair(ValueType type) { static inline constexpr bool needs_gp_reg_pair(ValueKind kind) {
return kNeedI64RegPair && type == kWasmI64; return kNeedI64RegPair && kind == kI64;
} }
static inline constexpr bool needs_fp_reg_pair(ValueType type) { static inline constexpr bool needs_fp_reg_pair(ValueKind kind) {
return kNeedS128RegPair && type == kWasmS128; return kNeedS128RegPair && kind == kS128;
} }
static inline constexpr RegClass reg_class_for(ValueKind kind) { static inline constexpr RegClass reg_class_for(ValueKind kind) {
...@@ -72,14 +72,10 @@ static inline constexpr RegClass reg_class_for(ValueKind kind) { ...@@ -72,14 +72,10 @@ static inline constexpr RegClass reg_class_for(ValueKind kind) {
case kRttWithDepth: case kRttWithDepth:
return kGpReg; return kGpReg;
default: default:
return kNoReg; // unsupported type return kNoReg; // unsupported kind
} }
} }
static inline constexpr RegClass reg_class_for(ValueType type) {
return reg_class_for(type.kind());
}
// Description of LiftoffRegister code encoding. // Description of LiftoffRegister code encoding.
// This example uses the ARM architecture, which as of writing has: // This example uses the ARM architecture, which as of writing has:
// - 9 GP registers, requiring 4 bits // - 9 GP registers, requiring 4 bits
...@@ -192,9 +188,9 @@ class LiftoffRegister { ...@@ -192,9 +188,9 @@ class LiftoffRegister {
// Shifts the register code depending on the type before converting to a // Shifts the register code depending on the type before converting to a
// LiftoffRegister. // LiftoffRegister.
static LiftoffRegister from_external_code(RegClass rc, ValueType type, static LiftoffRegister from_external_code(RegClass rc, ValueKind kind,
int code) { int code) {
if (!kSimpleFPAliasing && type == kWasmF32) { if (!kSimpleFPAliasing && kind == kF32) {
// Liftoff assumes a one-to-one mapping between float registers and // Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64 // double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order // registers. The f32 register code must therefore be halved in order
...@@ -202,7 +198,7 @@ class LiftoffRegister { ...@@ -202,7 +198,7 @@ class LiftoffRegister {
DCHECK_EQ(0, code % 2); DCHECK_EQ(0, code % 2);
return LiftoffRegister::from_code(rc, code >> 1); return LiftoffRegister::from_code(rc, code >> 1);
} }
if (kNeedS128RegPair && type == kWasmS128) { if (kNeedS128RegPair && kind == kS128) {
// Similarly for double registers and SIMD registers, the SIMD code // Similarly for double registers and SIMD registers, the SIMD code
// needs to be doubled to pass the f64 code to Liftoff. // needs to be doubled to pass the f64 code to Liftoff.
return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1)); return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1));
......
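Editor's note: the halving/doubling in from_external_code exists because Liftoff indexes FP registers by their double-width code. On targets without simple FP aliasing (e.g. ARM), an external f32 code counts s-registers, and s2n aliases the low half of dn; an s128 value occupies an FP register pair whose low half carries the code. A standalone sketch of just that code arithmetic follows; the constants and the RegCode struct are assumptions for illustration.

// Standalone sketch of the register-code shifting in from_external_code.
// kSimpleFPAliasing, kNeedS128RegPair, and RegCode are illustrative.
#include <cassert>
#include <cstdint>

enum ValueKind : uint8_t { kF32, kF64, kS128 };

constexpr bool kSimpleFPAliasing = false;  // ARM-like FP aliasing
constexpr bool kNeedS128RegPair = true;    // SIMD values use an FP pair

struct RegCode { int code; bool is_pair; };

RegCode from_external_code(ValueKind kind, int code) {
  if (!kSimpleFPAliasing && kind == kF32) {
    // f32 codes count s-registers; s2n aliases the low half of dn, so
    // halve to get the d-register code Liftoff uses internally.
    assert(code % 2 == 0);
    return {code >> 1, false};
  }
  if (kNeedS128RegPair && kind == kS128) {
    // A q-register overlays two d-registers; double to get the low d-code.
    return {code << 1, true};
  }
  return {code, false};
}

int main() {
  assert(from_external_code(kF32, 4).code == 2);
  assert(from_external_code(kS128, 3).code == 6);
}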
...@@ -84,8 +84,8 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset, ...@@ -84,8 +84,8 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
} }
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src, inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
ValueType type) { ValueKind kind) {
switch (type.kind()) { switch (kind) {
case kI32: case kI32:
assm->movl(dst.gp(), src); assm->movl(dst.gp(), src);
break; break;
...@@ -111,8 +111,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src, ...@@ -111,8 +111,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
} }
inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src, inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
ValueType type) { ValueKind kind) {
switch (type.kind()) { switch (kind) {
case kI32: case kI32:
assm->movl(dst, src.gp()); assm->movl(dst, src.gp());
break; break;
...@@ -133,8 +133,8 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src, ...@@ -133,8 +133,8 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
} }
} }
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) { inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
switch (type.kind()) { switch (kind) {
case kI32: case kI32:
case kI64: case kI64:
case kRef: case kRef:
...@@ -243,13 +243,13 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() { ...@@ -243,13 +243,13 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset; return liftoff::kInstanceOffset;
} }
int LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
return type.is_reference_type() ? kSystemPointerSize return is_reference_type(kind) ? kSystemPointerSize
: type.element_size_bytes(); : element_size_bytes(kind);
} }
bool LiftoffAssembler::NeedsAlignment(ValueType type) { bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return type.is_reference_type(); return is_reference_type(kind);
} }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
...@@ -778,66 +778,66 @@ void LiftoffAssembler::AtomicFence() { mfence(); } ...@@ -778,66 +778,66 @@ void LiftoffAssembler::AtomicFence() { mfence(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx, uint32_t caller_slot_idx,
ValueType type) { ValueKind kind) {
Operand src(rbp, kSystemPointerSize * (caller_slot_idx + 1)); Operand src(rbp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Load(this, dst, src, type); liftoff::Load(this, dst, src, kind);
} }
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx, uint32_t caller_slot_idx,
ValueType type) { ValueKind kind) {
Operand dst(rbp, kSystemPointerSize * (caller_slot_idx + 1)); Operand dst(rbp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Store(this, dst, src, type); liftoff::Store(this, dst, src, kind);
} }
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset, void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset,
ValueType type) { ValueKind kind) {
Operand src(rsp, offset); Operand src(rsp, offset);
liftoff::Load(this, reg, src, type); liftoff::Load(this, reg, src, kind);
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueKind kind) {
DCHECK_NE(dst_offset, src_offset); DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset); Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset); Operand src = liftoff::GetStackSlot(src_offset);
if (type.element_size_log2() == 2) { if (element_size_log2(kind) == 2) {
movl(kScratchRegister, src); movl(kScratchRegister, src);
movl(dst, kScratchRegister); movl(dst, kScratchRegister);
} else { } else {
DCHECK_EQ(3, type.element_size_log2()); DCHECK_EQ(3, element_size_log2(kind));
movq(kScratchRegister, src); movq(kScratchRegister, src);
movq(dst, kScratchRegister); movq(dst, kScratchRegister);
} }
} }
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (type == kWasmI32) { if (kind == kI32) {
movl(dst, src); movl(dst, src);
} else { } else {
DCHECK(kWasmI64 == type || type.is_reference_type()); DCHECK(kI64 == kind || is_reference_type(kind));
movq(dst, src); movq(dst, src);
} }
} }
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) { ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (type == kWasmF32) { if (kind == kF32) {
Movss(dst, src); Movss(dst, src);
} else if (type == kWasmF64) { } else if (kind == kF64) {
Movsd(dst, src); Movsd(dst, src);
} else { } else {
DCHECK_EQ(kWasmS128, type); DCHECK_EQ(kS128, kind);
Movapd(dst, src); Movapd(dst, src);
} }
} }
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset); Operand dst = liftoff::GetStackSlot(offset);
switch (type.kind()) { switch (kind) {
case kI32: case kI32:
movl(dst, reg.gp()); movl(dst, reg.gp());
break; break;
...@@ -889,8 +889,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { ...@@ -889,8 +889,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) { void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type); liftoff::Load(this, reg, liftoff::GetStackSlot(offset), kind);
} }
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
...@@ -1142,16 +1142,16 @@ void LiftoffAssembler::emit_i32_xori(Register dst, Register lhs, int32_t imm) { ...@@ -1142,16 +1142,16 @@ void LiftoffAssembler::emit_i32_xori(Register dst, Register lhs, int32_t imm) {
} }
namespace liftoff { namespace liftoff {
template <ValueKind type> template <ValueKind kind>
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst, inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount, Register src, Register amount,
void (Assembler::*emit_shift)(Register)) { void (Assembler::*emit_shift)(Register)) {
// If dst is rcx, compute into the scratch register first, then move to rcx. // If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) { if (dst == rcx) {
assm->Move(kScratchRegister, src, ValueType::Primitive(type)); assm->Move(kScratchRegister, src, kind);
if (amount != rcx) assm->Move(rcx, amount, ValueType::Primitive(type)); if (amount != rcx) assm->Move(rcx, amount, kind);
(assm->*emit_shift)(kScratchRegister); (assm->*emit_shift)(kScratchRegister);
assm->Move(rcx, kScratchRegister, ValueType::Primitive(type)); assm->Move(rcx, kScratchRegister, kind);
return; return;
} }
...@@ -1163,11 +1163,11 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst, ...@@ -1163,11 +1163,11 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
src == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx)); src == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx); if (use_scratch) assm->movq(kScratchRegister, rcx);
if (src == rcx) src = kScratchRegister; if (src == rcx) src = kScratchRegister;
assm->Move(rcx, amount, ValueType::Primitive(type)); assm->Move(rcx, amount, kind);
} }
// Do the actual shift. // Do the actual shift.
if (dst != src) assm->Move(dst, src, ValueType::Primitive(type)); if (dst != src) assm->Move(dst, src, kind);
(assm->*emit_shift)(dst); (assm->*emit_shift)(dst);
// Restore rcx if needed. // Restore rcx if needed.
...@@ -2050,11 +2050,11 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); } ...@@ -2050,11 +2050,11 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); } void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond, void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueType type, Label* label, ValueKind kind,
Register lhs, Register rhs) { Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond); Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) { if (rhs != no_reg) {
switch (type.kind()) { switch (kind) {
case kI32: case kI32:
cmpl(lhs, rhs); cmpl(lhs, rhs);
break; break;
...@@ -2071,7 +2071,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond, ...@@ -2071,7 +2071,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
UNREACHABLE(); UNREACHABLE();
} }
} else { } else {
DCHECK_EQ(type, kWasmI32); DCHECK_EQ(kind, kI32);
testl(lhs, lhs); testl(lhs, lhs);
} }
...@@ -2160,12 +2160,12 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond, ...@@ -2160,12 +2160,12 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value, LiftoffRegister true_value,
LiftoffRegister false_value, LiftoffRegister false_value,
ValueType type) { ValueKind kind) {
if (type != kWasmI32 && type != kWasmI64) return false; if (kind != kI32 && kind != kI64) return false;
testl(condition, condition); testl(condition, condition);
if (type == kWasmI32) { if (kind == kI32) {
if (dst == false_value) { if (dst == false_value) {
cmovl(not_zero, dst.gp(), true_value.gp()); cmovl(not_zero, dst.gp(), true_value.gp());
} else { } else {
...@@ -4385,17 +4385,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { ...@@ -4385,17 +4385,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize)); ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
} }
void LiftoffAssembler::CallC(const wasm::FunctionSig* sig, void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args, const LiftoffRegister* args,
const LiftoffRegister* rets, const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes, ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) { ExternalReference ext_ref) {
AllocateStackSpace(stack_bytes); AllocateStackSpace(stack_bytes);
int arg_bytes = 0; int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) { for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_type); liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_kind);
arg_bytes += param_type.element_size_bytes(); arg_bytes += element_size_bytes(param_kind);
} }
DCHECK_LE(arg_bytes, stack_bytes); DCHECK_LE(arg_bytes, stack_bytes);
...@@ -4420,8 +4420,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig, ...@@ -4420,8 +4420,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
} }
// Load potential output value from the buffer on the stack. // Load potential output value from the buffer on the stack.
if (out_argument_type != kWasmStmt) { if (out_argument_kind != kStmt) {
liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type); liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_kind);
} }
addq(rsp, Immediate(stack_bytes)); addq(rsp, Immediate(stack_bytes));
...@@ -4435,7 +4435,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) { ...@@ -4435,7 +4435,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
near_jmp(addr, RelocInfo::WASM_CALL); near_jmp(addr, RelocInfo::WASM_CALL);
} }
void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig, void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor, compiler::CallDescriptor* call_descriptor,
Register target) { Register target) {
if (target == no_reg) { if (target == no_reg) {
...@@ -4481,12 +4481,12 @@ void LiftoffStackSlots::Construct() { ...@@ -4481,12 +4481,12 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_; const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) { switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: case LiftoffAssembler::VarState::kStack:
if (src.type() == kWasmI32) { if (src.kind() == kI32) {
// Load i32 values to a register first to ensure they are zero // Load i32 values to a register first to ensure they are zero
// extended. // extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_)); asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister); asm_->pushq(kScratchRegister);
} else if (src.type() == kWasmS128) { } else if (src.kind() == kS128) {
// Since offsets are subtracted from sp, we need a smaller offset to // Since offsets are subtracted from sp, we need a smaller offset to
// push the top of a s128 value. // push the top of a s128 value.
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8)); asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8));
...@@ -4500,7 +4500,7 @@ void LiftoffStackSlots::Construct() { ...@@ -4500,7 +4500,7 @@ void LiftoffStackSlots::Construct() {
} }
break; break;
case LiftoffAssembler::VarState::kRegister: case LiftoffAssembler::VarState::kRegister:
liftoff::push(asm_, src.reg(), src.type()); liftoff::push(asm_, src.reg(), src.kind());
break; break;
case LiftoffAssembler::VarState::kIntConst: case LiftoffAssembler::VarState::kIntConst:
asm_->pushq(Immediate(src.i32_const())); asm_->pushq(Immediate(src.i32_const()));
......
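Editor's note: CallC above reserves stack_bytes of stack, packs the arguments back-to-back while advancing the offset by each parameter kind's byte size, and DCHECKs the total against the reservation. The same loop shape, as a standalone sketch that writes into a plain byte buffer rather than emitting machine code; Value and pack_args are stand-ins, not V8 code.

// Standalone sketch of the argument-packing loop in CallC: each argument
// is stored at the running offset, which advances by the kind's size.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

enum ValueKind : uint8_t { kI32, kI64, kF32, kF64, kS128 };

constexpr int element_size_bytes(ValueKind kind) {
  switch (kind) {
    case kI32: case kF32: return 4;
    case kI64: case kF64: return 8;
    case kS128: return 16;
  }
  return 0;
}

struct Value { ValueKind kind; uint8_t bytes[16]; };

int pack_args(uint8_t* stack, int stack_bytes,
              const std::vector<Value>& args) {
  int arg_bytes = 0;
  for (const Value& arg : args) {
    std::memcpy(stack + arg_bytes, arg.bytes, element_size_bytes(arg.kind));
    arg_bytes += element_size_bytes(arg.kind);
  }
  assert(arg_bytes <= stack_bytes);  // mirrors DCHECK_LE(arg_bytes, ...)
  return arg_bytes;
}

int main() {
  uint8_t stack[32] = {};
  pack_args(stack, sizeof stack, {{kI32, {1, 2, 3, 4}}, {kF64, {}}});
}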
...@@ -179,12 +179,94 @@ enum ValueKind : uint8_t { ...@@ -179,12 +179,94 @@ enum ValueKind : uint8_t {
#undef DEF_ENUM #undef DEF_ENUM
}; };
// A ValueType is encoded by three components: A Kind, a heap representation constexpr bool is_reference_type(ValueKind kind) {
// (for reference types), and an inheritance depth (for rtts only). Those are return kind == kRef || kind == kOptRef || kind == kRtt ||
// encoded into 32 bits using base::BitField. The underlying Kind enumeration kind == kRttWithDepth;
// includes four elements which do not strictly correspond to value types: the }
// two packed types i8 and i16, the type of void blocks (stmt), and a bottom
// value (for internal use). constexpr bool is_object_reference_type(ValueKind kind) {
return kind == kRef || kind == kOptRef;
}
constexpr int element_size_log2(ValueKind kind) {
constexpr int8_t kElementSizeLog2[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
#undef ELEM_SIZE_LOG2
};
int size_log_2 = kElementSizeLog2[kind];
CONSTEXPR_DCHECK(size_log_2 >= 0);
return size_log_2;
}
constexpr int element_size_bytes(ValueKind kind) {
constexpr int8_t kElementSize[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) \
log2Size == -1 ? -1 : (1 << std::max(0, log2Size)),
FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
#undef ELEM_SIZE_LOG2
};
int size = kElementSize[kind];
CONSTEXPR_DCHECK(size > 0);
return size;
}
constexpr char short_name(ValueKind kind) {
constexpr char kShortName[] = {
#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
FOREACH_VALUE_TYPE(SHORT_NAME)
#undef SHORT_NAME
};
return kShortName[kind];
}
constexpr const char* name(ValueKind kind) {
constexpr const char* kKindName[] = {
#define KIND_NAME(kind, log2Size, code, machineType, shortName, kindName, ...) \
kindName,
FOREACH_VALUE_TYPE(KIND_NAME)
#undef KIND_NAME
};
return kKindName[kind];
}
constexpr MachineType machine_type(ValueKind kind) {
CONSTEXPR_DCHECK(kBottom != kind);
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
MachineType::machineType(),
FOREACH_VALUE_TYPE(MACH_TYPE)
#undef MACH_TYPE
};
return kMachineType[kind];
}
constexpr bool is_packed(ValueKind kind) { return kind == kI8 || kind == kI16; }
constexpr ValueKind unpacked(ValueKind kind) {
return is_packed(kind) ? kI32 : kind;
}
constexpr bool is_rtt(ValueKind kind) {
return kind == kRtt || kind == kRttWithDepth;
}
constexpr bool is_defaultable(ValueKind kind) {
CONSTEXPR_DCHECK(kind != kBottom && kind != kStmt);
return kind != kRef && !is_rtt(kind);
}
// A ValueType is encoded by three components: A ValueKind, a heap
// representation (for reference types), and an inheritance depth (for rtts
// only). Those are encoded into 32 bits using base::BitField. The underlying
// ValueKind enumeration includes four elements which do not strictly correspond
// to value types: the two packed types i8 and i16, the type of void blocks
// (stmt), and a bottom value (for internal use).
class ValueType { class ValueType {
public: public:
/******************************* Constructors *******************************/ /******************************* Constructors *******************************/
...@@ -224,12 +306,11 @@ class ValueType { ...@@ -224,12 +306,11 @@ class ValueType {
/******************************** Type checks *******************************/ /******************************** Type checks *******************************/
constexpr bool is_reference_type() const { constexpr bool is_reference_type() const {
return kind() == kRef || kind() == kOptRef || kind() == kRtt || return wasm::is_reference_type(kind());
kind() == kRttWithDepth;
} }
constexpr bool is_object_reference_type() const { constexpr bool is_object_reference_type() const {
return kind() == kRef || kind() == kOptRef; return wasm::is_object_reference_type(kind());
} }
constexpr bool is_nullable() const { return kind() == kOptRef; } constexpr bool is_nullable() const { return kind() == kOptRef; }
...@@ -239,23 +320,18 @@ class ValueType { ...@@ -239,23 +320,18 @@ class ValueType {
heap_representation() == htype; heap_representation() == htype;
} }
constexpr bool is_rtt() const { constexpr bool is_rtt() const { return wasm::is_rtt(kind()); }
return kind() == kRtt || kind() == kRttWithDepth;
}
constexpr bool has_depth() const { return kind() == kRttWithDepth; } constexpr bool has_depth() const { return kind() == kRttWithDepth; }
constexpr bool has_index() const { constexpr bool has_index() const {
return is_rtt() || (is_object_reference_type() && heap_type().is_index()); return is_rtt() || (is_object_reference_type() && heap_type().is_index());
} }
constexpr bool is_defaultable() const { constexpr bool is_defaultable() const { return wasm::is_defaultable(kind()); }
CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
return kind() != kRef && !is_rtt();
}
constexpr bool is_bottom() const { return kind() == kBottom; } constexpr bool is_bottom() const { return kind() == kBottom; }
constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; } constexpr bool is_packed() const { return wasm::is_packed(kind()); }
constexpr ValueType Unpacked() const { constexpr ValueType Unpacked() const {
return is_packed() ? Primitive(kI32) : *this; return is_packed() ? Primitive(kI32) : *this;
...@@ -301,42 +377,16 @@ class ValueType { ...@@ -301,42 +377,16 @@ class ValueType {
} }
constexpr int element_size_log2() const { constexpr int element_size_log2() const {
constexpr int8_t kElementSizeLog2[] = { return wasm::element_size_log2(kind());
#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
#undef ELEM_SIZE_LOG2
};
int size_log_2 = kElementSizeLog2[kind()];
CONSTEXPR_DCHECK(size_log_2 >= 0);
return size_log_2;
} }
constexpr int element_size_bytes() const { constexpr int element_size_bytes() const {
constexpr int8_t kElementSize[] = { return wasm::element_size_bytes(kind());
#define ELEM_SIZE_LOG2(kind, log2Size, ...) \
log2Size == -1 ? -1 : (1 << std::max(0, log2Size)),
FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
#undef ELEM_SIZE_LOG2
};
int size = kElementSize[kind()];
CONSTEXPR_DCHECK(size > 0);
return size;
} }
/*************************** Machine-type related ***************************/ /*************************** Machine-type related ***************************/
constexpr MachineType machine_type() const { constexpr MachineType machine_type() const {
CONSTEXPR_DCHECK(kBottom != kind()); return wasm::machine_type(kind());
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
MachineType::machineType(),
FOREACH_VALUE_TYPE(MACH_TYPE)
#undef MACH_TYPE
};
return kMachineType[kind()];
} }
constexpr MachineRepresentation machine_representation() const { constexpr MachineRepresentation machine_representation() const {
...@@ -427,15 +477,7 @@ class ValueType { ...@@ -427,15 +477,7 @@ class ValueType {
static constexpr int kLastUsedBit = 30; static constexpr int kLastUsedBit = 30;
/****************************** Pretty-printing *****************************/ /****************************** Pretty-printing *****************************/
constexpr char short_name() const { constexpr char short_name() const { return wasm::short_name(kind()); }
constexpr char kShortName[] = {
#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
FOREACH_VALUE_TYPE(SHORT_NAME)
#undef SHORT_NAME
};
return kShortName[kind()];
}
std::string name() const { std::string name() const {
std::ostringstream buf; std::ostringstream buf;
...@@ -483,16 +525,7 @@ class ValueType { ...@@ -483,16 +525,7 @@ class ValueType {
constexpr explicit ValueType(uint32_t bit_field) : bit_field_(bit_field) {} constexpr explicit ValueType(uint32_t bit_field) : bit_field_(bit_field) {}
constexpr const char* kind_name() const { constexpr const char* kind_name() const { return wasm::name(kind()); }
constexpr const char* kTypeName[] = {
#define KIND_NAME(kind, log2Size, code, machineType, shortName, typeName, ...) \
typeName,
FOREACH_VALUE_TYPE(KIND_NAME)
#undef TYPE_NAME
};
return kTypeName[kind()];
}
uint32_t bit_field_; uint32_t bit_field_;
}; };
...@@ -573,8 +606,8 @@ class LoadType { ...@@ -573,8 +606,8 @@ class LoadType {
constexpr ValueType value_type() const { return kValueType[val_]; } constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineType mem_type() const { return kMemType[val_]; } constexpr MachineType mem_type() const { return kMemType[val_]; }
static LoadType ForValueType(ValueType type, bool is_signed = false) { static LoadType ForValueKind(ValueKind kind, bool is_signed = false) {
switch (type.kind()) { switch (kind) {
case kI32: case kI32:
return kI32Load; return kI32Load;
case kI64: case kI64:
...@@ -649,8 +682,8 @@ class StoreType { ...@@ -649,8 +682,8 @@ class StoreType {
constexpr ValueType value_type() const { return kValueType[val_]; } constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineRepresentation mem_rep() const { return kMemRep[val_]; } constexpr MachineRepresentation mem_rep() const { return kMemRep[val_]; }
static StoreType ForValueType(ValueType type) { static StoreType ForValueKind(ValueKind kind) {
switch (type.kind()) { switch (kind) {
case kI32: case kI32:
return kI32Store; return kI32Store;
case kI64: case kI64:
......
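Editor's note: the comment block introduced above describes the ValueType encoding that these new free functions leave untouched: a kind, a heap representation, and an rtt inheritance depth packed into one 32-bit word via base::BitField. A hand-rolled standalone equivalent follows; the field widths are assumptions for illustration and V8's actual layout may differ.

// Standalone sketch of packing (kind, heap representation, rtt depth)
// into a single 32-bit word, in the spirit of the base::BitField
// encoding described above. Field widths are assumptions.
#include <cassert>
#include <cstdint>

enum ValueKind : uint8_t { kI32, kI64, kRef, kOptRef, kRttWithDepth };

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using KindField = BitField<ValueKind, 0, 5>;
using HeapField = BitField<uint32_t, 5, 20>;   // heap representation
using DepthField = BitField<uint32_t, 25, 6>;  // rtt inheritance depth

int main() {
  uint32_t t = KindField::encode(kRttWithDepth) | HeapField::encode(17) |
               DepthField::encode(3);
  assert(KindField::decode(t) == kRttWithDepth);
  assert(HeapField::decode(t) == 17);
  assert(DepthField::decode(t) == 3);
}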
...@@ -95,8 +95,8 @@ void DebugSideTable::Entry::Print(std::ostream& os) const { ...@@ -95,8 +95,8 @@ void DebugSideTable::Entry::Print(std::ostream& os) const {
os << std::setw(6) << std::hex << pc_offset_ << std::dec << " stack height " os << std::setw(6) << std::hex << pc_offset_ << std::dec << " stack height "
<< stack_height_ << " ["; << stack_height_ << " [";
for (auto& value : changed_values_) { for (auto& value : changed_values_) {
os << " " << value.type.name() << ":"; os << " " << name(value.kind) << ":";
switch (value.kind) { switch (value.storage) {
case kConstant: case kConstant:
os << "const#" << value.i32_const; os << "const#" << value.i32_const;
break; break;
...@@ -510,8 +510,8 @@ class DebugInfoImpl { ...@@ -510,8 +510,8 @@ class DebugInfoImpl {
const auto* value = const auto* value =
debug_side_table->FindValue(debug_side_table_entry, index); debug_side_table->FindValue(debug_side_table_entry, index);
if (value->is_constant()) { if (value->is_constant()) {
DCHECK(value->type == kWasmI32 || value->type == kWasmI64); DCHECK(value->kind == kI32 || value->kind == kI64);
return value->type == kWasmI32 ? WasmValue(value->i32_const) return value->kind == kI32 ? WasmValue(value->i32_const)
: WasmValue(int64_t{value->i32_const}); : WasmValue(int64_t{value->i32_const});
} }
...@@ -523,14 +523,14 @@ class DebugInfoImpl { ...@@ -523,14 +523,14 @@ class DebugInfoImpl {
reg.code()); reg.code());
}; };
if (reg.is_gp_pair()) { if (reg.is_gp_pair()) {
DCHECK_EQ(kWasmI64, value->type); DCHECK_EQ(kI64, value->kind);
uint32_t low_word = ReadUnalignedValue<uint32_t>(gp_addr(reg.low_gp())); uint32_t low_word = ReadUnalignedValue<uint32_t>(gp_addr(reg.low_gp()));
uint32_t high_word = uint32_t high_word =
ReadUnalignedValue<uint32_t>(gp_addr(reg.high_gp())); ReadUnalignedValue<uint32_t>(gp_addr(reg.high_gp()));
return WasmValue((uint64_t{high_word} << 32) | low_word); return WasmValue((uint64_t{high_word} << 32) | low_word);
} }
if (reg.is_gp()) { if (reg.is_gp()) {
return value->type == kWasmI32 return value->kind == kI32
? WasmValue(ReadUnalignedValue<uint32_t>(gp_addr(reg.gp()))) ? WasmValue(ReadUnalignedValue<uint32_t>(gp_addr(reg.gp())))
: WasmValue(ReadUnalignedValue<uint64_t>(gp_addr(reg.gp()))); : WasmValue(ReadUnalignedValue<uint64_t>(gp_addr(reg.gp())));
} }
...@@ -544,11 +544,11 @@ class DebugInfoImpl { ...@@ -544,11 +544,11 @@ class DebugInfoImpl {
Address spilled_addr = Address spilled_addr =
debug_break_fp + debug_break_fp +
WasmDebugBreakFrameConstants::GetPushedFpRegisterOffset(code); WasmDebugBreakFrameConstants::GetPushedFpRegisterOffset(code);
if (value->type == kWasmF32) { if (value->kind == kF32) {
return WasmValue(ReadUnalignedValue<float>(spilled_addr)); return WasmValue(ReadUnalignedValue<float>(spilled_addr));
} else if (value->type == kWasmF64) { } else if (value->kind == kF64) {
return WasmValue(ReadUnalignedValue<double>(spilled_addr)); return WasmValue(ReadUnalignedValue<double>(spilled_addr));
} else if (value->type == kWasmS128) { } else if (value->kind == kS128) {
return WasmValue(Simd128(ReadUnalignedValue<int16>(spilled_addr))); return WasmValue(Simd128(ReadUnalignedValue<int16>(spilled_addr)));
} else { } else {
// All other cases should have been handled above. // All other cases should have been handled above.
...@@ -558,7 +558,7 @@ class DebugInfoImpl { ...@@ -558,7 +558,7 @@ class DebugInfoImpl {
// Otherwise load the value from the stack. // Otherwise load the value from the stack.
Address stack_address = stack_frame_base - value->stack_offset; Address stack_address = stack_frame_base - value->stack_offset;
switch (value->type.kind()) { switch (value->kind) {
case kI32: case kI32:
return WasmValue(ReadUnalignedValue<int32_t>(stack_address)); return WasmValue(ReadUnalignedValue<int32_t>(stack_address));
case kI64: case kI64:
......
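Editor's note: the debug side table now records two orthogonal facts per value: its wasm kind and its storage (the former Entry::ValueKind, renamed to avoid clashing with the new global ValueKind). A standalone sketch of a reader that branches on storage first and lets the kind decide the width, roughly following GetValue above; all types here are stand-ins.

// Standalone sketch of the kind/storage split: 'kind' says what the
// value is, 'storage' says where it lives.
#include <cstdint>
#include <cstring>

enum ValueKind : uint8_t { kI32, kI64, kF32, kF64 };
enum Storage : int8_t { kConstant, kRegister, kStack };

struct Value {
  ValueKind kind;
  Storage storage;
  union {
    int32_t i32_const;  // if storage == kConstant
    int reg_code;       // if storage == kRegister
    int stack_offset;   // if storage == kStack
  };
};

uint64_t read_value(const Value& v, const uint64_t* regs,
                    const uint8_t* stack_frame_base) {
  switch (v.storage) {
    case kConstant:
      return static_cast<uint64_t>(v.i32_const);
    case kRegister:
      return regs[v.reg_code];
    case kStack: {
      // Stack slots are addressed downward from the frame base, as in
      // 'stack_frame_base - value->stack_offset' above.
      uint64_t raw = 0;
      int size = (v.kind == kI32 || v.kind == kF32) ? 4 : 8;
      std::memcpy(&raw, stack_frame_base - v.stack_offset, size);
      return raw;
    }
  }
  return 0;
}

int main() {
  Value v{kI32, kConstant, {7}};
  return read_value(v, nullptr, nullptr) == 7 ? 0 : 1;
}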
...@@ -40,11 +40,11 @@ class DebugSideTable { ...@@ -40,11 +40,11 @@ class DebugSideTable {
public: public:
class Entry { class Entry {
public: public:
enum ValueKind : int8_t { kConstant, kRegister, kStack }; enum Storage : int8_t { kConstant, kRegister, kStack };
struct Value { struct Value {
int index; int index;
ValueType type;
ValueKind kind; ValueKind kind;
Storage storage;
union { union {
int32_t i32_const; // if kind == kConstant int32_t i32_const; // if storage == kConstant
int reg_code; // if kind == kRegister int reg_code; // if storage == kRegister
...@@ -53,9 +53,9 @@ class DebugSideTable { ...@@ -53,9 +53,9 @@ class DebugSideTable {
bool operator==(const Value& other) const { bool operator==(const Value& other) const {
if (index != other.index) return false; if (index != other.index) return false;
if (type != other.type) return false;
if (kind != other.kind) return false; if (kind != other.kind) return false;
switch (kind) { if (storage != other.storage) return false;
switch (storage) {
case kConstant: case kConstant:
return i32_const == other.i32_const; return i32_const == other.i32_const;
case kRegister: case kRegister:
...@@ -66,8 +66,8 @@ class DebugSideTable { ...@@ -66,8 +66,8 @@ class DebugSideTable {
} }
bool operator!=(const Value& other) const { return !(*this == other); } bool operator!=(const Value& other) const { return !(*this == other); }
bool is_constant() const { return kind == kConstant; } bool is_constant() const { return storage == kConstant; }
bool is_register() const { return kind == kRegister; } bool is_register() const { return storage == kRegister; }
}; };
Entry(int pc_offset, int stack_height, std::vector<Value> changed_values) Entry(int pc_offset, int stack_height, std::vector<Value> changed_values)
......
...@@ -177,8 +177,8 @@ struct DebugSideTableEntry { ...@@ -177,8 +177,8 @@ struct DebugSideTableEntry {
// Check for equality, but ignore exact register and stack offset. // Check for equality, but ignore exact register and stack offset.
static bool CheckValueEquals(const DebugSideTable::Entry::Value& a, static bool CheckValueEquals(const DebugSideTable::Entry::Value& a,
const DebugSideTable::Entry::Value& b) { const DebugSideTable::Entry::Value& b) {
return a.index == b.index && a.type == b.type && a.kind == b.kind && return a.index == b.index && a.kind == b.kind && a.storage == b.storage &&
(a.kind != DebugSideTable::Entry::kConstant || (a.storage != DebugSideTable::Entry::kConstant ||
a.i32_const == b.i32_const); a.i32_const == b.i32_const);
} }
}; };
...@@ -189,8 +189,8 @@ std::ostream& operator<<(std::ostream& out, const DebugSideTableEntry& entry) { ...@@ -189,8 +189,8 @@ std::ostream& operator<<(std::ostream& out, const DebugSideTableEntry& entry) {
out << "stack height " << entry.stack_height << ", changed: {"; out << "stack height " << entry.stack_height << ", changed: {";
const char* comma = ""; const char* comma = "";
for (auto& v : entry.changed_values) { for (auto& v : entry.changed_values) {
out << comma << v.index << ":" << v.type.name() << " "; out << comma << v.index << ":" << name(v.kind) << " ";
switch (v.kind) { switch (v.storage) {
case DebugSideTable::Entry::kConstant: case DebugSideTable::Entry::kConstant:
out << "const:" << v.i32_const; out << "const:" << v.i32_const;
break; break;
...@@ -213,27 +213,27 @@ std::ostream& operator<<(std::ostream& out, ...@@ -213,27 +213,27 @@ std::ostream& operator<<(std::ostream& out,
#endif // DEBUG #endif // DEBUG
// Named constructors to make the tests more readable. // Named constructors to make the tests more readable.
DebugSideTable::Entry::Value Constant(int index, ValueType type, DebugSideTable::Entry::Value Constant(int index, ValueKind kind,
int32_t constant) { int32_t constant) {
DebugSideTable::Entry::Value value; DebugSideTable::Entry::Value value;
value.index = index; value.index = index;
value.type = type; value.kind = kind;
value.kind = DebugSideTable::Entry::kConstant; value.storage = DebugSideTable::Entry::kConstant;
value.i32_const = constant; value.i32_const = constant;
return value; return value;
} }
DebugSideTable::Entry::Value Register(int index, ValueType type) { DebugSideTable::Entry::Value Register(int index, ValueKind kind) {
DebugSideTable::Entry::Value value; DebugSideTable::Entry::Value value;
value.index = index; value.index = index;
value.type = type; value.kind = kind;
value.kind = DebugSideTable::Entry::kRegister; value.storage = DebugSideTable::Entry::kRegister;
return value; return value;
} }
DebugSideTable::Entry::Value Stack(int index, ValueType type) { DebugSideTable::Entry::Value Stack(int index, ValueKind kind) {
DebugSideTable::Entry::Value value; DebugSideTable::Entry::Value value;
value.index = index; value.index = index;
value.type = type; value.kind = kind;
value.kind = DebugSideTable::Entry::kStack; value.storage = DebugSideTable::Entry::kStack;
return value; return value;
} }
...@@ -296,9 +296,9 @@ TEST(Liftoff_debug_side_table_simple) { ...@@ -296,9 +296,9 @@ TEST(Liftoff_debug_side_table_simple) {
CheckDebugSideTable( CheckDebugSideTable(
{ {
// function entry, locals in registers. // function entry, locals in registers.
{2, {Register(0, kWasmI32), Register(1, kWasmI32)}}, {2, {Register(0, kI32), Register(1, kI32)}},
// OOL stack check, locals spilled, stack still empty. // OOL stack check, locals spilled, stack still empty.
{2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}}, {2, {Stack(0, kI32), Stack(1, kI32)}},
}, },
debug_side_table.get()); debug_side_table.get());
} }
...@@ -312,9 +312,9 @@ TEST(Liftoff_debug_side_table_call) { ...@@ -312,9 +312,9 @@ TEST(Liftoff_debug_side_table_call) {
CheckDebugSideTable( CheckDebugSideTable(
{ {
// function entry, local in register. // function entry, local in register.
{1, {Register(0, kWasmI32)}}, {1, {Register(0, kI32)}},
// call, local spilled, stack empty. // call, local spilled, stack empty.
{1, {Stack(0, kWasmI32)}}, {1, {Stack(0, kI32)}},
// OOL stack check, local spilled as before, stack empty. // OOL stack check, local spilled as before, stack empty.
{1, {}}, {1, {}},
}, },
...@@ -332,11 +332,11 @@ TEST(Liftoff_debug_side_table_call_const) { ...@@ -332,11 +332,11 @@ TEST(Liftoff_debug_side_table_call_const) {
CheckDebugSideTable( CheckDebugSideTable(
{ {
// function entry, local in register. // function entry, local in register.
{1, {Register(0, kWasmI32)}}, {1, {Register(0, kI32)}},
// call, local is kConst. // call, local is kConst.
{1, {Constant(0, kWasmI32, kConst)}}, {1, {Constant(0, kI32, kConst)}},
// OOL stack check, local spilled. // OOL stack check, local spilled.
{1, {Stack(0, kWasmI32)}}, {1, {Stack(0, kI32)}},
}, },
debug_side_table.get()); debug_side_table.get());
} }
...@@ -351,13 +351,13 @@ TEST(Liftoff_debug_side_table_indirect_call) { ...@@ -351,13 +351,13 @@ TEST(Liftoff_debug_side_table_indirect_call) {
CheckDebugSideTable( CheckDebugSideTable(
{ {
// function entry, local in register. // function entry, local in register.
{1, {Register(0, kWasmI32)}}, {1, {Register(0, kI32)}},
// indirect call, local spilled, stack empty. // indirect call, local spilled, stack empty.
{1, {Stack(0, kWasmI32)}}, {1, {Stack(0, kI32)}},
// OOL stack check, local still spilled. // OOL stack check, local still spilled.
{1, {}}, {1, {}},
// OOL trap (invalid index), local still spilled, stack has {kConst}. // OOL trap (invalid index), local still spilled, stack has {kConst}.
{2, {Constant(1, kWasmI32, kConst)}}, {2, {Constant(1, kI32, kConst)}},
// OOL trap (sig mismatch), stack unmodified. // OOL trap (sig mismatch), stack unmodified.
{2, {}}, {2, {}},
}, },
...@@ -373,11 +373,11 @@ TEST(Liftoff_debug_side_table_loop) { ...@@ -373,11 +373,11 @@ TEST(Liftoff_debug_side_table_loop) {
CheckDebugSideTable( CheckDebugSideTable(
{ {
// function entry, local in register. // function entry, local in register.
{1, {Register(0, kWasmI32)}}, {1, {Register(0, kI32)}},
// OOL stack check, local spilled, stack empty. // OOL stack check, local spilled, stack empty.
{1, {Stack(0, kWasmI32)}}, {1, {Stack(0, kI32)}},
// OOL loop stack check, local still spilled, stack has {kConst}. // OOL loop stack check, local still spilled, stack has {kConst}.
{2, {Constant(1, kWasmI32, kConst)}}, {2, {Constant(1, kI32, kConst)}},
}, },
debug_side_table.get()); debug_side_table.get());
} }
...@@ -390,9 +390,9 @@ TEST(Liftoff_debug_side_table_trap) { ...@@ -390,9 +390,9 @@ TEST(Liftoff_debug_side_table_trap) {
CheckDebugSideTable( CheckDebugSideTable(
{ {
// function entry, locals in registers. // function entry, locals in registers.
{2, {Register(0, kWasmI32), Register(1, kWasmI32)}}, {2, {Register(0, kI32), Register(1, kI32)}},
// OOL stack check, local spilled, stack empty. // OOL stack check, local spilled, stack empty.
{2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}}, {2, {Stack(0, kI32), Stack(1, kI32)}},
// OOL trap (div by zero), stack as before. // OOL trap (div by zero), stack as before.
{2, {}}, {2, {}},
// OOL trap (unrepresentable), stack as before. // OOL trap (unrepresentable), stack as before.
...@@ -414,11 +414,11 @@ TEST(Liftoff_breakpoint_simple) { ...@@ -414,11 +414,11 @@ TEST(Liftoff_breakpoint_simple) {
CheckDebugSideTable( CheckDebugSideTable(
{ {
// First break point, locals in registers. // First break point, locals in registers.
{2, {Register(0, kWasmI32), Register(1, kWasmI32)}}, {2, {Register(0, kI32), Register(1, kI32)}},
// Second break point, locals unchanged, two register stack values. // Second break point, locals unchanged, two register stack values.
{4, {Register(2, kWasmI32), Register(3, kWasmI32)}}, {4, {Register(2, kI32), Register(3, kI32)}},
// OOL stack check, locals spilled, stack empty. // OOL stack check, locals spilled, stack empty.
{2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}}, {2, {Stack(0, kI32), Stack(1, kI32)}},
}, },
debug_side_table.get()); debug_side_table.get());
} }
......