Commit 96a0677a authored by Clemens Backes, committed by Commit Bot

[Liftoff] Use ValueKind instead of ValueType

The precise type is only used for validation. For code generation,
knowing the kind is more than enough. Hence, only store and pass the
ValueKind in Liftoff, and not the full ValueType.

R=manoskouk@chromium.org

Bug: v8:11477
Change-Id: Ia42c0fa419f75b508bd2f210c767b631e93d3398
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2707170
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72997}
parent 23fa9ffd
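
Context for the change (not part of the diff below): a wasm ValueType carries information that only validation needs, such as which heap type a reference points to, while the machine code Liftoff emits depends only on the kind (i32, i64, f32, f64, s128, ref, ...). The following minimal, self-contained C++ sketch illustrates that distinction; the names mirror the V8 ones, but the definitions are simplified assumptions for illustration, not the real implementation.

#include <cstdint>
#include <cstdio>

enum ValueKind : uint8_t { kI32, kI64, kF32, kF64, kS128, kRef, kOptRef };

// Slot/operand size depends only on the kind, never on the concrete heap type.
constexpr int element_size_bytes(ValueKind kind) {
  switch (kind) {
    case kI32:
    case kF32:
      return 4;
    case kI64:
    case kF64:
      return 8;
    case kS128:
      return 16;
    case kRef:
    case kOptRef:
      return static_cast<int>(sizeof(void*));
  }
  return 0;
}

// ValueType = kind plus validation-only payload (here: a heap type index).
class ValueType {
 public:
  constexpr explicit ValueType(ValueKind kind, uint32_t heap_type = 0)
      : kind_(kind), heap_type_(heap_type) {}
  constexpr ValueKind kind() const { return kind_; }
  constexpr uint32_t heap_type() const { return heap_type_; }

 private:
  ValueKind kind_;
  uint32_t heap_type_;
};

int main() {
  // Two distinct ValueTypes (references to different heap types) ...
  ValueType a(kRef, /*heap_type=*/3);
  ValueType b(kRef, /*heap_type=*/7);
  // ... but code generation treats them identically, so passing the kind
  // alone (as this commit does throughout Liftoff) loses nothing.
  std::printf("%d %d\n", element_size_bytes(a.kind()),
              element_size_bytes(b.kind()));
  return 0;
}
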
......@@ -293,14 +293,14 @@ inline void F64x2Compare(LiftoffAssembler* assm, LiftoffRegister dst,
}
inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
ValueType type) {
ValueKind kind) {
#ifdef DEBUG
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
DCHECK(UseScratchRegisterScope{assm}.CanAcquire());
#endif
switch (type.kind()) {
switch (kind) {
case kI32:
case kOptRef:
case kRef:
......@@ -334,8 +334,8 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
ValueType type) {
switch (type.kind()) {
ValueKind kind) {
switch (kind) {
case kI32:
case kOptRef:
case kRef:
......@@ -534,17 +534,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
switch (type.kind()) {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
switch (kind) {
case kS128:
return type.element_size_bytes();
return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return (type.kind() == kS128 || type.is_reference_type());
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -1081,7 +1081,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
}
ParallelRegisterMove(
{{dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}});
{{dst, LiftoffRegister::ForPair(dst_low, dst_high), kI64}});
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
......@@ -1203,11 +1203,10 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
__ ParallelRegisterMove(
{{LiftoffRegister::ForPair(new_value_low, new_value_high), new_value,
kWasmI64},
{LiftoffRegister::ForPair(expected_low, expected_high), expected,
kWasmI64},
{dst_addr, dst_addr_reg, kWasmI32},
{offset, offset_reg != no_reg ? offset_reg : offset, kWasmI32}});
kI64},
{LiftoffRegister::ForPair(expected_low, expected_high), expected, kI64},
{dst_addr, dst_addr_reg, kI32},
{offset, offset_reg != no_reg ? offset_reg : offset, kI32}});
{
UseScratchRegisterScope temps(lasm);
......@@ -1235,7 +1234,7 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
__ bind(&done);
__ ParallelRegisterMove(
{{result, LiftoffRegister::ForPair(result_low, result_high), kWasmI64}});
{{result, LiftoffRegister::ForPair(result_low, result_high), kI64}});
}
#undef __
} // namespace liftoff
......@@ -1346,52 +1345,52 @@ void LiftoffAssembler::AtomicFence() { dmb(ISH); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
ValueKind kind) {
MemOperand src(fp, (caller_slot_idx + 1) * kSystemPointerSize);
liftoff::Load(this, dst, src, type);
liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
ValueType type) {
ValueKind kind) {
MemOperand dst(fp, (caller_slot_idx + 1) * kSystemPointerSize);
liftoff::Store(this, src, dst, type);
liftoff::Store(this, src, dst, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
ValueType type) {
ValueKind kind) {
MemOperand src(sp, offset);
liftoff::Load(this, dst, src, type);
liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
Fill(reg, src_offset, kind);
Spill(dst_offset, reg, kind);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
DCHECK(type == kWasmI32 || type.is_reference_type());
DCHECK(kind == kI32 || is_reference_type(kind));
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
ValueKind kind) {
DCHECK_NE(dst, src);
if (type == kWasmF32) {
if (kind == kF32) {
vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
} else if (type == kWasmF64) {
} else if (kind == kF64) {
vmov(dst, src);
} else {
DCHECK_EQ(kWasmS128, type);
DCHECK_EQ(kS128, kind);
vmov(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
}
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
......@@ -1399,7 +1398,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
DCHECK_LT(0, offset);
RecordUsedSpillOffset(offset);
MemOperand dst(fp, -offset);
liftoff::Store(this, reg, dst, type);
liftoff::Store(this, reg, dst, kind);
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
......@@ -1434,8 +1433,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type);
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
liftoff::Load(this, reg, liftoff::GetStackSlot(offset), kind);
}
void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
......@@ -2186,16 +2185,16 @@ void LiftoffAssembler::emit_jump(Label* label) { b(label); }
void LiftoffAssembler::emit_jump(Register target) { bx(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueType type,
Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
DCHECK_EQ(type, kWasmI32);
DCHECK_EQ(kind, kI32);
cmp(lhs, Operand(0));
} else {
DCHECK(type == kWasmI32 ||
(type.is_reference_type() &&
DCHECK(kind == kI32 ||
(is_reference_type(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
cmp(lhs, rhs);
}
......@@ -2304,7 +2303,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
ValueType type) {
ValueKind kind) {
return false;
}
......@@ -4114,10 +4113,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Ret();
}
void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
// Arguments are passed by pushing them all to the stack and then passing
// a pointer to them.
......@@ -4126,8 +4125,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
switch (param_type.kind()) {
for (ValueKind param_kind : sig->parameters()) {
switch (param_kind) {
case kI32:
str(args->gp(), MemOperand(sp, arg_bytes));
break;
......@@ -4150,7 +4149,7 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
UNREACHABLE();
}
args++;
arg_bytes += param_type.element_size_bytes();
arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
......@@ -4174,8 +4173,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
if (out_argument_type != kWasmStmt) {
switch (out_argument_type.kind()) {
if (out_argument_kind != kStmt) {
switch (out_argument_kind) {
case kI32:
ldr(result_reg->gp(), MemOperand(sp));
break;
......@@ -4208,7 +4207,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
DCHECK(target != no_reg);
......@@ -4240,7 +4239,7 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
switch (src.type().kind()) {
switch (src.kind()) {
// i32 and i64 can be treated as similar cases, i64 being previously
// split into two i32 registers
case kI32:
......@@ -4276,7 +4275,7 @@ void LiftoffStackSlots::Construct() {
break;
}
case LiftoffAssembler::VarState::kRegister:
switch (src.type().kind()) {
switch (src.kind()) {
case kI64: {
LiftoffRegister reg =
slot.half_ == kLowWord ? src.reg().low() : src.reg().high();
......@@ -4301,7 +4300,7 @@ void LiftoffStackSlots::Construct() {
}
break;
case LiftoffAssembler::VarState::kIntConst: {
DCHECK(src.type() == kWasmI32 || src.type() == kWasmI64);
DCHECK(src.kind() == kI32 || src.kind() == kI64);
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
// The high word is the sign extension of the low word.
......
......@@ -72,8 +72,8 @@ inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
switch (type.kind()) {
inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueKind kind) {
switch (kind) {
case kI32:
return reg.gp().W();
case kI64:
......@@ -104,8 +104,8 @@ inline CPURegList PadVRegList(RegList list) {
}
inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
ValueType type) {
switch (type.kind()) {
ValueKind kind) {
switch (kind) {
case kI32:
return temps->AcquireW();
case kI64:
......@@ -357,19 +357,19 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
// TODO(zhin): Unaligned access typically take additional cycles, we should do
// some performance testing to see how big an effect it will take.
switch (type.kind()) {
switch (kind) {
case kS128:
return type.element_size_bytes();
return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.kind() == kS128 || type.is_reference_type();
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -840,56 +840,56 @@ void LiftoffAssembler::AtomicFence() { Dmb(InnerShareable, BarrierAll); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
ValueKind kind) {
int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
Ldr(liftoff::GetRegFromType(dst, kind), MemOperand(fp, offset));
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
ValueType type) {
ValueKind kind) {
int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
Str(liftoff::GetRegFromType(src, type), MemOperand(fp, offset));
Str(liftoff::GetRegFromType(src, kind), MemOperand(fp, offset));
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
ValueType type) {
Ldr(liftoff::GetRegFromType(dst, type), MemOperand(sp, offset));
ValueKind kind) {
Ldr(liftoff::GetRegFromType(dst, kind), MemOperand(sp, offset));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
ValueKind kind) {
UseScratchRegisterScope temps(this);
CPURegister scratch = liftoff::AcquireByType(&temps, type);
CPURegister scratch = liftoff::AcquireByType(&temps, kind);
Ldr(scratch, liftoff::GetStackSlot(src_offset));
Str(scratch, liftoff::GetStackSlot(dst_offset));
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
if (type == kWasmI32) {
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
if (kind == kI32) {
Mov(dst.W(), src.W());
} else {
DCHECK(kWasmI64 == type || type.is_reference_type());
DCHECK(kI64 == kind || is_reference_type(kind));
Mov(dst.X(), src.X());
}
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
if (type == kWasmF32) {
ValueKind kind) {
if (kind == kF32) {
Fmov(dst.S(), src.S());
} else if (type == kWasmF64) {
} else if (kind == kF64) {
Fmov(dst.D(), src.D());
} else {
DCHECK_EQ(kWasmS128, type);
DCHECK_EQ(kS128, kind);
Mov(dst.Q(), src.Q());
}
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
Str(liftoff::GetRegFromType(reg, type), dst);
Str(liftoff::GetRegFromType(reg, kind), dst);
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
......@@ -921,9 +921,9 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
Str(src, dst);
}
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
MemOperand src = liftoff::GetStackSlot(offset);
Ldr(liftoff::GetRegFromType(reg, type), src);
Ldr(liftoff::GetRegFromType(reg, kind), src);
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
......@@ -1506,10 +1506,10 @@ void LiftoffAssembler::emit_jump(Label* label) { B(label); }
void LiftoffAssembler::emit_jump(Register target) { Br(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueType type,
Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
switch (type.kind()) {
switch (kind) {
case kI32:
if (rhs.is_valid()) {
Cmp(lhs.W(), rhs.W());
......@@ -1598,7 +1598,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
ValueType type) {
ValueKind kind) {
return false;
}
......@@ -3124,10 +3124,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Ret();
}
void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
// The stack pointer is required to be quadword aligned.
int total_size = RoundUp(stack_bytes, kQuadWordSizeInBytes);
......@@ -3135,9 +3135,9 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
Claim(total_size, 1);
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
Poke(liftoff::GetRegFromType(*args++, param_type), arg_bytes);
arg_bytes += param_type.element_size_bytes();
for (ValueKind param_kind : sig->parameters()) {
Poke(liftoff::GetRegFromType(*args++, param_kind), arg_bytes);
arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
......@@ -3160,8 +3160,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
if (out_argument_type != kWasmStmt) {
Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_type), 0);
if (out_argument_kind != kStmt) {
Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_kind), 0);
}
Drop(total_size, 1);
......@@ -3175,7 +3175,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
// For Arm64, we have more cache registers than wasm parameters. That means
......@@ -3217,34 +3217,34 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffStackSlots::Construct() {
size_t num_slots = 0;
for (auto& slot : slots_) {
num_slots += slot.src_.type() == kWasmS128 ? 2 : 1;
num_slots += slot.src_.kind() == kS128 ? 2 : 1;
}
// The stack pointer is required to be quadword aligned.
asm_->Claim(RoundUp(num_slots, 2));
size_t poke_offset = num_slots * kXRegSize;
for (auto& slot : slots_) {
poke_offset -= slot.src_.type() == kWasmS128 ? kXRegSize * 2 : kXRegSize;
poke_offset -= slot.src_.kind() == kS128 ? kXRegSize * 2 : kXRegSize;
switch (slot.src_.loc()) {
case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_);
CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.kind());
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->Poke(scratch, poke_offset);
break;
}
case LiftoffAssembler::VarState::kRegister:
asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.type()),
asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.kind()),
poke_offset);
break;
case LiftoffAssembler::VarState::kIntConst:
DCHECK(slot.src_.type() == kWasmI32 || slot.src_.type() == kWasmI64);
DCHECK(slot.src_.kind() == kI32 || slot.src_.kind() == kI64);
if (slot.src_.i32_const() == 0) {
Register zero_reg = slot.src_.type() == kWasmI32 ? wzr : xzr;
Register zero_reg = slot.src_.kind() == kI32 ? wzr : xzr;
asm_->Poke(zero_reg, poke_offset);
} else {
UseScratchRegisterScope temps(asm_);
Register scratch = slot.src_.type() == kWasmI32 ? temps.AcquireW()
: temps.AcquireX();
Register scratch =
slot.src_.kind() == kI32 ? temps.AcquireW() : temps.AcquireX();
asm_->Mov(scratch, int64_t{slot.src_.i32_const()});
asm_->Poke(scratch, poke_offset);
}
......
......@@ -65,9 +65,9 @@ static constexpr LiftoffRegList kByteRegs =
LiftoffRegList::FromBits<Register::ListOf(eax, ecx, edx)>();
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
int32_t offset, ValueType type) {
int32_t offset, ValueKind kind) {
Operand src(base, offset);
switch (type.kind()) {
switch (kind) {
case kI32:
case kOptRef:
case kRef:
......@@ -94,9 +94,9 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
LiftoffRegister src, ValueType type) {
LiftoffRegister src, ValueKind kind) {
Operand dst(base, offset);
switch (type.kind()) {
switch (kind) {
case kI32:
assm->mov(dst, src.gp());
break;
......@@ -118,8 +118,8 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type.kind()) {
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
switch (kind) {
case kI32:
case kRef:
case kOptRef:
......@@ -261,13 +261,13 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
return type.is_reference_type() ? kSystemPointerSize
: type.element_size_bytes();
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
return is_reference_type(kind) ? kSystemPointerSize
: element_size_bytes(kind);
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.is_reference_type();
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -828,7 +828,7 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
__ SpillRegisters(old_hi, old_lo, new_hi, base, offset);
__ ParallelRegisterMove(
{{LiftoffRegister::ForPair(base, offset),
LiftoffRegister::ForPair(dst_addr, offset_reg), kWasmI64}});
LiftoffRegister::ForPair(dst_addr, offset_reg), kI64}});
Operand dst_op_lo = Operand(base, offset, times_1, offset_imm);
Operand dst_op_hi = Operand(base, offset, times_1, offset_imm + 4);
......@@ -877,7 +877,7 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
// Move the result into the correct registers.
__ ParallelRegisterMove(
{{result, LiftoffRegister::ForPair(old_lo, old_hi), kWasmI64}});
{{result, LiftoffRegister::ForPair(old_lo, old_hi), kI64}});
}
#undef __
......@@ -1065,9 +1065,9 @@ void LiftoffAssembler::AtomicCompareExchange(
// Move all other values into the right register.
ParallelRegisterMove(
{{LiftoffRegister(address), LiftoffRegister(dst_addr), kWasmI32},
{LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kWasmI64},
{LiftoffRegister(new_hi), new_value.high(), kWasmI32}});
{{LiftoffRegister(address), LiftoffRegister(dst_addr), kI32},
{LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kI64},
{LiftoffRegister(new_hi), new_value.high(), kI32}});
Operand dst_op = Operand(address, offset_imm);
......@@ -1079,33 +1079,33 @@ void LiftoffAssembler::AtomicCompareExchange(
// Move the result into the correct registers.
ParallelRegisterMove(
{{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kWasmI64}});
{{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kI64}});
}
void LiftoffAssembler::AtomicFence() { mfence(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
ValueKind kind) {
liftoff::Load(this, dst, ebp, kSystemPointerSize * (caller_slot_idx + 1),
type);
kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset,
ValueType type) {
liftoff::Load(this, reg, esp, offset, type);
ValueKind kind) {
liftoff::Load(this, reg, esp, offset, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
ValueType type) {
ValueKind kind) {
liftoff::Store(this, ebp, kSystemPointerSize * (caller_slot_idx + 1), src,
type);
kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
if (needs_gp_reg_pair(type)) {
ValueKind kind) {
if (needs_gp_reg_pair(kind)) {
liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_offset, kLowWord),
liftoff::GetHalfStackSlot(dst_offset, kLowWord));
......@@ -1118,29 +1118,29 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
}
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
DCHECK(kWasmI32 == type || type.is_reference_type());
DCHECK(kI32 == kind || is_reference_type(kind));
mov(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
ValueKind kind) {
DCHECK_NE(dst, src);
if (type == kWasmF32) {
if (kind == kF32) {
movss(dst, src);
} else if (type == kWasmF64) {
} else if (kind == kF64) {
movsd(dst, src);
} else {
DCHECK_EQ(kWasmS128, type);
DCHECK_EQ(kS128, kind);
Movaps(dst, src);
}
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (type.kind()) {
switch (kind) {
case kI32:
case kOptRef:
case kRef:
......@@ -1186,8 +1186,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
liftoff::Load(this, reg, ebp, -offset, type);
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
liftoff::Load(this, reg, ebp, -offset, kind);
}
void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
......@@ -1517,7 +1517,7 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
// If necessary, move result into the right registers.
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
if (tmp_result != dst) assm->Move(dst, tmp_result, kI64);
}
template <void (Assembler::*op)(Register, const Immediate&),
......@@ -1576,9 +1576,8 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
SpillRegisters(dst_hi, dst_lo, lhs_hi, rhs_lo);
// Move lhs and rhs into the respective registers.
ParallelRegisterMove(
{{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kWasmI64},
{LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kWasmI64}});
ParallelRegisterMove({{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kI64},
{LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kI64}});
// First mul: lhs_hi' = lhs_hi * rhs_lo.
imul(lhs_hi, rhs_lo);
......@@ -1593,7 +1592,7 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
// Finally, move back the temporary result to the actual dst register pair.
LiftoffRegister dst_tmp = LiftoffRegister::ForPair(dst_lo, dst_hi);
if (dst != dst_tmp) Move(dst, dst_tmp, kWasmI64);
if (dst != dst_tmp) Move(dst, dst_tmp, kI64);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
......@@ -1660,11 +1659,11 @@ inline void Emit64BitShiftOperation(
(assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
pinned.has(LiftoffRegister(ecx)))) {
ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
reg_moves.emplace_back(ecx_replace, ecx, kWasmI32);
reg_moves.emplace_back(ecx_replace, ecx, kI32);
}
reg_moves.emplace_back(dst, src, kWasmI64);
reg_moves.emplace_back(ecx, amount, kWasmI32);
reg_moves.emplace_back(dst, src, kI64);
reg_moves.emplace_back(ecx, amount, kI32);
assm->ParallelRegisterMove(VectorOf(reg_moves));
// Do the actual shift.
......@@ -1689,7 +1688,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) shl(dst.high_gp(), amount - 32);
xor_(dst.low_gp(), dst.low_gp());
} else {
if (dst != src) Move(dst, src, kWasmI64);
if (dst != src) Move(dst, src, kI64);
ShlPair(dst.high_gp(), dst.low_gp(), amount);
}
}
......@@ -1709,7 +1708,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) sar(dst.low_gp(), amount - 32);
sar(dst.high_gp(), 31);
} else {
if (dst != src) Move(dst, src, kWasmI64);
if (dst != src) Move(dst, src, kI64);
SarPair(dst.high_gp(), dst.low_gp(), amount);
}
}
......@@ -1727,7 +1726,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) shr(dst.low_gp(), amount - 32);
xor_(dst.high_gp(), dst.high_gp());
} else {
if (dst != src) Move(dst, src, kWasmI64);
if (dst != src) Move(dst, src, kI64);
ShrPair(dst.high_gp(), dst.low_gp(), amount);
}
}
......@@ -2402,11 +2401,11 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueType type,
Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
switch (type.kind()) {
switch (kind) {
case kRef:
case kOptRef:
case kRtt:
......@@ -2420,7 +2419,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
UNREACHABLE();
}
} else {
DCHECK_EQ(type, kWasmI32);
DCHECK_EQ(kind, kI32);
test(lhs, lhs);
}
......@@ -2572,7 +2571,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
ValueType type) {
ValueKind kind) {
return false;
}
......@@ -4880,17 +4879,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
liftoff::Store(this, esp, arg_bytes, *args++, param_type);
arg_bytes += param_type.element_size_bytes();
for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, esp, arg_bytes, *args++, param_kind);
arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
......@@ -4919,8 +4918,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
if (out_argument_type != kWasmStmt) {
liftoff::Load(this, *next_result_reg, esp, 0, out_argument_type);
if (out_argument_kind != kStmt) {
liftoff::Load(this, *next_result_reg, esp, 0, out_argument_kind);
}
add(esp, Immediate(stack_bytes));
......@@ -4934,7 +4933,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
jmp(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
// Since we have more cache registers than parameter registers, the
......@@ -4980,26 +4979,26 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack:
// The combination of AllocateStackSpace and 2 movdqu is usually smaller
// in code size than doing 4 pushes.
if (src.type() == kWasmS128) {
if (src.kind() == kS128) {
asm_->AllocateStackSpace(sizeof(double) * 2);
asm_->movdqu(liftoff::kScratchDoubleReg,
liftoff::GetStackSlot(slot.src_offset_));
asm_->movdqu(Operand(esp, 0), liftoff::kScratchDoubleReg);
break;
}
if (src.type() == kWasmF64) {
if (src.kind() == kF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
}
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
break;
case LiftoffAssembler::VarState::kRegister:
if (src.type() == kWasmI64) {
if (src.kind() == kI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
kWasmI32);
kI32);
} else {
liftoff::push(asm_, src.reg(), src.type());
liftoff::push(asm_, src.reg(), src.kind());
}
break;
case LiftoffAssembler::VarState::kIntConst:
......
......@@ -23,17 +23,18 @@ namespace internal {
namespace wasm {
using VarState = LiftoffAssembler::VarState;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
constexpr ValueType LiftoffAssembler::kWasmIntPtr;
constexpr ValueKind LiftoffAssembler::kIntPtr;
namespace {
class StackTransferRecipe {
struct RegisterMove {
LiftoffRegister src;
ValueType type;
constexpr RegisterMove(LiftoffRegister src, ValueType type)
: src(src), type(type) {}
ValueKind kind;
constexpr RegisterMove(LiftoffRegister src, ValueKind kind)
: src(src), kind(kind) {}
};
struct RegisterLoad {
......@@ -45,35 +46,34 @@ class StackTransferRecipe {
kHighHalfStack // fill a register from the high half of a stack slot.
};
LoadKind kind;
ValueType type;
LoadKind load_kind;
ValueKind kind;
int32_t value; // i32 constant value or stack offset, depending on kind.
// Named constructors.
static RegisterLoad Const(WasmValue constant) {
if (constant.type() == kWasmI32) {
return {kConstant, kWasmI32, constant.to_i32()};
if (constant.type().kind() == kI32) {
return {kConstant, kI32, constant.to_i32()};
}
DCHECK_EQ(kWasmI64, constant.type());
DCHECK_EQ(kI64, constant.type().kind());
int32_t i32_const = static_cast<int32_t>(constant.to_i64());
DCHECK_EQ(constant.to_i64(), i32_const);
return {kConstant, kWasmI64, i32_const};
return {kConstant, kI64, i32_const};
}
static RegisterLoad Stack(int32_t offset, ValueType type) {
return {kStack, type, offset};
static RegisterLoad Stack(int32_t offset, ValueKind kind) {
return {kStack, kind, offset};
}
static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) {
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
offset};
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kI32, offset};
}
static RegisterLoad Nop() {
// ValueType does not matter.
return {kNop, kWasmI32, 0};
// ValueKind does not matter.
return {kNop, kI32, 0};
}
private:
RegisterLoad(LoadKind kind, ValueType type, int32_t value)
: kind(kind), type(type), value(value) {}
RegisterLoad(LoadKind load_kind, ValueKind kind, int32_t value)
: load_kind(load_kind), kind(kind), value(value) {}
};
public:
......@@ -92,12 +92,12 @@ class StackTransferRecipe {
}
#if DEBUG
bool CheckCompatibleStackSlotTypes(ValueType dst, ValueType src) {
if (dst.is_object_reference_type()) {
bool CheckCompatibleStackSlotTypes(ValueKind dst, ValueKind src) {
if (is_object_reference_type(dst)) {
// Since Liftoff doesn't do accurate type tracking (e.g. on loop back
// edges), we only care that pointer types stay amongst pointer types.
// It's fine if ref/optref overwrite each other.
DCHECK(src.is_object_reference_type());
DCHECK(is_object_reference_type(src));
} else {
// All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
DCHECK_EQ(dst, src);
......@@ -107,7 +107,7 @@ class StackTransferRecipe {
#endif
V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
DCHECK(CheckCompatibleStackSlotTypes(dst.type(), src.type()));
DCHECK(CheckCompatibleStackSlotTypes(dst.kind(), src.kind()));
if (dst.is_reg()) {
LoadIntoRegister(dst.reg(), src, src.offset());
return;
......@@ -120,11 +120,11 @@ class StackTransferRecipe {
switch (src.loc()) {
case VarState::kStack:
if (src.offset() != dst.offset()) {
asm_->MoveStackValue(dst.offset(), src.offset(), src.type());
asm_->MoveStackValue(dst.offset(), src.offset(), src.kind());
}
break;
case VarState::kRegister:
asm_->Spill(dst.offset(), src.reg(), src.type());
asm_->Spill(dst.offset(), src.reg(), src.kind());
break;
case VarState::kIntConst:
asm_->Spill(dst.offset(), src.constant());
......@@ -137,11 +137,11 @@ class StackTransferRecipe {
uint32_t src_offset) {
switch (src.loc()) {
case VarState::kStack:
LoadStackSlot(dst, src_offset, src.type());
LoadStackSlot(dst, src_offset, src.kind());
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
if (dst != src.reg()) MoveRegister(dst, src.reg(), src.kind());
break;
case VarState::kIntConst:
LoadConstant(dst, src.constant());
......@@ -155,7 +155,7 @@ class StackTransferRecipe {
// Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair);
DCHECK_EQ(kWasmI64, src.type());
DCHECK_EQ(kI64, src.kind());
switch (src.loc()) {
case VarState::kStack:
LoadI64HalfStackSlot(dst, offset, half);
......@@ -163,7 +163,7 @@ class StackTransferRecipe {
case VarState::kRegister: {
LiftoffRegister src_half =
half == kLowWord ? src.reg().low() : src.reg().high();
if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
if (dst != src_half) MoveRegister(dst, src_half, kI32);
break;
}
case VarState::kIntConst:
......@@ -175,45 +175,44 @@ class StackTransferRecipe {
}
}
void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueKind kind) {
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
DCHECK_EQ(reg_class_for(type), src.reg_class());
DCHECK_EQ(reg_class_for(kind), src.reg_class());
if (src.is_gp_pair()) {
DCHECK_EQ(kWasmI64, type);
if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
if (dst.high() != src.high())
MoveRegister(dst.high(), src.high(), kWasmI32);
DCHECK_EQ(kI64, kind);
if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kI32);
if (dst.high() != src.high()) MoveRegister(dst.high(), src.high(), kI32);
return;
}
if (src.is_fp_pair()) {
DCHECK_EQ(kWasmS128, type);
DCHECK_EQ(kS128, kind);
if (dst.low() != src.low()) {
MoveRegister(dst.low(), src.low(), kWasmF64);
MoveRegister(dst.high(), src.high(), kWasmF64);
MoveRegister(dst.low(), src.low(), kF64);
MoveRegister(dst.high(), src.high(), kF64);
}
return;
}
if (move_dst_regs_.has(dst)) {
DCHECK_EQ(register_move(dst)->src, src);
// Non-fp registers can only occur with the exact same type.
DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->type == type);
DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->kind == kind);
// It can happen that one fp register holds both the f32 zero and the f64
// zero, as the initial value for local variables. Move the value as f64
// in that case.
if (type == kWasmF64) register_move(dst)->type = kWasmF64;
if (kind == kF64) register_move(dst)->kind = kF64;
return;
}
move_dst_regs_.set(dst);
++*src_reg_use_count(src);
*register_move(dst) = {src, type};
*register_move(dst) = {src, kind};
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
DCHECK(!load_dst_regs_.has(dst));
load_dst_regs_.set(dst);
if (dst.is_gp_pair()) {
DCHECK_EQ(kWasmI64, value.type());
DCHECK_EQ(kI64, value.type().kind());
int64_t i64 = value.to_i64();
*register_load(dst.low()) =
RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64)));
......@@ -225,7 +224,7 @@ class StackTransferRecipe {
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_offset,
ValueType type) {
ValueKind kind) {
if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack
// slots, and then we reload them later into the same dst register.
......@@ -234,20 +233,20 @@ class StackTransferRecipe {
}
load_dst_regs_.set(dst);
if (dst.is_gp_pair()) {
DCHECK_EQ(kWasmI64, type);
DCHECK_EQ(kI64, kind);
*register_load(dst.low()) =
RegisterLoad::HalfStack(stack_offset, kLowWord);
*register_load(dst.high()) =
RegisterLoad::HalfStack(stack_offset, kHighWord);
} else if (dst.is_fp_pair()) {
DCHECK_EQ(kWasmS128, type);
DCHECK_EQ(kS128, kind);
// Only need register_load for low_gp since we load 128 bits at one go.
// Both low and high need to be set in load_dst_regs_ but when iterating
// over it, both low and high will be cleared, so we won't load twice.
*register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type);
*register_load(dst.low()) = RegisterLoad::Stack(stack_offset, kind);
*register_load(dst.high()) = RegisterLoad::Nop();
} else {
*register_load(dst) = RegisterLoad::Stack(stack_offset, type);
*register_load(dst) = RegisterLoad::Stack(stack_offset, kind);
}
}
......@@ -295,7 +294,7 @@ class StackTransferRecipe {
void ExecuteMove(LiftoffRegister dst) {
RegisterMove* move = register_move(dst);
DCHECK_EQ(0, *src_reg_use_count(dst));
asm_->Move(dst, move->src, move->type);
asm_->Move(dst, move->src, move->kind);
ClearExecutedMove(dst);
}
......@@ -329,11 +328,11 @@ class StackTransferRecipe {
// TODO(clemensb): Use an unused register if available.
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst);
last_spill_offset += LiftoffAssembler::SlotSizeForType(move->type);
last_spill_offset += LiftoffAssembler::SlotSizeForType(move->kind);
LiftoffRegister spill_reg = move->src;
asm_->Spill(last_spill_offset, spill_reg, move->type);
asm_->Spill(last_spill_offset, spill_reg, move->kind);
// Remember to reload into the destination register later.
LoadStackSlot(dst, last_spill_offset, move->type);
LoadStackSlot(dst, last_spill_offset, move->kind);
ClearExecutedMove(dst);
}
}
......@@ -341,20 +340,20 @@ class StackTransferRecipe {
void ExecuteLoads() {
for (LiftoffRegister dst : load_dst_regs_) {
RegisterLoad* load = register_load(dst);
switch (load->kind) {
switch (load->load_kind) {
case RegisterLoad::kNop:
break;
case RegisterLoad::kConstant:
asm_->LoadConstant(dst, load->type == kWasmI64
asm_->LoadConstant(dst, load->kind == kI64
? WasmValue(int64_t{load->value})
: WasmValue(int32_t{load->value}));
break;
case RegisterLoad::kStack:
if (kNeedS128RegPair && load->type == kWasmS128) {
if (kNeedS128RegPair && load->kind == kS128) {
asm_->Fill(LiftoffRegister::ForFpPair(dst.fp()), load->value,
load->type);
load->kind);
} else {
asm_->Fill(dst, load->value, load->type);
asm_->Fill(dst, load->value, load->kind);
}
break;
case RegisterLoad::kLowHalfStack:
......@@ -431,18 +430,18 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
reg = register_reuse_map.Lookup(source->reg());
}
// Third try: Use any free register.
RegClass rc = reg_class_for(source->type());
RegClass rc = reg_class_for(source->kind());
if (!reg && state->has_unused_register(rc, used_regs)) {
reg = state->unused_register(rc, used_regs);
}
if (!reg) {
// No free register; make this a stack slot.
*target = VarState(source->type(), source->offset());
*target = VarState(source->kind(), source->offset());
continue;
}
if (reuse_registers) register_reuse_map.Add(source->reg(), *reg);
state->inc_used(*reg);
*target = VarState(source->type(), *reg, source->offset());
*target = VarState(source->kind(), *reg, source->offset());
}
}
......@@ -534,7 +533,7 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
ZoneVector<int>* slots, LiftoffRegList* spills,
SpillLocation spill_location) {
for (const auto& slot : stack_state) {
if (!slot.type().is_reference_type()) continue;
if (!is_reference_type(slot.kind())) continue;
if (spill_location == SpillLocation::kTopOfStack && slot.is_reg()) {
// Registers get spilled just before the call to the runtime. In {spills}
......@@ -553,7 +552,7 @@ void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
for (const auto& slot : stack_state) {
DCHECK(!slot.is_reg());
if (slot.type().is_reference_type()) {
if (is_reference_type(slot.kind())) {
safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
}
}
......@@ -591,12 +590,12 @@ LiftoffAssembler::~LiftoffAssembler() {
LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
LiftoffRegList pinned) {
if (slot.is_reg()) return slot.reg();
LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.type()), pinned);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.kind()), pinned);
if (slot.is_const()) {
LoadConstant(reg, slot.constant());
} else {
DCHECK(slot.is_stack());
Fill(reg, slot.offset(), slot.type());
Fill(reg, slot.offset(), slot.kind());
}
return reg;
}
......@@ -647,7 +646,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
for (int i = 0; i < num; ++i) {
VarState& slot = cache_state_.stack_state.end()[-1 - i];
if (slot.is_stack()) continue;
RegClass rc = reg_class_for(slot.type());
RegClass rc = reg_class_for(slot.kind());
if (slot.is_reg()) {
if (cache_state_.get_use_count(slot.reg()) > 1) {
// If the register is used more than once, we cannot use it for the
......@@ -655,7 +654,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
LiftoffRegList pinned;
pinned.set(slot.reg());
LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
Move(dst_reg, slot.reg(), slot.type());
Move(dst_reg, slot.reg(), slot.kind());
cache_state_.dec_used(slot.reg());
cache_state_.inc_used(dst_reg);
slot.MakeRegister(dst_reg);
......@@ -677,7 +676,7 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) {
VectorOf(stack_base, num_locals())}) {
for (VarState& slot : slots) {
if (!slot.is_const()) continue;
RegClass rc = reg_class_for(slot.type());
RegClass rc = reg_class_for(slot.kind());
if (cache_state_.has_unused_register(rc)) {
LiftoffRegister reg = cache_state_.unused_register(rc);
LoadConstant(reg, slot.constant());
......@@ -744,7 +743,7 @@ void LiftoffAssembler::Spill(VarState* slot) {
case VarState::kStack:
return;
case VarState::kRegister:
Spill(slot->offset(), slot->reg(), slot->type());
Spill(slot->offset(), slot->reg(), slot->kind());
cache_state_.dec_used(slot->reg());
break;
case VarState::kIntConst:
......@@ -764,7 +763,7 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue;
Spill(slot.offset(), slot.reg(), slot.type());
Spill(slot.offset(), slot.reg(), slot.kind());
slot.MakeStack();
}
cache_state_.ClearCachedInstanceRegister();
......@@ -786,7 +785,7 @@ void LiftoffAssembler::ClearRegister(
if (reg != *use) continue;
if (replacement == no_reg) {
replacement = GetUnusedRegister(kGpReg, pinned).gp();
Move(replacement, reg, LiftoffAssembler::kWasmIntPtr);
Move(replacement, reg, LiftoffAssembler::kIntPtr);
}
// We cannot leave this loop early. There may be multiple uses of {reg}.
*use = replacement;
......@@ -794,7 +793,7 @@ void LiftoffAssembler::ClearRegister(
}
namespace {
void PrepareStackTransfers(const FunctionSig* sig,
void PrepareStackTransfers(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
const VarState* slots,
LiftoffStackSlots* stack_slots,
......@@ -807,8 +806,8 @@ void PrepareStackTransfers(const FunctionSig* sig,
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
for (uint32_t i = num_params; i > 0; --i) {
const uint32_t param = i - 1;
ValueType type = sig->GetParam(param);
const bool is_gp_pair = kNeedI64RegPair && type == kWasmI64;
ValueKind kind = sig->GetParam(param);
const bool is_gp_pair = kNeedI64RegPair && kind == kI64;
const int num_lowered_params = is_gp_pair ? 2 : 1;
const VarState& slot = slots[param];
const uint32_t stack_offset = slot.offset();
......@@ -822,10 +821,10 @@ void PrepareStackTransfers(const FunctionSig* sig,
call_descriptor->GetInputLocation(call_desc_input_idx);
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type);
RegClass rc = is_gp_pair ? kGpReg : reg_class_for(kind);
int reg_code = loc.AsRegister();
LiftoffRegister reg =
LiftoffRegister::from_external_code(rc, type, reg_code);
LiftoffRegister::from_external_code(rc, kind, reg_code);
param_regs->set(reg);
if (is_gp_pair) {
stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
......@@ -844,7 +843,7 @@ void PrepareStackTransfers(const FunctionSig* sig,
} // namespace
void LiftoffAssembler::PrepareBuiltinCall(
const FunctionSig* sig, compiler::CallDescriptor* call_descriptor,
const ValueKindSig* sig, compiler::CallDescriptor* call_descriptor,
std::initializer_list<VarState> params) {
LiftoffStackSlots stack_slots(this);
StackTransferRecipe stack_transfers(this);
......@@ -863,7 +862,7 @@ void LiftoffAssembler::PrepareBuiltinCall(
cache_state_.reset_used_registers();
}
void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
Register* target_instance) {
......@@ -878,7 +877,7 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
!cache_state_.used_registers.is_empty();
--it) {
if (!it->is_reg()) continue;
Spill(it->offset(), it->reg(), it->type());
Spill(it->offset(), it->reg(), it->kind());
cache_state_.dec_used(it->reg());
it->MakeStack();
}
......@@ -895,8 +894,7 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
param_regs.set(instance_reg);
if (target_instance && *target_instance != instance_reg) {
stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
LiftoffRegister(*target_instance),
kWasmIntPtr);
LiftoffRegister(*target_instance), kIntPtr);
}
if (num_params) {
......@@ -914,10 +912,10 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
if (!free_regs.is_empty()) {
LiftoffRegister new_target = free_regs.GetFirstRegSet();
stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
kWasmIntPtr);
kIntPtr);
*target = new_target.gp();
} else {
stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kIntPtr,
LiftoffRegister(*target), 0));
*target = no_reg;
}
......@@ -939,15 +937,15 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
}
}
void LiftoffAssembler::FinishCall(const FunctionSig* sig,
void LiftoffAssembler::FinishCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor) {
int call_desc_return_idx = 0;
for (ValueType return_type : sig->returns()) {
for (ValueKind return_kind : sig->returns()) {
DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
const bool needs_gp_pair = needs_gp_reg_pair(return_type);
const bool needs_gp_pair = needs_gp_reg_pair(return_kind);
const int num_lowered_params = 1 + needs_gp_pair;
const ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
const RegClass rc = reg_class_for(lowered_type);
const ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
const RegClass rc = reg_class_for(lowered_kind);
// Initialize to anything, will be set in the loop and used afterwards.
LiftoffRegister reg_pair[2] = {kGpCacheRegList.GetFirstRegSet(),
kGpCacheRegList.GetFirstRegSet()};
......@@ -958,7 +956,7 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
reg_pair[pair_idx] = LiftoffRegister::from_external_code(
rc, lowered_type, loc.AsRegister());
rc, lowered_kind, loc.AsRegister());
} else {
DCHECK(loc.IsCallerFrameSlot());
reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
......@@ -966,16 +964,16 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
int offset = call_descriptor->GetOffsetToReturns();
int return_slot = -loc.GetLocation() - offset - 1;
LoadReturnStackSlot(reg_pair[pair_idx],
return_slot * kSystemPointerSize, lowered_type);
return_slot * kSystemPointerSize, lowered_kind);
}
if (pair_idx == 0) {
pinned.set(reg_pair[0]);
}
}
if (num_lowered_params == 1) {
PushRegister(return_type, reg_pair[0]);
PushRegister(return_kind, reg_pair[0]);
} else {
PushRegister(return_type, LiftoffRegister::ForPair(reg_pair[0].gp(),
PushRegister(return_kind, LiftoffRegister::ForPair(reg_pair[0].gp(),
reg_pair[1].gp()));
}
}
......@@ -984,21 +982,21 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
ValueType type) {
ValueKind kind) {
DCHECK_EQ(dst.reg_class(), src.reg_class());
DCHECK_NE(dst, src);
if (kNeedI64RegPair && dst.is_gp_pair()) {
// Use the {StackTransferRecipe} to move pairs, as the registers in the
// pairs might overlap.
StackTransferRecipe(this).MoveRegister(dst, src, type);
StackTransferRecipe(this).MoveRegister(dst, src, kind);
} else if (kNeedS128RegPair && dst.is_fp_pair()) {
// Calling low_fp is fine, Move will automatically check the type and
// Calling low_fp is fine, Move will automatically check the kind and
// convert this FP to its SIMD register, and use a SIMD move.
Move(dst.low_fp(), src.low_fp(), type);
Move(dst.low_fp(), src.low_fp(), kind);
} else if (dst.is_gp()) {
Move(dst.gp(), src.gp(), type);
Move(dst.gp(), src.gp(), kind);
} else {
Move(dst.fp(), src.fp(), type);
Move(dst.fp(), src.fp(), kind);
}
}
......@@ -1007,7 +1005,7 @@ void LiftoffAssembler::ParallelRegisterMove(
StackTransferRecipe stack_transfers(this);
for (auto tuple : tuples) {
if (tuple.dst == tuple.src) continue;
stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.type);
stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.kind);
}
}
......@@ -1015,19 +1013,19 @@ void LiftoffAssembler::MoveToReturnLocations(
const FunctionSig* sig, compiler::CallDescriptor* descriptor) {
StackTransferRecipe stack_transfers(this);
if (sig->return_count() == 1) {
ValueType return_type = sig->GetReturn(0);
// Defaults to a gp reg, will be set below if return type is not gp.
ValueKind return_kind = sig->GetReturn(0).kind();
// Defaults to a gp reg, will be set below if return kind is not gp.
LiftoffRegister return_reg = LiftoffRegister(kGpReturnRegisters[0]);
if (needs_gp_reg_pair(return_type)) {
if (needs_gp_reg_pair(return_kind)) {
return_reg = LiftoffRegister::ForPair(kGpReturnRegisters[0],
kGpReturnRegisters[1]);
} else if (needs_fp_reg_pair(return_type)) {
} else if (needs_fp_reg_pair(return_kind)) {
return_reg = LiftoffRegister::ForFpPair(kFpReturnRegisters[0]);
} else if (reg_class_for(return_type) == kFpReg) {
} else if (reg_class_for(return_kind) == kFpReg) {
return_reg = LiftoffRegister(kFpReturnRegisters[0]);
} else {
DCHECK_EQ(kGpReg, reg_class_for(return_type));
DCHECK_EQ(kGpReg, reg_class_for(return_kind));
}
stack_transfers.LoadIntoRegister(return_reg,
cache_state_.stack_state.back(),
......@@ -1042,8 +1040,8 @@ void LiftoffAssembler::MoveToReturnLocations(
// Fill return frame slots first to ensure that all potential spills happen
// before we prepare the stack transfers.
for (size_t i = 0; i < sig->return_count(); ++i) {
ValueType return_type = sig->GetReturn(i);
bool needs_gp_pair = needs_gp_reg_pair(return_type);
ValueKind return_kind = sig->GetReturn(i).kind();
bool needs_gp_pair = needs_gp_reg_pair(return_kind);
int num_lowered_params = 1 + needs_gp_pair;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
compiler::LinkageLocation loc =
......@@ -1054,16 +1052,16 @@ void LiftoffAssembler::MoveToReturnLocations(
LiftoffRegister reg = needs_gp_pair
? LoadI64HalfIntoRegister(slot, half)
: LoadToRegister(slot, {});
ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_type);
ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_kind);
}
}
}
// Prepare and execute stack transfers.
call_desc_return_idx = 0;
for (size_t i = 0; i < sig->return_count(); ++i) {
ValueType return_type = sig->GetReturn(i);
bool needs_gp_pair = needs_gp_reg_pair(return_type);
ValueKind return_kind = sig->GetReturn(i).kind();
bool needs_gp_pair = needs_gp_reg_pair(return_kind);
int num_lowered_params = 1 + needs_gp_pair;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
RegPairHalf half = pair_idx == 0 ? kLowWord : kHighWord;
......@@ -1072,10 +1070,10 @@ void LiftoffAssembler::MoveToReturnLocations(
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
int reg_code = loc.AsRegister();
ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
RegClass rc = reg_class_for(lowered_type);
ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
RegClass rc = reg_class_for(lowered_kind);
LiftoffRegister reg =
LiftoffRegister::from_external_code(rc, return_type, reg_code);
LiftoffRegister::from_external_code(rc, return_kind, reg_code);
VarState& slot = slots[i];
if (needs_gp_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, slot.offset(),
......@@ -1194,7 +1192,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
cache_state_.last_spilled_regs.set(slot->reg().low());
cache_state_.last_spilled_regs.set(slot->reg().high());
}
Spill(slot->offset(), slot->reg(), slot->type());
Spill(slot->offset(), slot->reg(), slot->kind());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
......@@ -1206,14 +1204,14 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
DCHECK_EQ(0, num_locals_); // only call this once.
num_locals_ = num_locals;
if (num_locals > kInlineLocalTypes) {
more_local_types_ = reinterpret_cast<ValueType*>(
base::Malloc(num_locals * sizeof(ValueType)));
more_local_types_ = reinterpret_cast<ValueKind*>(
base::Malloc(num_locals * sizeof(ValueKind)));
DCHECK_NOT_NULL(more_local_types_);
}
}
std::ostream& operator<<(std::ostream& os, VarState slot) {
os << slot.type().name() << ":";
os << name(slot.kind()) << ":";
switch (slot.loc()) {
case VarState::kStack:
return os << "s";
......
......@@ -73,25 +73,26 @@ class LiftoffAssembler : public TurboAssembler {
// Each slot in our stack frame currently has exactly 8 bytes.
static constexpr int kStackSlotSize = 8;
static constexpr ValueType kWasmIntPtr =
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
static constexpr ValueKind kIntPtr = kSystemPointerSize == 8 ? kI64 : kI32;
using ValueKindSig = Signature<ValueKind>;
class VarState {
public:
enum Location : uint8_t { kStack, kRegister, kIntConst };
explicit VarState(ValueType type, int offset)
: loc_(kStack), type_(type), spill_offset_(offset) {}
explicit VarState(ValueType type, LiftoffRegister r, int offset)
: loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) {
DCHECK_EQ(r.reg_class(), reg_class_for(type));
explicit VarState(ValueKind kind, int offset)
: loc_(kStack), kind_(kind), spill_offset_(offset) {}
explicit VarState(ValueKind kind, LiftoffRegister r, int offset)
: loc_(kRegister), kind_(kind), reg_(r), spill_offset_(offset) {
DCHECK_EQ(r.reg_class(), reg_class_for(kind));
}
explicit VarState(ValueType type, int32_t i32_const, int offset)
explicit VarState(ValueKind kind, int32_t i32_const, int offset)
: loc_(kIntConst),
type_(type),
kind_(kind),
i32_const_(i32_const),
spill_offset_(offset) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
DCHECK(kind_ == kI32 || kind_ == kI64);
}
bool is_stack() const { return loc_ == kStack; }
......@@ -100,7 +101,7 @@ class LiftoffAssembler : public TurboAssembler {
bool is_reg() const { return loc_ == kRegister; }
bool is_const() const { return loc_ == kIntConst; }
ValueType type() const { return type_; }
ValueKind kind() const { return kind_; }
Location loc() const { return loc_; }
......@@ -109,9 +110,9 @@ class LiftoffAssembler : public TurboAssembler {
return i32_const_;
}
WasmValue constant() const {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
DCHECK(kind_ == kI32 || kind_ == kI64);
DCHECK_EQ(loc_, kIntConst);
return type_ == kWasmI32 ? WasmValue(i32_const_)
return kind_ == kI32 ? WasmValue(i32_const_)
: WasmValue(int64_t{i32_const_});
}
......@@ -133,7 +134,7 @@ class LiftoffAssembler : public TurboAssembler {
}
void MakeConstant(int32_t i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
DCHECK(kind_ == kI32 || kind_ == kI64);
loc_ = kIntConst;
i32_const_ = i32_const;
}
......@@ -142,7 +143,7 @@ class LiftoffAssembler : public TurboAssembler {
// from different stack states.
void Copy(VarState src) {
loc_ = src.loc();
type_ = src.type();
kind_ = src.kind();
if (loc_ == kRegister) {
reg_ = src.reg();
} else if (loc_ == kIntConst) {
......@@ -154,7 +155,7 @@ class LiftoffAssembler : public TurboAssembler {
Location loc_;
// TODO(wasm): This is redundant, the decoder already knows the type of each
// stack value. Try to collapse.
ValueType type_;
ValueKind kind_;
union {
LiftoffRegister reg_; // used if loc_ == kRegister
......@@ -388,13 +389,13 @@ class LiftoffAssembler : public TurboAssembler {
// Use this to pop a value into a register that has no other uses, so it
// can be modified.
LiftoffRegister PopToModifiableRegister(LiftoffRegList pinned = {}) {
ValueType type = cache_state_.stack_state.back().type();
ValueKind kind = cache_state_.stack_state.back().kind();
LiftoffRegister reg = PopToRegister(pinned);
if (cache_state()->is_free(reg)) return reg;
pinned.set(reg);
LiftoffRegister new_reg = GetUnusedRegister(reg.reg_class(), pinned);
Move(new_reg, reg, type);
Move(new_reg, reg, kind);
return new_reg;
}
......@@ -413,10 +414,10 @@ class LiftoffAssembler : public TurboAssembler {
// stack, so that we can merge different values on the back-edge.
void PrepareLoopArgs(int num);
int NextSpillOffset(ValueType type) {
int offset = TopSpillOffset() + SlotSizeForType(type);
if (NeedsAlignment(type)) {
offset = RoundUp(offset, SlotSizeForType(type));
int NextSpillOffset(ValueKind kind) {
int offset = TopSpillOffset() + SlotSizeForType(kind);
if (NeedsAlignment(kind)) {
offset = RoundUp(offset, SlotSizeForType(kind));
}
return offset;
}
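For context (not part of this change): {NextSpillOffset} advances by the slot size and only rounds up when the kind needs alignment (s128 and, on some ports, references). A minimal standalone sketch of the same arithmetic, with the slot size passed in instead of derived from {SlotSizeForType}:

#include <cassert>

// Simplified: in the real code the slot size comes from SlotSizeForType
// (8 bytes, or 16 for s128) and the alignment decision from NeedsAlignment.
int NextSpillOffsetSketch(int top_offset, int slot_size, bool needs_alignment) {
  int offset = top_offset + slot_size;
  if (needs_alignment) {
    offset = ((offset + slot_size - 1) / slot_size) * slot_size;  // RoundUp
  }
  return offset;
}

int main() {
  assert(NextSpillOffsetSketch(8, 8, false) == 16);   // plain 8-byte slot
  assert(NextSpillOffsetSketch(12, 16, true) == 32);  // s128 slot, 16-aligned
}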
......@@ -427,25 +428,25 @@ class LiftoffAssembler : public TurboAssembler {
: cache_state_.stack_state.back().offset();
}
void PushRegister(ValueType type, LiftoffRegister reg) {
DCHECK_EQ(reg_class_for(type), reg.reg_class());
void PushRegister(ValueKind kind, LiftoffRegister reg) {
DCHECK_EQ(reg_class_for(kind), reg.reg_class());
cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type));
cache_state_.stack_state.emplace_back(kind, reg, NextSpillOffset(kind));
}
void PushConstant(ValueType type, int32_t i32_const) {
DCHECK(type == kWasmI32 || type == kWasmI64);
cache_state_.stack_state.emplace_back(type, i32_const,
NextSpillOffset(type));
void PushConstant(ValueKind kind, int32_t i32_const) {
DCHECK(kind == kI32 || kind == kI64);
cache_state_.stack_state.emplace_back(kind, i32_const,
NextSpillOffset(kind));
}
void PushStack(ValueType type) {
cache_state_.stack_state.emplace_back(type, NextSpillOffset(type));
void PushStack(ValueKind kind) {
cache_state_.stack_state.emplace_back(kind, NextSpillOffset(kind));
}
void SpillRegister(LiftoffRegister);
uint32_t GetNumUses(LiftoffRegister reg) {
uint32_t GetNumUses(LiftoffRegister reg) const {
return cache_state_.get_use_count(reg);
}
......@@ -535,32 +536,32 @@ class LiftoffAssembler : public TurboAssembler {
}
// Load parameters into the right registers / stack slots for the call.
void PrepareBuiltinCall(const FunctionSig* sig,
void PrepareBuiltinCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
std::initializer_list<VarState> params);
// Load parameters into the right registers / stack slots for the call.
// Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack.
void PrepareCall(const FunctionSig*, compiler::CallDescriptor*,
void PrepareCall(const ValueKindSig*, compiler::CallDescriptor*,
Register* target = nullptr,
Register* target_instance = nullptr);
// Process return values of the call.
void FinishCall(const FunctionSig*, compiler::CallDescriptor*);
void FinishCall(const ValueKindSig*, compiler::CallDescriptor*);
// Move {src} into {dst}. {src} and {dst} must be different.
void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
void Move(LiftoffRegister dst, LiftoffRegister src, ValueKind);
// Parallel register move: For a list of tuples <dst, src, type>, move the
// {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
// Parallel register move: For a list of tuples <dst, src, kind>, move the
// {src} register of kind {kind} into {dst}. If {src} equals {dst}, ignore
// that tuple.
struct ParallelRegisterMoveTuple {
LiftoffRegister dst;
LiftoffRegister src;
ValueType type;
ValueKind kind;
template <typename Dst, typename Src>
ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
: dst(dst), src(src), type(type) {}
ParallelRegisterMoveTuple(Dst dst, Src src, ValueKind kind)
: dst(dst), src(src), kind(kind) {}
};
void ParallelRegisterMove(Vector<const ParallelRegisterMoveTuple>);
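Not part of the diff: the parallel-move contract above (tuples whose {src} equals {dst} are ignored) can be illustrated with a reduced stand-in for {LiftoffRegister}; the real implementation additionally resolves overlapping and cyclic moves, which this sketch assumes away:

#include <cstdio>
#include <vector>

struct MoveTuple { int dst; int src; char kind; };  // simplified tuple

// Perform the moves, skipping no-op tuples (src == dst); assumes the moves
// are independent, unlike the real ParallelRegisterMove.
void ParallelMoveSketch(const std::vector<MoveTuple>& moves, int regs[]) {
  for (const MoveTuple& m : moves) {
    if (m.src == m.dst) continue;  // "ignore that tuple"
    regs[m.dst] = regs[m.src];
  }
}

int main() {
  int regs[4] = {10, 20, 30, 40};
  ParallelMoveSketch({{0, 1, 'i'}, {2, 2, 'i'}}, regs);
  std::printf("%d %d\n", regs[0], regs[2]);  // prints "20 30"
}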
......@@ -594,8 +595,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void FinishCode();
inline void AbortCompilation();
inline static constexpr int StaticStackFrameSize();
inline static int SlotSizeForType(ValueType type);
inline static bool NeedsAlignment(ValueType type);
inline static int SlotSizeForType(ValueKind kind);
inline static bool NeedsAlignment(ValueKind kind);
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
......@@ -685,19 +686,19 @@ class LiftoffAssembler : public TurboAssembler {
inline void AtomicFence();
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
ValueKind);
inline void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
inline void LoadReturnStackSlot(LiftoffRegister, int offset, ValueType);
ValueKind);
inline void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType);
ValueKind);
inline void Move(Register dst, Register src, ValueType);
inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
inline void Move(Register dst, Register src, ValueKind);
inline void Move(DoubleRegister dst, DoubleRegister src, ValueKind);
inline void Spill(int offset, LiftoffRegister, ValueType);
inline void Spill(int offset, LiftoffRegister, ValueKind);
inline void Spill(int offset, WasmValue);
inline void Fill(LiftoffRegister, int offset, ValueType);
inline void Fill(LiftoffRegister, int offset, ValueKind);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, int offset, RegPairHalf);
......@@ -840,7 +841,7 @@ class LiftoffAssembler : public TurboAssembler {
emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
LiftoffRegister(src));
} else if (dst != src) {
Move(dst, src, kWasmI32);
Move(dst, src, kI32);
}
}
......@@ -906,7 +907,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_jump(Label*);
inline void emit_jump(Register);
inline void emit_cond_jump(LiftoffCondition, Label*, ValueType value,
inline void emit_cond_jump(LiftoffCondition, Label*, ValueKind value,
Register lhs, Register rhs = no_reg);
inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
Register lhs, int imm);
......@@ -926,7 +927,7 @@ class LiftoffAssembler : public TurboAssembler {
// should be emitted instead.
inline bool emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value, ValueType type);
LiftoffRegister false_value, ValueKind kind);
enum SmiCheckMode { kJumpOnSmi, kJumpOnNotSmi };
inline void emit_smi_check(Register obj, Label* target, SmiCheckMode mode);
......@@ -1367,18 +1368,18 @@ class LiftoffAssembler : public TurboAssembler {
inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
// Execute a C call. Arguments are pushed to the stack and a pointer to this
// region is passed to the C function. If {out_argument_type != kWasmStmt},
// region is passed to the C function. If {out_argument_kind != kStmt},
// this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers.
inline void CallC(const FunctionSig* sig, const LiftoffRegister* args,
const LiftoffRegister* rets, ValueType out_argument_type,
inline void CallC(const ValueKindSig* sig, const LiftoffRegister* args,
const LiftoffRegister* rets, ValueKind out_argument_kind,
int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr);
inline void TailCallNativeWasmCode(Address addr);
// Indirect call: If {target == no_reg}, then pop the target from the stack.
inline void CallIndirect(const FunctionSig* sig,
inline void CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target);
inline void TailCallIndirect(Register target);
......@@ -1399,17 +1400,17 @@ class LiftoffAssembler : public TurboAssembler {
int GetTotalFrameSize() const { return max_used_spill_offset_; }
ValueType local_type(uint32_t index) {
ValueKind local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
ValueType* locals =
ValueKind* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
return locals[index];
}
void set_local_type(uint32_t index, ValueType type) {
ValueType* locals =
void set_local_type(uint32_t index, ValueKind kind) {
ValueKind* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
locals[index] = type;
locals[index] = kind;
}
CacheState* cache_state() { return &cache_state_; }
......@@ -1431,13 +1432,13 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
static constexpr uint32_t kInlineLocalTypes = 16;
union {
ValueType local_types_[kInlineLocalTypes];
ValueType* more_local_types_;
ValueKind local_types_[kInlineLocalTypes];
ValueKind* more_local_types_;
};
static_assert(sizeof(ValueType) == 4,
"Reconsider this inlining if ValueType gets bigger");
static_assert(sizeof(ValueKind) == 1,
"Reconsider this inlining if ValueKind gets bigger");
CacheState cache_state_;
// The maximum spill offset for slots in the value stack.
int max_used_spill_offset_ = StaticStackFrameSize();
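An aside, not in the diff: the union above stores up to {kInlineLocalTypes} one-byte kinds inline and falls back to a malloc'ed array beyond that, which is why the static_assert pins sizeof(ValueKind) to 1 (and why the inline capacity can double from 8 to 16). A reduced sketch of that storage scheme:

#include <cassert>
#include <cstdint>
#include <cstdlib>

using Kind = uint8_t;                     // stands in for ValueKind
constexpr uint32_t kInlineLocals = 16;

struct LocalKinds {
  uint32_t count = 0;
  union {
    Kind inline_kinds[kInlineLocals];     // used when count <= kInlineLocals
    Kind* heap_kinds;                     // malloc'ed otherwise
  };
  void set_count(uint32_t n) {
    count = n;
    if (n > kInlineLocals) heap_kinds = static_cast<Kind*>(std::malloc(n));
  }
  Kind* data() { return count <= kInlineLocals ? inline_kinds : heap_kinds; }
};
static_assert(sizeof(Kind) == 1, "inline storage assumes one-byte kinds");

int main() {
  LocalKinds locals;
  locals.set_count(4);                    // stays inline, no allocation
  locals.data()[3] = 7;
  assert(locals.data()[3] == 7);
}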
......@@ -1483,7 +1484,7 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
(assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
assm->Move(dst.low_gp(), tmp, kWasmI32);
assm->Move(dst.low_gp(), tmp, kI32);
}
template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
......@@ -1511,7 +1512,7 @@ void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), low_word);
(assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
assm->Move(dst.low_gp(), tmp, kWasmI32);
assm->Move(dst.low_gp(), tmp, kI32);
}
} // namespace liftoff
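For readers unfamiliar with the 32-bit lowering in the helpers above (context only, not part of the change): an i64 value occupies a pair of 32-bit registers, and the low half is computed into a temporary first so that writing the destination's low half cannot clobber an input still needed for the high half. The same pattern with plain integers:

#include <cassert>
#include <cstdint>

struct I64Pair { uint32_t low, high; };   // stands in for a gp register pair

// Apply a 32-bit op (here: bitwise and) independently to both halves,
// mirroring EmitI64IndependentHalfOperation: low half goes through a temp.
I64Pair And64Sketch(I64Pair lhs, I64Pair rhs) {
  uint32_t tmp = lhs.low & rhs.low;       // low half into a temp first
  I64Pair dst;
  dst.high = lhs.high & rhs.high;
  dst.low = tmp;                          // then move the temp into dst.low
  return dst;
}

int main() {
  I64Pair r = And64Sketch({0xFF00FF00u, 0x1u}, {0x0FF00FF0u, 0x3u});
  assert(r.low == 0x0F000F00u && r.high == 0x1u);
}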
......
......@@ -86,13 +86,12 @@ struct assert_field_size {
constexpr LoadType::LoadTypeValue kPointerLoadType =
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
constexpr ValueType kPointerValueType =
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
constexpr ValueKind kPointerValueType = kSystemPointerSize == 8 ? kI64 : kI32;
#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
constexpr ValueType kSmiValueType = kWasmI32;
constexpr ValueKind kSmiValueType = kI32;
#else
constexpr ValueType kSmiValueType = kWasmI64;
constexpr ValueKind kSmiValueType = kI64;
#endif
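Not part of the diff: the two constants above pick kinds from the build configuration, so a pointer-sized value is kI64 only on 64-bit targets and a Smi is kI32 whenever pointers are 32-bit or compressed. A compile-time sketch with assumed stand-ins for the V8 configuration macros:

#include <cstdint>

enum class Kind : uint8_t { kI32, kI64 };

// Assumed stand-ins; the real values come from V8's build configuration.
constexpr int kSystemPointerSize = sizeof(void*);
constexpr bool kCompressPointers = false;

constexpr Kind kPointerKind = kSystemPointerSize == 8 ? Kind::kI64 : Kind::kI32;
constexpr Kind kSmiKind =
    (kSystemPointerSize == 4 || kCompressPointers) ? Kind::kI32 : Kind::kI64;

static_assert(kSystemPointerSize == 8 ? kPointerKind == Kind::kI64
                                      : kPointerKind == Kind::kI32,
              "pointer-sized kind follows the pointer size");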
#if V8_TARGET_ARCH_ARM64
......@@ -271,23 +270,23 @@ class DebugSideTableBuilder {
for (const auto& slot : stack_state) {
Value new_value;
new_value.index = index;
new_value.type = slot.type();
new_value.kind = slot.kind();
switch (slot.loc()) {
case kIntConst:
new_value.kind = Entry::kConstant;
new_value.storage = Entry::kConstant;
new_value.i32_const = slot.i32_const();
break;
case kRegister:
DCHECK_NE(kDidSpill, assume_spilling);
if (assume_spilling == kAllowRegisters) {
new_value.kind = Entry::kRegister;
new_value.storage = Entry::kRegister;
new_value.reg_code = slot.reg().liftoff_code();
break;
}
DCHECK_EQ(kAssumeSpilling, assume_spilling);
V8_FALLTHROUGH;
case kStack:
new_value.kind = Entry::kStack;
new_value.storage = Entry::kStack;
new_value.stack_offset = slot.offset();
break;
}
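As an illustration only (not in the diff): each value-stack slot is recorded in the debug side table as a constant, a register, or a stack slot, and register slots are downgraded to stack entries when spilling must be assumed. A reduced version of that mapping:

#include <cassert>

enum class Loc { kIntConst, kRegister, kStack };
enum class Storage { kConstant, kRegister, kStack };

// Mirrors the switch above: a register entry is only kept when the caller
// allows registers; under "assume spilling" it falls through to kStack.
Storage StorageFor(Loc loc, bool allow_registers) {
  switch (loc) {
    case Loc::kIntConst: return Storage::kConstant;
    case Loc::kRegister:
      return allow_registers ? Storage::kRegister : Storage::kStack;
    case Loc::kStack:    return Storage::kStack;
  }
  return Storage::kStack;  // unreachable, keeps compilers happy
}

int main() {
  assert(StorageFor(Loc::kRegister, false) == Storage::kStack);
  assert(StorageFor(Loc::kIntConst, true) == Storage::kConstant);
}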
......@@ -405,6 +404,7 @@ class LiftoffCompiler {
};
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
// For debugging, we need to spill registers before a trap or a stack check to
// be able to inspect them.
......@@ -412,7 +412,7 @@ class LiftoffCompiler {
struct Entry {
int offset;
LiftoffRegister reg;
ValueType type;
ValueKind kind;
};
ZoneVector<Entry> entries;
......@@ -541,10 +541,10 @@ class LiftoffCompiler {
return true;
}
bool CheckSupportedType(FullDecoder* decoder, ValueType type,
bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
const char* context) {
LiftoffBailoutReason bailout_reason = kOtherReason;
switch (type.kind()) {
switch (kind) {
case kI32:
case kI64:
case kF32:
......@@ -568,7 +568,7 @@ class LiftoffCompiler {
UNREACHABLE();
}
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s %s", type.name().c_str(), context);
SNPrintF(buffer, "%s %s", name(kind), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
......@@ -599,27 +599,27 @@ class LiftoffCompiler {
int num_locals = decoder->num_locals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
ValueType type = decoder->local_type(i);
__ set_local_type(i, type);
ValueKind kind = decoder->local_type(i).kind();
__ set_local_type(i, kind);
}
}
// Returns the number of inputs processed (1 or 2).
uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
const bool needs_pair = needs_gp_reg_pair(type);
const ValueType reg_type = needs_pair ? kWasmI32 : type;
const RegClass rc = reg_class_for(reg_type);
uint32_t ProcessParameter(ValueKind kind, uint32_t input_idx) {
const bool needs_pair = needs_gp_reg_pair(kind);
const ValueKind reg_kind = needs_pair ? kI32 : kind;
const RegClass rc = reg_class_for(reg_kind);
auto LoadToReg = [this, reg_type, rc](compiler::LinkageLocation location,
auto LoadToReg = [this, reg_kind, rc](compiler::LinkageLocation location,
LiftoffRegList pinned) {
if (location.IsRegister()) {
DCHECK(!location.IsAnyRegister());
return LiftoffRegister::from_external_code(rc, reg_type,
return LiftoffRegister::from_external_code(rc, reg_kind,
location.AsRegister());
}
DCHECK(location.IsCallerFrameSlot());
LiftoffRegister reg = __ GetUnusedRegister(rc, pinned);
__ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_type);
__ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
return reg;
};
......@@ -631,7 +631,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(reg));
reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp());
}
__ PushRegister(type, reg);
__ PushRegister(kind, reg);
return needs_pair ? 2 : 1;
}
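Context for the pair handling above (not part of the diff): on targets that need an i64 register pair, an i64 parameter is lowered to two i32 slots, so the caller advances its input index by the returned count. A minimal sketch of that bookkeeping:

#include <cassert>
#include <cstdint>

enum class Kind : uint8_t { kI32, kI64, kF32, kF64 };

// Assumed stand-in for kNeedI64RegPair (true on 32-bit targets).
constexpr bool kNeedI64RegPair = sizeof(void*) == 4;

// Number of lowered inputs a parameter consumes, like ProcessParameter's
// return value: 2 for i64 on pair targets, otherwise 1.
int LoweredParamCount(Kind kind) {
  const bool needs_pair = kNeedI64RegPair && kind == Kind::kI64;
  return needs_pair ? 2 : 1;
}

int main() {
  assert(LoweredParamCount(Kind::kF64) == 1);
  assert(LoweredParamCount(Kind::kI64) == (kNeedI64RegPair ? 2 : 1));
}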
......@@ -680,8 +680,8 @@ class LiftoffCompiler {
// because other types cannot be initialized to constants.
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
ValueType type = decoder->local_type(param_idx);
if (type != kWasmI32 && type != kWasmI64) return true;
ValueKind kind = __ local_type(param_idx);
if (kind != kI32 && kind != kI64) return true;
}
return false;
}
......@@ -749,16 +749,16 @@ class LiftoffCompiler {
if (SpillLocalsInitially(decoder, num_params)) {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
ValueType type = decoder->local_type(param_idx);
__ PushStack(type);
ValueKind kind = __ local_type(param_idx);
__ PushStack(kind);
}
int spill_size = __ TopSpillOffset() - params_size;
__ FillStackSlotsWithZero(params_size, spill_size);
} else {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
ValueType type = decoder->local_type(param_idx);
__ PushConstant(type, int32_t{0});
ValueKind kind = __ local_type(param_idx);
__ PushConstant(kind, int32_t{0});
}
}
......@@ -767,14 +767,14 @@ class LiftoffCompiler {
Register null_ref_reg = no_reg;
for (uint32_t local_index = num_params; local_index < __ num_locals();
++local_index) {
ValueType type = decoder->local_type(local_index);
if (type.is_reference_type()) {
ValueKind kind = __ local_type(local_index);
if (is_reference_type(kind)) {
if (null_ref_reg == no_reg) {
null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
LoadNullValue(null_ref_reg, {});
}
__ Spill(__ cache_state()->stack_state[local_index].offset(),
LiftoffRegister(null_ref_reg), type);
LiftoffRegister(null_ref_reg), kind);
}
}
}
......@@ -821,8 +821,7 @@ class LiftoffCompiler {
__ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
new_number_of_calls.gp());
// Unary "unequal" means "different from zero".
__ emit_cond_jump(kUnequal, &no_tierup, kWasmI32,
old_number_of_calls.gp());
__ emit_cond_jump(kUnequal, &no_tierup, kI32, old_number_of_calls.gp());
TierUpFunction(decoder);
// After the runtime call, the instance cache register is clobbered (we
// reset it already in {SpillAllRegisters} above, but then we still access
......@@ -868,7 +867,7 @@ class LiftoffCompiler {
__ PushRegisters(ool->regs_to_save);
} else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
for (auto& entry : ool->spilled_registers->entries) {
__ Spill(entry.offset, entry.reg, entry.type);
__ Spill(entry.offset, entry.reg, entry.kind);
}
}
......@@ -906,7 +905,7 @@ class LiftoffCompiler {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
DCHECK(for_debugging_);
for (auto& entry : ool->spilled_registers->entries) {
__ Fill(entry.reg, entry.offset, entry.type);
__ Fill(entry.reg, entry.offset, entry.kind);
}
}
__ emit_jump(ool->continuation.get());
......@@ -978,12 +977,12 @@ class LiftoffCompiler {
{});
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
// Unary "unequal" means "not equals zero".
__ emit_cond_jump(kUnequal, &do_break, kWasmI32, flag);
__ emit_cond_jump(kUnequal, &do_break, kI32, flag);
// Check if we should stop on "script entry".
LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
// Unary "equal" means "equals zero".
__ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
__ emit_cond_jump(kEqual, &no_break, kI32, flag);
__ bind(&do_break);
EmitBreakpoint(decoder);
......@@ -1083,8 +1082,7 @@ class LiftoffCompiler {
// Test the condition, jump to else if zero.
Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
value);
__ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
......@@ -1153,8 +1151,8 @@ class LiftoffCompiler {
void EndControl(FullDecoder* decoder, Control* c) {}
void GenerateCCall(const LiftoffRegister* result_regs, const FunctionSig* sig,
ValueType out_argument_type,
void GenerateCCall(const LiftoffRegister* result_regs,
const ValueKindSig* sig, ValueKind out_argument_kind,
const LiftoffRegister* arg_regs,
ExternalReference ext_ref) {
// Before making a call, spill all cache registers.
......@@ -1162,14 +1160,13 @@ class LiftoffCompiler {
// Store arguments on our stack, then align the stack for calling to C.
int param_bytes = 0;
for (ValueType param_type : sig->parameters()) {
param_bytes += param_type.element_size_bytes();
for (ValueKind param_kind : sig->parameters()) {
param_bytes += element_size_bytes(param_kind);
}
int out_arg_bytes = out_argument_type == kWasmStmt
? 0
: out_argument_type.element_size_bytes();
int out_arg_bytes =
out_argument_kind == kStmt ? 0 : element_size_bytes(out_argument_kind);
int stack_bytes = std::max(param_bytes, out_arg_bytes);
__ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes,
__ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes,
ext_ref);
}
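Not part of the change: {GenerateCCall} sizes a single stack buffer that serves both as the argument area and, when {out_argument_kind} is not kStmt, as the out-argument slot, so the buffer must fit whichever is larger. A standalone sketch of that computation with a simplified size table:

#include <algorithm>
#include <cassert>
#include <vector>

// Simplified element_size_bytes: 'l'/'d' stand for 8-byte kinds, everything
// else is 4 bytes; the real table also covers refs and s128.
int SizeOf(char kind) { return (kind == 'l' || kind == 'd') ? 8 : 4; }

// out_kind == 0 plays the role of kStmt ("no out argument").
int CCallStackBytes(const std::vector<char>& params, char out_kind) {
  int param_bytes = 0;
  for (char k : params) param_bytes += SizeOf(k);
  int out_arg_bytes = out_kind == 0 ? 0 : SizeOf(out_kind);
  return std::max(param_bytes, out_arg_bytes);
}

int main() {
  assert(CCallStackBytes({'l', 'l'}, 'l') == 16);  // e.g. the i64 div helpers
  assert(CCallStackBytes({'i'}, 0) == 4);
}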
......@@ -1219,38 +1216,38 @@ class LiftoffCompiler {
CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
}
template <ValueKind src_type, ValueKind result_type, class EmitFn>
template <ValueKind src_kind, ValueKind result_kind, class EmitFn>
void EmitUnOp(EmitFn fn) {
constexpr RegClass src_rc = reg_class_for(src_type);
constexpr RegClass result_rc = reg_class_for(result_type);
constexpr RegClass src_rc = reg_class_for(src_kind);
constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src}, {})
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
__ PushRegister(ValueType::Primitive(result_type), dst);
__ PushRegister(result_kind, dst);
}
template <ValueKind type>
template <ValueKind kind>
void EmitFloatUnOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
ValueType sig_reps[] = {ValueType::Primitive(type)};
FunctionSig sig(0, 1, sig_reps);
GenerateCCall(&dst, &sig, ValueType::Primitive(type), &src, ext_ref);
ValueKind sig_reps[] = {kind};
ValueKindSig sig(0, 1, sig_reps);
GenerateCCall(&dst, &sig, kind, &src, ext_ref);
};
EmitUnOp<type, type>(emit_with_c_fallback);
EmitUnOp<kind, kind>(emit_with_c_fallback);
}
enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
template <ValueKind dst_type, ValueKind src_type,
template <ValueKind dst_type, ValueKind src_kind,
TypeConversionTrapping can_trap>
void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
WasmCodePosition trap_position) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == dst_rc
......@@ -1266,22 +1263,20 @@ class LiftoffCompiler {
ExternalReference ext_ref = fallback_fn();
if (can_trap) {
// External references for potentially trapping conversions return int.
ValueType sig_reps[] = {kWasmI32, ValueType::Primitive(src_type)};
FunctionSig sig(1, 1, sig_reps);
ValueKind sig_reps[] = {kI32, src_kind};
ValueKindSig sig(1, 1, sig_reps);
LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst};
GenerateCCall(dst_regs, &sig, ValueType::Primitive(dst_type), &src,
ext_ref);
__ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
__ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
} else {
ValueType sig_reps[] = {ValueType::Primitive(src_type)};
FunctionSig sig(0, 1, sig_reps);
GenerateCCall(&dst, &sig, ValueType::Primitive(dst_type), &src,
ext_ref);
ValueKind sig_reps[] = {src_kind};
ValueKindSig sig(0, 1, sig_reps);
GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
}
}
__ PushRegister(ValueType::Primitive(dst_type), dst);
__ PushRegister(dst_type, dst);
}
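An illustrative aside (not in the diff): for potentially trapping conversions the C fallback returns an i32 status, and a zero status makes the generated code jump to the trap label while the converted value travels through the out argument. A reduced sketch of that protocol, with a hypothetical helper standing in for the wasm external references:

#include <cassert>
#include <cstdint>
#include <cmath>

// Hypothetical fallback with the same shape as the C functions used above:
// returns 1 on success, 0 if the input cannot be converted (-> trap).
int F64ToI32Trapping(double in, int32_t* out) {
  if (!(in > -2147483649.0 && in < 2147483648.0)) return 0;  // includes NaN
  *out = static_cast<int32_t>(std::trunc(in));
  return 1;
}

int main() {
  int32_t v = 0;
  assert(F64ToI32Trapping(3.7, &v) == 1 && v == 3);
  assert(F64ToI32Trapping(1e30, &v) == 0);  // would branch to the trap label
}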
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
......@@ -1292,16 +1287,16 @@ class LiftoffCompiler {
#define CASE_I64_UNOP(opcode, fn) \
case kExpr##opcode: \
return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP(opcode, type, fn) \
#define CASE_FLOAT_UNOP(opcode, kind, fn) \
case kExpr##opcode: \
return EmitUnOp<k##type, k##type>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
return EmitUnOp<k##kind, k##kind>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, kind, fn) \
case kExpr##opcode: \
return EmitFloatUnOpWithCFallback<k##type>(&LiftoffAssembler::emit_##fn, \
return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn);
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_kind, ext_ref, can_trap) \
case kExpr##opcode: \
return EmitTypeConversion<k##dst_type, k##src_type, can_trap>( \
return EmitTypeConversion<k##dst_type, k##src_kind, can_trap>( \
kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0);
switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz)
......@@ -1390,9 +1385,9 @@ class LiftoffCompiler {
return EmitUnOp<kI32, kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
FunctionSig sig_i_i(1, 1, sig_i_i_reps);
GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src,
ValueKind sig_i_i_reps[] = {kI32, kI32};
ValueKindSig sig_i_i(1, 1, sig_i_i_reps);
GenerateCCall(&dst, &sig_i_i, kStmt, &src,
ExternalReference::wasm_word32_popcnt());
});
case kExprI64Popcnt:
......@@ -1400,10 +1395,10 @@ class LiftoffCompiler {
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i64_popcnt(dst, src)) return;
// The C function returns i32. We will zero-extend later.
ValueType sig_i_l_reps[] = {kWasmI32, kWasmI64};
FunctionSig sig_i_l(1, 1, sig_i_l_reps);
ValueKind sig_i_l_reps[] = {kI32, kI64};
ValueKindSig sig_i_l(1, 1, sig_i_l_reps);
LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst;
GenerateCCall(&c_call_dst, &sig_i_l, kWasmStmt, &src,
GenerateCCall(&c_call_dst, &sig_i_l, kStmt, &src,
ExternalReference::wasm_word64_popcnt());
// Now zero-extend the result to i64.
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
......@@ -1422,7 +1417,7 @@ class LiftoffCompiler {
// of the comparison.
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
__ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
__ PushRegister(kWasmI32, dst);
__ PushRegister(kI32, dst);
return;
}
default:
......@@ -1435,11 +1430,11 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION
}
template <ValueKind src_type, ValueKind result_type, typename EmitFn,
template <ValueKind src_kind, ValueKind result_kind, typename EmitFn,
typename EmitFnImm>
void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass result_rc = reg_class_for(result_type);
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
// Check if the RHS is an immediate.
......@@ -1456,18 +1451,18 @@ class LiftoffCompiler {
: __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType::Primitive(result_type), dst);
__ PushRegister(result_kind, dst);
} else {
// The RHS was not an immediate.
EmitBinOp<src_type, result_type>(fn);
EmitBinOp<src_kind, result_kind>(fn);
}
}
template <ValueKind src_type, ValueKind result_type,
template <ValueKind src_kind, ValueKind result_kind,
bool swap_lhs_rhs = false, typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass result_rc = reg_class_for(result_type);
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
......@@ -1477,7 +1472,7 @@ class LiftoffCompiler {
if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs);
__ PushRegister(ValueType::Primitive(result_type), dst);
__ PushRegister(result_kind, dst);
}
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
......@@ -1491,16 +1486,15 @@ class LiftoffCompiler {
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
LiftoffRegister arg_regs[] = {lhs, rhs};
LiftoffRegister result_regs[] = {ret, dst};
ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64};
ValueKind sig_kinds[] = {kI32, kI64, kI64};
// <i64, i64> -> i32 (with i64 output argument)
FunctionSig sig(1, 2, sig_types);
GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref);
ValueKindSig sig(1, 2, sig_kinds);
GenerateCCall(result_regs, &sig, kI64, arg_regs, ext_ref);
__ LoadConstant(tmp, WasmValue(int32_t{0}));
__ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp());
__ emit_cond_jump(kEqual, trap_by_zero, kI32, ret.gp(), tmp.gp());
if (trap_unrepresentable) {
__ LoadConstant(tmp, WasmValue(int32_t{-1}));
__ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(),
tmp.gp());
__ emit_cond_jump(kEqual, trap_unrepresentable, kI32, ret.gp(), tmp.gp());
}
}
......@@ -1527,17 +1521,17 @@ class LiftoffCompiler {
amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \
}, \
&LiftoffAssembler::emit_##fn##i);
#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
#define CASE_CCALL_BINOP(opcode, kind, ext_ref_fn) \
case kExpr##opcode: \
return EmitBinOp<k##type, k##type>( \
return EmitBinOp<k##kind, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(); \
ValueType sig_reps[] = {kWasm##type, kWasm##type, kWasm##type}; \
const bool out_via_stack = kWasm##type == kWasmI64; \
FunctionSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \
ValueType out_arg_type = out_via_stack ? kWasmI64 : kWasmStmt; \
GenerateCCall(&dst, &sig, out_arg_type, args, ext_ref); \
ValueKind sig_reps[] = {k##kind, k##kind, k##kind}; \
const bool out_via_stack = k##kind == kI64; \
ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \
ValueKind out_arg_kind = out_via_stack ? kI64 : kStmt; \
GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref); \
});
switch (opcode) {
case kExprI32Add:
......@@ -1806,7 +1800,7 @@ class LiftoffCompiler {
}
void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
__ PushConstant(kWasmI32, value);
__ PushConstant(kI32, value);
}
void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
......@@ -1816,24 +1810,24 @@ class LiftoffCompiler {
// a register immediately.
int32_t value_i32 = static_cast<int32_t>(value);
if (value_i32 == value) {
__ PushConstant(kWasmI64, value_i32);
__ PushConstant(kI64, value_i32);
} else {
LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {});
LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kI64), {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmI64, reg);
__ PushRegister(kI64, reg);
}
}
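Context, not part of the diff: a 64-bit constant is kept as a cheap i32 constant on the value stack whenever it round-trips through int32_t, and only otherwise materialized in a register. The check reduces to:

#include <cassert>
#include <cstdint>

// True if the i64 constant fits the sign-extended i32 constant slot used
// above (the "value_i32 == value" test).
bool FitsInI32Const(int64_t value) {
  return static_cast<int32_t>(value) == value;
}

int main() {
  assert(FitsInI32Const(-1));
  assert(FitsInI32Const(0x7fffffff));
  assert(!FitsInI32Const(int64_t{1} << 31));  // 2147483648 needs a register
}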
void F32Const(FullDecoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
__ PushRegister(kF32, reg);
}
void F64Const(FullDecoder* decoder, Value* result, double value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
__ PushRegister(kF64, reg);
}
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
......@@ -1843,34 +1837,29 @@ class LiftoffCompiler {
}
LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
LoadNullValue(null.gp(), {});
__ PushRegister(type, null);
__ PushRegister(type.kind(), null);
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
WasmCode::RuntimeStubId target = WasmCode::kWasmRefFunc;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(compilation_zone_);
HeapType heap_type(
decoder->enabled_.has_typed_funcref()
? decoder->module_->functions[function_index].sig_index
: HeapType::kFunc);
ValueType func_type = ValueType::Ref(heap_type, kNonNullable);
ValueType sig_reps[] = {func_type, kWasmI32};
FunctionSig sig(1, 1, sig_reps);
ValueKind sig_reps[] = {kRef, kI32};
ValueKindSig sig(1, 1, sig_reps);
LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(func_index_reg, WasmValue(function_index));
LiftoffAssembler::VarState func_index_var(kWasmI32, func_index_reg, 0);
LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {func_index_var});
__ CallRuntimeStub(target);
DefineSafepoint();
__ PushRegister(func_type, LiftoffRegister(kReturnRegister0));
__ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
__ PushRegister(ValueType::Ref(arg.type.heap_type(), kNonNullable), obj);
__ PushRegister(kRef, obj);
}
void Drop(FullDecoder* decoder) { __ DropValues(1); }
......@@ -1888,11 +1877,11 @@ class LiftoffCompiler {
// are not handled yet.
size_t num_returns = decoder->sig_->return_count();
if (num_returns == 1) {
ValueType return_type = decoder->sig_->GetReturn(0);
ValueKind return_kind = decoder->sig_->GetReturn(0).kind();
LiftoffRegister return_reg =
__ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
__ Store(info.gp(), no_reg, 0, return_reg,
StoreType::ForValueType(return_type), pinned);
StoreType::ForValueKind(return_kind), pinned);
}
// Put the parameter in its place.
WasmTraceExitDescriptor descriptor;
......@@ -1900,7 +1889,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
__ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr);
__ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
}
source_position_table_builder_.AddPosition(
......@@ -1925,7 +1914,7 @@ class LiftoffCompiler {
const LocalIndexImmediate<validate>& imm) {
auto local_slot = __ cache_state()->stack_state[imm.index];
__ cache_state()->stack_state.emplace_back(
local_slot.type(), __ NextSpillOffset(local_slot.type()));
local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
auto* slot = &__ cache_state()->stack_state.back();
if (local_slot.is_reg()) {
__ cache_state()->inc_used(local_slot.reg());
......@@ -1934,11 +1923,11 @@ class LiftoffCompiler {
slot->MakeConstant(local_slot.i32_const());
} else {
DCHECK(local_slot.is_stack());
auto rc = reg_class_for(local_slot.type());
auto rc = reg_class_for(local_slot.kind());
LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ cache_state()->inc_used(reg);
slot->MakeRegister(reg);
__ Fill(reg, local_slot.offset(), local_slot.type());
__ Fill(reg, local_slot.offset(), local_slot.kind());
}
}
......@@ -1946,21 +1935,21 @@ class LiftoffCompiler {
uint32_t local_index) {
auto& state = *__ cache_state();
auto& src_slot = state.stack_state.back();
ValueType type = dst_slot->type();
ValueKind kind = dst_slot->kind();
if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot->reg(), src_slot.offset(), type);
__ Fill(dst_slot->reg(), src_slot.offset(), kind);
return;
}
state.dec_used(slot_reg);
dst_slot->MakeStack();
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
DCHECK_EQ(kind, __ local_type(local_index));
RegClass rc = reg_class_for(kind);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ Fill(dst_reg, src_slot.offset(), kind);
*dst_slot = LiftoffAssembler::VarState(kind, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
}
......@@ -2055,11 +2044,12 @@ class LiftoffCompiler {
void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, global->type, "global")) {
ValueKind kind = global->type.kind();
if (!CheckSupportedType(decoder, kind, "global")) {
return;
}
if (global->type.is_reference_type()) {
if (is_reference_type(kind)) {
if (global->mutability && global->imported) {
LiftoffRegList pinned;
Register base = no_reg;
......@@ -2067,7 +2057,7 @@ class LiftoffCompiler {
GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
&base, &offset);
__ LoadTaggedPointer(base, base, offset, 0, pinned);
__ PushRegister(global->type, LiftoffRegister(base));
__ PushRegister(kind, LiftoffRegister(base));
return;
}
......@@ -2081,27 +2071,28 @@ class LiftoffCompiler {
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
imm.global->offset),
pinned);
__ PushRegister(global->type, LiftoffRegister(value));
__ PushRegister(kind, LiftoffRegister(value));
return;
}
LiftoffRegList pinned;
uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LoadType type = LoadType::ForValueType(global->type);
pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned));
LoadType type = LoadType::ForValueKind(kind);
__ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
__ PushRegister(global->type, value);
__ PushRegister(kind, value);
}
void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, global->type, "global")) {
ValueKind kind = global->type.kind();
if (!CheckSupportedType(decoder, kind, "global")) {
return;
}
if (global->type.is_reference_type()) {
if (is_reference_type(kind)) {
if (global->mutability && global->imported) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
......@@ -2129,7 +2120,7 @@ class LiftoffCompiler {
uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueType(global->type);
StoreType type = StoreType::ForValueKind(kind);
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
......@@ -2149,9 +2140,9 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableGetDescriptor>(compilation_zone_);
ValueType result_type = env_->module->tables[imm.index].type;
ValueType sig_reps[] = {result_type, kWasmI32, kWasmI32};
FunctionSig sig(1, 2, sig_reps);
ValueKind result_kind = env_->module->tables[imm.index].type.kind();
ValueKind sig_reps[] = {result_kind, kI32, kI32};
ValueKindSig sig(1, 2, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index});
__ CallRuntimeStub(target);
......@@ -2162,7 +2153,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ PushRegister(result_type, LiftoffRegister(kReturnRegister0));
__ PushRegister(result_kind, LiftoffRegister(kReturnRegister0));
}
void TableSet(FullDecoder* decoder, const Value&, const Value&,
......@@ -2182,9 +2173,9 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableSetDescriptor>(compilation_zone_);
ValueType sig_reps[] = {kWasmI32, kWasmI32,
env_->module->tables[imm.index].type};
FunctionSig sig(0, 3, sig_reps);
ValueKind table_kind = env_->module->tables[imm.index].type.kind();
ValueKind sig_reps[] = {kI32, kI32, table_kind};
ValueKindSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index, value});
__ CallRuntimeStub(target);
......@@ -2211,25 +2202,25 @@ class LiftoffCompiler {
const Value& tval, Value* result) {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
ValueType type = __ cache_state()->stack_state.end()[-1].type();
DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
DCHECK_EQ(kind, __ cache_state()->stack_state.end()[-2].kind());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
{true_value, false_value}, {});
if (!__ emit_select(dst, condition, true_value, false_value, type)) {
if (!__ emit_select(dst, condition, true_value, false_value, kind)) {
// Emit generic code (using branches) instead.
Label cont;
Label case_false;
__ emit_cond_jump(kEqual, &case_false, kWasmI32, condition);
if (dst != true_value) __ Move(dst, true_value, type);
__ emit_cond_jump(kEqual, &case_false, kI32, condition);
if (dst != true_value) __ Move(dst, true_value, kind);
__ emit_jump(&cont);
__ bind(&case_false);
if (dst != false_value) __ Move(dst, false_value, type);
if (dst != false_value) __ Move(dst, false_value, kind);
__ bind(&cont);
}
__ PushRegister(type, dst);
__ PushRegister(kind, dst);
}
void BrImpl(Control* target) {
......@@ -2264,17 +2255,17 @@ class LiftoffCompiler {
if (!has_outstanding_op()) {
// Unary "equal" means "equals zero".
__ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
__ emit_cond_jump(kEqual, &cont_false, kI32, value);
} else if (outstanding_op_ == kExprI32Eqz) {
// Unary "unequal" means "not equals zero".
__ emit_cond_jump(kUnequal, &cont_false, kWasmI32, value);
__ emit_cond_jump(kUnequal, &cont_false, kI32, value);
outstanding_op_ = kNoOutstandingOp;
} else {
// Otherwise, it's an i32 compare opcode.
LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
Register rhs = value;
Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
__ emit_cond_jump(cond, &cont_false, kWasmI32, lhs, rhs);
__ emit_cond_jump(cond, &cont_false, kI32, lhs, rhs);
outstanding_op_ = kNoOutstandingOp;
}
......@@ -2312,7 +2303,7 @@ class LiftoffCompiler {
uint32_t split = min + (max - min) / 2;
Label upper_half;
__ LoadConstant(tmp, WasmValue(split));
__ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
__ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kI32, value.gp(),
tmp.gp());
// Emit br table for lower half:
GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
......@@ -2336,8 +2327,8 @@ class LiftoffCompiler {
LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
__ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
Label case_default;
__ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
value.gp(), tmp.gp());
__ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kI32, value.gp(),
tmp.gp());
GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
&br_targets);
......@@ -2375,7 +2366,7 @@ class LiftoffCompiler {
auto& slot = __ cache_state()->stack_state[i];
if (!slot.is_reg()) continue;
spilled->entries.push_back(SpilledRegistersForInspection::Entry{
slot.offset(), slot.reg(), slot.type()});
slot.offset(), slot.reg(), slot.kind()});
__ RecordUsedSpillOffset(slot.offset());
}
return spilled;
......@@ -2455,7 +2446,7 @@ class LiftoffCompiler {
} else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size);
// Unary "unequal" means "not equals zero".
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, index.high_gp());
__ emit_cond_jump(kUnequal, trap_label, kI32, index.high_gp());
}
uintptr_t end_offset = offset + access_size - 1u;
......@@ -2503,12 +2494,12 @@ class LiftoffCompiler {
// {emit_cond_jump} to use the "test" instruction without the "and" here.
// Then we can also avoid using the temp register here.
__ emit_i32_andi(address, index, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
__ emit_cond_jump(kUnequal, trap_label, kI32, address);
} else {
// For alignment checks we only look at the lower 32-bits in {offset}.
__ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
__ emit_i32_andi(address, address, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
__ emit_cond_jump(kUnequal, trap_label, kI32, address);
}
}
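For context (not part of the change): the alignment check masks the effective address with {align_mask} and traps on any non-zero result, and only the low 32 bits of the offset matter for alignment. A standalone sketch:

#include <cassert>
#include <cstdint>

// align_mask is access_size - 1 for the power-of-two access sizes used by
// the callers of this check.
bool IsMisaligned(uint32_t index, uint64_t offset, uint32_t access_size) {
  const uint32_t align_mask = access_size - 1;
  // Only the lower 32 bits of {offset} affect alignment.
  const uint32_t address = index + static_cast<uint32_t>(offset);
  return (address & align_mask) != 0;  // non-zero -> jump to the trap label
}

int main() {
  assert(!IsMisaligned(8, 0, 4));
  assert(IsMisaligned(6, 0, 4));
  assert(!IsMisaligned(6, 2, 8));  // 6 + 2 = 8, aligned for an 8-byte access
}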
......@@ -2558,7 +2549,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
__ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr);
__ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
}
source_position_table_builder_.AddPosition(__ pc_offset(),
......@@ -2601,8 +2592,8 @@ class LiftoffCompiler {
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
if (!CheckSupportedType(decoder, value_type, "load")) return;
ValueKind kind = type.value_type().kind();
if (!CheckSupportedType(decoder, kind, "load")) return;
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDontForceCheck);
......@@ -2614,7 +2605,7 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
RegClass rc = reg_class_for(value_type);
RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
__ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
......@@ -2623,7 +2614,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
__ PushRegister(value_type, value);
__ PushRegister(kind, value);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
......@@ -2637,7 +2628,7 @@ class LiftoffCompiler {
const Value& index_val, Value* result) {
// LoadTransform requires SIMD support, so check for it here. If
// unsupported, bailout and let TurboFan lower the code.
if (!CheckSupportedType(decoder, kWasmS128, "LoadTransform")) {
if (!CheckSupportedType(decoder, kS128, "LoadTransform")) {
return;
}
......@@ -2667,7 +2658,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
__ PushRegister(ValueType::Primitive(kS128), value);
__ PushRegister(kS128, value);
if (FLAG_trace_wasm_memory) {
// Again load extend is different.
......@@ -2682,7 +2673,7 @@ class LiftoffCompiler {
void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value,
const Value& _index, const MemoryAccessImmediate<validate>& imm,
const uint8_t laneidx, Value* _result) {
if (!CheckSupportedType(decoder, kWasmS128, "LoadLane")) {
if (!CheckSupportedType(decoder, kS128, "LoadLane")) {
return;
}
......@@ -2710,7 +2701,7 @@ class LiftoffCompiler {
protected_load_pc);
}
__ PushRegister(ValueType::Primitive(kS128), result);
__ PushRegister(kS128, result);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
......@@ -2721,8 +2712,8 @@ class LiftoffCompiler {
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
if (!CheckSupportedType(decoder, value_type, "store")) return;
ValueKind kind = type.value_type().kind();
if (!CheckSupportedType(decoder, kind, "store")) return;
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
LiftoffRegister full_index = __ PopToRegister(pinned);
......@@ -2755,7 +2746,7 @@ class LiftoffCompiler {
void StoreLane(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& _index, const Value& _value, const uint8_t lane) {
if (!CheckSupportedType(decoder, kWasmS128, "StoreLane")) return;
if (!CheckSupportedType(decoder, kS128, "StoreLane")) return;
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
LiftoffRegister full_index = __ PopToRegister(pinned);
......@@ -2794,7 +2785,7 @@ class LiftoffCompiler {
__ LoadConstant(high_word, WasmValue{uint32_t{0}});
result = LiftoffRegister::ForPair(mem_size, high_word.gp());
}
__ PushRegister(env_->module->is_memory64 ? kWasmI64 : kWasmI32, result);
__ PushRegister(env_->module->is_memory64 ? kI64 : kI32, result);
}
void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) {
......@@ -2812,20 +2803,20 @@ class LiftoffCompiler {
WasmMemoryGrowDescriptor descriptor;
DCHECK_EQ(0, descriptor.GetStackParameterCount());
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
DCHECK_EQ(kWasmI32.machine_type(), descriptor.GetParameterType(0));
DCHECK_EQ(machine_type(kI32), descriptor.GetParameterType(0));
Register param_reg = descriptor.GetRegisterParameter(0);
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kI32);
__ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
if (kReturnRegister0 != result.gp()) {
__ Move(result.gp(), kReturnRegister0, kWasmI32);
__ Move(result.gp(), kReturnRegister0, kI32);
}
__ PushRegister(kWasmI32, result);
__ PushRegister(kI32, result);
}
void RegisterDebugSideTableEntry(
......@@ -2894,18 +2885,18 @@ class LiftoffCompiler {
LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
LoadNullValue(null, pinned);
__ emit_cond_jump(kUnequal, &cont_false, ref_object.type, ref.gp(), null);
__ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(),
null);
BrOrRet(decoder, depth);
__ bind(&cont_false);
__ PushRegister(ValueType::Ref(ref_object.type.heap_type(), kNonNullable),
ref);
__ PushRegister(kRef, ref);
}
template <ValueKind src_type, ValueKind result_type, typename EmitFn>
template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitTerOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass result_rc = reg_class_for(result_type);
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src3 = __ PopToRegister();
LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3));
LiftoffRegister src1 =
......@@ -2918,7 +2909,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src1, src2))
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
__ PushRegister(ValueType::Primitive(result_type), dst);
__ PushRegister(result_kind, dst);
}
template <typename EmitFn, typename EmitFnImm>
......@@ -2935,30 +2926,30 @@ class LiftoffCompiler {
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fnImm, dst, operand, imm);
__ PushRegister(kWasmS128, dst);
__ PushRegister(kS128, dst);
} else {
LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fn, dst, operand, count);
__ PushRegister(kWasmS128, dst);
__ PushRegister(kS128, dst);
}
}
void EmitSimdFloatRoundingOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
ExternalReference (*ext_ref)()) {
static constexpr RegClass rc = reg_class_for(kWasmS128);
static constexpr RegClass rc = reg_class_for(kS128);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {});
if (!(asm_.*emit_fn)(dst, src)) {
// Return v128 via stack for ARM.
ValueType sig_v_s_reps[] = {kWasmS128};
FunctionSig sig_v_s(0, 1, sig_v_s_reps);
GenerateCCall(&dst, &sig_v_s, kWasmS128, &src, ext_ref());
ValueKind sig_v_s_reps[] = {kS128};
ValueKindSig sig_v_s(0, 1, sig_v_s_reps);
GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
}
__ PushRegister(kWasmS128, dst);
__ PushRegister(kS128, dst);
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
......@@ -3438,17 +3429,17 @@ class LiftoffCompiler {
}
}
template <ValueKind src_type, ValueKind result_type, typename EmitFn>
template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitSimdExtractLaneOp(EmitFn fn,
const SimdLaneImmediate<validate>& imm) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass result_rc = reg_class_for(result_type);
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs}, {})
: __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane);
__ PushRegister(ValueType::Primitive(result_type), dst);
__ PushRegister(result_kind, dst);
}
template <ValueKind src2_type, typename EmitFn>
......@@ -3474,7 +3465,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src2))
: __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
__ PushRegister(kWasmS128, dst);
__ PushRegister(kS128, dst);
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
......@@ -3484,9 +3475,9 @@ class LiftoffCompiler {
return unsupported(decoder, kSimd, "simd");
}
switch (opcode) {
#define CASE_SIMD_EXTRACT_LANE_OP(opcode, type, fn) \
#define CASE_SIMD_EXTRACT_LANE_OP(opcode, kind, fn) \
case wasm::kExpr##opcode: \
EmitSimdExtractLaneOp<kS128, k##type>( \
EmitSimdExtractLaneOp<kS128, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \
__ emit_##fn(dst, lhs, imm_lane_idx); \
}, \
......@@ -3501,9 +3492,9 @@ class LiftoffCompiler {
CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane)
CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane)
#undef CASE_SIMD_EXTRACT_LANE_OP
#define CASE_SIMD_REPLACE_LANE_OP(opcode, type, fn) \
#define CASE_SIMD_REPLACE_LANE_OP(opcode, kind, fn) \
case wasm::kExpr##opcode: \
EmitSimdReplaceLaneOp<k##type>( \
EmitSimdReplaceLaneOp<k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
uint8_t imm_lane_idx) { \
__ emit_##fn(dst, src1, src2, imm_lane_idx); \
......@@ -3541,7 +3532,7 @@ class LiftoffCompiler {
} else {
__ LiftoffAssembler::emit_s128_const(dst, imm.value);
}
__ PushRegister(kWasmS128, dst);
__ PushRegister(kS128, dst);
}
void Simd8x16ShuffleOp(FullDecoder* decoder,
......@@ -3566,7 +3557,7 @@ class LiftoffCompiler {
std::swap(lhs, rhs);
}
__ LiftoffAssembler::emit_i8x16_shuffle(dst, lhs, rhs, shuffle, is_swizzle);
__ PushRegister(kWasmS128, dst);
__ PushRegister(kS128, dst);
}
void ToSmi(Register reg) {
......@@ -3624,9 +3615,9 @@ class LiftoffCompiler {
GetBuiltinCallDescriptor<WasmAllocateFixedArrayDescriptor>(
compilation_zone_);
ValueType create_values_sig_reps[] = {kPointerValueType,
LiftoffAssembler::kWasmIntPtr};
FunctionSig create_values_sig(1, 1, create_values_sig_reps);
ValueKind create_values_sig_reps[] = {kPointerValueType,
LiftoffAssembler::kIntPtr};
ValueKindSig create_values_sig(1, 1, create_values_sig_reps);
__ PrepareBuiltinCall(
&create_values_sig, create_values_descriptor,
......@@ -3673,8 +3664,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* throw_descriptor =
GetBuiltinCallDescriptor<WasmThrowDescriptor>(compilation_zone_);
ValueType throw_sig_reps[] = {kPointerValueType, kPointerValueType};
FunctionSig throw_sig(0, 2, throw_sig_reps);
ValueKind throw_sig_reps[] = {kPointerValueType, kPointerValueType};
ValueKindSig throw_sig(0, 2, throw_sig_reps);
__ PrepareBuiltinCall(
&throw_sig, throw_descriptor,
......@@ -3717,7 +3708,7 @@ class LiftoffCompiler {
void AtomicLoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm) {
ValueType value_type = type.value_type();
ValueKind kind = type.value_type().kind();
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
......@@ -3730,10 +3721,10 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
RegClass rc = reg_class_for(value_type);
RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
__ AtomicLoad(value, addr, index, offset, type, pinned);
__ PushRegister(value_type, value);
__ PushRegister(kind, value);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
......@@ -3747,7 +3738,7 @@ class LiftoffCompiler {
uintptr_t, LiftoffRegister,
LiftoffRegister,
StoreType)) {
ValueType result_type = type.value_type();
ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
#ifdef V8_TARGET_ARCH_IA32
......@@ -3758,7 +3749,7 @@ class LiftoffCompiler {
LiftoffRegister result = value;
if (__ cache_state()->is_used(value)) {
result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
__ Move(result, value, result_type);
__ Move(result, value, result_kind);
pinned.clear(value);
value = result;
}
......@@ -3780,7 +3771,7 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
(asm_.*emit_fn)(addr, index, offset, value, result, type);
__ PushRegister(result_type, result);
__ PushRegister(result_kind, result);
}
void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
......@@ -3816,10 +3807,10 @@ class LiftoffCompiler {
// assembler now.
__ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
type);
__ PushRegister(type.value_type(), result);
__ PushRegister(type.value_type().kind(), result);
return;
#else
ValueType result_type = type.value_type();
ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned;
LiftoffRegister new_value = pinned.set(__ PopToRegister());
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
......@@ -3835,11 +3826,11 @@ class LiftoffCompiler {
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(reg_class_for(result_type), pinned));
pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
__ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
type);
__ PushRegister(result_type, result);
__ PushRegister(result_kind, result);
#endif
}
......@@ -3855,15 +3846,15 @@ class LiftoffCompiler {
StubCallMode::kCallWasmRuntimeStub); // stub call mode
}
void AtomicWait(FullDecoder* decoder, ValueType type,
void AtomicWait(FullDecoder* decoder, ValueKind kind,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(2, {});
Register index_reg =
BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset,
BoundsCheckMem(decoder, element_size_bytes(kind), imm.offset,
full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
AlignmentCheckMem(decoder, type.element_size_bytes(), imm.offset, index_reg,
AlignmentCheckMem(decoder, element_size_bytes(kind), imm.offset, index_reg,
pinned);
uintptr_t offset = imm.offset;
......@@ -3890,7 +3881,7 @@ class LiftoffCompiler {
WasmCode::RuntimeStubId target;
compiler::CallDescriptor* call_descriptor;
if (type == kWasmI32) {
if (kind == kI32) {
if (kNeedI64RegPair) {
target = WasmCode::kWasmI32AtomicWait32;
call_descriptor =
......@@ -3916,8 +3907,8 @@ class LiftoffCompiler {
}
}
ValueType sig_reps[] = {kPointerValueType, type, kWasmI64};
FunctionSig sig(0, 3, sig_reps);
ValueKind sig_reps[] = {kPointerValueType, kind, kI64};
ValueKindSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{index, expected_value, timeout});
......@@ -3928,19 +3919,17 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0));
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
void AtomicNotify(FullDecoder* decoder,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(1, {});
Register index_reg =
BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
AlignmentCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
index_reg, pinned);
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
......@@ -3954,8 +3943,8 @@ class LiftoffCompiler {
__ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
FunctionSig sig(1, 2, sig_reps);
ValueKind sig_reps[] = {kI32, kPointerValueType, kI32};
ValueKindSig sig(1, 2, sig_reps);
auto call_descriptor =
GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
......@@ -3971,7 +3960,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0));
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
#define ATOMIC_STORE_LIST(V) \
......@@ -4081,10 +4070,10 @@ class LiftoffCompiler {
#undef ATOMIC_COMPARE_EXCHANGE_OP
case kExprI32AtomicWait:
AtomicWait(decoder, kWasmI32, imm);
AtomicWait(decoder, kI32, imm);
break;
case kExprI64AtomicWait:
AtomicWait(decoder, kWasmI64, imm);
AtomicWait(decoder, kI64, imm);
break;
case kExprAtomicNotify:
AtomicNotify(decoder, imm);
......@@ -4117,18 +4106,17 @@ class LiftoffCompiler {
__ LoadConstant(segment_index, WasmValue(imm.data_segment_index));
ExternalReference ext_ref = ExternalReference::wasm_memory_init();
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32,
kWasmI32, kWasmI32, kWasmI32};
FunctionSig sig(1, 5, sig_reps);
ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32, kI32};
ValueKindSig sig(1, 5, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
segment_index, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
......@@ -4142,8 +4130,7 @@ class LiftoffCompiler {
LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Scale the seg_index for the array access.
__ LoadConstant(seg_index,
WasmValue(imm.index << kWasmI32.element_size_log2()));
__ LoadConstant(seg_index, WasmValue(imm.index << element_size_log2(kI32)));
// Set the length of the segment to '0' to drop it.
LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
......@@ -4162,17 +4149,16 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32,
kWasmI32};
FunctionSig sig(1, 4, sig_reps);
ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
ValueKindSig sig(1, 4, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void MemoryFill(FullDecoder* decoder,
......@@ -4185,23 +4171,22 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32,
kWasmI32};
FunctionSig sig(1, 4, sig_reps);
ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
ValueKindSig sig(1, 4, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void LoadSmi(LiftoffRegister reg, int value) {
Address smi_value = Smi::FromInt(value).ptr();
using smi_type =
std::conditional_t<kSmiValueType == kWasmI32, int32_t, int64_t>;
std::conditional_t<kSmiValueType == kI32, int32_t, int64_t>;
__ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)});
}
......@@ -4229,9 +4214,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableInitDescriptor>(compilation_zone_);
ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32, kSmiValueType,
kSmiValueType};
FunctionSig sig(0, 5, sig_reps);
ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
ValueKindSig sig(0, 5, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{dst, src, size, table_index, segment_index});
......@@ -4287,9 +4271,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(compilation_zone_);
ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32, kSmiValueType,
kSmiValueType};
FunctionSig sig(0, 5, sig_reps);
ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
ValueKindSig sig(0, 5, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{dst, src, size, table_dst_index, table_src_index});
......@@ -4320,13 +4303,12 @@ class LiftoffCompiler {
void StructNew(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm, const Value& rtt,
bool initial_values_on_stack) {
ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
compilation_zone_);
ValueType sig_reps[] = {struct_value_type, rtt.type};
FunctionSig sig(1, 1, sig_reps);
ValueKind sig_reps[] = {kRef, rtt.type.kind()};
ValueKindSig sig(1, 1, sig_reps);
LiftoffAssembler::VarState rtt_value =
__ cache_state()->stack_state.end()[-1];
__ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});
......@@ -4340,19 +4322,19 @@ class LiftoffCompiler {
for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
i--;
int offset = StructFieldOffset(imm.struct_type, i);
ValueType field_type = imm.struct_type->field(i);
ValueKind field_kind = imm.struct_type->field(i).kind();
LiftoffRegister value = initial_values_on_stack
? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister(
reg_class_for(field_type), pinned));
reg_class_for(field_kind), pinned));
if (!initial_values_on_stack) {
if (!CheckSupportedType(decoder, field_type, "default value")) return;
SetDefaultValue(value, field_type, pinned);
if (!CheckSupportedType(decoder, field_kind, "default value")) return;
SetDefaultValue(value, field_kind, pinned);
}
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
pinned.clear(value);
}
__ PushRegister(struct_value_type, obj);
__ PushRegister(kRef, obj);
}
void StructNewWithRtt(FullDecoder* decoder,
......@@ -4371,34 +4353,34 @@ class LiftoffCompiler {
const FieldIndexImmediate<validate>& field, bool is_signed,
Value* result) {
const StructType* struct_type = field.struct_index.struct_type;
ValueType field_type = struct_type->field(field.index);
if (!CheckSupportedType(decoder, field_type, "field load")) return;
ValueKind field_kind = struct_type->field(field.index).kind();
if (!CheckSupportedType(decoder, field_kind, "field load")) return;
int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
LiftoffRegister value =
__ GetUnusedRegister(reg_class_for(field_type), pinned);
LoadObjectField(value, obj.gp(), no_reg, offset, field_type, is_signed,
__ GetUnusedRegister(reg_class_for(field_kind), pinned);
LoadObjectField(value, obj.gp(), no_reg, offset, field_kind, is_signed,
pinned);
__ PushRegister(field_type.Unpacked(), value);
__ PushRegister(unpacked(field_kind), value);
}
void StructSet(FullDecoder* decoder, const Value& struct_obj,
const FieldIndexImmediate<validate>& field,
const Value& field_value) {
const StructType* struct_type = field.struct_index.struct_type;
ValueType field_type = struct_type->field(field.index);
ValueKind field_kind = struct_type->field(field.index).kind();
int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
}
void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
ValueType rtt_type, bool initial_value_on_stack) {
ValueKind rtt_type, bool initial_value_on_stack) {
// Max length check.
{
LiftoffRegister length =
......@@ -4408,24 +4390,23 @@ class LiftoffCompiler {
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
static_cast<int>(wasm::kV8MaxWasmArrayLength));
}
ValueType array_value_type = ValueType::Ref(imm.index, kNonNullable);
ValueType elem_type = imm.array_type->element_type();
int elem_size = elem_type.element_size_bytes();
ValueKind elem_kind = imm.array_type->element_type().kind();
int elem_size = element_size_bytes(elem_kind);
// Allocate the array.
{
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateArrayWithRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateArrayWithRttDescriptor>(
compilation_zone_);
ValueType sig_reps[] = {array_value_type, rtt_type, kWasmI32, kWasmI32};
FunctionSig sig(1, 3, sig_reps);
ValueKind sig_reps[] = {kRef, rtt_type, kI32, kI32};
ValueKindSig sig(1, 3, sig_reps);
LiftoffAssembler::VarState rtt_var =
__ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState length_var =
__ cache_state()->stack_state.end()[-2];
LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(elem_size_reg, WasmValue(elem_size));
LiftoffAssembler::VarState elem_size_var(kWasmI32, elem_size_reg, 0);
LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor,
{rtt_var, length_var, elem_size_var});
__ CallRuntimeStub(target);
......@@ -4440,10 +4421,10 @@ class LiftoffCompiler {
LiftoffRegister value = initial_value_on_stack
? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister(
reg_class_for(elem_type), pinned));
reg_class_for(elem_kind), pinned));
if (!initial_value_on_stack) {
if (!CheckSupportedType(decoder, elem_type, "default value")) return;
SetDefaultValue(value, elem_type, pinned);
if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
SetDefaultValue(value, elem_kind, pinned);
}
// Initialize the array's elements.
......@@ -4452,34 +4433,34 @@ class LiftoffCompiler {
offset,
WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
LiftoffRegister end_offset = length;
if (elem_type.element_size_log2() != 0) {
if (element_size_log2(elem_kind) != 0) {
__ emit_i32_shli(end_offset.gp(), length.gp(),
elem_type.element_size_log2());
element_size_log2(elem_kind));
}
__ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
Label loop, done;
__ bind(&loop);
__ emit_cond_jump(kUnsignedGreaterEqual, &done, kWasmI32, offset.gp(),
__ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
end_offset.gp());
StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_type);
StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
__ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
__ emit_jump(&loop);
__ bind(&done);
__ PushRegister(array_value_type, obj);
__ PushRegister(kRef, obj);
}
void ArrayNewWithRtt(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length_value, const Value& initial_value,
const Value& rtt, Value* result) {
ArrayNew(decoder, imm, rtt.type, true);
ArrayNew(decoder, imm, rtt.type.kind(), true);
}
void ArrayNewDefault(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length, const Value& rtt, Value* result) {
ArrayNew(decoder, imm, rtt.type, false);
ArrayNew(decoder, imm, rtt.type.kind(), false);
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
......@@ -4490,17 +4471,17 @@ class LiftoffCompiler {
LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheck(decoder, array, index, pinned);
ValueType elem_type = imm.array_type->element_type();
if (!CheckSupportedType(decoder, elem_type, "array load")) return;
int elem_size_shift = elem_type.element_size_log2();
ValueKind elem_kind = imm.array_type->element_type().kind();
if (!CheckSupportedType(decoder, elem_kind, "array load")) return;
int elem_size_shift = element_size_log2(elem_kind);
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
LiftoffRegister value = __ GetUnusedRegister(kGpReg, {array}, pinned);
LoadObjectField(value, array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
elem_type, is_signed, pinned);
__ PushRegister(elem_type.Unpacked(), value);
elem_kind, is_signed, pinned);
__ PushRegister(unpacked(elem_kind), value);
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
......@@ -4512,14 +4493,14 @@ class LiftoffCompiler {
LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheck(decoder, array, index, pinned);
ValueType elem_type = imm.array_type->element_type();
int elem_size_shift = elem_type.element_size_log2();
ValueKind elem_kind = imm.array_type->element_type().kind();
int elem_size_shift = element_size_log2(elem_kind);
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
StoreObjectField(array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
value, pinned, elem_type);
value, pinned, elem_kind);
}
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
......@@ -4528,9 +4509,8 @@ class LiftoffCompiler {
MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type);
LiftoffRegister len = __ GetUnusedRegister(kGpReg, pinned);
int kLengthOffset = wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kWasmI32, false,
pinned);
__ PushRegister(kWasmI32, len);
LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kI32, false, pinned);
__ PushRegister(kI32, len);
}
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
......@@ -4546,7 +4526,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_shli(dst, src, kI31To32BitSmiShift);
}
__ PushRegister(kWasmI31Ref, dst);
__ PushRegister(kRef, dst);
}
void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
......@@ -4558,7 +4538,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_sari(dst, src, kI31To32BitSmiShift);
}
__ PushRegister(kWasmI32, dst);
__ PushRegister(kI32, dst);
}
void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
......@@ -4570,7 +4550,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_shri(dst, src, kI31To32BitSmiShift);
}
__ PushRegister(kWasmI32, dst);
__ PushRegister(kI32, dst);
}
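Editorial note (not part of the diff): the bit-layout comment above ("1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation") is why the shift used by I31New, I31GetS and I31GetU adds up to 33 on platforms where SmiValuesAre32Bits(), matching the DCHECKs in those handlers. A minimal stand-alone sketch of that arithmetic; the helper and constant names are illustrative, not V8 API:

#include <cstdint>

// Sketch only: mirrors the emit_i64_shli / emit_i64_sari / emit_i64_shri
// sequences above, assuming 32-bit Smi values on a 64-bit platform.
constexpr int kI31To32BitSmiShiftSketch = 1 + 31 + 1;  // tag + Smi shift + truncation

int64_t I31NewSketch(int32_t value) {
  // i31.new: move the 31-bit payload into the upper Smi bits.
  return static_cast<int64_t>(static_cast<uint64_t>(int64_t{value})
                              << kI31To32BitSmiShiftSketch);
}

int32_t I31GetSSketch(int64_t smi) {
  // i31.get_s: arithmetic shift sign-extends the 31-bit payload.
  return static_cast<int32_t>(smi >> kI31To32BitSmiShiftSketch);
}

uint32_t I31GetUSketch(int64_t smi) {
  // i31.get_u: logical shift zero-extends the 31-bit payload.
  return static_cast<uint32_t>(static_cast<uint64_t>(smi) >>
                               kI31To32BitSmiShiftSketch);
}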
void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
......@@ -4579,24 +4559,23 @@ class LiftoffCompiler {
__ LoadTaggedPointer(
rtt.gp(), rtt.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index), {});
__ PushRegister(ValueType::Rtt(type_index, 1), rtt);
__ PushRegister(kRttWithDepth, rtt);
}
void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
Value* result) {
ValueType parent_value_type = parent.type;
ValueType rtt_value_type =
ValueType::Rtt(type_index, parent_value_type.depth() + 1);
ValueKind parent_value_kind = parent.type.kind();
ValueKind rtt_value_type = kRttWithDepth;
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
ValueType sig_reps[] = {rtt_value_type, kWasmI32, parent_value_type};
FunctionSig sig(1, 2, sig_reps);
ValueKind sig_reps[] = {rtt_value_type, kI32, parent_value_kind};
ValueKindSig sig(1, 2, sig_reps);
LiftoffAssembler::VarState parent_var =
__ cache_state()->stack_state.end()[-1];
LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(type_reg, WasmValue(type_index));
LiftoffAssembler::VarState type_var(kWasmI32, type_reg, 0);
LiftoffAssembler::VarState type_var(kI32, type_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
__ CallRuntimeStub(target);
DefineSafepoint();
......@@ -4630,8 +4609,8 @@ class LiftoffCompiler {
LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
if (obj.type.is_nullable()) {
LoadNullValue(tmp1.gp(), pinned);
__ emit_cond_jump(kEqual, null_succeeds ? &match : no_match, obj.type,
obj_reg.gp(), tmp1.gp());
__ emit_cond_jump(kEqual, null_succeeds ? &match : no_match,
obj.type.kind(), obj_reg.gp(), tmp1.gp());
}
// Perform a regular type check. Check for exact match first.
......@@ -4641,7 +4620,8 @@ class LiftoffCompiler {
if (decoder->module_->has_signature(rtt.type.ref_index())) {
// Function case: currently, the only way for a function to match an rtt
// is if its map is equal to that rtt.
__ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp());
__ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
rtt_reg.gp());
__ bind(&match);
return obj_reg;
}
......@@ -4650,7 +4630,7 @@ class LiftoffCompiler {
// Check for rtt equality, and if not, check if the rtt is a struct/array
// rtt.
__ emit_cond_jump(kEqual, &match, rtt.type, tmp1.gp(), rtt_reg.gp());
__ emit_cond_jump(kEqual, &match, rtt.type.kind(), tmp1.gp(), rtt_reg.gp());
// Constant-time subtyping check: load exactly one candidate RTT from the
// supertypes list.
......@@ -4674,7 +4654,8 @@ class LiftoffCompiler {
tmp1.gp(), tmp1.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
pinned);
__ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp());
__ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
rtt_reg.gp());
} else {
// Preserve {obj_reg} across the call.
LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg);
......@@ -4683,15 +4664,15 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmSubtypeCheckDescriptor>(
compilation_zone_);
ValueType sig_reps[] = {kWasmI32, kWasmAnyRef, rtt.type};
FunctionSig sig(1, 2, sig_reps);
ValueKind sig_reps[] = {kI32, kOptRef, rtt.type.kind()};
ValueKindSig sig(1, 2, sig_reps);
LiftoffAssembler::VarState rtt_state(kPointerValueType, rtt_reg, 0);
LiftoffAssembler::VarState tmp1_state(kPointerValueType, tmp1, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {tmp1_state, rtt_state});
__ CallRuntimeStub(target);
DefineSafepoint();
__ PopRegisters(saved_regs);
__ Move(tmp1.gp(), kReturnRegister0, kWasmI32);
__ Move(tmp1.gp(), kReturnRegister0, kI32);
__ emit_i32_cond_jumpi(kEqual, no_match, tmp1.gp(), 0);
}
......@@ -4716,7 +4697,7 @@ class LiftoffCompiler {
__ bind(&return_false);
__ LoadConstant(result, WasmValue(0));
__ bind(&done);
__ PushRegister(kWasmI32, result);
__ PushRegister(kI32, result);
}
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
......@@ -4725,8 +4706,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapIllegalCast);
LiftoffRegister obj_reg =
SubtypeCheck(decoder, obj, rtt, trap_label, kNullSucceeds);
__ PushRegister(
ValueType::Ref(rtt.type.ref_index(), obj.type.nullability()), obj_reg);
__ PushRegister(obj.type.kind(), obj_reg);
}
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
......@@ -4742,17 +4722,13 @@ class LiftoffCompiler {
LiftoffRegister obj_reg =
SubtypeCheck(decoder, obj, rtt, &cont_false, kNullFails);
__ PushRegister(
rtt.type.is_bottom()
? kWasmBottom
: ValueType::Ref(rtt.type.ref_index(), obj.type.nullability()),
obj_reg);
__ PushRegister(rtt.type.is_bottom() ? kBottom : obj.type.kind(), obj_reg);
BrOrRet(decoder, depth);
__ bind(&cont_false);
// Drop the branch's value, restore original value.
Drop(decoder);
__ PushRegister(obj.type, obj_reg);
__ PushRegister(obj.type.kind(), obj_reg);
}
// Abstract type checkers. They all return the object register and fall
......@@ -4771,7 +4747,7 @@ class LiftoffCompiler {
if (obj.type.is_nullable()) {
LoadNullValue(tmp1.gp(), pinned);
__ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp());
__ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
}
__ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
......@@ -4796,7 +4772,7 @@ class LiftoffCompiler {
if (obj.type.is_nullable()) {
LoadNullValue(tmp1.gp(), pinned);
__ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp());
__ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
}
__ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
......@@ -4841,7 +4817,7 @@ class LiftoffCompiler {
__ bind(&no_match);
__ LoadConstant(result, WasmValue(0));
__ bind(&done);
__ PushRegister(kWasmI32, result);
__ PushRegister(kI32, result);
}
void RefIsData(FullDecoder* /* decoder */, const Value& object,
......@@ -4861,36 +4837,33 @@ class LiftoffCompiler {
template <TypeChecker type_checker>
void AbstractTypeCast(const Value& object, FullDecoder* decoder,
ValueType result_type) {
ValueKind result_kind) {
Label* trap_label = AddOutOfLineTrap(decoder->position(),
WasmCode::kThrowWasmTrapIllegalCast);
Label match;
LiftoffRegister obj_reg =
(this->*type_checker)(object, trap_label, {}, no_reg);
__ bind(&match);
__ PushRegister(result_type, obj_reg);
__ PushRegister(result_kind, obj_reg);
}
void RefAsData(FullDecoder* decoder, const Value& object,
Value* /* result */) {
return AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder,
kWasmDataRef);
return AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder, kRef);
}
void RefAsFunc(FullDecoder* decoder, const Value& object,
Value* /* result */) {
return AbstractTypeCast<&LiftoffCompiler::FuncCheck>(
object, decoder, ValueType::Ref(HeapType::kFunc, kNonNullable));
return AbstractTypeCast<&LiftoffCompiler::FuncCheck>(object, decoder, kRef);
}
void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder,
kWasmI31Ref);
return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef);
}
template <TypeChecker type_checker>
void BrOnAbstractType(const Value& object, FullDecoder* decoder,
uint32_t br_depth, ValueType result_type) {
uint32_t br_depth, ValueKind result_kind) {
// Before branching, materialize all constants. This avoids repeatedly
// materializing them for each conditional branch.
if (br_depth != decoder->control_depth() - 1) {
......@@ -4903,32 +4876,31 @@ class LiftoffCompiler {
(this->*type_checker)(object, &no_match, {}, no_reg);
__ bind(&match);
__ PushRegister(result_type, obj_reg);
__ PushRegister(result_kind, obj_reg);
BrOrRet(decoder, br_depth);
__ bind(&no_match);
// Drop the branch's value, restore original value.
Drop(decoder);
__ PushRegister(object.type, obj_reg);
__ PushRegister(object.type.kind(), obj_reg);
}
void BrOnData(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::DataCheck>(
object, decoder, br_depth, kWasmDataRef);
return BrOnAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
br_depth, kRef);
}
void BrOnFunc(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::FuncCheck>(
object, decoder, br_depth,
ValueType::Ref(HeapType::kFunc, kNonNullable));
return BrOnAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder,
br_depth, kRef);
}
void BrOnI31(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder,
br_depth, kWasmI31Ref);
br_depth, kRef);
}
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
......@@ -4936,10 +4908,20 @@ class LiftoffCompiler {
}
private:
ValueKindSig* MakeKindSig(Zone* zone, const FunctionSig* sig) {
ValueKind* reps =
zone->NewArray<ValueKind>(sig->parameter_count() + sig->return_count());
ValueKind* ptr = reps;
for (ValueType type : sig->all()) *ptr++ = type.kind();
return zone->New<ValueKindSig>(sig->return_count(), sig->parameter_count(),
reps);
}
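Editorial note (not part of the diff): MakeKindSig copies the reps of a FunctionSig into a ValueKindSig in the order sig->all() iterates, i.e. return kinds first, then parameter kinds. An illustrative fragment of that layout, assuming the usual Signature<> constructor and accessors:

// Illustrative only: a ValueKindSig describing "i64 f(i32, f64)".
// Return kinds come first in the reps array, followed by parameter kinds.
void ValueKindSigLayoutSketch() {
  static constexpr ValueKind kReps[] = {kI64, kI32, kF64};
  ValueKindSig sig(/*return_count=*/1, /*parameter_count=*/2, kReps);
  DCHECK_EQ(kI64, sig.GetReturn(0));  // single return
  DCHECK_EQ(kF64, sig.GetParam(1));   // second parameter
}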
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[], CallKind call_kind) {
for (ValueType ret : imm.sig->returns()) {
ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
......@@ -4969,7 +4951,7 @@ class LiftoffCompiler {
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
Register* explicit_instance = &imported_function_ref;
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
__ PrepareCall(sig, call_descriptor, &target, explicit_instance);
if (call_kind == kReturnCall) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->StackParameterCount()),
......@@ -4979,11 +4961,11 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(imm.sig, call_descriptor, target);
__ CallIndirect(sig, call_descriptor, target);
}
} else {
// A direct call within this module just gets the current instance.
__ PrepareCall(imm.sig, call_descriptor);
__ PrepareCall(sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
if (call_kind == kReturnCall) {
......@@ -5003,16 +4985,17 @@ class LiftoffCompiler {
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ FinishCall(imm.sig, call_descriptor);
__ FinishCall(sig, call_descriptor);
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
CallKind call_kind) {
ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
if (imm.table_index != 0) {
return unsupported(decoder, kRefTypes, "table index != 0");
}
for (ValueType ret : imm.sig->returns()) {
for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
......@@ -5038,8 +5021,8 @@ class LiftoffCompiler {
// {instance->indirect_function_table_size}.
LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
pinned);
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
index, tmp_const);
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
tmp_const);
// Mask the index to prevent SSCA.
if (FLAG_untrusted_code_mitigations) {
......@@ -5078,8 +5061,8 @@ class LiftoffCompiler {
Label* sig_mismatch_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
__ emit_cond_jump(kUnequal, sig_mismatch_label,
LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
__ emit_cond_jump(kUnequal, sig_mismatch_label, LiftoffAssembler::kIntPtr,
scratch, tmp_const);
// At this point {index} has already been multiplied by 4.
DEBUG_CODE_COMMENT("Execute indirect call");
......@@ -5117,7 +5100,7 @@ class LiftoffCompiler {
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
Register target = scratch;
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
__ PrepareCall(sig, call_descriptor, &target, explicit_instance);
if (call_kind == kReturnCall) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->StackParameterCount()),
......@@ -5127,22 +5110,23 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(imm.sig, call_descriptor, target);
__ CallIndirect(sig, call_descriptor, target);
}
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
__ FinishCall(imm.sig, call_descriptor);
__ FinishCall(sig, call_descriptor);
}
void CallRef(FullDecoder* decoder, ValueType func_ref_type,
const FunctionSig* sig, CallKind call_kind) {
for (ValueType ret : sig->returns()) {
const FunctionSig* type_sig, CallKind call_kind) {
ValueKindSig* sig = MakeKindSig(compilation_zone_, type_sig);
for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
compiler::CallDescriptor* call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, sig);
compiler::GetWasmCallDescriptor(compilation_zone_, type_sig);
call_descriptor =
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
......@@ -5211,7 +5195,7 @@ class LiftoffCompiler {
imported_function_refs.gp(), pinned);
Label imported;
__ emit_cond_jump(kSignedLessThan, &imported, kWasmI32, func_index.gp(),
__ emit_cond_jump(kSignedLessThan, &imported, kI32, func_index.gp(),
imported_functions_num.gp());
{
......@@ -5293,19 +5277,19 @@ class LiftoffCompiler {
compiler::CallDescriptor* builtin_call_descriptor =
GetBuiltinCallDescriptor<WasmAllocatePairDescriptor>(
compilation_zone_);
ValueType sig_reps[] = {kWasmAnyRef, kWasmAnyRef, kWasmAnyRef};
FunctionSig builtin_sig(1, 2, sig_reps);
ValueKind sig_reps[] = {kOptRef, kOptRef, kOptRef};
ValueKindSig builtin_sig(1, 2, sig_reps);
LiftoffRegister current_instance = instance;
__ FillInstanceInto(current_instance.gp());
LiftoffAssembler::VarState instance_var(kWasmAnyRef, current_instance, 0);
LiftoffAssembler::VarState callable_var(kWasmFuncRef, callable, 0);
LiftoffAssembler::VarState instance_var(kOptRef, current_instance, 0);
LiftoffAssembler::VarState callable_var(kOptRef, callable, 0);
__ PrepareBuiltinCall(&builtin_sig, builtin_call_descriptor,
{instance_var, callable_var});
__ CallRuntimeStub(builtin);
DefineSafepoint();
if (instance.gp() != kReturnRegister0) {
__ Move(instance.gp(), kReturnRegister0, LiftoffAssembler::kWasmIntPtr);
__ Move(instance.gp(), kReturnRegister0, LiftoffAssembler::kIntPtr);
}
// Restore {func_data}, which we saved across the call.
......@@ -5357,7 +5341,7 @@ class LiftoffCompiler {
decoder->position(), WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned);
__ emit_cond_jump(LiftoffCondition::kEqual, trap_label, type, object,
__ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kOptRef, object,
null.gp());
}
......@@ -5370,8 +5354,8 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
__ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load,
pinned);
__ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label,
kWasmI32, index.gp(), length.gp());
__ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label, kI32,
index.gp(), length.gp());
}
int StructFieldOffset(const StructType* struct_type, int field_index) {
......@@ -5380,33 +5364,33 @@ class LiftoffCompiler {
}
void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg,
int offset, ValueType type, bool is_signed,
int offset, ValueKind kind, bool is_signed,
LiftoffRegList pinned) {
if (type.is_reference_type()) {
if (is_reference_type(kind)) {
__ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
} else {
// Primitive type.
LoadType load_type = LoadType::ForValueType(type, is_signed);
// Primitive kind.
LoadType load_type = LoadType::ForValueKind(kind, is_signed);
__ Load(dst, src, offset_reg, offset, load_type, pinned);
}
}
void StoreObjectField(Register obj, Register offset_reg, int offset,
LiftoffRegister value, LiftoffRegList pinned,
ValueType type) {
if (type.is_reference_type()) {
ValueKind kind) {
if (is_reference_type(kind)) {
__ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
} else {
// Primitive type.
StoreType store_type = StoreType::ForValueType(type);
// Primitive kind.
StoreType store_type = StoreType::ForValueKind(kind);
__ Store(obj, offset_reg, offset, value, store_type, pinned);
}
}
void SetDefaultValue(LiftoffRegister reg, ValueType type,
void SetDefaultValue(LiftoffRegister reg, ValueKind kind,
LiftoffRegList pinned) {
DCHECK(type.is_defaultable());
switch (type.kind()) {
DCHECK(is_defaultable(kind));
switch (kind) {
case kI8:
case kI16:
case kI32:
......
......@@ -45,12 +45,12 @@ static_assert(kNeedS128RegPair == (kFpRegPair != kNoReg),
enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 };
static inline constexpr bool needs_gp_reg_pair(ValueType type) {
return kNeedI64RegPair && type == kWasmI64;
static inline constexpr bool needs_gp_reg_pair(ValueKind kind) {
return kNeedI64RegPair && kind == kI64;
}
static inline constexpr bool needs_fp_reg_pair(ValueType type) {
return kNeedS128RegPair && type == kWasmS128;
static inline constexpr bool needs_fp_reg_pair(ValueKind kind) {
return kNeedS128RegPair && kind == kS128;
}
static inline constexpr RegClass reg_class_for(ValueKind kind) {
......@@ -72,14 +72,10 @@ static inline constexpr RegClass reg_class_for(ValueKind kind) {
case kRttWithDepth:
return kGpReg;
default:
return kNoReg; // unsupported type
return kNoReg; // unsupported kind
}
}
static inline constexpr RegClass reg_class_for(ValueType type) {
return reg_class_for(type.kind());
}
// Description of LiftoffRegister code encoding.
// This example uses the ARM architecture, which as of writing has:
// - 9 GP registers, requiring 4 bits
......@@ -192,9 +188,9 @@ class LiftoffRegister {
// Shifts the register code depending on the type before converting to a
// LiftoffRegister.
static LiftoffRegister from_external_code(RegClass rc, ValueType type,
static LiftoffRegister from_external_code(RegClass rc, ValueKind kind,
int code) {
if (!kSimpleFPAliasing && type == kWasmF32) {
if (!kSimpleFPAliasing && kind == kF32) {
// Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order
......@@ -202,7 +198,7 @@ class LiftoffRegister {
DCHECK_EQ(0, code % 2);
return LiftoffRegister::from_code(rc, code >> 1);
}
if (kNeedS128RegPair && type == kWasmS128) {
if (kNeedS128RegPair && kind == kS128) {
// Similarly for double registers and SIMD registers, the SIMD code
// needs to be doubled to pass the f64 code to Liftoff.
return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1));
......
......@@ -84,8 +84,8 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
ValueType type) {
switch (type.kind()) {
ValueKind kind) {
switch (kind) {
case kI32:
assm->movl(dst.gp(), src);
break;
......@@ -111,8 +111,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
}
inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
ValueType type) {
switch (type.kind()) {
ValueKind kind) {
switch (kind) {
case kI32:
assm->movl(dst, src.gp());
break;
......@@ -133,8 +133,8 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
}
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type.kind()) {
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
switch (kind) {
case kI32:
case kI64:
case kRef:
......@@ -243,13 +243,13 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
return type.is_reference_type() ? kSystemPointerSize
: type.element_size_bytes();
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
return is_reference_type(kind) ? kSystemPointerSize
: element_size_bytes(kind);
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.is_reference_type();
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -778,66 +778,66 @@ void LiftoffAssembler::AtomicFence() { mfence(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
ValueKind kind) {
Operand src(rbp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Load(this, dst, src, type);
liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
ValueType type) {
ValueKind kind) {
Operand dst(rbp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Store(this, dst, src, type);
liftoff::Store(this, dst, src, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset,
ValueType type) {
ValueKind kind) {
Operand src(rsp, offset);
liftoff::Load(this, reg, src, type);
liftoff::Load(this, reg, src, kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset);
if (type.element_size_log2() == 2) {
if (element_size_log2(kind) == 2) {
movl(kScratchRegister, src);
movl(dst, kScratchRegister);
} else {
DCHECK_EQ(3, type.element_size_log2());
DCHECK_EQ(3, element_size_log2(kind));
movq(kScratchRegister, src);
movq(dst, kScratchRegister);
}
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
if (type == kWasmI32) {
if (kind == kI32) {
movl(dst, src);
} else {
DCHECK(kWasmI64 == type || type.is_reference_type());
DCHECK(kI64 == kind || is_reference_type(kind));
movq(dst, src);
}
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
ValueKind kind) {
DCHECK_NE(dst, src);
if (type == kWasmF32) {
if (kind == kF32) {
Movss(dst, src);
} else if (type == kWasmF64) {
} else if (kind == kF64) {
Movsd(dst, src);
} else {
DCHECK_EQ(kWasmS128, type);
DCHECK_EQ(kS128, kind);
Movapd(dst, src);
}
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (type.kind()) {
switch (kind) {
case kI32:
movl(dst, reg.gp());
break;
......@@ -889,8 +889,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type);
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
liftoff::Load(this, reg, liftoff::GetStackSlot(offset), kind);
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
......@@ -1142,16 +1142,16 @@ void LiftoffAssembler::emit_i32_xori(Register dst, Register lhs, int32_t imm) {
}
namespace liftoff {
template <ValueKind type>
template <ValueKind kind>
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount,
void (Assembler::*emit_shift)(Register)) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
assm->Move(kScratchRegister, src, ValueType::Primitive(type));
if (amount != rcx) assm->Move(rcx, amount, ValueType::Primitive(type));
assm->Move(kScratchRegister, src, kind);
if (amount != rcx) assm->Move(rcx, amount, kind);
(assm->*emit_shift)(kScratchRegister);
assm->Move(rcx, kScratchRegister, ValueType::Primitive(type));
assm->Move(rcx, kScratchRegister, kind);
return;
}
......@@ -1163,11 +1163,11 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
src == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx);
if (src == rcx) src = kScratchRegister;
assm->Move(rcx, amount, ValueType::Primitive(type));
assm->Move(rcx, amount, kind);
}
// Do the actual shift.
if (dst != src) assm->Move(dst, src, ValueType::Primitive(type));
if (dst != src) assm->Move(dst, src, kind);
(assm->*emit_shift)(dst);
// Restore rcx if needed.
......@@ -2050,11 +2050,11 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueType type,
Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
switch (type.kind()) {
switch (kind) {
case kI32:
cmpl(lhs, rhs);
break;
......@@ -2071,7 +2071,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
UNREACHABLE();
}
} else {
DCHECK_EQ(type, kWasmI32);
DCHECK_EQ(kind, kI32);
testl(lhs, lhs);
}
......@@ -2160,12 +2160,12 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
ValueType type) {
if (type != kWasmI32 && type != kWasmI64) return false;
ValueKind kind) {
if (kind != kI32 && kind != kI64) return false;
testl(condition, condition);
if (type == kWasmI32) {
if (kind == kI32) {
if (dst == false_value) {
cmovl(not_zero, dst.gp(), true_value.gp());
} else {
......@@ -4385,17 +4385,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_type);
arg_bytes += param_type.element_size_bytes();
for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_kind);
arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
......@@ -4420,8 +4420,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
if (out_argument_type != kWasmStmt) {
liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type);
if (out_argument_kind != kStmt) {
liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_kind);
}
addq(rsp, Immediate(stack_bytes));
......@@ -4435,7 +4435,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
near_jmp(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
......@@ -4481,12 +4481,12 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
if (src.type() == kWasmI32) {
if (src.kind() == kI32) {
// Load i32 values to a register first to ensure they are zero
// extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister);
} else if (src.type() == kWasmS128) {
} else if (src.kind() == kS128) {
// Since offsets are subtracted from sp, we need a smaller offset to
// push the top of a s128 value.
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8));
......@@ -4500,7 +4500,7 @@ void LiftoffStackSlots::Construct() {
}
break;
case LiftoffAssembler::VarState::kRegister:
liftoff::push(asm_, src.reg(), src.type());
liftoff::push(asm_, src.reg(), src.kind());
break;
case LiftoffAssembler::VarState::kIntConst:
asm_->pushq(Immediate(src.i32_const()));
......
......@@ -179,12 +179,94 @@ enum ValueKind : uint8_t {
#undef DEF_ENUM
};
// A ValueType is encoded by three components: A Kind, a heap representation
// (for reference types), and an inheritance depth (for rtts only). Those are
// encoded into 32 bits using base::BitField. The underlying Kind enumeration
// includes four elements which do not strictly correspond to value types: the
// two packed types i8 and i16, the type of void blocks (stmt), and a bottom
// value (for internal use).
constexpr bool is_reference_type(ValueKind kind) {
return kind == kRef || kind == kOptRef || kind == kRtt ||
kind == kRttWithDepth;
}
constexpr bool is_object_reference_type(ValueKind kind) {
return kind == kRef || kind == kOptRef;
}
constexpr int element_size_log2(ValueKind kind) {
constexpr int8_t kElementSizeLog2[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
#undef ELEM_SIZE_LOG2
};
int size_log_2 = kElementSizeLog2[kind];
CONSTEXPR_DCHECK(size_log_2 >= 0);
return size_log_2;
}
constexpr int element_size_bytes(ValueKind kind) {
constexpr int8_t kElementSize[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) \
log2Size == -1 ? -1 : (1 << std::max(0, log2Size)),
FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
#undef ELEM_SIZE_LOG2
};
int size = kElementSize[kind];
CONSTEXPR_DCHECK(size > 0);
return size;
}
constexpr char short_name(ValueKind kind) {
constexpr char kShortName[] = {
#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
FOREACH_VALUE_TYPE(SHORT_NAME)
#undef SHORT_NAME
};
return kShortName[kind];
}
constexpr const char* name(ValueKind kind) {
constexpr const char* kKindName[] = {
#define KIND_NAME(kind, log2Size, code, machineType, shortName, kindName, ...) \
kindName,
FOREACH_VALUE_TYPE(KIND_NAME)
#undef TYPE_NAME
};
return kKindName[kind];
}
constexpr MachineType machine_type(ValueKind kind) {
CONSTEXPR_DCHECK(kBottom != kind);
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
MachineType::machineType(),
FOREACH_VALUE_TYPE(MACH_TYPE)
#undef MACH_TYPE
};
return kMachineType[kind];
}
constexpr bool is_packed(ValueKind kind) { return kind == kI8 || kind == kI16; }
constexpr ValueKind unpacked(ValueKind kind) {
return is_packed(kind) ? kI32 : kind;
}
constexpr bool is_rtt(ValueKind kind) {
return kind == kRtt || kind == kRttWithDepth;
}
constexpr bool is_defaultable(ValueKind kind) {
CONSTEXPR_DCHECK(kind != kBottom && kind != kStmt);
return kind != kRef && !is_rtt(kind);
}
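Editorial note (not part of the diff): these free functions replace the corresponding ValueType member functions so Liftoff can query them with only a ValueKind in hand. A few compile-time spot checks that follow directly from the definitions above (sketch only; assumes the functions stay usable in constant expressions, as they are declared constexpr):

// Sketch: spot checks against the definitions above.
static_assert(element_size_bytes(kI32) == 4, "i32 occupies 4 bytes");
static_assert(element_size_bytes(kS128) == 16, "s128 occupies 16 bytes");
static_assert(element_size_log2(kI64) == 3, "2^3 == 8 bytes");
static_assert(unpacked(kI8) == kI32, "packed kinds are handled as i32");
static_assert(is_reference_type(kOptRef) && !is_reference_type(kF64),
              "only ref / optref / rtt kinds are references");
static_assert(!is_defaultable(kRef), "non-null references have no default value");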
// A ValueType is encoded by three components: A ValueKind, a heap
// representation (for reference types), and an inheritance depth (for rtts
// only). Those are encoded into 32 bits using base::BitField. The underlying
// ValueKind enumeration includes four elements which do not strictly correspond
// to value types: the two packed types i8 and i16, the type of void blocks
// (stmt), and a bottom value (for internal use).
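Editorial note (not part of the diff): a self-contained sketch of the packing this comment describes. The real class uses base::BitField and keeps the actual field definitions in its private section (not shown in this hunk); the widths below are illustrative only, chosen so the three fields together occupy bits 0..30, consistent with kLastUsedBit == 30 further down.

#include <cstdint>

// Illustrative widths: 5 kind bits + 20 heap-type bits + 6 depth bits.
constexpr uint32_t kKindBitsSketch = 5;
constexpr uint32_t kHeapBitsSketch = 20;
constexpr uint32_t kDepthBitsSketch = 6;

constexpr uint32_t EncodeSketch(uint32_t kind, uint32_t heap_rep, uint32_t depth) {
  return kind | (heap_rep << kKindBitsSketch) |
         (depth << (kKindBitsSketch + kHeapBitsSketch));
}

constexpr uint32_t KindOfSketch(uint32_t bit_field) {
  return bit_field & ((uint32_t{1} << kKindBitsSketch) - 1);
}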
class ValueType {
public:
/******************************* Constructors *******************************/
......@@ -224,12 +306,11 @@ class ValueType {
/******************************** Type checks *******************************/
constexpr bool is_reference_type() const {
return kind() == kRef || kind() == kOptRef || kind() == kRtt ||
kind() == kRttWithDepth;
return wasm::is_reference_type(kind());
}
constexpr bool is_object_reference_type() const {
return kind() == kRef || kind() == kOptRef;
return wasm::is_object_reference_type(kind());
}
constexpr bool is_nullable() const { return kind() == kOptRef; }
......@@ -239,23 +320,18 @@ class ValueType {
heap_representation() == htype;
}
constexpr bool is_rtt() const {
return kind() == kRtt || kind() == kRttWithDepth;
}
constexpr bool is_rtt() const { return wasm::is_rtt(kind()); }
constexpr bool has_depth() const { return kind() == kRttWithDepth; }
constexpr bool has_index() const {
return is_rtt() || (is_object_reference_type() && heap_type().is_index());
}
constexpr bool is_defaultable() const {
CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
return kind() != kRef && !is_rtt();
}
constexpr bool is_defaultable() const { return wasm::is_defaultable(kind()); }
constexpr bool is_bottom() const { return kind() == kBottom; }
constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
constexpr bool is_packed() const { return wasm::is_packed(kind()); }
constexpr ValueType Unpacked() const {
return is_packed() ? Primitive(kI32) : *this;
......@@ -301,42 +377,16 @@ class ValueType {
}
constexpr int element_size_log2() const {
constexpr int8_t kElementSizeLog2[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
#undef ELEM_SIZE_LOG2
};
int size_log_2 = kElementSizeLog2[kind()];
CONSTEXPR_DCHECK(size_log_2 >= 0);
return size_log_2;
return wasm::element_size_log2(kind());
}
constexpr int element_size_bytes() const {
constexpr int8_t kElementSize[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) \
log2Size == -1 ? -1 : (1 << std::max(0, log2Size)),
FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
#undef ELEM_SIZE_LOG2
};
int size = kElementSize[kind()];
CONSTEXPR_DCHECK(size > 0);
return size;
return wasm::element_size_bytes(kind());
}
/*************************** Machine-type related ***************************/
constexpr MachineType machine_type() const {
CONSTEXPR_DCHECK(kBottom != kind());
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
MachineType::machineType(),
FOREACH_VALUE_TYPE(MACH_TYPE)
#undef MACH_TYPE
};
return kMachineType[kind()];
return wasm::machine_type(kind());
}
constexpr MachineRepresentation machine_representation() const {
......@@ -427,15 +477,7 @@ class ValueType {
static constexpr int kLastUsedBit = 30;
/****************************** Pretty-printing *****************************/
constexpr char short_name() const {
constexpr char kShortName[] = {
#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
FOREACH_VALUE_TYPE(SHORT_NAME)
#undef SHORT_NAME
};
return kShortName[kind()];
}
constexpr char short_name() const { return wasm::short_name(kind()); }
std::string name() const {
std::ostringstream buf;
......@@ -483,16 +525,7 @@ class ValueType {
constexpr explicit ValueType(uint32_t bit_field) : bit_field_(bit_field) {}
constexpr const char* kind_name() const {
constexpr const char* kTypeName[] = {
#define KIND_NAME(kind, log2Size, code, machineType, shortName, typeName, ...) \
typeName,
FOREACH_VALUE_TYPE(KIND_NAME)
#undef TYPE_NAME
};
return kTypeName[kind()];
}
constexpr const char* kind_name() const { return wasm::name(kind()); }
uint32_t bit_field_;
};
......@@ -573,8 +606,8 @@ class LoadType {
constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineType mem_type() const { return kMemType[val_]; }
static LoadType ForValueType(ValueType type, bool is_signed = false) {
switch (type.kind()) {
static LoadType ForValueKind(ValueKind kind, bool is_signed = false) {
switch (kind) {
case kI32:
return kI32Load;
case kI64:
......@@ -649,8 +682,8 @@ class StoreType {
constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineRepresentation mem_rep() const { return kMemRep[val_]; }
static StoreType ForValueType(ValueType type) {
switch (type.kind()) {
static StoreType ForValueKind(ValueKind kind) {
switch (kind) {
case kI32:
return kI32Store;
case kI64:
......
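Reviewer note: a minimal standalone sketch (reduced set of kinds, illustrative sizes; not the actual v8 headers) of the pattern this file now follows. The per-kind lookup tables move into free functions taking a ValueKind, and the ValueType members merely delegate, which is what lets Liftoff pass bare kinds around while validation keeps the full type.

// sketch.cc -- illustrative only; kinds and sizes are a reduced subset.
#include <cstdint>

namespace wasm {

enum ValueKind : uint8_t { kI32, kI64, kF32, kF64, kS128, kRef, kOptRef };

constexpr int element_size_log2(ValueKind kind) {
  // One entry per kind; reference kinds assumed to be 8-byte pointers here.
  constexpr int8_t kLog2[] = {2, 3, 2, 3, 4, 3, 3};
  return kLog2[kind];
}

constexpr int element_size_bytes(ValueKind kind) {
  return 1 << element_size_log2(kind);
}

constexpr bool is_reference_type(ValueKind kind) {
  return kind == kRef || kind == kOptRef;
}

// ValueType keeps its public interface but only forwards to the free
// functions, as the hunks above do for is_reference_type(), is_rtt(),
// element_size_bytes(), machine_type(), short_name(), and name().
class ValueType {
 public:
  constexpr explicit ValueType(ValueKind kind) : kind_(kind) {}
  constexpr ValueKind kind() const { return kind_; }
  constexpr bool is_reference_type() const {
    return wasm::is_reference_type(kind());
  }
  constexpr int element_size_bytes() const {
    return wasm::element_size_bytes(kind());
  }

 private:
  ValueKind kind_;
};

}  // namespace wasm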
......@@ -95,8 +95,8 @@ void DebugSideTable::Entry::Print(std::ostream& os) const {
os << std::setw(6) << std::hex << pc_offset_ << std::dec << " stack height "
<< stack_height_ << " [";
for (auto& value : changed_values_) {
os << " " << value.type.name() << ":";
switch (value.kind) {
os << " " << name(value.kind) << ":";
switch (value.storage) {
case kConstant:
os << "const#" << value.i32_const;
break;
......@@ -510,8 +510,8 @@ class DebugInfoImpl {
const auto* value =
debug_side_table->FindValue(debug_side_table_entry, index);
if (value->is_constant()) {
DCHECK(value->type == kWasmI32 || value->type == kWasmI64);
return value->type == kWasmI32 ? WasmValue(value->i32_const)
DCHECK(value->kind == kI32 || value->kind == kI64);
return value->kind == kI32 ? WasmValue(value->i32_const)
: WasmValue(int64_t{value->i32_const});
}
......@@ -523,14 +523,14 @@ class DebugInfoImpl {
reg.code());
};
if (reg.is_gp_pair()) {
DCHECK_EQ(kWasmI64, value->type);
DCHECK_EQ(kI64, value->kind);
uint32_t low_word = ReadUnalignedValue<uint32_t>(gp_addr(reg.low_gp()));
uint32_t high_word =
ReadUnalignedValue<uint32_t>(gp_addr(reg.high_gp()));
return WasmValue((uint64_t{high_word} << 32) | low_word);
}
if (reg.is_gp()) {
return value->type == kWasmI32
return value->kind == kI32
? WasmValue(ReadUnalignedValue<uint32_t>(gp_addr(reg.gp())))
: WasmValue(ReadUnalignedValue<uint64_t>(gp_addr(reg.gp())));
}
......@@ -544,11 +544,11 @@ class DebugInfoImpl {
Address spilled_addr =
debug_break_fp +
WasmDebugBreakFrameConstants::GetPushedFpRegisterOffset(code);
if (value->type == kWasmF32) {
if (value->kind == kF32) {
return WasmValue(ReadUnalignedValue<float>(spilled_addr));
} else if (value->type == kWasmF64) {
} else if (value->kind == kF64) {
return WasmValue(ReadUnalignedValue<double>(spilled_addr));
} else if (value->type == kWasmS128) {
} else if (value->kind == kS128) {
return WasmValue(Simd128(ReadUnalignedValue<int16>(spilled_addr)));
} else {
// All other cases should have been handled above.
......@@ -558,7 +558,7 @@ class DebugInfoImpl {
// Otherwise load the value from the stack.
Address stack_address = stack_frame_base - value->stack_offset;
switch (value->type.kind()) {
switch (value->kind) {
case kI32:
return WasmValue(ReadUnalignedValue<int32_t>(stack_address));
case kI64:
......
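Reviewer note: for the debugger side, a self-contained sketch (WasmValue and the unaligned read are simplified stand-ins, not the v8 API) of what the kind-based dispatch above boils down to when reading a spilled stack slot. The debug side table now records the ValueKind directly, so no ValueType is needed to pick the right width.

#include <cstdint>
#include <cstring>
#include <variant>

enum ValueKind : uint8_t { kI32, kI64, kF32, kF64 };

// Stand-in for v8's WasmValue.
using WasmValue = std::variant<int32_t, int64_t, float, double>;

template <typename T>
T ReadUnaligned(const void* addr) {
  T result;
  std::memcpy(&result, addr, sizeof result);
  return result;
}

WasmValue ReadStackValue(const void* stack_address, ValueKind kind) {
  switch (kind) {
    case kI32:
      return ReadUnaligned<int32_t>(stack_address);
    case kI64:
      return ReadUnaligned<int64_t>(stack_address);
    case kF32:
      return ReadUnaligned<float>(stack_address);
    case kF64:
      return ReadUnaligned<double>(stack_address);
  }
  return int32_t{0};  // unreachable for the kinds above
}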
......@@ -40,11 +40,11 @@ class DebugSideTable {
public:
class Entry {
public:
enum ValueKind : int8_t { kConstant, kRegister, kStack };
enum Storage : int8_t { kConstant, kRegister, kStack };
struct Value {
int index;
ValueType type;
ValueKind kind;
Storage storage;
union {
int32_t i32_const; // if kind == kConstant
int reg_code; // if kind == kRegister
......@@ -53,9 +53,9 @@ class DebugSideTable {
bool operator==(const Value& other) const {
if (index != other.index) return false;
if (type != other.type) return false;
if (kind != other.kind) return false;
switch (kind) {
if (storage != other.storage) return false;
switch (storage) {
case kConstant:
return i32_const == other.i32_const;
case kRegister:
......@@ -66,8 +66,8 @@ class DebugSideTable {
}
bool operator!=(const Value& other) const { return !(*this == other); }
bool is_constant() const { return kind == kConstant; }
bool is_register() const { return kind == kRegister; }
bool is_constant() const { return storage == kConstant; }
bool is_register() const { return storage == kRegister; }
};
Entry(int pc_offset, int stack_height, std::vector<Value> changed_values)
......
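Reviewer note: the rename in this header is easy to misread, so here is a compact sketch (field and enum names as in the diff, surrounding types reduced) of the resulting Value layout: kind now holds the wasm ValueKind, while the old kind field that said where the value lives became storage.

#include <cstdint>

enum ValueKind : uint8_t { kI32, kI64, kF32, kF64 };

struct Value {
  enum Storage : int8_t { kConstant, kRegister, kStack };

  int index;
  ValueKind kind;   // wasm kind of the value (previously: ValueType type)
  Storage storage;  // where the value lives  (previously: ValueKind kind)
  union {
    int32_t i32_const;  // if storage == kConstant
    int reg_code;       // if storage == kRegister
    int stack_offset;   // if storage == kStack
  };

  bool is_constant() const { return storage == kConstant; }
  bool is_register() const { return storage == kRegister; }
};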
......@@ -177,8 +177,8 @@ struct DebugSideTableEntry {
// Check for equality, but ignore exact register and stack offset.
static bool CheckValueEquals(const DebugSideTable::Entry::Value& a,
const DebugSideTable::Entry::Value& b) {
return a.index == b.index && a.type == b.type && a.kind == b.kind &&
(a.kind != DebugSideTable::Entry::kConstant ||
return a.index == b.index && a.kind == b.kind && a.storage == b.storage &&
(a.storage != DebugSideTable::Entry::kConstant ||
a.i32_const == b.i32_const);
}
};
......@@ -189,8 +189,8 @@ std::ostream& operator<<(std::ostream& out, const DebugSideTableEntry& entry) {
out << "stack height " << entry.stack_height << ", changed: {";
const char* comma = "";
for (auto& v : entry.changed_values) {
out << comma << v.index << ":" << v.type.name() << " ";
switch (v.kind) {
out << comma << v.index << ":" << name(v.kind) << " ";
switch (v.storage) {
case DebugSideTable::Entry::kConstant:
out << "const:" << v.i32_const;
break;
......@@ -213,27 +213,27 @@ std::ostream& operator<<(std::ostream& out,
#endif // DEBUG
// Named constructors to make the tests more readable.
DebugSideTable::Entry::Value Constant(int index, ValueType type,
DebugSideTable::Entry::Value Constant(int index, ValueKind kind,
int32_t constant) {
DebugSideTable::Entry::Value value;
value.index = index;
value.type = type;
value.kind = DebugSideTable::Entry::kConstant;
value.kind = kind;
value.storage = DebugSideTable::Entry::kConstant;
value.i32_const = constant;
return value;
}
DebugSideTable::Entry::Value Register(int index, ValueType type) {
DebugSideTable::Entry::Value Register(int index, ValueKind kind) {
DebugSideTable::Entry::Value value;
value.index = index;
value.type = type;
value.kind = DebugSideTable::Entry::kRegister;
value.kind = kind;
value.storage = DebugSideTable::Entry::kRegister;
return value;
}
DebugSideTable::Entry::Value Stack(int index, ValueType type) {
DebugSideTable::Entry::Value Stack(int index, ValueKind kind) {
DebugSideTable::Entry::Value value;
value.index = index;
value.type = type;
value.kind = DebugSideTable::Entry::kStack;
value.kind = kind;
value.storage = DebugSideTable::Entry::kStack;
return value;
}
......@@ -296,9 +296,9 @@ TEST(Liftoff_debug_side_table_simple) {
CheckDebugSideTable(
{
// function entry, locals in registers.
{2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
{2, {Register(0, kI32), Register(1, kI32)}},
// OOL stack check, locals spilled, stack still empty.
{2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
{2, {Stack(0, kI32), Stack(1, kI32)}},
},
debug_side_table.get());
}
......@@ -312,9 +312,9 @@ TEST(Liftoff_debug_side_table_call) {
CheckDebugSideTable(
{
// function entry, local in register.
{1, {Register(0, kWasmI32)}},
{1, {Register(0, kI32)}},
// call, local spilled, stack empty.
{1, {Stack(0, kWasmI32)}},
{1, {Stack(0, kI32)}},
// OOL stack check, local spilled as before, stack empty.
{1, {}},
},
......@@ -332,11 +332,11 @@ TEST(Liftoff_debug_side_table_call_const) {
CheckDebugSideTable(
{
// function entry, local in register.
{1, {Register(0, kWasmI32)}},
{1, {Register(0, kI32)}},
// call, local is kConst.
{1, {Constant(0, kWasmI32, kConst)}},
{1, {Constant(0, kI32, kConst)}},
// OOL stack check, local spilled.
{1, {Stack(0, kWasmI32)}},
{1, {Stack(0, kI32)}},
},
debug_side_table.get());
}
......@@ -351,13 +351,13 @@ TEST(Liftoff_debug_side_table_indirect_call) {
CheckDebugSideTable(
{
// function entry, local in register.
{1, {Register(0, kWasmI32)}},
{1, {Register(0, kI32)}},
// indirect call, local spilled, stack empty.
{1, {Stack(0, kWasmI32)}},
{1, {Stack(0, kI32)}},
// OOL stack check, local still spilled.
{1, {}},
// OOL trap (invalid index), local still spilled, stack has {kConst}.
{2, {Constant(1, kWasmI32, kConst)}},
{2, {Constant(1, kI32, kConst)}},
// OOL trap (sig mismatch), stack unmodified.
{2, {}},
},
......@@ -373,11 +373,11 @@ TEST(Liftoff_debug_side_table_loop) {
CheckDebugSideTable(
{
// function entry, local in register.
{1, {Register(0, kWasmI32)}},
{1, {Register(0, kI32)}},
// OOL stack check, local spilled, stack empty.
{1, {Stack(0, kWasmI32)}},
{1, {Stack(0, kI32)}},
// OOL loop stack check, local still spilled, stack has {kConst}.
{2, {Constant(1, kWasmI32, kConst)}},
{2, {Constant(1, kI32, kConst)}},
},
debug_side_table.get());
}
......@@ -390,9 +390,9 @@ TEST(Liftoff_debug_side_table_trap) {
CheckDebugSideTable(
{
// function entry, locals in registers.
{2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
{2, {Register(0, kI32), Register(1, kI32)}},
// OOL stack check, local spilled, stack empty.
{2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
{2, {Stack(0, kI32), Stack(1, kI32)}},
// OOL trap (div by zero), stack as before.
{2, {}},
// OOL trap (unrepresentable), stack as before.
......@@ -414,11 +414,11 @@ TEST(Liftoff_breakpoint_simple) {
CheckDebugSideTable(
{
// First break point, locals in registers.
{2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
{2, {Register(0, kI32), Register(1, kI32)}},
// Second break point, locals unchanged, two register stack values.
{4, {Register(2, kWasmI32), Register(3, kWasmI32)}},
{4, {Register(2, kI32), Register(3, kI32)}},
// OOL stack check, locals spilled, stack empty.
{2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
{2, {Stack(0, kI32), Stack(1, kI32)}},
},
debug_side_table.get());
}
......