Commit b84c7dbd authored by Clemens Backes, committed by V8 LUCI CQ

[liftoff] Fix implicit conversion to LiftoffRegList

According to the style guide, there should be no implicit conversion from
any number of registers to a LiftoffRegList. This CL removes that
conversion, and fixes two suboptimal call sites to use SpillRegister
(which takes a single register) instead of SpillOneRegister (which takes
a register list to choose from).

Plus some semantics-preserving rewrites.

R=jkummerow@chromium.org

Bug: chromium:1337221
Change-Id: Id22043ac1c185bc794dbde7baa4b1d5ab7cce56e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3707286
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81250}
parent 15f372af
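
The substantive change is in the final hunk of this diff, which marks the variadic LiftoffRegList constructor explicit; every other hunk adapts call sites to that. As a standalone illustration of what an explicit variadic constructor changes for callers, here is a minimal sketch using hypothetical stand-in types (RegList and Register below are simplifications, not the real V8 classes):

#include <cstdint>

struct Register { int code; };

class RegList {
 public:
  constexpr RegList() = default;
  // Variadic constructor, marked explicit as in this CL: any number of
  // registers can still be combined into a list, just not implicitly.
  template <typename... Regs>
  constexpr explicit RegList(Regs... regs) {
    (..., set(regs));
  }
  constexpr void set(Register r) { bits_ |= uint64_t{1} << r.code; }

 private:
  uint64_t bits_ = 0;
};

void Use(RegList) {}

int main() {
  Register r0{0}, r1{1};
  RegList ok{r0, r1};         // still fine: direct-list-initialization
  // RegList bad = {r0, r1};  // no longer compiles: constructor is explicit
  // Use({r0, r1});           // no longer compiles: implicit conversion gone
  Use(RegList{r0, r1});       // call sites now spell out the type
}

This is why the hunks below rewrite "LiftoffRegList pinned = {...};" as "LiftoffRegList pinned{...};": copy-initialization from a braced list cannot call an explicit constructor, while direct-list-initialization can.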
@@ -192,7 +192,7 @@ inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
// Left shift writes {dst_high} then {dst_low}, right shifts write {dst_low}
// then {dst_high}.
Register clobbered_dst_reg = is_left_shift ? dst_high : dst_low;
-  LiftoffRegList pinned = {clobbered_dst_reg, src};
+  LiftoffRegList pinned{clobbered_dst_reg, src};
Register amount_capped =
pinned.set(assm->GetUnusedRegister(kGpReg, pinned)).gp();
assm->and_(amount_capped, amount, Operand(0x3F));
@@ -951,7 +951,7 @@ inline void AtomicBinop32(LiftoffAssembler* lasm, Register dst_addr,
StoreType type,
void (*op)(LiftoffAssembler*, Register, Register,
Register)) {
-  LiftoffRegList pinned = {dst_addr, offset_reg, value, result};
+  LiftoffRegList pinned{dst_addr, offset_reg, value, result};
switch (type.value()) {
case StoreType::kI64Store8:
__ LoadConstant(result.high(), WasmValue(0));
@@ -1002,7 +1002,7 @@ inline void AtomicOp64(LiftoffAssembler* lasm, Register dst_addr,
// Make sure {dst_low} and {dst_high} are not occupied by any other value.
Register value_low = value.low_gp();
Register value_high = value.high_gp();
-  LiftoffRegList pinned = {dst_addr, offset_reg, value_low,
-                           value_high, dst_low, dst_high};
+  LiftoffRegList pinned{dst_addr, offset_reg, value_low,
+                        value_high, dst_low, dst_high};
__ ClearRegister(dst_low, {&dst_addr, &offset_reg, &value_low, &value_high},
pinned);
@@ -1273,7 +1273,7 @@ void LiftoffAssembler::AtomicCompareExchange(
void (Assembler::*load)(Register, Register, Condition) = nullptr;
void (Assembler::*store)(Register, Register, Register, Condition) = nullptr;
-  LiftoffRegList pinned = {dst_addr, offset_reg};
+  LiftoffRegList pinned{dst_addr, offset_reg};
// We need to remember the high word of {result}, so we can set it to zero in
// the end if necessary.
Register result_high = no_reg;
@@ -1612,7 +1612,7 @@ inline void GeneratePopCnt(Assembler* assm, Register dst, Register src,
} // namespace liftoff
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
-  LiftoffRegList pinned = {dst};
+  LiftoffRegList pinned{dst};
Register scratch1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
liftoff::GeneratePopCnt(this, dst, src, scratch1, scratch2);
@@ -1853,7 +1853,7 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
// overwrite the second src register before using it.
Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp();
Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp();
-  LiftoffRegList pinned = {dst, src2};
+  LiftoffRegList pinned{dst, src2};
Register scratch1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
liftoff::GeneratePopCnt(this, dst.low_gp(), src1, scratch1, scratch2);
@@ -2996,7 +2996,7 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
if (used_plus_dst.has(lhs) && used_plus_dst.has(rhs)) {
tmp1 = temps.AcquireQ();
// We only have 1 scratch Q register, so acquire another ourselves.
-    LiftoffRegList pinned = {dst};
+    LiftoffRegList pinned{dst};
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
tmp2 = liftoff::GetSimd128Register(unused_pair);
} else if (used_plus_dst.has(lhs)) {
@@ -3122,7 +3122,7 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
if (cache_state()->is_used(src)) {
// We only have 1 scratch Q register, so try and reuse src.
-    LiftoffRegList pinned = {src};
+    LiftoffRegList pinned{src};
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
mask = liftoff::GetSimd128Register(unused_pair);
}
@@ -3307,7 +3307,7 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
if (cache_state()->is_used(src)) {
// We only have 1 scratch Q register, so try and reuse src.
-    LiftoffRegList pinned = {src};
+    LiftoffRegList pinned{src};
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
mask = liftoff::GetSimd128Register(unused_pair);
}
@@ -3643,7 +3643,7 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
if (cache_state()->is_used(src)) {
// We only have 1 scratch Q register, so try and reuse src.
-    LiftoffRegList pinned = {src};
+    LiftoffRegList pinned{src};
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
mask = liftoff::GetSimd128Register(unused_pair);
}
......
@@ -634,7 +634,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
Register offset_reg, uintptr_t offset_imm,
LiftoffRegister value, LiftoffRegister result,
StoreType type, Binop op) {
-  LiftoffRegList pinned = {dst_addr, offset_reg, value, result};
+  LiftoffRegList pinned{dst_addr, offset_reg, value, result};
Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
// {LiftoffCompiler::AtomicBinop} ensures that {result} is unique.
@@ -820,7 +820,7 @@ void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
-  LiftoffRegList pinned = {dst_addr, offset_reg, expected, new_value};
+  LiftoffRegList pinned{dst_addr, offset_reg, expected, new_value};
Register result_reg = result.gp();
if (pinned.has(result)) {
......
@@ -676,7 +676,7 @@ inline void AtomicAddOrSubOrExchange32(LiftoffAssembler* lasm, Binop binop,
Register result_reg = is_64_bit_op ? result.low_gp() : result.gp();
bool is_byte_store = type.size() == 1;
-  LiftoffRegList pinned = {dst_addr, value_reg, offset_reg};
+  LiftoffRegList pinned{dst_addr, value_reg, offset_reg};
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
@@ -1041,7 +1041,7 @@ void LiftoffAssembler::AtomicCompareExchange(
}
bool is_byte_store = type.size() == 1;
-  LiftoffRegList pinned = {dst_addr, value_reg, expected_reg};
+  LiftoffRegList pinned{dst_addr, value_reg, expected_reg};
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
@@ -1467,7 +1467,7 @@ namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount,
void (Assembler::*emit_shift)(Register)) {
-  LiftoffRegList pinned = {dst, src, amount};
+  LiftoffRegList pinned{dst, src, amount};
// If dst is ecx, compute into a tmp register first, then move to ecx.
if (dst == ecx) {
Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
@@ -1698,7 +1698,7 @@ inline void Emit64BitShiftOperation(
LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
Register amount, void (TurboAssembler::*emit_shift)(Register, Register)) {
// Temporary registers cannot overlap with {dst}.
-  LiftoffRegList pinned = {dst};
+  LiftoffRegList pinned{dst};
constexpr size_t kMaxRegMoves = 3;
base::SmallVector<LiftoffAssembler::ParallelRegisterMoveTuple, kMaxRegMoves>
@@ -2241,7 +2241,7 @@ inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
}
CpuFeatureScope feature(assm, SSE4_1);
-  LiftoffRegList pinned = {src, dst};
+  LiftoffRegList pinned{src, dst};
DoubleRegister rounded =
pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
DoubleRegister converted_back =
@@ -2280,7 +2280,7 @@ inline bool EmitSatTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
Label not_nan;
Label src_positive;
-  LiftoffRegList pinned = {src, dst};
+  LiftoffRegList pinned{src, dst};
DoubleRegister rounded =
pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
DoubleRegister converted_back =
@@ -2388,7 +2388,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
cvtsi2ss(dst.fp(), src.gp());
return true;
case kExprF32UConvertI32: {
-      LiftoffRegList pinned = {dst, src};
+      LiftoffRegList pinned{dst, src};
Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
Cvtui2ss(dst.fp(), src.gp(), scratch);
return true;
@@ -2403,7 +2403,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
Cvtsi2sd(dst.fp(), src.gp());
return true;
case kExprF64UConvertI32: {
-      LiftoffRegList pinned = {dst, src};
+      LiftoffRegList pinned{dst, src};
Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
Cvtui2sd(dst.fp(), src.gp(), scratch);
return true;
......
@@ -470,15 +470,11 @@ class LiftoffAssembler : public TurboAssembler {
if (slot.is_reg()) {
cache_state_.dec_used(slot.reg());
if (slot.reg() == reg) return;
-      if (cache_state_.is_used(reg)) {
-        SpillOneRegister(reg);
-      }
+      if (cache_state_.is_used(reg)) SpillRegister(reg);
Move(reg, slot.reg(), slot.kind());
return;
}
-    if (cache_state_.is_used(reg)) {
-      SpillOneRegister(reg);
-    }
+    if (cache_state_.is_used(reg)) SpillRegister(reg);
LoadToRegister(slot, reg);
}
......
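
For context on the hunk above: SpillOneRegister receives a list of candidate registers and picks one of them to spill, whereas SpillRegister spills exactly the register it is given. At these two call sites the register to free is already known, so asking SpillOneRegister to "choose" from a one-element list was pure indirection. A rough standalone sketch of the distinction, with hypothetical, heavily simplified types and signatures (not V8's actual declarations):

struct LiftoffRegister { int code; };
struct LiftoffRegList { LiftoffRegister reg; };  // one-element stand-in

// Spills exactly the given register (saves its value to the stack).
void SpillRegister(LiftoffRegister reg) { /* save and free reg */ }

// Chooses some register from {candidates} and spills it.
LiftoffRegister SpillOneRegister(LiftoffRegList candidates) {
  LiftoffRegister victim = candidates.reg;  // the "choice" is trivial here
  SpillRegister(victim);
  return victim;
}

int main() {
  LiftoffRegister reg{3};
  SpillOneRegister(LiftoffRegList{reg});  // old: pick from a one-element list
  SpillRegister(reg);                     // new: no choice to make
}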
@@ -1557,7 +1557,7 @@ class LiftoffCompiler {
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
if (V8_UNLIKELY(nondeterminism_)) {
-      LiftoffRegList pinned = {dst};
+      LiftoffRegList pinned{dst};
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
@@ -1782,7 +1782,7 @@ class LiftoffCompiler {
LiftoffRegister lhs = __ PopToRegister();
// Either reuse {lhs} for {dst}, or choose a register (pair) which does
// not overlap, for easier code generation.
-    LiftoffRegList pinned = {lhs};
+    LiftoffRegList pinned{lhs};
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs}, pinned)
: __ GetUnusedRegister(result_rc, pinned);
@@ -1813,7 +1813,7 @@ class LiftoffCompiler {
CallEmitFn(fn, dst, lhs, rhs);
if (V8_UNLIKELY(nondeterminism_)) {
-      LiftoffRegList pinned = {dst};
+      LiftoffRegList pinned{dst};
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
@@ -3011,7 +3011,7 @@ class LiftoffCompiler {
if (index == no_reg) return;
CODE_COMMENT("load from memory");
-    LiftoffRegList pinned = {index};
+    LiftoffRegList pinned{index};
// Load the memory start address only now to reduce register pressure
// (important on ia32).
@@ -3055,7 +3055,7 @@ class LiftoffCompiler {
if (index == no_reg) return;
uintptr_t offset = imm.offset;
-    LiftoffRegList pinned = {index};
+    LiftoffRegList pinned{index};
CODE_COMMENT("load with transformation");
Register addr = GetMemoryStart(pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -3434,7 +3434,7 @@ class LiftoffCompiler {
LiftoffRegister src2, LiftoffRegister src3) {
CallEmitFn(fn, dst, src1, src2, src3);
if (V8_UNLIKELY(nondeterminism_)) {
-      LiftoffRegList pinned = {dst};
+      LiftoffRegList pinned{dst};
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
@@ -3524,7 +3524,7 @@ class LiftoffCompiler {
GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
}
if (V8_UNLIKELY(nondeterminism_)) {
-      LiftoffRegList pinned = {dst};
+      LiftoffRegList pinned{dst};
CheckS128Nan(dst, pinned, result_lane_kind);
}
__ PushRegister(kS128, dst);
@@ -3532,9 +3532,10 @@ class LiftoffCompiler {
template <typename EmitFn>
void EmitSimdFmaOp(EmitFn emit_fn) {
-    LiftoffRegister src3 = __ PopToRegister();
-    LiftoffRegister src2 = __ PopToRegister({src3});
-    LiftoffRegister src1 = __ PopToRegister({src2, src3});
+    LiftoffRegList pinned;
+    LiftoffRegister src3 = pinned.set(__ PopToRegister(pinned));
+    LiftoffRegister src2 = pinned.set(__ PopToRegister(pinned));
+    LiftoffRegister src1 = pinned.set(__ PopToRegister(pinned));
RegClass dst_rc = reg_class_for(kS128);
LiftoffRegister dst = __ GetUnusedRegister(dst_rc, {});
(asm_.*emit_fn)(dst, src1, src2, src3);
@@ -4098,9 +4099,10 @@ class LiftoffCompiler {
// There is no helper for an instruction with 3 SIMD operands
// and we do not expect to add any more, so inlining it here.
static constexpr RegClass res_rc = reg_class_for(kS128);
-    LiftoffRegister acc = __ PopToRegister();
-    LiftoffRegister rhs = __ PopToRegister(LiftoffRegList{acc});
-    LiftoffRegister lhs = __ PopToRegister(LiftoffRegList{rhs, acc});
+    LiftoffRegList pinned;
+    LiftoffRegister acc = pinned.set(__ PopToRegister(pinned));
+    LiftoffRegister rhs = pinned.set(__ PopToRegister(pinned));
+    LiftoffRegister lhs = pinned.set(__ PopToRegister(pinned));
LiftoffRegister dst = __ GetUnusedRegister(res_rc, {lhs, rhs, acc}, {});
__ emit_i32x4_dot_i8x16_i7x16_add_s(dst, lhs, rhs, acc);
@@ -4225,8 +4227,9 @@ class LiftoffCompiler {
return unsupported(decoder, kSimd, "simd");
}
static constexpr RegClass result_rc = reg_class_for(kS128);
-    LiftoffRegister rhs = __ PopToRegister();
-    LiftoffRegister lhs = __ PopToRegister(LiftoffRegList{rhs});
+    LiftoffRegList pinned;
+    LiftoffRegister rhs = pinned.set(__ PopToRegister(pinned));
+    LiftoffRegister lhs = pinned.set(__ PopToRegister(pinned));
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
uint8_t shuffle[kSimd128Size];
@@ -4578,7 +4581,7 @@ class LiftoffCompiler {
full_index, {}, kDoForceCheck);
if (index == no_reg) return;
-    LiftoffRegList pinned = {index};
+    LiftoffRegList pinned{index};
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
CODE_COMMENT("atomic load from memory");
@@ -4647,7 +4650,7 @@ class LiftoffCompiler {
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
if (index == no_reg) return;
-    LiftoffRegList pinned = {index};
+    LiftoffRegList pinned{index};
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
@@ -4676,7 +4679,7 @@ class LiftoffCompiler {
#else
ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned;
-    LiftoffRegister new_value = pinned.set(__ PopToRegister());
+    LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
@@ -4727,7 +4730,7 @@ class LiftoffCompiler {
BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index,
{}, kDoForceCheck);
if (index_reg == no_reg) return;
-    LiftoffRegList pinned = {index_reg};
+    LiftoffRegList pinned{index_reg};
AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg,
pinned);
@@ -4774,7 +4777,7 @@ class LiftoffCompiler {
Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
-    LiftoffRegList pinned = {index_reg};
+    LiftoffRegList pinned{index_reg};
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
@@ -4982,7 +4985,7 @@ class LiftoffCompiler {
const Value&, const Value&) {
Register mem_offsets_high_word = no_reg;
LiftoffRegList pinned;
-    LiftoffRegister size = pinned.set(__ PopToRegister());
+    LiftoffRegister size = pinned.set(__ PopToRegister(pinned));
LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
LiftoffRegister dst =
PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned);
@@ -5303,10 +5306,10 @@ class LiftoffCompiler {
i--;
int offset = StructFieldOffset(imm.struct_type, i);
ValueKind field_kind = imm.struct_type->field(i).kind();
-      LiftoffRegister value = initial_values_on_stack
-                                  ? pinned.set(__ PopToRegister(pinned))
-                                  : pinned.set(__ GetUnusedRegister(
-                                        reg_class_for(field_kind), pinned));
+      LiftoffRegister value = pinned.set(
+          initial_values_on_stack
+              ? __ PopToRegister(pinned)
+              : __ GetUnusedRegister(reg_class_for(field_kind), pinned));
if (!initial_values_on_stack) {
if (!CheckSupportedType(decoder, field_kind, "default value")) return;
SetDefaultValue(value, field_kind, pinned);
@@ -5395,7 +5398,7 @@ class LiftoffCompiler {
}
LiftoffRegister obj(kReturnRegister0);
-    LiftoffRegList pinned = {obj};
+    LiftoffRegList pinned{obj};
LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(elem_kind), pinned));
@@ -5550,7 +5553,7 @@ class LiftoffCompiler {
LiftoffRegister array(kReturnRegister0);
if (!CheckSupportedType(decoder, elem_kind, "array.init")) return;
for (int i = static_cast<int>(elements.size()) - 1; i >= 0; i--) {
-      LiftoffRegList pinned = {array};
+      LiftoffRegList pinned{array};
LiftoffRegister element = pinned.set(__ PopToRegister(pinned));
LiftoffRegister offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -6486,7 +6489,7 @@ class LiftoffCompiler {
// Pop the index. We'll modify the register's contents later.
Register index = __ PopToModifiableRegister().gp();
-    LiftoffRegList pinned = {index};
+    LiftoffRegList pinned{index};
// Get three temporary registers.
Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
......
@@ -354,7 +354,7 @@ class LiftoffRegList {
typename = std::enable_if_t<std::conjunction_v<std::disjunction<
std::is_same<Register, Regs>, std::is_same<DoubleRegister, Regs>,
std::is_same<LiftoffRegister, Regs>>...>>>
-  constexpr LiftoffRegList(Regs... regs) {
+  constexpr explicit LiftoffRegList(Regs... regs) {
(..., set(regs));
}
......
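
A note on the template preamble visible in this last hunk: the std::enable_if_t over a std::conjunction_v of std::disjunction checks restricts the variadic constructor so it only participates in overload resolution when every argument is a Register, DoubleRegister, or LiftoffRegister. A condensed sketch of the same constraint, again with hypothetical stand-in types rather than the real V8 classes:

#include <type_traits>

struct Register {};
struct DoubleRegister {};
struct LiftoffRegister {};

class RegList {
 public:
  // Participates in overload resolution only if every argument is one of
  // the three register types.
  template <typename... Regs,
            typename = std::enable_if_t<std::conjunction_v<std::disjunction<
                std::is_same<Register, Regs>, std::is_same<DoubleRegister, Regs>,
                std::is_same<LiftoffRegister, Regs>>...>>>
  constexpr explicit RegList(Regs...) {}
};

int main() {
  RegList ok{Register{}, DoubleRegister{}};  // all arguments are register types
  // RegList bad{42};  // does not compile: int is not a register type
}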