Commit 7762f53b authored by Clemens Backes, committed by Commit Bot

[arm64][cleanup] Remove CPURegister::Is and CPURegister::is

This removes {CPURegister::Is} and {CPURegister::is}, and just uses
{CPURegister::operator==} instead.

Drive-by: Use DCHECK_EQ and DCHECK_NE where possible.

R=mstarzinger@chromium.org

Bug: v8:9810
Change-Id: I03aad8b4223bd4ae37d468326a734f7a5c3c8061
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1916202
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64956}
parent 1ccd1396
......@@ -1856,7 +1856,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Bind(&no_arguments);
{
__ Mov(x0, 0);
DCHECK(receiver.Is(x1));
DCHECK_EQ(receiver, x1);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
......@@ -3362,7 +3362,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
const int kLevelOffset = AddressOffset(
ExternalReference::handle_scope_level_address(isolate), next_address);
DCHECK(function_address.is(x1) || function_address.is(x2));
DCHECK(function_address == x1 || function_address == x2);
Label profiler_enabled, end_profiler_check;
__ Mov(x10, ExternalReference::is_profiling_address(isolate));
......
......@@ -270,7 +270,7 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
}
bool Operand::IsHeapObjectRequest() const {
DCHECK_IMPLIES(heap_object_request_.has_value(), reg_.Is(NoReg));
DCHECK_IMPLIES(heap_object_request_.has_value(), reg_ == NoReg);
DCHECK_IMPLIES(heap_object_request_.has_value(),
immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT ||
immediate_.rmode() == RelocInfo::CODE_TARGET);
......@@ -283,7 +283,7 @@ HeapObjectRequest Operand::heap_object_request() const {
}
bool Operand::IsImmediate() const {
return reg_.Is(NoReg) && !IsHeapObjectRequest();
return reg_ == NoReg && !IsHeapObjectRequest();
}
bool Operand::IsShiftedRegister() const {
......@@ -452,11 +452,11 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
}
bool MemOperand::IsImmediateOffset() const {
return (addrmode_ == Offset) && regoffset_.Is(NoReg);
return (addrmode_ == Offset) && regoffset_ == NoReg;
}
bool MemOperand::IsRegisterOffset() const {
return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
return (addrmode_ == Offset) && regoffset_ != NoReg;
}
bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
......
......@@ -299,7 +299,7 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
DCHECK_LE(access_size_log2, 3);
// Step one: check that they share the same base, that the mode is Offset
// and that the offset is a multiple of access size.
if (!operandA.base().Is(operandB.base()) || (operandA.addrmode() != Offset) ||
if (operandA.base() != operandB.base() || (operandA.addrmode() != Offset) ||
(operandB.addrmode() != Offset) ||
((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
return kNotPair;
......@@ -752,7 +752,7 @@ void Assembler::blr(const Register& xn) {
DCHECK(xn.Is64Bits());
// The pattern 'blr xzr' is used as a guard to detect when execution falls
// through the constant pool. It should not be emitted.
DCHECK(!xn.Is(xzr));
DCHECK_NE(xn, xzr);
Emit(BLR | Rn(xn));
}
......@@ -1209,7 +1209,7 @@ void Assembler::ldpsw(const Register& rt, const Register& rt2,
void Assembler::LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op) {
// 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
DCHECK(((op & LoadStorePairLBit) == 0) || rt != rt2);
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
int offset = static_cast<int>(addr.offset());
......@@ -1222,8 +1222,8 @@ void Assembler::LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
addrmodeop = LoadStorePairOffsetFixed;
} else {
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
DCHECK(!rt2.Is(addr.base()));
DCHECK_NE(rt, addr.base());
DCHECK_NE(rt2, addr.base());
DCHECK_NE(addr.offset(), 0);
if (addr.IsPreIndex()) {
addrmodeop = LoadStorePairPreIndexFixed;
......@@ -1337,7 +1337,7 @@ void Assembler::stlr(const Register& rt, const Register& rn) {
void Assembler::stlxr(const Register& rs, const Register& rt,
const Register& rn) {
DCHECK(rn.Is64Bits());
DCHECK(!rs.Is(rt) && !rs.Is(rn));
DCHECK(rs != rt && rs != rn);
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
......@@ -1365,7 +1365,7 @@ void Assembler::stlxrb(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
DCHECK(!rs.Is(rt) && !rs.Is(rn));
DCHECK(rs != rt && rs != rn);
Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
......@@ -1392,7 +1392,7 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
DCHECK(!rs.Is(rt) && !rs.Is(rn));
DCHECK(rs != rt && rs != rn);
Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
......@@ -2282,7 +2282,7 @@ void Assembler::LoadStoreStructVerify(const VRegister& vt,
default:
UNREACHABLE();
}
DCHECK(!addr.regoffset().Is(NoReg) || addr.offset() == offset);
DCHECK(addr.regoffset() != NoReg || addr.offset() == offset);
}
#else
USE(vt);
......@@ -3947,7 +3947,7 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr,
ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
} else {
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
DCHECK_NE(rt, addr.base());
if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
if (addr.IsPreIndex()) {
......
......@@ -390,7 +390,7 @@ void TurboAssembler::CmovX(const Register& rd, const Register& rn,
DCHECK(!rd.IsSP());
DCHECK(rd.Is64Bits() && rn.Is64Bits());
DCHECK((cond != al) && (cond != nv));
if (!rd.is(rn)) {
if (rd != rn) {
csel(rd, rn, rd, cond);
}
}
......@@ -596,7 +596,7 @@ void TurboAssembler::Fmov(VRegister fd, VRegister fn) {
// registers. fmov(s0, s0) is not a no-op because it clears the top word of
// d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
// top of q0, but VRegister does not currently support Q registers.
if (!fd.Is(fn) || !fd.Is64Bits()) {
if (fd != fn || !fd.Is64Bits()) {
fmov(fd, fn);
}
}
......
......@@ -165,7 +165,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
// If the left-hand input is the stack pointer, we can't pre-shift the
// immediate, as the encoding won't allow the subsequent post shift.
PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
PreShiftImmMode mode = rn == sp ? kNoShift : kAnyShift;
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
if (rd.IsSP()) {
......@@ -327,7 +327,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// this case, the instruction is discarded.
//
// If sp is an operand, add #0 is emitted, otherwise, orr #0.
if (!rd.Is(operand.reg()) ||
if (rd != operand.reg() ||
(rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
}
......@@ -336,7 +336,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
// Copy the result to the system stack pointer.
if (!dst.Is(rd)) {
if (dst != rd) {
DCHECK(rd.IsSP());
Assembler::mov(rd, dst);
}
......@@ -697,7 +697,7 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
const Operand& operand, FlagsUpdate S,
AddSubOp op) {
if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
if (operand.IsZero() && rd == rn && rd.Is64Bits() && rn.Is64Bits() &&
!operand.NeedsRelocation(this) && (S == LeaveFlags)) {
// The instruction would be a nop. Avoid generating useless code.
return;
......@@ -720,11 +720,11 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
// If the destination or source register is the stack pointer, we can
// only pre-shift the immediate right by values supported in the add/sub
// extend encoding.
if (rd.Is(sp)) {
if (rd == sp) {
// If the destination is SP and flags will be set, we can't pre-shift
// the immediate at all.
mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
} else if (rn.Is(sp)) {
} else if (rn == sp) {
mode = kLimitShiftForSP;
}
......@@ -910,7 +910,7 @@ void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
}
void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
DCHECK((reg == NoReg || type >= kBranchTypeFirstUsingReg) &&
(bit == -1 || type >= kBranchTypeFirstUsingBit));
if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
B(static_cast<Condition>(type), label);
......@@ -1487,7 +1487,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
void TurboAssembler::Swap(Register lhs, Register rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
DCHECK(!lhs.Is(rhs));
DCHECK_NE(lhs, rhs);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Mov(temp, rhs);
......@@ -1497,7 +1497,7 @@ void TurboAssembler::Swap(Register lhs, Register rhs) {
void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
DCHECK(!lhs.Is(rhs));
DCHECK_NE(lhs, rhs);
UseScratchRegisterScope temps(this);
VRegister temp = VRegister::no_reg();
if (lhs.IsS()) {
......@@ -2179,8 +2179,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(function.is(x1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
DCHECK_EQ(function, x1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == x3);
// On function call, call into the debugger if necessary.
Label debug_hook, continue_after_hook;
......@@ -2235,7 +2235,7 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
DCHECK(function.is(x1));
DCHECK_EQ(function, x1);
Register expected_parameter_count = x2;
......@@ -2264,7 +2264,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
DCHECK(function.Is(x1));
DCHECK_EQ(function, x1);
// Set up the context.
LoadTaggedPointerField(cp,
......
......@@ -150,19 +150,16 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
return reg_size_ == 128;
}
bool IsNone() const { return reg_type_ == kNoRegister; }
constexpr bool Is(const CPURegister& other) const {
return Aliases(other) && (reg_size_ == other.reg_size_);
}
constexpr bool Aliases(const CPURegister& other) const {
return (reg_code_ == other.reg_code_) && (reg_type_ == other.reg_type_);
}
constexpr bool operator==(const CPURegister& other) const {
return Is(other);
return RegisterBase::operator==(other) && reg_size_ == other.reg_size_ &&
reg_type_ == other.reg_type_;
}
constexpr bool operator!=(const CPURegister& other) const {
return !(*this == other);
return !operator==(other);
}
bool IsZero() const;
......@@ -203,8 +200,6 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
bool IsSameSizeAndType(const CPURegister& other) const;
bool is(const CPURegister& other) const { return Is(other); }
protected:
int reg_size_;
RegisterType reg_type_;
......@@ -455,8 +450,8 @@ class VRegister : public CPURegister {
ASSERT_TRIVIALLY_COPYABLE(VRegister);
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and VRegister
// variants are provided for convenience.
// these all compare equal. The Register and VRegister variants are provided for
// convenience.
constexpr Register NoReg = Register::no_reg();
constexpr VRegister NoVReg = VRegister::no_reg();
constexpr CPURegister NoCPUReg = CPURegister::no_reg();
......
......@@ -823,7 +823,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchLookupSwitch(instr);
break;
case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0).is(x1));
DCHECK_EQ(i.InputRegister(0), x1);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
......@@ -1427,9 +1427,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float64Mod: {
// TODO(turbofan): implement directly.
FrameScope scope(tasm(), StackFrame::MANUAL);
DCHECK(d0.is(i.InputDoubleRegister(0)));
DCHECK(d1.is(i.InputDoubleRegister(1)));
DCHECK(d0.is(i.OutputDoubleRegister()));
DCHECK_EQ(d0, i.InputDoubleRegister(0));
DCHECK_EQ(d1, i.InputDoubleRegister(1));
DCHECK_EQ(d0, i.OutputDoubleRegister());
// TODO(turbofan): make sure this saves all relevant registers.
__ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
break;
......@@ -1565,11 +1565,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Umov(i.OutputRegister32(), i.InputFloat64Register(0).V2S(), 1);
break;
case kArm64Float64InsertLowWord32:
DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
DCHECK_EQ(i.OutputFloat64Register(), i.InputFloat64Register(0));
__ Ins(i.OutputFloat64Register().V2S(), 0, i.InputRegister32(1));
break;
case kArm64Float64InsertHighWord32:
DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
DCHECK_EQ(i.OutputFloat64Register(), i.InputFloat64Register(0));
__ Ins(i.OutputFloat64Register().V2S(), 1, i.InputRegister32(1));
break;
case kArm64Float64MoveU64:
......@@ -1833,7 +1833,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64F64x2ReplaceLane: {
VRegister dst = i.OutputSimd128Register().V2D(),
src1 = i.InputSimd128Register(0).V2D();
if (!dst.is(src1)) {
if (dst != src1) {
__ Mov(dst, src1);
}
__ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
......@@ -1890,7 +1890,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64F32x4ReplaceLane: {
VRegister dst = i.OutputSimd128Register().V4S(),
src1 = i.InputSimd128Register(0).V4S();
if (!dst.is(src1)) {
if (dst != src1) {
__ Mov(dst, src1);
}
__ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0);
......@@ -1952,7 +1952,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64I64x2ReplaceLane: {
VRegister dst = i.OutputSimd128Register().V2D(),
src1 = i.InputSimd128Register(0).V2D();
if (!dst.is(src1)) {
if (dst != src1) {
__ Mov(dst, src1);
}
__ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
......@@ -2076,7 +2076,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64I32x4ReplaceLane: {
VRegister dst = i.OutputSimd128Register().V4S(),
src1 = i.InputSimd128Register(0).V4S();
if (!dst.is(src1)) {
if (dst != src1) {
__ Mov(dst, src1);
}
__ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
......@@ -2153,7 +2153,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64I16x8ReplaceLane: {
VRegister dst = i.OutputSimd128Register().V8H(),
src1 = i.InputSimd128Register(0).V8H();
if (!dst.is(src1)) {
if (dst != src1) {
__ Mov(dst, src1);
}
__ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
......@@ -2189,7 +2189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst.is(src1)) {
if (dst == src1) {
__ Mov(temp, src1.V4S());
src1 = temp;
}
......@@ -2241,7 +2241,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst.is(src1)) {
if (dst == src1) {
__ Mov(temp, src1.V4S());
src1 = temp;
}
......@@ -2267,7 +2267,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64I8x16ReplaceLane: {
VRegister dst = i.OutputSimd128Register().V16B(),
src1 = i.InputSimd128Register(0).V16B();
if (!dst.is(src1)) {
if (dst != src1) {
__ Mov(dst, src1);
}
__ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
......@@ -2301,7 +2301,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireV(kFormat8H);
if (dst.is(src1)) {
if (dst == src1) {
__ Mov(temp, src1.V8H());
src1 = temp;
}
......@@ -2343,7 +2343,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireV(kFormat8H);
if (dst.is(src1)) {
if (dst == src1) {
__ Mov(temp, src1.V8H());
src1 = temp;
}
......@@ -2388,7 +2388,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64S128Select: {
VRegister dst = i.OutputSimd128Register().V16B();
DCHECK(dst.is(i.InputSimd128Register(0).V16B()));
DCHECK_EQ(dst, i.InputSimd128Register(0).V16B());
__ Bsl(dst, i.InputSimd128Register(1).V16B(),
i.InputSimd128Register(2).V16B());
break;
......@@ -2401,10 +2401,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// If dst == src0 == src1, then the shuffle is unary and we only use src0.
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst.is(src0)) {
if (dst == src0) {
__ Mov(temp, src0);
src0 = temp;
} else if (dst.is(src1)) {
} else if (dst == src1) {
__ Mov(temp, src1);
src1 = temp;
}
......@@ -2457,7 +2457,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Unary shuffle table is in src0, binary shuffle table is in src0, src1,
// which must be consecutive.
int64_t mask = 0;
if (src0.is(src1)) {
if (src0 == src1) {
mask = 0x0F0F0F0F;
} else {
mask = 0x1F1F1F1F;
......@@ -2471,7 +2471,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister temp = scope.AcquireV(kFormat16B);
__ Movi(temp, imm2, imm1);
if (src0.is(src1)) {
if (src0 == src1) {
__ Tbl(dst, src0, temp.V16B());
} else {
__ Tbl(dst, src0, src1, temp.V16B());
......
......@@ -1493,7 +1493,7 @@ void RegExpMacroAssemblerARM64::CheckStackLimit() {
void RegExpMacroAssemblerARM64::Push(Register source) {
DCHECK(source.Is32Bits());
DCHECK(!source.is(backtrack_stackpointer()));
DCHECK_NE(source, backtrack_stackpointer());
__ Str(source,
MemOperand(backtrack_stackpointer(),
-static_cast<int>(kWRegSize),
......@@ -1503,7 +1503,7 @@ void RegExpMacroAssemblerARM64::Push(Register source) {
void RegExpMacroAssemblerARM64::Pop(Register target) {
DCHECK(target.Is32Bits());
DCHECK(!target.is(backtrack_stackpointer()));
DCHECK_NE(target, backtrack_stackpointer());
__ Ldr(target,
MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
}
......@@ -1560,7 +1560,7 @@ void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
break;
case CACHED_LSW: {
Register cached_register = GetCachedRegister(register_index);
if (!source.Is(cached_register.W())) {
if (source != cached_register.W()) {
__ Bfi(cached_register, source.X(), 0, kWRegSizeInBits);
}
break;
......
......@@ -11818,27 +11818,27 @@ TEST(register_bit) {
// teardown.
// Simple tests.
CHECK(x0.bit() == (1ULL << 0));
CHECK(x1.bit() == (1ULL << 1));
CHECK(x10.bit() == (1ULL << 10));
CHECK_EQ(x0.bit(), 1ULL << 0);
CHECK_EQ(x1.bit(), 1ULL << 1);
CHECK_EQ(x10.bit(), 1ULL << 10);
// AAPCS64 definitions.
CHECK(fp.bit() == (1ULL << kFramePointerRegCode));
CHECK(lr.bit() == (1ULL << kLinkRegCode));
CHECK_EQ(fp.bit(), 1ULL << kFramePointerRegCode);
CHECK_EQ(lr.bit(), 1ULL << kLinkRegCode);
// Fixed (hardware) definitions.
CHECK(xzr.bit() == (1ULL << kZeroRegCode));
CHECK_EQ(xzr.bit(), 1ULL << kZeroRegCode);
// Internal ABI definitions.
CHECK(sp.bit() == (1ULL << kSPRegInternalCode));
CHECK(sp.bit() != xzr.bit());
CHECK_EQ(sp.bit(), 1ULL << kSPRegInternalCode);
CHECK_NE(sp.bit(), xzr.bit());
// xn.bit() == wn.bit() at all times, for the same n.
CHECK(x0.bit() == w0.bit());
CHECK(x1.bit() == w1.bit());
CHECK(x10.bit() == w10.bit());
CHECK(xzr.bit() == wzr.bit());
CHECK(sp.bit() == wsp.bit());
CHECK_EQ(x0.bit(), w0.bit());
CHECK_EQ(x1.bit(), w1.bit());
CHECK_EQ(x10.bit(), w10.bit());
CHECK_EQ(xzr.bit(), wzr.bit());
CHECK_EQ(sp.bit(), wsp.bit());
}
TEST(peek_poke_simple) {
......@@ -12189,7 +12189,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
case 2: __ Pop(r[i], r[i+1]); break;
case 1: __ Pop(r[i]); break;
default:
CHECK(i == reg_count);
CHECK_EQ(i, reg_count);
break;
}
break;
......@@ -12338,7 +12338,7 @@ static void PushPopFPSimpleHelper(int reg_count, int reg_size,
case 2: __ Pop(v[i], v[i+1]); break;
case 1: __ Pop(v[i]); break;
default:
CHECK(i == reg_count);
CHECK_EQ(i, reg_count);
break;
}
break;
......@@ -12872,12 +12872,12 @@ TEST(copy_noop) {
TEST(noreg) {
// This test doesn't generate any code, but it verifies some invariants
// related to NoReg.
CHECK(NoReg.Is(NoVReg));
CHECK(NoVReg.Is(NoReg));
CHECK(NoReg.Is(NoCPUReg));
CHECK(NoCPUReg.Is(NoReg));
CHECK(NoVReg.Is(NoCPUReg));
CHECK(NoCPUReg.Is(NoVReg));
CHECK_EQ(NoReg, NoVReg);
CHECK_EQ(NoVReg, NoReg);
CHECK_EQ(NoReg, NoCPUReg);
CHECK_EQ(NoCPUReg, NoReg);
CHECK_EQ(NoVReg, NoCPUReg);
CHECK_EQ(NoCPUReg, NoVReg);
CHECK(NoReg.IsNone());
CHECK(NoVReg.IsNone());
......@@ -13497,10 +13497,10 @@ TEST(cpureglist_utils_x) {
CHECK(!test.IsEmpty());
CHECK(test.type() == x0.type());
CHECK_EQ(test.type(), x0.type());
CHECK(test.PopHighestIndex().Is(x3));
CHECK(test.PopLowestIndex().Is(x0));
CHECK_EQ(test.PopHighestIndex(), x3);
CHECK_EQ(test.PopLowestIndex(), x0);
CHECK(test.IncludesAliasOf(x1));
CHECK(test.IncludesAliasOf(x2));
......@@ -13511,8 +13511,8 @@ TEST(cpureglist_utils_x) {
CHECK(!test.IncludesAliasOf(w0));
CHECK(!test.IncludesAliasOf(w3));
CHECK(test.PopHighestIndex().Is(x2));
CHECK(test.PopLowestIndex().Is(x1));
CHECK_EQ(test.PopHighestIndex(), x2);
CHECK_EQ(test.PopLowestIndex(), x1);
CHECK(!test.IncludesAliasOf(x1));
CHECK(!test.IncludesAliasOf(x2));
......@@ -13562,10 +13562,10 @@ TEST(cpureglist_utils_w) {
CHECK(!test.IsEmpty());
CHECK(test.type() == w10.type());
CHECK_EQ(test.type(), w10.type());
CHECK(test.PopHighestIndex().Is(w13));
CHECK(test.PopLowestIndex().Is(w10));
CHECK_EQ(test.PopHighestIndex(), w13);
CHECK_EQ(test.PopLowestIndex(), w10);
CHECK(test.IncludesAliasOf(x11));
CHECK(test.IncludesAliasOf(x12));
......@@ -13576,8 +13576,8 @@ TEST(cpureglist_utils_w) {
CHECK(!test.IncludesAliasOf(w10));
CHECK(!test.IncludesAliasOf(w13));
CHECK(test.PopHighestIndex().Is(w12));
CHECK(test.PopLowestIndex().Is(w11));
CHECK_EQ(test.PopHighestIndex(), w12);
CHECK_EQ(test.PopLowestIndex(), w11);
CHECK(!test.IncludesAliasOf(x11));
CHECK(!test.IncludesAliasOf(x12));
......@@ -13628,10 +13628,10 @@ TEST(cpureglist_utils_d) {
CHECK(!test.IsEmpty());
CHECK(test.type() == d20.type());
CHECK_EQ(test.type(), d20.type());
CHECK(test.PopHighestIndex().Is(d23));
CHECK(test.PopLowestIndex().Is(d20));
CHECK_EQ(test.PopHighestIndex(), d23);
CHECK_EQ(test.PopLowestIndex(), d20);
CHECK(test.IncludesAliasOf(d21));
CHECK(test.IncludesAliasOf(d22));
......@@ -13642,8 +13642,8 @@ TEST(cpureglist_utils_d) {
CHECK(!test.IncludesAliasOf(s20));
CHECK(!test.IncludesAliasOf(s23));
CHECK(test.PopHighestIndex().Is(d22));
CHECK(test.PopLowestIndex().Is(d21));
CHECK_EQ(test.PopHighestIndex(), d22);
CHECK_EQ(test.PopLowestIndex(), d21);
CHECK(!test.IncludesAliasOf(d21));
CHECK(!test.IncludesAliasOf(d22));
......@@ -14700,11 +14700,11 @@ TEST(pool_size) {
for (RelocIterator it(*code, pool_mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (RelocInfo::IsConstPool(info->rmode())) {
CHECK(info->data() == constant_pool_size);
CHECK_EQ(info->data(), constant_pool_size);
++pool_count;
}
if (RelocInfo::IsVeneerPool(info->rmode())) {
CHECK(info->data() == veneer_pool_size);
CHECK_EQ(info->data(), veneer_pool_size);
++pool_count;
}
}
......
......@@ -289,7 +289,7 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
if (reg_list & (1ULL << i)) {
Register xn = Register::Create(i, kXRegSizeInBits);
// We should never write into sp here.
CHECK(!xn.Is(sp));
CHECK_NE(xn, sp);
if (!xn.IsZero()) {
if (!first.is_valid()) {
// This is the first register we've hit, so construct the literal.
......
......@@ -121,7 +121,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
Register dst_slot = test_case.dst_slot;
Operand offset_operand(0);
if (test_case.offset_register.Is(no_reg)) {
if (test_case.offset_register == no_reg) {
offset_operand = Operand(offset);
} else {
__ Mov(test_case.offset_register, Operand(offset));
......@@ -131,7 +131,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
std::stringstream comment;
comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
<< dst_object << ", " << dst_slot << ", " << src_object << ", ";
if (test_case.offset_register.Is(no_reg)) {
if (test_case.offset_register == no_reg) {
comment << "#" << offset;
} else {
comment << test_case.offset_register;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment