Commit 9a882c7e authored by Ivica Bogosavljevic, committed by Commit Bot

MIPS[64]: Rewrite floating point conditional branches

Summary of work done:
* Simplify complicated CompareAndBranch instructions
* Mark BC1EQZ and BC1NEZ as compact branches in simulator
* Remove unneeded nops for BC1EQZ and BC1NEZ
* Block trampolines for delay slot of BC1T and BC1F

Change-Id: I86f3a497e6e9ac73075bb51653c2d4c6ab8454b9
Reviewed-on: https://chromium-review.googlesource.com/986260
Commit-Queue: Ivica Bogosavljevic <ivica.bogosavljevic@mips.com>
Reviewed-by: Sreten Kovacevic <sreten.kovacevic@mips.com>
Cr-Commit-Position: refs/heads/master@{#52324}
parent 4b46522a
......@@ -331,13 +331,13 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
return OLT;
case kUnsignedGreaterThanOrEqual:
predicate = false;
return ULT;
return OLT;
case kUnsignedLessThanOrEqual:
predicate = true;
return OLE;
case kUnsignedGreaterThan:
predicate = false;
return ULE;
return OLE;
case kUnorderedEqual:
case kUnorderedNotEqual:
predicate = true;
......@@ -349,6 +349,11 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
// Reports an unsupported opcode/condition pair on stdout and aborts via
// UNIMPLEMENTED(). Wrapped in do/while(0) so the macro expands to exactly
// one statement: the OFStream is scoped locally (no name leakage into the
// caller's scope) and the macro is safe inside unbraced if/else branches.
#define UNSUPPORTED_COND(opcode, condition)                                  \
  do {                                                                       \
    OFStream out(stdout);                                                    \
    out << "Unsupported " << #opcode << " condition: \"" << condition        \
        << "\"";                                                             \
    UNIMPLEMENTED();                                                         \
  } while (0)
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter& i) {
......@@ -1185,9 +1190,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputInt8(2));
break;
case kMipsCmpS:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMipsCmpS: {
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
bool predicate;
FPUCondition cc =
FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ CompareF32(cc, left, right);
} break;
case kMipsAddS:
// TODO(plind): add special case: combine mult & add.
__ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......@@ -1235,9 +1251,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsCmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMipsCmpD: {
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
bool predicate;
FPUCondition cc =
FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ CompareF64(cc, left, right);
} break;
case kMipsAddPair:
__ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
......@@ -2838,37 +2863,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} // NOLINT(readability/fn_size)
// Reports an unsupported opcode/condition pair on stdout and aborts via
// UNIMPLEMENTED(). NOTE(review): this expands to multiple statements and
// declares a local `out`, so it must only be used inside its own braced
// block — consider a do/while(0) wrapper.
#define UNSUPPORTED_COND(opcode, condition) \
  OFStream out(stdout); \
  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
  UNIMPLEMENTED();
// Maps a TurboFan FlagsCondition onto the MIPS branch Condition used by the
// floating-point compare/branch code. On success, stores the mapping in
// |cc| and returns true; returns false for conditions with no mapping.
static bool convertCondition(FlagsCondition condition, Condition& cc) {
  bool supported = true;
  switch (condition) {
    case kEqual:
      cc = eq;
      break;
    case kNotEqual:
      cc = ne;
      break;
    case kUnsignedLessThan:
      cc = lt;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = uge;
      break;
    case kUnsignedLessThanOrEqual:
      cc = le;
      break;
    case kUnsignedGreaterThan:
      cc = ugt;
      break;
    default:
      supported = false;
      break;
  }
  return supported;
}
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
......@@ -2917,28 +2911,15 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kMipsCmpS) {
if (!convertCondition(condition, cc)) {
UNSUPPORTED_COND(kMips64CmpS, condition);
}
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMipsCmpD) {
if (!convertCondition(condition, cc)) {
UNSUPPORTED_COND(kMips64CmpD, condition);
}
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
} else if (instr->arch_opcode() == kMipsCmpS ||
instr->arch_opcode() == kMipsCmpD) {
bool predicate;
FlagsConditionToConditionCmpFPU(predicate, condition);
if (predicate) {
__ BranchTrueF(tlabel);
} else {
__ BranchFalseF(tlabel);
}
__ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
......@@ -3226,27 +3207,15 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Move(kDoubleRegZero, 0.0);
}
bool predicate;
FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
FlagsConditionToConditionCmpFPU(predicate, condition);
if (!IsMipsArchVariant(kMips32r6)) {
__ li(result, Operand(1));
if (instr->arch_opcode() == kMipsCmpD) {
__ c(cc, D, left, right);
} else {
DCHECK_EQ(kMipsCmpS, instr->arch_opcode());
__ c(cc, S, left, right);
}
if (predicate) {
__ Movf(result, zero_reg);
} else {
__ Movt(result, zero_reg);
}
} else {
if (instr->arch_opcode() == kMipsCmpD) {
__ cmp(cc, L, kDoubleCompareReg, left, right);
} else {
DCHECK_EQ(kMipsCmpS, instr->arch_opcode());
__ cmp(cc, W, kDoubleCompareReg, left, right);
}
__ mfc1(result, kDoubleCompareReg);
if (predicate) {
__ And(result, result, 1); // cmp returns all 1's/0's, use only LSB.
......
......@@ -344,13 +344,13 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
return OLT;
case kUnsignedGreaterThanOrEqual:
predicate = false;
return ULT;
return OLT;
case kUnsignedLessThanOrEqual:
predicate = true;
return OLE;
case kUnsignedGreaterThan:
predicate = false;
return ULE;
return OLE;
case kUnorderedEqual:
case kUnorderedNotEqual:
predicate = true;
......@@ -1331,9 +1331,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kMips64CmpS:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMips64CmpS: {
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
bool predicate;
FPUCondition cc =
FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ CompareF32(cc, left, right);
} break;
case kMips64AddS:
// TODO(plind): add special case: combine mult & add.
__ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......@@ -1385,9 +1396,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64CmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMips64CmpD: {
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
bool predicate;
FPUCondition cc =
FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ CompareF64(cc, left, right);
} break;
case kMips64AddD:
// TODO(plind): add special case: combine mult & add.
__ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......@@ -3061,31 +3081,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
UNIMPLEMENTED();
// Translates a TurboFan FlagsCondition into the MIPS branch Condition used
// for floating-point compare/branch sequences. Writes the result through
// |cc| and reports via the return value whether the condition is supported.
static bool convertCondition(FlagsCondition condition, Condition& cc) {
  bool converted = true;
  switch (condition) {
    case kEqual:
      cc = eq;
      break;
    case kNotEqual:
      cc = ne;
      break;
    case kUnsignedLessThan:
      cc = lt;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = uge;
      break;
    case kUnsignedLessThanOrEqual:
      cc = le;
      break;
    case kUnsignedGreaterThan:
      cc = ugt;
      break;
    default:
      converted = false;
      break;
  }
  return converted;
}
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
......@@ -3141,28 +3136,15 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kMips64CmpS) {
if (!convertCondition(condition, cc)) {
UNSUPPORTED_COND(kMips64CmpS, condition);
}
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMips64CmpD) {
if (!convertCondition(condition, cc)) {
UNSUPPORTED_COND(kMips64CmpD, condition);
}
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
} else if (instr->arch_opcode() == kMips64CmpS ||
instr->arch_opcode() == kMips64CmpD) {
bool predicate;
FlagsConditionToConditionCmpFPU(predicate, condition);
if (predicate) {
__ BranchTrueF(tlabel);
} else {
__ BranchFalseF(tlabel);
}
__ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
......@@ -3477,15 +3459,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Move(kDoubleRegZero, 0.0);
}
bool predicate;
FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
FlagsConditionToConditionCmpFPU(predicate, condition);
if (kArchVariant != kMips64r6) {
__ li(result, Operand(1));
if (instr->arch_opcode() == kMips64CmpD) {
__ c(cc, D, left, right);
} else {
DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
__ c(cc, S, left, right);
}
if (predicate) {
__ Movf(result, zero_reg);
} else {
......@@ -3493,11 +3469,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
} else {
if (instr->arch_opcode() == kMips64CmpD) {
__ cmp(cc, L, kDoubleCompareReg, left, right);
__ dmfc1(result, kDoubleCompareReg);
} else {
DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
__ cmp(cc, W, kDoubleCompareReg, left, right);
__ mfc1(result, kDoubleCompareReg);
}
if (predicate) {
......
......@@ -3038,14 +3038,14 @@ void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
// BC1EQZ: branch if bit 0 of FPR |ft| is zero (MIPS32r6 only). The source
// text carried two emit() calls (old and new diff lines interleaved),
// which would emit the instruction twice; only the compact-branch emit is
// kept, since BC1EQZ has no delay slot.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr, CompactBranchType::COMPACT_BRANCH);
}
// BC1NEZ: branch if bit 0 of FPR |ft| is non-zero (MIPS32r6 only). The
// duplicated emit() from the interleaved diff is removed; the instruction
// is emitted once, flagged as a compact branch (no delay slot).
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr, CompactBranchType::COMPACT_BRANCH);
}
......@@ -3083,16 +3083,20 @@ void Assembler::fcmp(FPURegister src1, const double src2,
// BC1F: branch if FCSR condition bit |cc| is false. The trampoline pool is
// blocked for the branch and, via BlockTrampolinePoolFor(1), for its delay
// slot, so no pool can be inserted between them.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  // cc (3 bits) selects the FCSR condition bit; the 0 in bit 16 encodes
  // "branch on false" (contrast bc1t, which places a 1 there).
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
// BC1T: branch if FCSR condition bit |cc| is true. The trampoline pool is
// blocked across the branch and its delay slot so no pool code can be
// emitted between them.
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  // cc (3 bits) selects the FCSR condition bit; the 1 in bit 16 encodes
  // "branch on true" (contrast bc1f, which places a 0 there).
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
// ---------- MSA instructions ------------
......
......@@ -264,7 +264,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ div_d(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ BranchF(&done, nullptr, ne, double_result, kDoubleRegZero);
__ CompareF64(EQ, double_result, kDoubleRegZero);
__ BranchFalseShortF(&done);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
......
This diff is collapsed.
......@@ -210,30 +210,30 @@ class TurboAssembler : public Assembler {
#undef COND_TYPED_ARGS
#undef COND_ARGS
// Wrapper functions for the different cmp/branch types.
inline void BranchF32(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT) {
BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
// Floating point branches
void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
CompareF(S, cc, cmp1, cmp2);
}
inline void BranchF64(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT) {
BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) {
CompareIsNanF(S, cmp1, cmp2);
}
// Alternate (inline) version for better readability with USE_DELAY_SLOT.
inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2) {
BranchF64(target, nan, cc, cmp1, cmp2, bd);
void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
CompareF(D, cc, cmp1, cmp2);
}
inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2) {
BranchF32(target, nan, cc, cmp1, cmp2, bd);
void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) {
CompareIsNanF(D, cmp1, cmp2);
}
void BranchTrueShortF(Label* target);
void BranchFalseShortF(Label* target);
void BranchTrueF(Label* target);
void BranchFalseF(Label* target);
// MSA Branches
void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
......@@ -807,17 +807,6 @@ class TurboAssembler : public Assembler {
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
// Alias functions for backward compatibility.
inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
BranchF64(target, nan, cc, cmp1, cmp2, bd);
}
inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2) {
BranchF64(bd, target, nan, cc, cmp1, cmp2);
}
// Compute the start of the generated instruction stream from the current PC.
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
......@@ -841,14 +830,11 @@ class TurboAssembler : public Assembler {
void CallCFunctionHelper(Register function_base, int16_t function_offset,
int num_reg_arguments, int num_double_arguments);
// Common implementation of BranchF functions for the different formats.
void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
FPURegister cmp2);
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
......
......@@ -6386,10 +6386,10 @@ void Simulator::DecodeTypeImmediate() {
break;
}
case BC1EQZ:
BranchHelper(!(get_fpu_register(ft_reg) & 0x1));
BranchCompactHelper(!(get_fpu_register(ft_reg) & 0x1), 16);
break;
case BC1NEZ:
BranchHelper(get_fpu_register(ft_reg) & 0x1);
BranchCompactHelper(get_fpu_register(ft_reg) & 0x1, 16);
break;
case BZ_V: {
msa_reg_t wt;
......
......@@ -3358,14 +3358,14 @@ void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
// BC1EQZ (MIPS64r6): branch if bit 0 of FPR |ft| is zero. The interleaved
// diff left both the old and new emit() calls, which would emit the
// instruction twice; only the compact-branch emit is kept (no delay slot).
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr, CompactBranchType::COMPACT_BRANCH);
}
// BC1NEZ (MIPS64r6): branch if bit 0 of FPR |ft| is non-zero. Duplicate
// emit() from the interleaved diff removed; the single emission is flagged
// as a compact branch (no delay slot).
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr, CompactBranchType::COMPACT_BRANCH);
}
......@@ -3404,16 +3404,20 @@ void Assembler::fcmp(FPURegister src1, const double src2,
// BC1F: branch if FCSR condition bit |cc| is false. The trampoline pool is
// blocked for the branch and, via BlockTrampolinePoolFor(1), for its delay
// slot, so no pool can be inserted between them.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  // cc (3 bits) selects the FCSR condition bit; the 0 in bit 16 encodes
  // "branch on false" (contrast bc1t, which places a 1 there).
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
// BC1T: branch if FCSR condition bit |cc| is true. The trampoline pool is
// blocked across the branch and its delay slot so no pool code can be
// emitted between them.
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  // cc (3 bits) selects the FCSR condition bit; the 1 in bit 16 encodes
  // "branch on true" (contrast bc1f, which places a 0 there).
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
// ---------- MSA instructions ------------
......
......@@ -263,7 +263,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ div_d(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ BranchF(&done, nullptr, ne, double_result, kDoubleRegZero);
__ CompareF64(EQ, double_result, kDoubleRegZero);
__ BranchFalseShortF(&done);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
......
This diff is collapsed.
......@@ -239,41 +239,30 @@ class TurboAssembler : public Assembler {
#undef COND_TYPED_ARGS
#undef COND_ARGS
// Wrapper functions for the different cmp/branch types.
inline void BranchF32(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT) {
BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
// Floating point branches
void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
CompareF(S, cc, cmp1, cmp2);
}
inline void BranchF64(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT) {
BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) {
CompareIsNanF(S, cmp1, cmp2);
}
// Alternate (inline) version for better readability with USE_DELAY_SLOT.
inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2) {
BranchF64(target, nan, cc, cmp1, cmp2, bd);
void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
CompareF(D, cc, cmp1, cmp2);
}
inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2) {
BranchF32(target, nan, cc, cmp1, cmp2, bd);
void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) {
CompareIsNanF(D, cmp1, cmp2);
}
// Alias functions for backward compatibility.
inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
BranchF64(target, nan, cc, cmp1, cmp2, bd);
}
void BranchTrueShortF(Label* target);
void BranchFalseShortF(Label* target);
inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2) {
BranchF64(bd, target, nan, cc, cmp1, cmp2);
}
void BranchTrueF(Label* target);
void BranchFalseF(Label* target);
// MSA branches
void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
......@@ -870,6 +859,12 @@ class TurboAssembler : public Assembler {
Handle<HeapObject> code_object_;
bool has_double_zero_reg_set_;
void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
FPURegister cmp2);
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
......@@ -880,15 +875,6 @@ class TurboAssembler : public Assembler {
bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
Register& scratch, const Operand& rt);
// Common implementation of BranchF functions for the different formats.
void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
......
......@@ -6625,10 +6625,10 @@ void Simulator::DecodeTypeImmediate() {
break;
}
case BC1EQZ:
BranchHelper(!(get_fpu_register(ft_reg) & 0x1));
BranchCompactHelper(!(get_fpu_register(ft_reg) & 0x1), 16);
break;
case BC1NEZ:
BranchHelper(get_fpu_register(ft_reg) & 0x1);
BranchCompactHelper((get_fpu_register(ft_reg) & 0x1), 16);
break;
case BZ_V: {
msa_reg_t wt;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment