Commit c37f439b authored by dusan.milosavljevic, committed by Commit bot

MIPS: Fix long branch mode and FPU branches.

- Add long branch mode for FPU branches.
- Fix FPU branches for unordered conditions.
- Provide a negation schema for FPU [un]ordered conditions.

TEST=mjsunit/mirror-objects, constant-folding-2, external-array
BUG=

Review URL: https://codereview.chromium.org/1120753010

Cr-Commit-Position: refs/heads/master@{#28241}
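
Background on the unordered-condition fix: IEEE 754 makes every ordered comparison false when either operand is NaN, so negating an FPU condition cannot simply invert the predicate; the negation of "less than" is "unordered or greater-or-equal". A minimal standalone sketch of that property (the lt_/uge_ helpers are illustrative stand-ins, not V8 code):

#include <cassert>
#include <cmath>

// Sketch: !(a < b) is "a >= b OR unordered", which is why
// NegateFpuCondition(lt) must yield uge rather than ge.
bool lt_(double a, double b) { return a < b; }      // ordered less-than
bool uge_(double a, double b) { return !(a < b); }  // unordered or greater-equal

int main() {
  double nan = std::nan("");
  assert(!lt_(nan, 1.0) && uge_(nan, 1.0));  // unordered: lt false, uge true
  assert(lt_(0.0, 1.0) && !uge_(0.0, 1.0));  // ordered: behaves like >=
  return 0;
}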
parent 2a86d26f
......@@ -555,9 +555,9 @@ enum SecondaryField {
// ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditionnal branch instructions.
// On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons.
// Oppposite conditions must be paired as odd/even numbers
// Opposite conditions must be paired as odd/even numbers
// because 'NegateCondition' function flips LSB to negate condition.
enum Condition {
// Any value < 0 is considered no_condition.
......@@ -566,10 +566,10 @@ enum Condition {
no_overflow = 1,
Uless = 2,
Ugreater_equal = 3,
equal = 4,
not_equal = 5,
Uless_equal = 6,
Ugreater = 7,
Uless_equal = 4,
Ugreater = 5,
equal = 6,
not_equal = 7, // Unordered or Not Equal.
negative = 8,
positive = 9,
parity_even = 10,
......@@ -579,7 +579,7 @@ enum Condition {
less_equal = 14,
greater = 15,
ueq = 16, // Unordered or Equal.
nue = 17, // Not (Unordered or Equal).
ogl = 17, // Ordered and Not Equal.
cc_always = 18,
// Aliases.
......@@ -603,6 +603,10 @@ enum Condition {
hs = Ugreater_equal,
lo = Uless,
al = cc_always,
ult = Uless,
uge = Ugreater_equal,
ule = Uless_equal,
ugt = Ugreater,
cc_default = kNoCondition
};
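
As the comment above says, opposite conditions are paired as odd/even values so that NegateCondition can flip just the least-significant bit; the reordering (Uless_equal/Ugreater to 4/5, equal/not_equal to 6/7) keeps that invariant for the new FPU pairs. A hedged sketch of the invariant with the reordered values (a miniature re-declaration for illustration, not the real header):

#include <cassert>

enum Condition { Uless_equal = 4, Ugreater = 5, equal = 6, not_equal = 7 };

// NegateCondition is essentially an LSB flip; restated here as a sketch.
inline Condition Negate(Condition cc) { return static_cast<Condition>(cc ^ 1); }

int main() {
  assert(Negate(equal) == not_equal);       // 6 ^ 1 == 7
  assert(Negate(Uless_equal) == Ugreater);  // 4 ^ 1 == 5
  return 0;
}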
......@@ -617,6 +621,39 @@ inline Condition NegateCondition(Condition cc) {
}
inline Condition NegateFpuCondition(Condition cc) {
DCHECK(cc != cc_always);
switch (cc) {
case ult:
return ge;
case ugt:
return le;
case uge:
return lt;
case ule:
return gt;
case lt:
return uge;
case gt:
return ule;
case ge:
return ult;
case le:
return ugt;
case eq:
return ne;
case ne:
return eq;
case ueq:
return ogl;
case ogl:
return ueq;
default:
return cc;
}
}
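
The ueq/ogl pair handled at the end of the switch can be sanity-checked the same way: "unordered or equal" and "ordered and not equal" (renamed from nue in this CL) partition every input pair, including NaNs, which is what makes the ueq <-> ogl mapping a valid negation. A hedged standalone check (the lambdas are plain-C++ encodings of the MIPS predicates, not V8 code):

#include <cassert>
#include <cmath>

int main() {
  // ueq: unordered or equal; ogl: ordered and not equal.
  auto ueq = [](double a, double b) { return a != a || b != b || a == b; };
  auto ogl = [](double a, double b) { return a < b || a > b; };
  const double vals[] = {-1.0, 0.0, 1.0, std::nan("")};
  for (double a : vals)
    for (double b : vals) assert(ueq(a, b) != ogl(a, b));  // exact complements
  return 0;
}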
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) {
switch (cc) {
......
......@@ -268,12 +268,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
__ sll(a1, a1, kPointerSizeLog2); // Count to offset.
__ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
__ jmp(&outer_loop_header);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ lw(a2, MemOperand(t0, 0)); // output_[ix]
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop);
__ Subu(a3, a3, Operand(sizeof(uint32_t)));
__ Addu(t2, a2, Operand(a3));
......@@ -343,7 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
......@@ -361,20 +361,20 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, - i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
// Entry with id == kMaxEntriesBranchReach - 1.
__ bind(&trampoline_jump);
__ Branch(USE_DELAY_SLOT, &done_special);
__ BranchShort(USE_DELAY_SLOT, &done_special);
__ li(at, -1);
for (int i = kMaxEntriesBranchReach ; i < count(); i++) {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
}
......
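Why the jmp -> BranchShort changes above matter: each deopt table entry must occupy exactly table_entry_size_ bytes (the DCHECK_EQ after each entry enforces this), because the deoptimizer locates an entry purely by arithmetic. A Branch that silently expanded into the multi-instruction long form would change the entry pitch; BranchShort guarantees the single-instruction encoding. A hedged sketch of the indexing invariant (illustrative helper, not V8's actual API):

#include <cstdint>

// Sketch: with a fixed entry pitch, entry id lives at a computable address,
// so no branch inside an entry may grow beyond its budgeted size.
uintptr_t EntryAddress(uintptr_t table_start, int id, int table_entry_size) {
  return table_start + static_cast<uintptr_t>(id) * table_entry_size;
}

int main() { return EntryAddress(0x1000, 3, 12) == 0x1000 + 36 ? 0 : 1; }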
......@@ -2104,7 +2104,7 @@ void LCodeGen::EmitBranchF(InstrType instr,
EmitGoto(left_block);
} else if (left_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
NegateCondition(condition), src1, src2);
NegateFpuCondition(condition), src1, src2);
} else if (right_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
condition, src1, src2);
......@@ -2152,7 +2152,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DCHECK(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
EmitBranchF(instr, nue, reg, kDoubleRegZero);
EmitBranchF(instr, ogl, reg, kDoubleRegZero);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
......@@ -2172,7 +2172,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DoubleRegister dbl_scratch = double_scratch0();
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
__ lw(at, FieldMemOperand(reg, String::kLengthOffset));
......
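About the nue -> ogl branches above: testing a double against kDoubleRegZero with ogl ("ordered and not equal") makes a value truthy exactly when it is a real nonzero number, so both +/-0 and NaN fall through to the false block, matching the "Zero and NaN are false" comments. A hedged plain-C++ stand-in for that FPU compare:

#include <cassert>
#include <cmath>

// ogl against zero: true iff v is ordered (not NaN) and v != 0.
bool OglAgainstZero(double v) { return v < 0.0 || v > 0.0; }

int main() {
  assert(OglAgainstZero(1.5) && OglAgainstZero(-2.0));
  assert(!OglAgainstZero(0.0) && !OglAgainstZero(-0.0));
  assert(!OglAgainstZero(std::nan("")));  // NaN is falsy, as the comment says
  return 0;
}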
......@@ -1430,33 +1430,80 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cc, FPURegister cmp1,
Label* nan, Condition cond, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cc == al) {
Branch(bd, target);
return;
}
{
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond == al) {
Branch(bd, target);
return;
}
if (IsMipsArchVariant(kMips32r6)) {
sizeField = sizeField == D ? L : W;
}
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
if (!IsMipsArchVariant(kMips32r6)) {
c(UN, D, cmp1, cmp2);
bc1t(nan);
} else {
// Use kDoubleCompareReg for comparison result. It has to be unavailable
// to lithium register allocator.
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (IsMipsArchVariant(kMips32r6)) {
sizeField = sizeField == D ? L : W;
}
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
bool long_branch =
nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
if (!IsMipsArchVariant(kMips32r6)) {
if (long_branch) {
Label skip;
c(UN, D, cmp1, cmp2);
bc1f(&skip);
nop();
Jr(nan, bd);
bind(&skip);
} else {
c(UN, D, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
}
}
} else {
// Use kDoubleCompareReg for comparison result. It has to be unavailable
// to lithium register allocator.
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
Jr(nan, bd);
bind(&skip);
} else {
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
}
}
}
}
if (target) {
bool long_branch =
target->is_bound() ? is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
Jr(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
}
}
}
}
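
The long_branch paths above all use the classic branch-over-jump idiom: emit a short branch on the negated condition that skips a following unconditional jump (Jr), which has full range. For FPU conditions the negation must come from NegateFpuCondition so that NaN operands still reach the far target whenever the original condition would have. A hedged control-flow sketch of why the rewrite is sound (plain C++, with ! standing in for NegateFpuCondition):

#include <cassert>

// Sketch: "if (cond) goto far" rewritten as
//   "if (neg_cond) goto skip; goto far; skip:".
// The rewrite preserves semantics iff neg_cond is the exact complement.
bool TakesFarTarget(bool cond) {
  bool neg_cond = !cond;       // must be NegateFpuCondition(cond) for floats
  if (neg_cond) return false;  // short branch over the jump
  return true;                 // unconditional Jr to the far target
}

int main() {
  assert(TakesFarTarget(true) && !TakesFarTarget(false));
  return 0;
}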
void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
......@@ -1465,18 +1512,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(OLT, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ult:
c(ULT, sizeField, cmp1, cmp2);
bc1t(target);
break;
case gt:
c(ULE, sizeField, cmp1, cmp2);
bc1f(target);
break;
case ugt:
c(OLE, sizeField, cmp1, cmp2);
bc1f(target);
break;
case ge:
c(ULT, sizeField, cmp1, cmp2);
bc1f(target);
break;
case uge:
c(OLT, sizeField, cmp1, cmp2);
bc1f(target);
break;
case le:
c(OLE, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ule:
c(ULE, sizeField, cmp1, cmp2);
bc1t(target);
break;
case eq:
c(EQ, sizeField, cmp1, cmp2);
bc1t(target);
......@@ -1485,11 +1548,11 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(UEQ, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ne:
case ne: // Unordered or not equal.
c(EQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
case nue:
case ogl:
c(UEQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
......@@ -1498,6 +1561,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
}
}
} else {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
......@@ -1510,18 +1574,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case ult:
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case gt:
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case ugt:
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case ge:
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case uge:
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case le:
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case ule:
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case eq:
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
......@@ -1534,7 +1614,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case nue:
case ogl:
cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
......@@ -1543,7 +1623,6 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
}
}
}
if (bd == PROTECT) {
nop();
}
......
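The switch tables above encode each abstract condition as a native FPU compare plus a branch polarity: lt/ult/le/ule/eq/ueq have a direct predicate and branch on true, while gt/ugt/ge/uge/ne/ogl reuse the complementary predicate and branch on false (e.g. gt is the complement of ULE, so unordered inputs correctly fall through). On r6, c(...)/bc1t/bc1f is replaced by cmp(...) into kDoubleCompareReg plus bc1nez/bc1eqz with the same polarity. A hedged C++ transcription of the pre-r6 table for reference:

#include <cstdio>

struct FpuBranchEncoding { const char* cond; const char* cmp; bool on_true; };

// Values transcribed from the BranchShortF switch above.
const FpuBranchEncoding kPreR6[] = {
    {"lt", "OLT", true},   {"ult", "ULT", true},
    {"gt", "ULE", false},  {"ugt", "OLE", false},
    {"ge", "ULT", false},  {"uge", "OLT", false},
    {"le", "OLE", true},   {"ule", "ULE", true},
    {"eq", "EQ", true},    {"ne", "EQ", false},  // ne: unordered or not equal
    {"ueq", "UEQ", true},  {"ogl", "UEQ", false},
};

int main() {
  for (const FpuBranchEncoding& e : kPreR6)
    std::printf("%-3s -> c(%s); %s\n", e.cond, e.cmp,
                e.on_true ? "bc1t" : "bc1f");
  return 0;
}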
......@@ -1641,6 +1641,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
......
......@@ -580,10 +580,10 @@ enum Condition {
no_overflow = 1,
Uless = 2,
Ugreater_equal = 3,
equal = 4,
not_equal = 5,
Uless_equal = 6,
Ugreater = 7,
Uless_equal = 4,
Ugreater = 5,
equal = 6,
not_equal = 7, // Unordered or Not Equal.
negative = 8,
positive = 9,
parity_even = 10,
......@@ -593,7 +593,7 @@ enum Condition {
less_equal = 14,
greater = 15,
ueq = 16, // Unordered or Equal.
nue = 17, // Not (Unordered or Equal).
ogl = 17, // Ordered and Not Equal.
cc_always = 18,
// Aliases.
......@@ -617,6 +617,10 @@ enum Condition {
hs = Ugreater_equal,
lo = Uless,
al = cc_always,
ult = Uless,
uge = Ugreater_equal,
ule = Uless_equal,
ugt = Ugreater,
cc_default = kNoCondition
};
......@@ -631,6 +635,39 @@ inline Condition NegateCondition(Condition cc) {
}
inline Condition NegateFpuCondition(Condition cc) {
DCHECK(cc != cc_always);
switch (cc) {
case ult:
return ge;
case ugt:
return le;
case uge:
return lt;
case ule:
return gt;
case lt:
return uge;
case gt:
return ule;
case ge:
return ult;
case le:
return ugt;
case eq:
return ne;
case ne:
return eq;
case ueq:
return ogl;
case ogl:
return ueq;
default:
return cc;
}
}
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) {
switch (cc) {
......
......@@ -272,12 +272,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ dsll(a1, a1, kPointerSizeLog2); // Count to offset.
__ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**.
__ jmp(&outer_loop_header);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ ld(a2, MemOperand(a4, 0)); // output_[ix]
__ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop);
__ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
__ Daddu(a6, a2, Operand(a3));
......@@ -347,7 +347,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
......@@ -365,13 +365,13 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, -i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
// Entry with id == kMaxEntriesBranchReach - 1.
__ bind(&trampoline_jump);
__ Branch(USE_DELAY_SLOT, &done_special);
__ BranchShort(USE_DELAY_SLOT, &done_special);
__ li(at, -1);
for (int i = kMaxEntriesBranchReach; i < count(); i++) {
......
......@@ -2100,7 +2100,7 @@ void LCodeGen::EmitBranchF(InstrType instr,
EmitGoto(left_block);
} else if (left_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
NegateCondition(condition), src1, src2);
NegateFpuCondition(condition), src1, src2);
} else if (right_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
condition, src1, src2);
......@@ -2148,7 +2148,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DCHECK(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
EmitBranchF(instr, nue, reg, kDoubleRegZero);
EmitBranchF(instr, ogl, reg, kDoubleRegZero);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
......@@ -2168,7 +2168,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DoubleRegister dbl_scratch = double_scratch0();
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
__ ld(at, FieldMemOperand(reg, String::kLengthOffset));
......
......@@ -1629,10 +1629,10 @@ void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cc, FPURegister cmp1,
Label* nan, Condition cond, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cc == al) {
if (cond == al) {
Branch(bd, target);
return;
}
......@@ -1640,22 +1640,69 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (kArchVariant == kMips64r6) {
sizeField = sizeField == D ? L : W;
}
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
if (kArchVariant != kMips64r6) {
c(UN, D, cmp1, cmp2);
bc1t(nan);
if (long_branch) {
Label skip;
c(UN, D, cmp1, cmp2);
bc1f(&skip);
nop();
Jr(nan, bd);
bind(&skip);
} else {
c(UN, D, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
}
}
} else {
// Use f31 for comparison result. It has to be unavailable to lithium
// Use kDoubleCompareReg for comparison result. It has to be unavailable
// to lithium register allocator.
DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
cmp(UN, L, f31, cmp1, cmp2);
bc1nez(nan, f31);
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
Jr(nan, bd);
bind(&skip);
} else {
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
}
}
}
}
if (target) {
bool long_branch =
target->is_bound() ? is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
Jr(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
}
}
}
void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
if (kArchVariant != kMips64r6) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
......@@ -1664,18 +1711,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(OLT, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ult:
c(ULT, sizeField, cmp1, cmp2);
bc1t(target);
break;
case gt:
c(ULE, sizeField, cmp1, cmp2);
bc1f(target);
break;
case ugt:
c(OLE, sizeField, cmp1, cmp2);
bc1f(target);
break;
case ge:
c(ULT, sizeField, cmp1, cmp2);
bc1f(target);
break;
case uge:
c(OLT, sizeField, cmp1, cmp2);
bc1f(target);
break;
case le:
c(OLE, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ule:
c(ULE, sizeField, cmp1, cmp2);
bc1t(target);
break;
case eq:
c(EQ, sizeField, cmp1, cmp2);
bc1t(target);
......@@ -1684,11 +1747,11 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(UEQ, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ne:
case ne: // Unordered or not equal.
c(EQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
case nue:
case ogl:
c(UEQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
......@@ -1697,44 +1760,62 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
}
}
} else {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
// Unsigned conditions are treated as their signed counterpart.
// Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
// Use kDoubleCompareReg for comparison result, it is valid in fp64 (FR = 1) mode.
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
switch (cc) {
case lt:
cmp(OLT, sizeField, f31, cmp1, cmp2);
bc1nez(target, f31);
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case ult:
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case gt:
cmp(ULE, sizeField, f31, cmp1, cmp2);
bc1eqz(target, f31);
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case ugt:
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case ge:
cmp(ULT, sizeField, f31, cmp1, cmp2);
bc1eqz(target, f31);
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case uge:
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case le:
cmp(OLE, sizeField, f31, cmp1, cmp2);
bc1nez(target, f31);
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case ule:
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case eq:
cmp(EQ, sizeField, f31, cmp1, cmp2);
bc1nez(target, f31);
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case ueq:
cmp(UEQ, sizeField, f31, cmp1, cmp2);
bc1nez(target, f31);
cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case ne:
cmp(EQ, sizeField, f31, cmp1, cmp2);
bc1eqz(target, f31);
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case nue:
cmp(UEQ, sizeField, f31, cmp1, cmp2);
bc1eqz(target, f31);
case ogl:
cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
default:
CHECK(0);
......
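A note on kDoubleCompareReg in the r6 paths above: r6 removed the FPU condition-code flags, so cmp.cond.fmt writes its result (all ones for true, all zeros for false) into a destination FPU register, and bc1nez/bc1eqz branch on that register; this is why the code reserves kDoubleCompareReg (f31, valid in fp64/FR=1 mode) and DCHECKs that neither comparison operand aliases it. A hedged model of the convention (a uint64_t stands in for the FPU register):

#include <cassert>
#include <cstdint>

// Sketch of the r6 compare result: an all-ones/all-zeros mask in the
// destination register, consumed by bc1nez (nonzero) or bc1eqz (zero).
uint64_t CmpMask(bool predicate_holds) {
  return predicate_holds ? ~uint64_t{0} : uint64_t{0};
}

int main() {
  assert(CmpMask(true) != 0);   // bc1nez takes the branch
  assert(CmpMask(false) == 0);  // bc1eqz takes the branch
  return 0;
}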
......@@ -1710,6 +1710,11 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
......