Commit c37f439b authored by dusan.milosavljevic's avatar dusan.milosavljevic Committed by Commit bot

MIPS: Fix long branch mode and FPU branches.

- Add long branch mode for FPU branches.
- Fix FPU branches for unordered conditions.
- Provide FPU [un]ordered condition negation schema.

TEST=mjsunit/mirror-objects, constant-folding-2, external-array
BUG=

Review URL: https://codereview.chromium.org/1120753010

Cr-Commit-Position: refs/heads/master@{#28241}
parent 2a86d26f
...@@ -555,9 +555,9 @@ enum SecondaryField { ...@@ -555,9 +555,9 @@ enum SecondaryField {
// ----- Emulated conditions. // ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditionnal branch instructions. // On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons. // The 'U' prefix is used to specify unsigned comparisons.
// Oppposite conditions must be paired as odd/even numbers // Opposite conditions must be paired as odd/even numbers
// because 'NegateCondition' function flips LSB to negate condition. // because 'NegateCondition' function flips LSB to negate condition.
enum Condition { enum Condition {
// Any value < 0 is considered no_condition. // Any value < 0 is considered no_condition.
...@@ -566,10 +566,10 @@ enum Condition { ...@@ -566,10 +566,10 @@ enum Condition {
no_overflow = 1, no_overflow = 1,
Uless = 2, Uless = 2,
Ugreater_equal = 3, Ugreater_equal = 3,
equal = 4, Uless_equal = 4,
not_equal = 5, Ugreater = 5,
Uless_equal = 6, equal = 6,
Ugreater = 7, not_equal = 7, // Unordered or Not Equal.
negative = 8, negative = 8,
positive = 9, positive = 9,
parity_even = 10, parity_even = 10,
...@@ -579,7 +579,7 @@ enum Condition { ...@@ -579,7 +579,7 @@ enum Condition {
less_equal = 14, less_equal = 14,
greater = 15, greater = 15,
ueq = 16, // Unordered or Equal. ueq = 16, // Unordered or Equal.
nue = 17, // Not (Unordered or Equal). ogl = 17, // Ordered and Not Equal.
cc_always = 18, cc_always = 18,
// Aliases. // Aliases.
...@@ -603,6 +603,10 @@ enum Condition { ...@@ -603,6 +603,10 @@ enum Condition {
hs = Ugreater_equal, hs = Ugreater_equal,
lo = Uless, lo = Uless,
al = cc_always, al = cc_always,
ult = Uless,
uge = Ugreater_equal,
ule = Uless_equal,
ugt = Ugreater,
cc_default = kNoCondition cc_default = kNoCondition
}; };
...@@ -617,6 +621,39 @@ inline Condition NegateCondition(Condition cc) { ...@@ -617,6 +621,39 @@ inline Condition NegateCondition(Condition cc) {
} }
inline Condition NegateFpuCondition(Condition cc) {
DCHECK(cc != cc_always);
switch (cc) {
case ult:
return ge;
case ugt:
return le;
case uge:
return lt;
case ule:
return gt;
case lt:
return uge;
case gt:
return ule;
case ge:
return ult;
case le:
return ugt;
case eq:
return ne;
case ne:
return eq;
case ueq:
return ogl;
case ogl:
return ueq;
default:
return cc;
}
}
// Commute a condition such that {a cond b == b cond' a}. // Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) { inline Condition CommuteCondition(Condition cc) {
switch (cc) { switch (cc) {
......
...@@ -268,12 +268,12 @@ void Deoptimizer::TableEntryGenerator::Generate() { ...@@ -268,12 +268,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_. __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
__ sll(a1, a1, kPointerSizeLog2); // Count to offset. __ sll(a1, a1, kPointerSizeLog2); // Count to offset.
__ addu(a1, t0, a1); // a1 = one past the last FrameDescription**. __ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
__ jmp(&outer_loop_header); __ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop); __ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index. // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ lw(a2, MemOperand(t0, 0)); // output_[ix] __ lw(a2, MemOperand(t0, 0)); // output_[ix]
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset())); __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header); __ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop); __ bind(&inner_push_loop);
__ Subu(a3, a3, Operand(sizeof(uint32_t))); __ Subu(a3, a3, Operand(sizeof(uint32_t)));
__ Addu(t2, a2, Operand(a3)); __ Addu(t2, a2, Operand(a3));
...@@ -343,7 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -343,7 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start; Label start;
__ bind(&start); __ bind(&start);
DCHECK(is_int16(i)); DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &done); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot. __ li(at, i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
...@@ -361,20 +361,20 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -361,20 +361,20 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start; Label start;
__ bind(&start); __ bind(&start);
DCHECK(is_int16(i)); DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, - i); // In the delay slot. __ li(at, - i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
} }
// Entry with id == kMaxEntriesBranchReach - 1. // Entry with id == kMaxEntriesBranchReach - 1.
__ bind(&trampoline_jump); __ bind(&trampoline_jump);
__ Branch(USE_DELAY_SLOT, &done_special); __ BranchShort(USE_DELAY_SLOT, &done_special);
__ li(at, -1); __ li(at, -1);
for (int i = kMaxEntriesBranchReach ; i < count(); i++) { for (int i = kMaxEntriesBranchReach ; i < count(); i++) {
Label start; Label start;
__ bind(&start); __ bind(&start);
DCHECK(is_int16(i)); DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &done); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot. __ li(at, i); // In the delay slot.
} }
......
...@@ -2104,7 +2104,7 @@ void LCodeGen::EmitBranchF(InstrType instr, ...@@ -2104,7 +2104,7 @@ void LCodeGen::EmitBranchF(InstrType instr,
EmitGoto(left_block); EmitGoto(left_block);
} else if (left_block == next_block) { } else if (left_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL, __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
NegateCondition(condition), src1, src2); NegateFpuCondition(condition), src1, src2);
} else if (right_block == next_block) { } else if (right_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
condition, src1, src2); condition, src1, src2);
...@@ -2152,7 +2152,7 @@ void LCodeGen::DoBranch(LBranch* instr) { ...@@ -2152,7 +2152,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DCHECK(!info()->IsStub()); DCHECK(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value()); DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false. // Test the double value. Zero and NaN are false.
EmitBranchF(instr, nue, reg, kDoubleRegZero); EmitBranchF(instr, ogl, reg, kDoubleRegZero);
} else { } else {
DCHECK(r.IsTagged()); DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value()); Register reg = ToRegister(instr->value());
...@@ -2172,7 +2172,7 @@ void LCodeGen::DoBranch(LBranch* instr) { ...@@ -2172,7 +2172,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DoubleRegister dbl_scratch = double_scratch0(); DoubleRegister dbl_scratch = double_scratch0();
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false. // Test the double value. Zero and NaN are false.
EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero); EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
} else if (type.IsString()) { } else if (type.IsString()) {
DCHECK(!info()->IsStub()); DCHECK(!info()->IsStub());
__ lw(at, FieldMemOperand(reg, String::kLengthOffset)); __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
......
...@@ -1430,33 +1430,80 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) { ...@@ -1430,33 +1430,80 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cc, FPURegister cmp1, Label* nan, Condition cond, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) { FPURegister cmp2, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this); {
if (cc == al) { BlockTrampolinePoolScope block_trampoline_pool(this);
Branch(bd, target); if (cond == al) {
return; Branch(bd, target);
} return;
}
if (IsMipsArchVariant(kMips32r6)) { if (IsMipsArchVariant(kMips32r6)) {
sizeField = sizeField == D ? L : W; sizeField = sizeField == D ? L : W;
} }
DCHECK(nan || target); DCHECK(nan || target);
// Check for unordered (NaN) cases. // Check for unordered (NaN) cases.
if (nan) { if (nan) {
if (!IsMipsArchVariant(kMips32r6)) { bool long_branch =
c(UN, D, cmp1, cmp2); nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
bc1t(nan); if (!IsMipsArchVariant(kMips32r6)) {
} else { if (long_branch) {
// Use kDoubleCompareReg for comparison result. It has to be unavailable Label skip;
// to lithium register allocator. c(UN, D, cmp1, cmp2);
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg)); bc1f(&skip);
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2); nop();
bc1nez(nan, kDoubleCompareReg); Jr(nan, bd);
bind(&skip);
} else {
c(UN, D, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
}
}
} else {
// Use kDoubleCompareReg for comparison result. It has to be unavailable
// to lithium register allocator.
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
Jr(nan, bd);
bind(&skip);
} else {
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
}
}
}
}
if (target) {
bool long_branch =
target->is_bound() ? is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
Jr(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
}
} }
} }
}
void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
if (!IsMipsArchVariant(kMips32r6)) { if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) { if (target) {
// Here NaN cases were either handled by this function or are assumed to // Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller. // have been handled by the caller.
...@@ -1465,18 +1512,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1465,18 +1512,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(OLT, sizeField, cmp1, cmp2); c(OLT, sizeField, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case ult:
c(ULT, sizeField, cmp1, cmp2);
bc1t(target);
break;
case gt: case gt:
c(ULE, sizeField, cmp1, cmp2); c(ULE, sizeField, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case ugt:
c(OLE, sizeField, cmp1, cmp2);
bc1f(target);
break;
case ge: case ge:
c(ULT, sizeField, cmp1, cmp2); c(ULT, sizeField, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case uge:
c(OLT, sizeField, cmp1, cmp2);
bc1f(target);
break;
case le: case le:
c(OLE, sizeField, cmp1, cmp2); c(OLE, sizeField, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case ule:
c(ULE, sizeField, cmp1, cmp2);
bc1t(target);
break;
case eq: case eq:
c(EQ, sizeField, cmp1, cmp2); c(EQ, sizeField, cmp1, cmp2);
bc1t(target); bc1t(target);
...@@ -1485,11 +1548,11 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1485,11 +1548,11 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(UEQ, sizeField, cmp1, cmp2); c(UEQ, sizeField, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case ne: case ne: // Unordered or not equal.
c(EQ, sizeField, cmp1, cmp2); c(EQ, sizeField, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case nue: case ogl:
c(UEQ, sizeField, cmp1, cmp2); c(UEQ, sizeField, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
...@@ -1498,6 +1561,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1498,6 +1561,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
} }
} }
} else { } else {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) { if (target) {
// Here NaN cases were either handled by this function or are assumed to // Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller. // have been handled by the caller.
...@@ -1510,18 +1574,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1510,18 +1574,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2); cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg); bc1nez(target, kDoubleCompareReg);
break; break;
case ult:
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case gt: case gt:
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2); cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg); bc1eqz(target, kDoubleCompareReg);
break; break;
case ugt:
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case ge: case ge:
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2); cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg); bc1eqz(target, kDoubleCompareReg);
break; break;
case uge:
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case le: case le:
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2); cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg); bc1nez(target, kDoubleCompareReg);
break; break;
case ule:
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case eq: case eq:
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2); cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg); bc1nez(target, kDoubleCompareReg);
...@@ -1534,7 +1614,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1534,7 +1614,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2); cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg); bc1eqz(target, kDoubleCompareReg);
break; break;
case nue: case ogl:
cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2); cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg); bc1eqz(target, kDoubleCompareReg);
break; break;
...@@ -1543,7 +1623,6 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1543,7 +1623,6 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
} }
} }
} }
if (bd == PROTECT) { if (bd == PROTECT) {
nop(); nop();
} }
......
...@@ -1641,6 +1641,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT ...@@ -1641,6 +1641,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Condition cc, FPURegister cmp1, FPURegister cmp2, Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT); BranchDelaySlot bd = PROTECT);
void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
// Helper functions for generating invokes. // Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected, void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, const ParameterCount& actual,
......
...@@ -580,10 +580,10 @@ enum Condition { ...@@ -580,10 +580,10 @@ enum Condition {
no_overflow = 1, no_overflow = 1,
Uless = 2, Uless = 2,
Ugreater_equal = 3, Ugreater_equal = 3,
equal = 4, Uless_equal = 4,
not_equal = 5, Ugreater = 5,
Uless_equal = 6, equal = 6,
Ugreater = 7, not_equal = 7, // Unordered or Not Equal.
negative = 8, negative = 8,
positive = 9, positive = 9,
parity_even = 10, parity_even = 10,
...@@ -593,7 +593,7 @@ enum Condition { ...@@ -593,7 +593,7 @@ enum Condition {
less_equal = 14, less_equal = 14,
greater = 15, greater = 15,
ueq = 16, // Unordered or Equal. ueq = 16, // Unordered or Equal.
nue = 17, // Not (Unordered or Equal). ogl = 17, // Ordered and Not Equal.
cc_always = 18, cc_always = 18,
// Aliases. // Aliases.
...@@ -617,6 +617,10 @@ enum Condition { ...@@ -617,6 +617,10 @@ enum Condition {
hs = Ugreater_equal, hs = Ugreater_equal,
lo = Uless, lo = Uless,
al = cc_always, al = cc_always,
ult = Uless,
uge = Ugreater_equal,
ule = Uless_equal,
ugt = Ugreater,
cc_default = kNoCondition cc_default = kNoCondition
}; };
...@@ -631,6 +635,39 @@ inline Condition NegateCondition(Condition cc) { ...@@ -631,6 +635,39 @@ inline Condition NegateCondition(Condition cc) {
} }
inline Condition NegateFpuCondition(Condition cc) {
DCHECK(cc != cc_always);
switch (cc) {
case ult:
return ge;
case ugt:
return le;
case uge:
return lt;
case ule:
return gt;
case lt:
return uge;
case gt:
return ule;
case ge:
return ult;
case le:
return ugt;
case eq:
return ne;
case ne:
return eq;
case ueq:
return ogl;
case ogl:
return ueq;
default:
return cc;
}
}
// Commute a condition such that {a cond b == b cond' a}. // Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) { inline Condition CommuteCondition(Condition cc) {
switch (cc) { switch (cc) {
......
...@@ -272,12 +272,12 @@ void Deoptimizer::TableEntryGenerator::Generate() { ...@@ -272,12 +272,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_. __ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ dsll(a1, a1, kPointerSizeLog2); // Count to offset. __ dsll(a1, a1, kPointerSizeLog2); // Count to offset.
__ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**. __ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**.
__ jmp(&outer_loop_header); __ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop); __ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index. // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ ld(a2, MemOperand(a4, 0)); // output_[ix] __ ld(a2, MemOperand(a4, 0)); // output_[ix]
__ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset())); __ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header); __ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop); __ bind(&inner_push_loop);
__ Dsubu(a3, a3, Operand(sizeof(uint64_t))); __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
__ Daddu(a6, a2, Operand(a3)); __ Daddu(a6, a2, Operand(a3));
...@@ -347,7 +347,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -347,7 +347,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start; Label start;
__ bind(&start); __ bind(&start);
DCHECK(is_int16(i)); DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &done); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot. __ li(at, i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
...@@ -365,13 +365,13 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -365,13 +365,13 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start; Label start;
__ bind(&start); __ bind(&start);
DCHECK(is_int16(i)); DCHECK(is_int16(i));
__ Branch(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, -i); // In the delay slot. __ li(at, -i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
} }
// Entry with id == kMaxEntriesBranchReach - 1. // Entry with id == kMaxEntriesBranchReach - 1.
__ bind(&trampoline_jump); __ bind(&trampoline_jump);
__ Branch(USE_DELAY_SLOT, &done_special); __ BranchShort(USE_DELAY_SLOT, &done_special);
__ li(at, -1); __ li(at, -1);
for (int i = kMaxEntriesBranchReach; i < count(); i++) { for (int i = kMaxEntriesBranchReach; i < count(); i++) {
......
...@@ -2100,7 +2100,7 @@ void LCodeGen::EmitBranchF(InstrType instr, ...@@ -2100,7 +2100,7 @@ void LCodeGen::EmitBranchF(InstrType instr,
EmitGoto(left_block); EmitGoto(left_block);
} else if (left_block == next_block) { } else if (left_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL, __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
NegateCondition(condition), src1, src2); NegateFpuCondition(condition), src1, src2);
} else if (right_block == next_block) { } else if (right_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
condition, src1, src2); condition, src1, src2);
...@@ -2148,7 +2148,7 @@ void LCodeGen::DoBranch(LBranch* instr) { ...@@ -2148,7 +2148,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DCHECK(!info()->IsStub()); DCHECK(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value()); DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false. // Test the double value. Zero and NaN are false.
EmitBranchF(instr, nue, reg, kDoubleRegZero); EmitBranchF(instr, ogl, reg, kDoubleRegZero);
} else { } else {
DCHECK(r.IsTagged()); DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value()); Register reg = ToRegister(instr->value());
...@@ -2168,7 +2168,7 @@ void LCodeGen::DoBranch(LBranch* instr) { ...@@ -2168,7 +2168,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DoubleRegister dbl_scratch = double_scratch0(); DoubleRegister dbl_scratch = double_scratch0();
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false. // Test the double value. Zero and NaN are false.
EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero); EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
} else if (type.IsString()) { } else if (type.IsString()) {
DCHECK(!info()->IsStub()); DCHECK(!info()->IsStub());
__ ld(at, FieldMemOperand(reg, String::kLengthOffset)); __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
......
...@@ -1629,10 +1629,10 @@ void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, ...@@ -1629,10 +1629,10 @@ void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cc, FPURegister cmp1, Label* nan, Condition cond, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) { FPURegister cmp2, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this); BlockTrampolinePoolScope block_trampoline_pool(this);
if (cc == al) { if (cond == al) {
Branch(bd, target); Branch(bd, target);
return; return;
} }
...@@ -1640,22 +1640,69 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1640,22 +1640,69 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (kArchVariant == kMips64r6) { if (kArchVariant == kMips64r6) {
sizeField = sizeField == D ? L : W; sizeField = sizeField == D ? L : W;
} }
DCHECK(nan || target); DCHECK(nan || target);
// Check for unordered (NaN) cases. // Check for unordered (NaN) cases.
if (nan) { if (nan) {
bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
if (kArchVariant != kMips64r6) { if (kArchVariant != kMips64r6) {
c(UN, D, cmp1, cmp2); if (long_branch) {
bc1t(nan); Label skip;
c(UN, D, cmp1, cmp2);
bc1f(&skip);
nop();
Jr(nan, bd);
bind(&skip);
} else {
c(UN, D, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
}
}
} else { } else {
// Use f31 for comparison result. It has to be unavailable to lithium // Use kDoubleCompareReg for comparison result. It has to be unavailable
// to lithium
// register allocator. // register allocator.
DCHECK(!cmp1.is(f31) && !cmp2.is(f31)); DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
cmp(UN, L, f31, cmp1, cmp2); if (long_branch) {
bc1nez(nan, f31); Label skip;
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
Jr(nan, bd);
bind(&skip);
} else {
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
}
}
}
}
if (target) {
bool long_branch =
target->is_bound() ? is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
Jr(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
} }
} }
}
void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
if (kArchVariant != kMips64r6) { if (kArchVariant != kMips64r6) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) { if (target) {
// Here NaN cases were either handled by this function or are assumed to // Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller. // have been handled by the caller.
...@@ -1664,18 +1711,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1664,18 +1711,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(OLT, sizeField, cmp1, cmp2); c(OLT, sizeField, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case ult:
c(ULT, sizeField, cmp1, cmp2);
bc1t(target);
break;
case gt: case gt:
c(ULE, sizeField, cmp1, cmp2); c(ULE, sizeField, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case ugt:
c(OLE, sizeField, cmp1, cmp2);
bc1f(target);
break;
case ge: case ge:
c(ULT, sizeField, cmp1, cmp2); c(ULT, sizeField, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case uge:
c(OLT, sizeField, cmp1, cmp2);
bc1f(target);
break;
case le: case le:
c(OLE, sizeField, cmp1, cmp2); c(OLE, sizeField, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case ule:
c(ULE, sizeField, cmp1, cmp2);
bc1t(target);
break;
case eq: case eq:
c(EQ, sizeField, cmp1, cmp2); c(EQ, sizeField, cmp1, cmp2);
bc1t(target); bc1t(target);
...@@ -1684,11 +1747,11 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1684,11 +1747,11 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(UEQ, sizeField, cmp1, cmp2); c(UEQ, sizeField, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case ne: case ne: // Unordered or not equal.
c(EQ, sizeField, cmp1, cmp2); c(EQ, sizeField, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case nue: case ogl:
c(UEQ, sizeField, cmp1, cmp2); c(UEQ, sizeField, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
...@@ -1697,44 +1760,62 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, ...@@ -1697,44 +1760,62 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
} }
} }
} else { } else {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) { if (target) {
// Here NaN cases were either handled by this function or are assumed to // Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller. // have been handled by the caller.
// Unsigned conditions are treated as their signed counterpart. // Unsigned conditions are treated as their signed counterpart.
// Use f31 for comparison result, it is valid in fp64 (FR = 1) mode. // Use kDoubleCompareReg for comparison result, it is valid in fp64 (FR =
DCHECK(!cmp1.is(f31) && !cmp2.is(f31)); // 1) mode.
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
switch (cc) { switch (cc) {
case lt: case lt:
cmp(OLT, sizeField, f31, cmp1, cmp2); cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, f31); bc1nez(target, kDoubleCompareReg);
break;
case ult:
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break; break;
case gt: case gt:
cmp(ULE, sizeField, f31, cmp1, cmp2); cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, f31); bc1eqz(target, kDoubleCompareReg);
break;
case ugt:
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break; break;
case ge: case ge:
cmp(ULT, sizeField, f31, cmp1, cmp2); cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, f31); bc1eqz(target, kDoubleCompareReg);
break;
case uge:
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break; break;
case le: case le:
cmp(OLE, sizeField, f31, cmp1, cmp2); cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, f31); bc1nez(target, kDoubleCompareReg);
break;
case ule:
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break; break;
case eq: case eq:
cmp(EQ, sizeField, f31, cmp1, cmp2); cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, f31); bc1nez(target, kDoubleCompareReg);
break; break;
case ueq: case ueq:
cmp(UEQ, sizeField, f31, cmp1, cmp2); cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, f31); bc1nez(target, kDoubleCompareReg);
break; break;
case ne: case ne:
cmp(EQ, sizeField, f31, cmp1, cmp2); cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, f31); bc1eqz(target, kDoubleCompareReg);
break; break;
case nue: case ogl:
cmp(UEQ, sizeField, f31, cmp1, cmp2); cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, f31); bc1eqz(target, kDoubleCompareReg);
break; break;
default: default:
CHECK(0); CHECK(0);
......
...@@ -1710,6 +1710,11 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT ...@@ -1710,6 +1710,11 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Condition cc, FPURegister cmp1, FPURegister cmp2, Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT); BranchDelaySlot bd = PROTECT);
void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
// Helper functions for generating invokes. // Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected, void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, const ParameterCount& actual,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment