Commit 79e35416 authored by plind44@gmail.com

MIPS: Fixes for patch sites when long branches are emitted.

This resolves many mjsunit failures when long branches are forced to be emitted.

TEST=
BUG=
R=plind44@gmail.com

Review URL: https://codereview.chromium.org/153983002

Patch from Dusan Milosavljevic <Dusan.Milosavljevic@rt-rk.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19126 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent bae7a2d8
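
Background for the hunks below: a MIPS branch encodes a signed 16-bit word offset, so MacroAssembler::Branch() can emit a single short branch only when the target is near; for far targets it falls back to a jump sequence of a different size. Forcing that long form everywhere is what exposed the failures this patch fixes. A minimal standalone sketch of the range check (simplified; the IsNear name and layout here are illustrative, not V8 source):

#include <cstdint>
#include <cstdio>

// Sketch: a short MIPS branch reaches +/-32K instructions from its delay
// slot. Beyond that, Branch() must emit a multi-instruction jump sequence.
bool IsNear(int64_t pc, int64_t target) {
  int64_t offset = (target - (pc + 4)) / 4;  // word offset from the delay slot
  return offset >= INT16_MIN && offset <= INT16_MAX;
}

int main() {
  std::printf("%d\n", IsNear(0, 1000));     // 1: short branch suffices
  std::printf("%d\n", IsNear(0, 1 << 20));  // 0: needs the long form
}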
@@ -1023,11 +1023,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
     __ Move(double_scratch1, temp3, temp1);
   }
   __ mul_d(result, result, double_scratch1);
-  __ Branch(&done);
+  __ BranchShort(&done);
   __ bind(&zero);
   __ Move(result, kDoubleRegZero);
-  __ Branch(&done);
+  __ BranchShort(&done);
   __ bind(&infinity);
   __ ldc1(result, ExpConstant(2, temp3));
......
@@ -239,13 +239,13 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
   Label pop_loop;
   Label pop_loop_header;
-  __ Branch(&pop_loop_header);
+  __ BranchShort(&pop_loop_header);
   __ bind(&pop_loop);
   __ pop(t0);
   __ sw(t0, MemOperand(a3, 0));
   __ addiu(a3, a3, sizeof(uint32_t));
   __ bind(&pop_loop_header);
-  __ Branch(&pop_loop, ne, a2, Operand(sp));
+  __ BranchShort(&pop_loop, ne, a2, Operand(sp));
   // Compute the output frame in the deoptimizer.
   __ push(a0);  // Preserve deoptimizer object across call.
@@ -280,11 +280,11 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
   __ push(t3);
   __ bind(&inner_loop_header);
-  __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
   __ Addu(t0, t0, Operand(kPointerSize));
   __ bind(&outer_loop_header);
-  __ Branch(&outer_push_loop, lt, t0, Operand(a1));
+  __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
   __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
   for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
......
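
The deoptimizer loops above branch only a few instructions backward, so a short branch always reaches; switching them to BranchShort keeps the entry code at a fixed size even when long branches are forced globally. A hedged sketch of that reasoning (instruction counts are illustrative, not measured from V8):

#include <cstdio>

// A short branch encodes offsets in [-32768, 32767] instructions; the loop
// bodies above are a handful of instructions, so the backward branch can
// never fall out of range and never needs the variable-size long form.
bool FitsShortBranch(int words_back) {
  return words_back >= -32768 && words_back <= 32767;
}

int main() {
  std::printf("%d\n", FitsShortBranch(-5));       // 1: pop_loop-style offset
  std::printf("%d\n", FitsShortBranch(-100000));  // 0: would need a Jr sequence
}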
@@ -84,7 +84,7 @@ class JumpPatchSite BASE_EMBEDDED {
     __ bind(&patch_site_);
     __ andi(at, reg, 0);
     // Always taken before patched.
-    __ Branch(target, eq, at, Operand(zero_reg));
+    __ BranchShort(target, eq, at, Operand(zero_reg));
   }
   // When initially emitting this ensure that a jump is never generated to skip
@@ -95,7 +95,7 @@ class JumpPatchSite BASE_EMBEDDED {
     __ bind(&patch_site_);
     __ andi(at, reg, 0);
     // Never taken before patched.
-    __ Branch(target, ne, at, Operand(zero_reg));
+    __ BranchShort(target, ne, at, Operand(zero_reg));
   }
   void EmitPatchInfo() {
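
These JumpPatchSite branches are the core of the fix: EmitPatchInfo() records the distance to the andi/branch pair, and the inline-cache patcher later rewrites both words in place, which only works if the branch is exactly one fixed-size instruction. A simplified model of that constraint (the patching flow is condensed and assumed; the instruction encodings are standard MIPS):

#include <cstdint>
#include <cstdio>

// The patcher rewrites two words found at a recorded offset:
//   andi at, reg, 0        ->  andi at, reg, kSmiTagMask  (arm the smi test)
//   beq  at, zero, target  ->  bne  at, zero, target      (flip the sense)
// beq (opcode 0b000100) and bne (0b000101) differ only in bit 26, so the
// flip is a single XOR -- valid only if that word really is the short branch,
// which a long Branch() expansion would silently replace.
int main() {
  const int kOpcodeShift = 26;
  uint32_t beq_at_zero = (0x04u << kOpcodeShift) | (1u << 21);  // rs = at (r1)
  uint32_t flipped = beq_at_zero ^ (1u << kOpcodeShift);        // beq -> bne
  std::printf("opcode %u -> %u\n",
              beq_at_zero >> kOpcodeShift, flipped >> kOpcodeShift);  // 4 -> 5
}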
@@ -2347,13 +2347,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
   // recording binary operation stub, see
   switch (op) {
     case Token::SAR:
-      __ Branch(&stub_call);
       __ GetLeastBitsFromSmi(scratch1, right, 5);
       __ srav(right, left, scratch1);
       __ And(v0, right, Operand(~kSmiTagMask));
       break;
     case Token::SHL: {
-      __ Branch(&stub_call);
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ sllv(scratch1, scratch1, scratch2);
@@ -2363,7 +2361,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
       break;
     }
     case Token::SHR: {
-      __ Branch(&stub_call);
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ srlv(scratch1, scratch1, scratch2);
@@ -3055,8 +3052,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
   __ JumpIfSmi(v0, if_false);
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
-  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
   Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
   context()->Plug(if_true, if_false);
......
@@ -2613,10 +2613,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
   __ li(at, Operand(Handle<Object>(cell)));
   __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
-  __ Branch(&cache_miss, ne, map, Operand(at));
+  __ BranchShort(&cache_miss, ne, map, Operand(at));
   // We use Factory::the_hole_value() on purpose instead of loading from the
   // root array to force relocation to be able to later patch
-  // with true or false.
+  // with true or false. The distance from map check has to be constant.
   __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
   __ Branch(&done);
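
The new comment makes the invariant explicit: the deferred instanceof code reports how many instructions separate the map check from the patchable li, and the stub overwrites the loaded hole value with true or false at that fixed delta. A sketch of why every instruction in between must have a known size (the word counts are assumed for illustration, not taken from V8):

#include <cassert>

// BranchShort is always 2 words (branch + delay slot) and CONSTANT_SIZE
// forces li to a 2-word lui/ori pair, so the delta to the patch target is a
// compile-time constant. A long Branch() expansion would change the distance
// and the patch would land on the wrong word.
int main() {
  const int kBranchShortWords = 2;
  const int kConstantLiWords = 2;
  int delta = kBranchShortWords + kConstantLiWords;
  assert(delta == 4);  // fixed regardless of where the code is emitted
}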
@@ -3755,6 +3755,7 @@ void LCodeGen::DoPower(LPower* instr) {
     Label no_deopt;
     __ JumpIfSmi(a2, &no_deopt);
     __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
     DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
     __ bind(&no_deopt);
     MathPowStub stub(MathPowStub::TAGGED);
......
@@ -1563,19 +1563,27 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
     if (is_near(L)) {
       BranchShort(L, cond, rs, rt, bdslot);
     } else {
-      Label skip;
-      Condition neg_cond = NegateCondition(cond);
-      BranchShort(&skip, neg_cond, rs, rt);
-      Jr(L, bdslot);
-      bind(&skip);
+      if (cond != cc_always) {
+        Label skip;
+        Condition neg_cond = NegateCondition(cond);
+        BranchShort(&skip, neg_cond, rs, rt);
+        Jr(L, bdslot);
+        bind(&skip);
+      } else {
+        Jr(L, bdslot);
+      }
     }
   } else {
     if (is_trampoline_emitted()) {
-      Label skip;
-      Condition neg_cond = NegateCondition(cond);
-      BranchShort(&skip, neg_cond, rs, rt);
-      Jr(L, bdslot);
-      bind(&skip);
+      if (cond != cc_always) {
+        Label skip;
+        Condition neg_cond = NegateCondition(cond);
+        BranchShort(&skip, neg_cond, rs, rt);
+        Jr(L, bdslot);
+        bind(&skip);
+      } else {
+        Jr(L, bdslot);
+      }
     } else {
       BranchShort(L, cond, rs, rt, bdslot);
     }
......
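
The restructured fall-back above also fixes unconditional far branches: the old code negated the condition unconditionally, but cc_always has no meaningful negation, so an always-taken far branch must degrade to a bare Jr with nothing to skip. A minimal model of the fixed control flow (not V8 code; the strings stand in for emitted instructions):

#include <cstdio>

enum Condition { cc_always, eq, ne };

void EmitFarBranch(Condition cond) {
  if (cond != cc_always) {
    std::puts("  bxx neg_cond, skip");  // short branch over the long jump
    std::puts("  jr  target");
    std::puts("skip:");
  } else {
    std::puts("  jr  target");          // unconditional: nothing to skip
  }
}

int main() {
  EmitFarBranch(eq);         // conditional far target: skip pattern
  EmitFarBranch(cc_always);  // always-taken far target: bare jump
}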
@@ -169,6 +169,7 @@ class MacroAssembler: public Assembler {
   DECLARE_BRANCH_PROTOTYPES(Branch)
   DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+  DECLARE_BRANCH_PROTOTYPES(BranchShort)
 #undef DECLARE_BRANCH_PROTOTYPES
 #undef COND_TYPED_ARGS
@@ -1570,14 +1571,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
                           int num_reg_arguments,
                           int num_double_arguments);
-  void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
-  void BranchShort(int16_t offset, Condition cond, Register rs,
-                   const Operand& rt,
-                   BranchDelaySlot bdslot = PROTECT);
-  void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
-  void BranchShort(Label* L, Condition cond, Register rs,
-                   const Operand& rt,
-                   BranchDelaySlot bdslot = PROTECT);
   void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
   void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
                           const Operand& rt,
......
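
The header change replaces the four hand-written BranchShort overloads (deleted in the last hunk) with the same DECLARE_BRANCH_PROTOTYPES stamp already used for Branch and BranchAndLink. A rough reconstruction of that pattern, to show the equivalence (the macro body below is assumed from the COND_TYPED_ARGS convention visible in the hunk, not copied from the V8 header):

#include <cstdint>

// Stand-in types so the sketch is self-contained; the real ones live in V8.
enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };
enum Condition { cc_always, eq, ne };
struct Register {};
struct Operand {};
struct Label {};

#define COND_TYPED_ARGS Condition cond, Register rs, const Operand& rt

// One stamp generates the label/offset x unconditional/conditional overloads.
#define DECLARE_BRANCH_PROTOTYPES(Name)                                   \
  void Name(Label* L, BranchDelaySlot bdslot = PROTECT);                  \
  void Name(int16_t offset, BranchDelaySlot bdslot = PROTECT);            \
  void Name(Label* L, COND_TYPED_ARGS, BranchDelaySlot bdslot = PROTECT); \
  void Name(int16_t offset, COND_TYPED_ARGS,                              \
            BranchDelaySlot bdslot = PROTECT);

struct MacroAssemblerSketch {
  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)  // subsumes the deleted overloads
};

int main() { return 0; }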