Commit a9b01bb2 authored by paul.lind@imgtec.com

MIPS: Minor fixes and additions needed for Turbofan.

These small changes can be landed independently to avoid
cluttering up the initial Turbofan CL.

BUG=
R=balazs.kilvady@imgtec.com

Review URL: https://codereview.chromium.org/602603005

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24193 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a421506f
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -45,11 +45,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
     DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
-  // JumpToExternalReference expects s0 to contain the number of arguments
+  // JumpToExternalReference expects a0 to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ Addu(s0, a0, num_extra_args + 1);
-  __ sll(s1, s0, kPointerSizeLog2);
-  __ Subu(s1, s1, kPointerSize);
+  __ Addu(a0, a0, num_extra_args + 1);
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
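The adaptor now passes the argument count straight through in a0, which is where the rewritten CEntryStub expects it; the old code also precomputed the argv size in s1, which CEntryStub::Generate now derives itself (next hunk). A minimal standalone sketch of the count arithmetic, in plain C++ with a hypothetical helper name:

  // The count handed to JumpToExternalReference includes the receiver
  // and any extra arguments (e.g. the called function object).
  int TotalCEntryArgs(int argc, int num_extra_args) {
    return argc + num_extra_args + 1;  // +1 for the receiver.
  }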
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1099,22 +1099,18 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
 void CEntryStub::Generate(MacroAssembler* masm) {
   // Called from JavaScript; parameters are on stack as if calling JS function
-  // s0: number of arguments including receiver
-  // s1: size of arguments excluding receiver
-  // s2: pointer to builtin function
+  // a0: number of arguments including receiver
+  // a1: pointer to builtin function
   // fp: frame pointer    (restored after C call)
   // sp: stack pointer    (restored as callee's sp after C call)
   // cp: current context  (C callee-saved)
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
-  // The reason for this is that these arguments would need to be saved anyway
-  // so it's faster to set them up directly.
-  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
+  // Compute the argv pointer in a callee-saved register.
+  __ sll(s1, a0, kPointerSizeLog2);
+  __ Addu(s1, sp, s1);
+  __ Subu(s1, s1, kPointerSize);
 
   // Enter the exit frame that transitions from JavaScript to C++.
   FrameScope scope(masm, StackFrame::MANUAL);
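Instead of receiving the argv size from its caller in s1, the stub now computes the argv pointer itself: the wanted slot sits kPointerSize below sp + argc * kPointerSize. A rough standalone C++ equivalent of the three instructions above (a sketch, not V8 code):

  #include <cstdint>

  // s1 = sp + argc * pointer_size - pointer_size, i.e. the address of
  // the highest argument slot on the stack.
  uintptr_t ComputeArgv(uintptr_t sp, uint32_t argc, uint32_t pointer_size) {
    return sp + argc * pointer_size - pointer_size;
  }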
@@ -1126,7 +1122,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   // Prepare arguments for C routine.
   // a0 = argc
-  __ mov(a0, s0);
+  __ mov(s0, a0);
+  __ mov(s2, a1);
 
   // a1 = argv (set in the delay slot after find_ra below).
   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
@@ -1417,8 +1414,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
   DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-  // ReturnTrueFalse is only implemented for inlined call sites.
-  DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
 
   // Fixed register usage throughout the stub:
   const Register object = a0;  // Object (lhs).
@@ -1443,7 +1438,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     Label miss;
     __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
     __ Branch(&miss, ne, function, Operand(at));
@@ -1502,6 +1497,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   if (!HasCallSiteInlineCheck()) {
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    }
   } else {
     // Patch the call site to return true.
     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
@@ -1520,6 +1518,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   if (!HasCallSiteInlineCheck()) {
     __ li(v0, Operand(Smi::FromInt(1)));
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    }
   } else {
     // Patch the call site to return false.
     __ LoadRoot(v0, Heap::kFalseValueRootIndex);
@@ -1547,19 +1548,31 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
             ne,
             scratch,
             Operand(isolate()->factory()->null_value()));
-  __ li(v0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  } else {
+    __ li(v0, Operand(Smi::FromInt(1)));
+  }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   __ bind(&object_not_null);
   // Smi values are not instances of anything.
   __ JumpIfNotSmi(object, &object_not_null_or_smi);
-  __ li(v0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  } else {
+    __ li(v0, Operand(Smi::FromInt(1)));
+  }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   __ bind(&object_not_null_or_smi);
   // String values are not instances of anything.
   __ IsObjectJSStringType(object, scratch, &slow);
-  __ li(v0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  } else {
+    __ li(v0, Operand(Smi::FromInt(1)));
+  }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   // Slow-case. Tail call builtin.
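With ReturnTrueFalseObject() the stub now materializes the actual true/false heap values in v0 instead of the Smi encoding used by the patching paths, so an optimizing caller can consume the result directly as a boolean. Note the Smi convention is inverted: Smi 0 means "is an instance" (see the mov(v0, zero_reg) success path above), Smi 1 means "is not". A sketch of how a caller might decode v0 under the two conventions, with a hypothetical constant standing in for the true object's address:

  // Sketch only; kTrueObject is a placeholder, not V8's real value.
  bool IsInstance(intptr_t v0, bool return_true_false_object,
                  intptr_t kTrueObject) {
    if (return_true_false_object) return v0 == kTrueObject;
    return v0 == 0;  // Smi 0 => instance, Smi 1 => not an instance.
  }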
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -102,9 +102,8 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
   int params = descriptor->GetHandlerParameterCount();
-  output_frame->SetRegister(s0.code(), params);
-  output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
-  output_frame->SetRegister(s2.code(), handler);
+  output_frame->SetRegister(a0.code(), params);
+  output_frame->SetRegister(a1.code(), handler);
 }
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -790,6 +790,28 @@ void MacroAssembler::Div(Register rem, Register res,
 }
 
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      div(rs, rt.rm());
+      mflo(res);
+    } else {
+      div(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      div(rs, at);
+      mflo(res);
+    } else {
+      div(res, rs, at);
+    }
+  }
+}
+
 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     if (!IsMipsArchVariant(kMips32r6)) {
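These Operand overloads select the encoding by architecture revision: before MIPS32r6, div writes the quotient to the LO special register (read back with mflo) and the remainder to HI (mfhi), while r6 dropped HI/LO in favor of three-operand div/mod/divu/modu. A plain-C++ model of the semantics both paths must agree on (a sketch; it assumes a nonzero divisor, since dividing by zero is architecturally UNPREDICTABLE on MIPS):

  #include <cstdint>

  struct DivResult {
    int32_t quotient;   // pre-r6: mflo; r6: div rd, rs, rt
    int32_t remainder;  // pre-r6: mfhi; r6: mod rd, rs, rt
  };

  DivResult SignedDiv(int32_t rs, int32_t rt) {
    return DivResult{rs / rt, rs % rt};
  }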
@@ -812,6 +834,28 @@ void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
 }
 
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      divu(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      modu(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      divu(rs, at);
+      mfhi(rd);
+    } else {
+      modu(rd, rs, at);
+    }
+  }
+}
+
 void MacroAssembler::Divu(Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     divu(rs, rt.rm());
@@ -824,6 +868,28 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
 }
 
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      divu(rs, rt.rm());
+      mflo(res);
+    } else {
+      divu(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      divu(rs, at);
+      mflo(res);
+    } else {
+      divu(res, rs, at);
+    }
+  }
+}
+
 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     and_(rd, rs, rt.rm());
@@ -1904,7 +1970,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
       // Unsigned comparison.
       case Ugreater:
         if (r2.is(zero_reg)) {
-          bgtz(rs, offset);
+          bne(rs, zero_reg, offset);
         } else {
           sltu(scratch, r2, rs);
           bne(scratch, zero_reg, offset);
@@ -1912,7 +1978,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         break;
       case Ugreater_equal:
         if (r2.is(zero_reg)) {
-          bgez(rs, offset);
+          b(offset);
         } else {
           sltu(scratch, rs, r2);
           beq(scratch, zero_reg, offset);
@@ -1929,7 +1995,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         break;
       case Uless_equal:
         if (r2.is(zero_reg)) {
-          b(offset);
+          beq(rs, zero_reg, offset);
        } else {
           sltu(scratch, r2, rs);
           beq(scratch, zero_reg, offset);
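These three changes (and the analogous hunks below) are correctness fixes rather than cleanups: bgtz and bgez test the sign bit, so they give wrong answers for an unsigned comparison once the operand has its top bit set. For unsigned values, x > 0 is exactly x != 0 (hence bne), x >= 0 is always true (hence the unconditional b), and x <= 0 is exactly x == 0 (hence beq). A quick standalone demonstration in C++:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t x = 0x80000000u;              // top bit set
    assert(x > 0u);                        // unsigned: true...
    assert(static_cast<int32_t>(x) <= 0);  // ...but signed bgtz would not branch
    assert(x >= 0u);                       // tautology: unconditional b is right
    return 0;
  }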
@@ -2011,7 +2077,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
       // Unsigned comparison.
       case Ugreater:
         if (rt.imm32_ == 0) {
-          bgtz(rs, offset);
+          bne(rs, zero_reg, offset);
         } else {
           r2 = scratch;
           li(r2, rt);
@@ -2021,7 +2087,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         break;
       case Ugreater_equal:
         if (rt.imm32_ == 0) {
-          bgez(rs, offset);
+          b(offset);
         } else if (is_int16(rt.imm32_)) {
           sltiu(scratch, rs, rt.imm32_);
           beq(scratch, zero_reg, offset);
@@ -2048,7 +2114,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         break;
       case Uless_equal:
         if (rt.imm32_ == 0) {
-          b(offset);
+          beq(rs, zero_reg, offset);
         } else {
           r2 = scratch;
           li(r2, rt);
@@ -2150,7 +2216,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
       case Ugreater:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
-          bgtz(rs, offset);
+          bne(rs, zero_reg, offset);
         } else {
           sltu(scratch, r2, rs);
           offset = shifted_branch_offset(L, false);
@@ -2160,7 +2226,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
       case Ugreater_equal:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
-          bgez(rs, offset);
+          b(offset);
         } else {
           sltu(scratch, rs, r2);
           offset = shifted_branch_offset(L, false);
@@ -2180,7 +2246,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
       case Uless_equal:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
-          b(offset);
+          beq(rs, zero_reg, offset);
         } else {
           sltu(scratch, r2, rs);
           offset = shifted_branch_offset(L, false);
@@ -2292,7 +2358,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
       case Ugreater_equal:
         if (rt.imm32_ == 0) {
           offset = shifted_branch_offset(L, false);
-          bgez(rs, offset);
+          b(offset);
         } else if (is_int16(rt.imm32_)) {
           sltiu(scratch, rs, rt.imm32_);
           offset = shifted_branch_offset(L, false);
@@ -4477,8 +4543,34 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
 }
 
-void MacroAssembler::AdduAndCheckForOverflow(Register dst,
-                                             Register left,
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
+                                             const Operand& right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  if (right.is_reg()) {
+    AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+  } else {
+    if (dst.is(left)) {
+      mov(scratch, left);                   // Preserve left.
+      addiu(dst, left, right.immediate());  // Left is overwritten.
+      xor_(scratch, dst, scratch);          // Original left.
+      // Load right since xori takes uint16 as immediate.
+      addiu(t9, zero_reg, right.immediate());
+      xor_(overflow_dst, dst, t9);
+      and_(overflow_dst, overflow_dst, scratch);
+    } else {
+      addiu(dst, left, right.immediate());
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      addiu(t9, zero_reg, right.immediate());
+      xor_(scratch, dst, t9);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+  }
+}
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
                                              Register right,
                                              Register overflow_dst,
                                              Register scratch) {
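The check is the classic two's-complement identity: dst = left + right overflows iff both operands share a sign that the result does not, i.e. ((dst ^ left) & (dst ^ right)) has its sign bit set. The detour through t9 exists because xori only accepts a zero-extended 16-bit immediate; note that addiu's immediate is also 16-bit, so this path presumably assumes the constant fits in int16. A standalone C++ version of the identity:

  #include <cstdint>

  // The sign bit of (dst ^ left) & (dst ^ right) is set exactly when the
  // signed addition overflowed. The add itself is done unsigned to avoid
  // C++ signed-overflow UB in this model.
  bool AddOverflowed(int32_t left, int32_t right) {
    int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) +
                                       static_cast<uint32_t>(right));
    return ((dst ^ left) & (dst ^ right)) < 0;
  }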
@@ -4519,8 +4611,34 @@ void MacroAssembler::AdduAndCheckForOverflow(
 }
 
-void MacroAssembler::SubuAndCheckForOverflow(Register dst,
-                                             Register left,
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
+                                             const Operand& right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  if (right.is_reg()) {
+    SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+  } else {
+    if (dst.is(left)) {
+      mov(scratch, left);                      // Preserve left.
+      addiu(dst, left, -(right.immediate()));  // Left is overwritten.
+      xor_(overflow_dst, dst, scratch);        // scratch is original left.
+      // Load right since xori takes uint16 as immediate.
+      addiu(t9, zero_reg, right.immediate());
+      xor_(scratch, scratch, t9);  // scratch is original left.
+      and_(overflow_dst, scratch, overflow_dst);
+    } else {
+      addiu(dst, left, -(right.immediate()));
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      addiu(t9, zero_reg, right.immediate());
+      xor_(scratch, left, t9);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+  }
+}
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
                                              Register right,
                                              Register overflow_dst,
                                              Register scratch) {
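Subtraction uses the mirrored identity: dst = left - right overflows iff the operands' signs differ and the result's sign differs from left's, i.e. ((left ^ right) & (left ^ dst)) is negative, which is exactly what the xor_/and_ sequence above computes. In standalone C++:

  #include <cstdint>

  bool SubOverflowed(int32_t left, int32_t right) {
    int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) -
                                       static_cast<uint32_t>(right));
    return ((left ^ right) & (left ^ dst)) < 0;
  }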
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -587,7 +587,10 @@ class MacroAssembler: public Assembler {
   DEFINE_INSTRUCTION(Addu);
   DEFINE_INSTRUCTION(Subu);
   DEFINE_INSTRUCTION(Mul);
+  DEFINE_INSTRUCTION(Div);
+  DEFINE_INSTRUCTION(Divu);
   DEFINE_INSTRUCTION(Mod);
+  DEFINE_INSTRUCTION(Modu);
   DEFINE_INSTRUCTION(Mulh);
   DEFINE_INSTRUCTION2(Mult);
   DEFINE_INSTRUCTION2(Multu);
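For context, DEFINE_INSTRUCTION declares the three-operand macro-instruction forms whose last argument is an Operand (register or immediate); the new entries expose the Div/Divu/Mod/Modu overloads added in macro-assembler-mips.cc above. A simplified sketch of what the macro expands to (the real definition lives earlier in this header and may differ in detail):

  #define DEFINE_INSTRUCTION(instr)                          \
    void instr(Register rd, Register rs, const Operand& rt); \
    void instr(Register rd, Register rs, Register rt) {      \
      instr(rd, rs, Operand(rt));                            \
    }                                                        \
    void instr(Register rs, Register rt, int32_t j) {        \
      instr(rs, rt, Operand(j));                             \
    }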
@@ -1142,12 +1145,20 @@ class MacroAssembler: public Assembler {
                                Register overflow_dst,
                                Register scratch = at);
 
+  void AdduAndCheckForOverflow(Register dst, Register left,
+                               const Operand& right, Register overflow_dst,
+                               Register scratch = at);
+
   void SubuAndCheckForOverflow(Register dst,
                                Register left,
                                Register right,
                                Register overflow_dst,
                                Register scratch = at);
 
+  void SubuAndCheckForOverflow(Register dst, Register left,
+                               const Operand& right, Register overflow_dst,
+                               Register scratch = at);
+
   void BranchOnOverflow(Label* label,
                         Register overflow_check,
                         BranchDelaySlot bd = PROTECT) {
@@ -1172,13 +1183,10 @@ class MacroAssembler: public Assembler {
   // Runtime calls.
 
   // See comments at the beginning of CEntryStub::Generate.
-  inline void PrepareCEntryArgs(int num_args) {
-    li(s0, num_args);
-    li(s1, (num_args - 1) * kPointerSize);
-  }
+  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
 
   inline void PrepareCEntryFunction(const ExternalReference& ref) {
-    li(s2, Operand(ref));
+    li(a1, Operand(ref));
   }
 
 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
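With the new convention, a runtime call sets up its arguments in the standard a0/a1 argument registers and lets CEntryStub::Generate save them into callee-saved s0/s2 itself (see the code-stubs hunk above). Roughly how MacroAssembler::CallRuntime composes these helpers; this is a sketch reconstructed from the surrounding code, so details may differ:

  // Fragment, assumes a MacroAssembler member-function context:
  //   PrepareCEntryArgs(num_arguments);                        // a0 = argc
  //   PrepareCEntryFunction(ExternalReference(f, isolate()));  // a1 = target
  //   CEntryStub stub(isolate(), 1, save_doubles);
  //   CallStub(&stub);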