Commit c579bfe6 authored by danno@chromium.org

MIPS: pre-crankshaft updates to macro-assembler and related files. (2/3)

Highlights:
- Better support for FP compares and branches (BranchF macro)
- Add EmitFPUTruncate() macro, similar to EmitVFPTruncate on Arm.
- Some improvements to long-branch mechanism for mips.
- Add ClampUint8() and ClampDoubleToUint8()

- Minor changes, mostly to ic-mips and full-codegen-mips, for improved
code patching with BinaryOpStub.
- Small changes to stack checking in full-codegen-mips and
regexp-macro-assembler-mips

BUG=
TEST=

Review URL: http://codereview.chromium.org/7888004
Patch from Paul Lind <plind44@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9307 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 2517b0ef
......@@ -62,9 +62,11 @@ static unsigned GetPropertyId(Property* property) {
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
// bit immediate value is used) is the delta from the pc to the first
// marker is an andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
// (raw 16 bit immediate value is used) is the delta from the pc to the first
// instruction of the patchable code.
// The marker instruction is effectively a NOP (dest is zero_reg) and will
// never be emitted by normal code.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
......@@ -103,7 +105,7 @@ class JumpPatchSite BASE_EMBEDDED {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
__ andi(at, reg, delta_to_patch_site % kImm16Mask);
__ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
#ifdef DEBUG
info_emitted_ = true;
#endif
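For reference, a minimal sketch (not part of the patch) of how the marker round-trips the delta; the decode-side accessors Assembler::GetRs() and Assembler::GetImmediate16() are assumed names here:

  // Encode (EmitPatchInfo, above): pack the delta into an effective NOP.
  int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
  Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
  __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);

  // Decode (PatchInlinedSmiCode, below): recover the delta from the marker.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (Assembler::IsAndImmediate(instr) &&
      Assembler::GetRt(instr) == (uint32_t)zero_reg.code()) {
    int delta = Assembler::GetRs(instr) * kImm16Mask +  // rs field: delta / mask.
                Assembler::GetImmediate16(instr);       // imm field: delta % mask.
    // 'delta' is the distance from the pc back to the patchable code.
  }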
......@@ -315,17 +317,25 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that
// sequence is about as long as this Branch would be, so it is safe to
// ignore it.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
__ sltu(at, sp, t0);
__ beq(at, zero_reg, &ok);
// CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
StackCheckStub stub;
__ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
__ CallStub(&stub);
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
......
......@@ -1574,7 +1574,8 @@ void PatchInlinedSmiCode(Address address) {
// If the instruction following the call is not an andi at, rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
if (!Assembler::IsAndImmediate(instr)) {
if (!(Assembler::IsAndImmediate(instr) &&
Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
return;
}
......
......@@ -120,7 +120,9 @@ void MacroAssembler::PushSafepointRegisters() {
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
ASSERT(num_unsaved >= 0);
Subu(sp, sp, Operand(num_unsaved * kPointerSize));
if (num_unsaved > 0) {
Subu(sp, sp, Operand(num_unsaved * kPointerSize));
}
MultiPush(kSafepointSavedRegisters);
}
......@@ -128,7 +130,9 @@ void MacroAssembler::PushSafepointRegisters() {
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
MultiPop(kSafepointSavedRegisters);
Addu(sp, sp, Operand(num_unsaved * kPointerSize));
if (num_unsaved > 0) {
Addu(sp, sp, Operand(num_unsaved * kPointerSize));
}
}
......@@ -181,6 +185,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
......@@ -188,8 +193,6 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
......@@ -708,7 +711,7 @@ void MacroAssembler::MultiPush(RegList regs) {
int16_t stack_offset = num_to_push * kPointerSize;
Subu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters; i > 0; i--) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
sw(ToRegister(i), MemOperand(sp, stack_offset));
......@@ -747,7 +750,7 @@ void MacroAssembler::MultiPop(RegList regs) {
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters; i > 0; i--) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
......@@ -941,11 +944,9 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
// Test if scratch > fd.
c(OLT, D, fd, scratch);
Label simple_convert;
// If fd < 2^31 we can convert it normally.
bc1t(&simple_convert);
Label simple_convert;
BranchF(&simple_convert, NULL, lt, fd, scratch);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
......@@ -965,6 +966,102 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
void MacroAssembler::BranchF(Label* target,
Label* nan,
Condition cc,
FPURegister cmp1,
FPURegister cmp2,
BranchDelaySlot bd) {
if (cc == al) {
Branch(bd, target);
return;
}
ASSERT(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
c(UN, D, cmp1, cmp2);
bc1t(nan);
}
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
// Unsigned conditions are treated as their signed counterpart.
switch (cc) {
case Uless:
case less:
c(OLT, D, cmp1, cmp2);
bc1t(target);
break;
case Ugreater:
case greater:
c(ULE, D, cmp1, cmp2);
bc1f(target);
break;
case Ugreater_equal:
case greater_equal:
c(ULT, D, cmp1, cmp2);
bc1f(target);
break;
case Uless_equal:
case less_equal:
c(OLE, D, cmp1, cmp2);
bc1t(target);
break;
case eq:
c(EQ, D, cmp1, cmp2);
bc1t(target);
break;
case ne:
c(EQ, D, cmp1, cmp2);
bc1f(target);
break;
default:
CHECK(0);
};
}
if (bd == PROTECT) {
nop();
}
}
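BranchF wraps the c.cond.d / bc1t / bc1f pairs that were previously written out by hand; the Trunc_uw_d hunk above is the typical before/after for an ordered less-than compare:

  // Before: explicit FPU condition-flag handling, NaN behavior implicit.
  c(OLT, D, fd, scratch);        // Set the FPU condition flag if fd < scratch.
  bc1t(&simple_convert);         // Branch if the flag is set.
  nop();                         // Branch delay slot.

  // After: one call; passing NULL for 'nan' means the caller has already
  // handled (or excluded) NaN inputs.
  BranchF(&simple_convert, NULL, lt, fd, scratch);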
void MacroAssembler::Move(FPURegister dst, double imm) {
ASSERT(CpuFeatures::IsEnabled(FPU));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
// Handle special values first.
bool force_load = dst.is(kDoubleRegZero);
if (value.bits == zero.bits && !force_load) {
mov_d(dst, kDoubleRegZero);
} else if (value.bits == minus_zero.bits && !force_load) {
neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
// Move the low part of the double into the lower register of the
// corresponding FPU register pair.
if (lo != 0) {
li(at, Operand(lo));
mtc1(at, dst);
} else {
mtc1(zero_reg, dst);
}
// Move the high part of the double into the higher register of the
// corresponding FPU register pair.
if (hi != 0) {
li(at, Operand(hi));
mtc1(at, dst.high());
} else {
mtc1(zero_reg, dst.high());
}
}
}
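The bit-pattern comparison matters because 0.0 == -0.0 under ordinary double comparison, yet only the exact +0.0 pattern can alias kDoubleRegZero directly (with -0.0 derived from it via neg_d). A sketch of the representation type assumed above:

  // Sketch only; v8 defines its own DoubleRepresentation.
  union DoubleRepresentation {
    double value;
    uint64_t bits;                 // Raw IEEE-754 bit pattern of 'value'.
    explicit DoubleRepresentation(double v) : value(v) {}
  };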
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32-bit signed integer range.
......@@ -1063,6 +1160,53 @@ void MacroAssembler::ConvertToInt32(Register source,
}
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
FPURegister result,
DoubleRegister double_input,
Register scratch1,
Register except_flag,
CheckForInexactConversion check_inexact) {
ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
// Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
cfc1(scratch1, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
round_w_d(result, double_input);
break;
case kRoundToZero:
trunc_w_d(result, double_input);
break;
case kRoundToPlusInf:
ceil_w_d(result, double_input);
break;
case kRoundToMinusInf:
floor_w_d(result, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
ctc1(scratch1, FCSR);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
}
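A hypothetical caller (register and label names assumed; kCheckForInexactConversion is the counterpart of the kDontCheckForInexactConversion default) that rejects any lossy conversion might look like:

  Register except_flag = scratch2;
  EmitFPUTruncate(kRoundToZero,
                  single_scratch,        // FPU register receiving the int32.
                  double_input,
                  scratch1,
                  except_flag,
                  kCheckForInexactConversion);
  // Any set flag (invalid, overflow, underflow, or inexact) means the double
  // was not exactly representable as a signed 32-bit integer.
  Branch(&not_int32, ne, except_flag, Operand(zero_reg));
  mfc1(result, single_scratch);          // Move the converted value to a GPR.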
void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
Register input_high,
Register input_low,
......@@ -1149,22 +1293,21 @@ void MacroAssembler::EmitECMATruncate(Register result,
FPURegister double_input,
FPURegister single_scratch,
Register scratch,
Register input_high,
Register input_low) {
Register scratch2,
Register scratch3) {
CpuFeatures::Scope scope(FPU);
ASSERT(!input_high.is(result));
ASSERT(!input_low.is(result));
ASSERT(!input_low.is(input_high));
ASSERT(!scratch2.is(result));
ASSERT(!scratch3.is(result));
ASSERT(!scratch3.is(scratch2));
ASSERT(!scratch.is(result) &&
!scratch.is(input_high) &&
!scratch.is(input_low));
!scratch.is(scratch2) &&
!scratch.is(scratch3));
ASSERT(!single_scratch.is(double_input));
Label done;
Label manual;
// Clear cumulative exception flags and save the FCSR.
Register scratch2 = input_high;
cfc1(scratch2, FCSR);
ctc1(zero_reg, FCSR);
// Try a conversion to a signed integer.
......@@ -1181,6 +1324,8 @@ void MacroAssembler::EmitECMATruncate(Register result,
Branch(&done, eq, scratch, Operand(zero_reg));
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
Move(input_low, input_high, double_input);
EmitOutOfInt32RangeTruncate(result,
input_high,
......@@ -1212,15 +1357,6 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
bool MacroAssembler::UseAbsoluteCodePointers() {
if (is_trampoline_emitted()) {
return true;
} else {
return false;
}
}
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
BranchShort(offset, bdslot);
}
......@@ -1234,11 +1370,18 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
bool is_label_near = is_near(L);
if (UseAbsoluteCodePointers() && !is_label_near) {
Jr(L, bdslot);
if (L->is_bound()) {
if (is_near(L)) {
BranchShort(L, bdslot);
} else {
Jr(L, bdslot);
}
} else {
BranchShort(L, bdslot);
if (is_trampoline_emitted()) {
Jr(L, bdslot);
} else {
BranchShort(L, bdslot);
}
}
}
......@@ -1246,15 +1389,26 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
bool is_label_near = is_near(L);
if (UseAbsoluteCodePointers() && !is_label_near) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jr(L, bdslot);
bind(&skip);
if (L->is_bound()) {
if (is_near(L)) {
BranchShort(L, cond, rs, rt, bdslot);
} else {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jr(L, bdslot);
bind(&skip);
}
} else {
BranchShort(L, cond, rs, rt, bdslot);
if (is_trampoline_emitted()) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jr(L, bdslot);
bind(&skip);
} else {
BranchShort(L, cond, rs, rt, bdslot);
}
}
}
......@@ -1277,8 +1431,8 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
Register scratch = at;
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
// NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
// rt.
r2 = rt.rm_;
switch (cond) {
case cc_always:
......@@ -1780,11 +1934,18 @@ void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
bool is_label_near = is_near(L);
if (UseAbsoluteCodePointers() && !is_label_near) {
Jalr(L, bdslot);
if (L->is_bound()) {
if (is_near(L)) {
BranchAndLinkShort(L, bdslot);
} else {
Jalr(L, bdslot);
}
} else {
BranchAndLinkShort(L, bdslot);
if (is_trampoline_emitted()) {
Jalr(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
}
}
......@@ -1792,15 +1953,26 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
bool is_label_near = is_near(L);
if (UseAbsoluteCodePointers() && !is_label_near) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jalr(L, bdslot);
bind(&skip);
if (L->is_bound()) {
if (is_near(L)) {
BranchAndLinkShort(L, cond, rs, rt, bdslot);
} else {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jalr(L, bdslot);
bind(&skip);
}
} else {
BranchAndLinkShort(L, cond, rs, rt, bdslot);
if (is_trampoline_emitted()) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jalr(L, bdslot);
bind(&skip);
} else {
BranchAndLinkShort(L, cond, rs, rt, bdslot);
}
}
}
......@@ -3180,8 +3352,10 @@ void MacroAssembler::InvokeCode(Register code,
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
SetCallKind(t1, call_kind);
Call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
SetCallKind(t1, call_kind);
......@@ -3262,7 +3436,11 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
if (V8::UseCrankshaft()) {
UNIMPLEMENTED_MIPS();
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
} else {
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
}
......@@ -3584,7 +3762,16 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
ASSERT(!overflow_dst.is(scratch));
ASSERT(!overflow_dst.is(left));
ASSERT(!overflow_dst.is(right));
ASSERT(!left.is(right));
if (left.is(right) && dst.is(left)) {
ASSERT(!dst.is(t9));
ASSERT(!scratch.is(t9));
ASSERT(!left.is(t9));
ASSERT(!right.is(t9));
ASSERT(!overflow_dst.is(t9));
mov(t9, right);
right = t9;
}
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
......@@ -3617,10 +3804,17 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
ASSERT(!overflow_dst.is(scratch));
ASSERT(!overflow_dst.is(left));
ASSERT(!overflow_dst.is(right));
ASSERT(!left.is(right));
ASSERT(!scratch.is(left));
ASSERT(!scratch.is(right));
// This happens with some crankshaft code. Since Subu works fine if
// left == right, let's not make that restriction here.
if (left.is(right)) {
mov(dst, zero_reg);
mov(overflow_dst, zero_reg);
return;
}
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
subu(dst, left, right); // Left is overwritten.
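Both helpers rely on the standard two's-complement sign test; a plain C++ sketch of the checks they emit:

  // dst = left + right overflows iff both inputs share a sign that the
  // result does not: (left ^ dst) & (right ^ dst) is then negative.
  static inline bool AdduOverflows(int32_t left, int32_t right) {
    int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) +
                                       static_cast<uint32_t>(right));  // addu.
    return ((left ^ dst) & (right ^ dst)) < 0;
  }

  // dst = left - right overflows iff the inputs differ in sign and the
  // result's sign differs from left's.
  static inline bool SubuOverflows(int32_t left, int32_t right) {
    int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) -
                                       static_cast<uint32_t>(right));  // subu.
    return ((left ^ right) & (left ^ dst)) < 0;
  }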
......@@ -4272,7 +4466,23 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
static const int kRegisterPassedArguments = 4;
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
num_reg_arguments += 2 * num_double_arguments;
// Up to four simple arguments are passed in registers a0..a3.
if (num_reg_arguments > kRegisterPassedArguments) {
stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
}
stack_passed_words += kCArgSlotCount;
return stack_passed_words;
}
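Under the MIPS o32 ABI each double consumes two of the four a0..a3 argument registers, and the four reserved argument slots are always added. A worked example, assuming kCArgSlotCount == 4:

  // 1 integer argument + 2 double arguments:
  //   num_reg_arguments = 1 + 2 * 2 = 5
  //   spill past a0..a3 = 5 - 4   = 1 word
  //   stack_passed_words = 1 + kCArgSlotCount = 5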
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
// Up to four simple arguments are passed in registers a0..a3.
......@@ -4280,9 +4490,8 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// mips, even though those argument slots are not normally used.
// Remaining arguments are pushed on the stack, above (higher address than)
// the argument slots.
int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
0 : num_arguments - kRegisterPassedArguments) +
kCArgSlotCount;
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
......@@ -4297,26 +4506,53 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
}
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunctionHelper(no_reg, function, t8, num_arguments);
int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(no_reg,
function,
t8,
num_reg_arguments,
num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function,
Register scratch,
int num_arguments) {
int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function,
ExternalReference::the_hole_value_location(isolate()),
scratch,
num_arguments);
num_reg_arguments,
num_double_arguments);
}
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunction(Register function,
Register scratch,
int num_arguments) {
CallCFunction(function, scratch, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_arguments) {
int num_reg_arguments,
int num_double_arguments) {
ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
......@@ -4355,9 +4591,8 @@ void MacroAssembler::CallCFunctionHelper(Register function,
Call(function);
int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
0 : num_arguments - kRegisterPassedArguments) +
kCArgSlotCount;
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
if (OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
......@@ -4381,6 +4616,49 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
ASSERT(!output_reg.is(input_reg));
Label done;
li(output_reg, Operand(255));
// Normal branch: nop in delay slot.
Branch(&done, gt, input_reg, Operand(output_reg));
// Use delay slot in this branch.
Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
mov(output_reg, zero_reg); // In delay slot.
mov(output_reg, input_reg); // Value is in range 0..255.
bind(&done);
}
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
DoubleRegister input_reg,
DoubleRegister temp_double_reg) {
Label above_zero;
Label done;
Label in_bounds;
Move(temp_double_reg, 0.0);
BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
// Double value is <= 0 or NaN; return 0.
mov(result_reg, zero_reg);
Branch(&done);
// Double value is > 255; return 255.
bind(&above_zero);
Move(temp_double_reg, 255.0);
BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
li(result_reg, Operand(255));
Branch(&done);
// In 0-255 range, round and truncate.
bind(&in_bounds);
round_w_d(temp_double_reg, input_reg);
mfc1(result_reg, temp_double_reg);
bind(&done);
}
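The intended mapping, as a plain C++ reference sketch rather than the emitted code: NaN and non-positive inputs clamp to 0, large inputs to 255, and in-range values are rounded:

  static inline uint8_t ClampDoubleToUint8Reference(double value) {
    if (!(value > 0.0)) return 0;    // NaN and values <= 0 clamp to 0.
    if (value > 255.0) return 255;   // Everything above 255 clamps to 255.
    // round_w_d uses the FCSR rounding mode (round-to-nearest-even by
    // default); this sketch approximates it with round-half-up.
    return static_cast<uint8_t>(value + 0.5);
  }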
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
......
......@@ -50,15 +50,16 @@ class JumpTarget;
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
// Registers aliases
// Register aliases.
// cp is assumed to be a callee saved register.
const Register lithiumScratchReg = s3; // Scratch register.
const Register lithiumScratchReg2 = s4; // Scratch register.
const Register condReg = s5; // Simulated (partial) condition code for mips.
const Register roots = s6; // Roots array pointer.
const Register cp = s7; // JavaScript context pointer.
const Register fp = s8_fp; // Alias for fp.
// Registers used for condition evaluation.
const Register condReg1 = s4;
const Register condReg2 = s5;
const DoubleRegister lithiumScratchDouble = f30; // Double scratch register.
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
......@@ -90,6 +91,36 @@ enum BranchDelaySlot {
PROTECT
};
// -----------------------------------------------------------------------------
// Static helper functions.
static MemOperand ContextOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
static inline MemOperand GlobalObjectOperand() {
return ContextOperand(cp, Context::GLOBAL_INDEX);
}
// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
static inline MemOperand CFunctionArgumentOperand(int index) {
ASSERT(index > kCArgSlotCount);
// Argument 5 takes the slot just past the four Arg-slots.
int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
return MemOperand(sp, offset);
}
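A worked example of the offset arithmetic, assuming kPointerSize == 4 and therefore kCArgsSlotsSize == 16 under o32:

  // CFunctionArgumentOperand(5):
  //   offset = (5 - 5) * kPointerSize + kCArgsSlotsSize = 16,
  // so argument 5 lands in the first word above the four reserved slots:
  __ sw(a0, CFunctionArgumentOperand(5));   // Stores to MemOperand(sp, 16).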
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
......@@ -138,21 +169,22 @@ class MacroAssembler: public Assembler {
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Register target, COND_ARGS);
static int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
COND_ARGS);
static int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
COND_ARGS);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd) {
Ret(al, zero_reg, Operand(zero_reg), bd);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
Ret(cond, rs, rt, bd);
}
#undef COND_ARGS
......@@ -197,6 +229,8 @@ class MacroAssembler: public Assembler {
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
}
void Move(FPURegister dst, double imm);
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it is used by v8, for example in
// CodeGenerator::ProcessDeferred().
......@@ -517,6 +551,14 @@ class MacroAssembler: public Assembler {
Addu(sp, sp, 2 * kPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
lw(src3, MemOperand(sp, 0 * kPointerSize));
lw(src2, MemOperand(sp, 1 * kPointerSize));
lw(src1, MemOperand(sp, 2 * kPointerSize));
Addu(sp, sp, 3 * kPointerSize);
}
void Pop(uint32_t count = 1) {
Addu(sp, sp, Operand(count * kPointerSize));
}
......@@ -539,6 +581,9 @@ class MacroAssembler: public Assembler {
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
// ---------------------------------------------------------------------------
// FPU macros. These do not handle special cases like NaN or +- inf.
// Convert unsigned word to double.
void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
......@@ -547,6 +592,24 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
// Wrapper function for the different cmp/branch types.
void BranchF(Label* target,
Label* nan,
Condition cc,
FPURegister cmp1,
FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
// Alternate (inline) version for better readability with USE_DELAY_SLOT.
inline void BranchF(BranchDelaySlot bd,
Label* target,
Label* nan,
Condition cc,
FPURegister cmp1,
FPURegister cmp2) {
BranchF(target, nan, cc, cmp1, cmp2, bd);
};
// Convert the HeapNumber pointed to by source to a 32-bit signed integer
// dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
// to not_int32 label. If FPU is available double_scratch is used but not
......@@ -558,6 +621,18 @@ class MacroAssembler: public Assembler {
FPURegister double_scratch,
Label *not_int32);
// Truncates a double using a specific rounding mode.
// The except_flag will contain any exceptions caused by the instruction.
// If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
void EmitFPUTruncate(FPURoundingMode rounding_mode,
FPURegister result,
DoubleRegister double_input,
Register scratch1,
Register except_flag,
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
// Helper for EmitECMATruncate.
// This will truncate a floating-point value outside of the signed 32-bit
// integer range to a 32-bit signed integer.
......@@ -605,6 +680,7 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
// -------------------------------------------------------------------------
// JavaScript invokes.
......@@ -745,6 +821,21 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string.
Condition IsObjectStringType(Register obj,
Register type,
Register result) {
lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
ASSERT_EQ(0, kStringTag);
return eq;
}
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
......@@ -870,6 +961,9 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
// Before calling a C-function from generated code, align arguments on stack
// and add space for the four mips argument slots.
// After aligning the frame, non-register arguments must be stored on the
......@@ -879,7 +973,11 @@ class MacroAssembler: public Assembler {
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_arguments, Register scratch);
void PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch);
void PrepareCallCFunction(int num_reg_arguments,
Register scratch);
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 5..n are stored to stack using following:
......@@ -892,6 +990,12 @@ class MacroAssembler: public Assembler {
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, Register scratch, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function, Register scratch,
int num_reg_arguments,
int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
// There are two ways of passing double arguments on MIPS, depending on
......@@ -997,6 +1101,13 @@ class MacroAssembler: public Assembler {
Addu(reg, reg, reg);
}
// Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
void SmiTagCheckOverflow(Register reg, Register overflow) {
mov(overflow, reg); // Save original value.
addu(reg, reg, reg);
xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
}
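Why the xor works: tagging doubles the value, which loses information exactly when bits 30 and 31 of the input differ, and that difference surfaces as the sign bit of value ^ (2 * value). A C++ sketch:

  // Sketch in plain C++ of the check SmiTagCheckOverflow emits.
  static inline bool SmiTagOverflows(int32_t value) {
    int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    return (value ^ tagged) < 0;   // Sign flipped => value was not smi-sized.
  }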
void SmiTag(Register dst, Register src) {
Addu(dst, src, src);
}
......@@ -1011,10 +1122,11 @@ class MacroAssembler: public Assembler {
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
Register scratch = at) {
Register scratch = at,
BranchDelaySlot bd = PROTECT) {
ASSERT_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(smi_label, eq, scratch, Operand(zero_reg));
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
// Jump if the register contains a non-smi.
......@@ -1084,8 +1196,16 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* failure);
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
DoubleRegister input_reg,
DoubleRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
......@@ -1094,7 +1214,8 @@ class MacroAssembler: public Assembler {
void CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_arguments);
int num_reg_arguments,
int num_double_arguments);
void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchShort(int16_t offset, Condition cond, Register rs,
......@@ -1141,8 +1262,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool UseAbsoluteCodePointers();
bool generating_stub_;
bool allow_stub_calls_;
bool has_frame_;
......@@ -1186,34 +1305,6 @@ class CodePatcher {
};
// -----------------------------------------------------------------------------
// Static helper functions.
static MemOperand ContextOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
static inline MemOperand GlobalObjectOperand() {
return ContextOperand(cp, Context::GLOBAL_INDEX);
}
// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
static inline MemOperand CFunctionArgumentOperand(int index) {
ASSERT(index > kCArgSlotCount);
// Argument 5 takes the slot just past the four Arg-slots.
int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
return MemOperand(sp, offset);
}
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
......
......@@ -1253,13 +1253,14 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
// Stack is already aligned for call, so decrement by alignment
// to make room for storing the return address.
__ Subu(sp, sp, Operand(stack_alignment));
__ sw(ra, MemOperand(sp, 0));
__ mov(a0, sp);
__ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
const int return_address_offset = kCArgsSlotsSize;
__ Addu(a0, sp, return_address_offset);
__ sw(ra, MemOperand(a0, 0));
__ mov(t9, t1);
__ Call(t9);
__ lw(ra, MemOperand(sp, 0));
__ Addu(sp, sp, Operand(stack_alignment));
__ lw(ra, MemOperand(sp, return_address_offset));
__ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
__ Jump(ra);
}
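The extra kCArgsSlotsSize keeps the four o32 argument slots below the saved return address, so the called C helper may legally spill a0..a3 into them. The resulting layout, assuming stack_alignment == 8 and kCArgsSlotsSize == 16:

  //  sp +  0 .. sp + 15 : four reserved o32 argument slots (callee-owned).
  //  sp + 16            : saved ra; its address is passed to the helper in a0.
  //  sp + 20 .. sp + 23 : padding up to stack_alignment.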
......