Commit c579bfe6 authored by danno@chromium.org

MIPS: pre-crankshaft updates to macro-assembler and related files. (2/3)

Highlights:
- Better support for FP compares and branches (BranchF macro)
- Add an EmitFPUTruncate() macro, similar to EmitVFPTruncate on ARM.
- Some improvements to the long-branch mechanism for MIPS.
- Add ClampUint8() and ClampDoubleToUint8() (sketched below).

- Minor changes, mostly to ic-mips and full-codegen-mips, for improved
code-patching with BinaryOpStub.
- Small changes to stack checking in full-codegen-mips and
regexp-macro-assembler-mips.
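
For readers unfamiliar with the clamping helpers: below is a minimal standalone sketch of the semantics a ClampDoubleToUint8()-style helper provides, not the V8 implementation. The NaN-to-0 behaviour and round-to-nearest (ties-to-even) rounding are assumptions here.

    // Illustrative sketch only -- not V8's ClampDoubleToUint8().
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    uint8_t ClampDoubleToUint8Sketch(double value) {
      if (!(value > 0.0)) return 0;        // catches NaN and non-positive values
      if (value >= 255.0) return 255;      // clamp the high end
      // Round to nearest in the current FP rounding mode (ties-to-even by default).
      return static_cast<uint8_t>(std::nearbyint(value));
    }

    int main() {
      std::printf("%u %u %u\n",
                  ClampDoubleToUint8Sketch(-3.2),    // 0
                  ClampDoubleToUint8Sketch(127.5),   // 128 (tie rounds to even)
                  ClampDoubleToUint8Sketch(300.0));  // 255
    }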

BUG=
TEST=

Review URL: http://codereview.chromium.org/7888004
Patch from Paul Lind <plind44@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9307 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 2517b0ef
@@ -62,9 +62,11 @@ static unsigned GetPropertyId(Property* property) {
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
- // marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
- // bit immediate value is used) is the delta from the pc to the first
+ // marker is a andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
+ // (raw 16 bit immediate value is used) is the delta from the pc to the first
// instruction of the patchable code.
+ // The marker instruction is effectively a NOP (dest is zero_reg) and will
+ // never be emitted by normal code.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -103,7 +105,7 @@ class JumpPatchSite BASE_EMBEDDED {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
- __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+ __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
#ifdef DEBUG
info_emitted_ = true;
#endif
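
The andi marker above packs the patch-site delta in two pieces: the register index carries delta / kImm16Mask and the 16-bit immediate carries delta % kImm16Mask, so a patcher recovers it as rx * 0x0000ffff + yyyy, exactly as the comment describes. A minimal standalone sketch of that round trip (assuming kImm16Mask is 0xffff as in the MIPS port's constants; this is not V8 code):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kImm16Mask = 0xffff;  // assumed value of the mask

    struct Marker {
      uint32_t reg_code;  // rx field of the andi marker
      uint32_t imm16;     // 16-bit immediate of the andi marker
    };

    // EmitPatchInfo splits the delta across the register index and the immediate.
    Marker EncodeDelta(uint32_t delta_to_patch_site) {
      return { delta_to_patch_site / kImm16Mask, delta_to_patch_site % kImm16Mask };
    }

    // The patching side reconstructs the delta as rx * 0x0000ffff + yyyy.
    uint32_t DecodeDelta(const Marker& m) {
      return m.reg_code * kImm16Mask + m.imm16;
    }

    int main() {
      for (uint32_t delta : {0u, 5u, 0xfffeu, 0x12345u}) {
        assert(DecodeDelta(EncodeDelta(delta)) == delta);
      }
    }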
@@ -315,17 +317,25 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+ // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
+ // to make sure it is constant. Branch may emit a skip-or-jump sequence
+ // instead of the normal Branch. It seems that the "skip" part of that
+ // sequence is about as long as this Branch would be so it is safe to ignore
+ // that.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
+ __ sltu(at, sp, t0);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
StackCheckStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
- __ CallStub(&stub);
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
......
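
The sltu/beq pair above is the fixed-length expansion of "branch to ok if sp >= stack limit (unsigned)", which keeps the patchable sequence a constant size for Deoptimizer::PatchStackCheckCodeAt. A minimal C++ sketch of the control flow it encodes (illustrative only; the stub function below is a placeholder, not a V8 API):

    #include <cstdint>
    #include <cstdio>

    // Placeholder for what CallStub(&stub) ends up running.
    void StackCheckStubSketch() { std::puts("stack check stub called"); }

    void StackCheckSketch(uintptr_t sp, uintptr_t stack_limit) {
      bool below_limit = sp < stack_limit;  // sltu at, sp, t0
      if (!below_limit) return;             // beq at, zero_reg, &ok
      StackCheckStubSketch();               // __ CallStub(&stub)
    }

    int main() {
      StackCheckSketch(0x1000, 0x2000);  // sp below the limit: stub is called
      StackCheckSketch(0x3000, 0x2000);  // enough headroom: falls through to ok
    }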
@@ -1574,7 +1574,8 @@ void PatchInlinedSmiCode(Address address) {
// If the instruction following the call is not a andi at, rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
- if (!Assembler::IsAndImmediate(instr)) {
+ if (!(Assembler::IsAndImmediate(instr) &&
+       Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
return;
}
......
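
The stricter condition above only treats the instruction after the call as an inlined-smi marker when it is an andi whose destination (the rt field) is zero_reg, matching the new marker emitted by JumpPatchSite. A standalone sketch of that test (not the V8 Assembler helpers), assuming the standard MIPS I-type layout opcode[31:26] rs[25:21] rt[20:16] imm[15:0]:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kAndiOpcode = 0x0C;  // andi rt, rs, imm

    bool IsAndiToZeroReg(uint32_t instr) {
      uint32_t opcode = instr >> 26;
      uint32_t rt = (instr >> 16) & 0x1F;        // destination register of andi
      return opcode == kAndiOpcode && rt == 0;   // zero_reg ($zero) has code 0
    }

    int main() {
      // andi zero_reg, a2, 0x1234 (a2 is register 6): accepted as a marker.
      assert(IsAndiToZeroReg(0x30C01234));
      // andi at, a2, 0x1234 (at is register 1): no longer treated as a marker.
      assert(!IsAndiToZeroReg(0x30C11234));
    }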
@@ -1253,13 +1253,14 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
// Stack is already aligned for call, so decrement by alignment
// to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment));
- __ sw(ra, MemOperand(sp, 0));
- __ mov(a0, sp);
+ __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
+ const int return_address_offset = kCArgsSlotsSize;
+ __ Addu(a0, sp, return_address_offset);
+ __ sw(ra, MemOperand(a0, 0));
__ mov(t9, t1);
__ Call(t9);
- __ lw(ra, MemOperand(sp, 0));
- __ Addu(sp, sp, Operand(stack_alignment));
+ __ lw(ra, MemOperand(sp, return_address_offset));
+ __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
__ Jump(ra);
}
......
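
The reworked prologue/epilogue above reserves room for the C argument slots in addition to the alignment padding and saves ra just above those slots rather than at sp+0. A minimal sketch of the resulting offsets, using illustrative constants (the real kCArgsSlotsSize and ActivationFrameAlignment() come from the MIPS port headers; 16 bytes of O32 argument slots and 8-byte alignment are assumptions here):

    #include <cstdio>

    constexpr int kPointerSize = 4;
    constexpr int kCArgsSlotsSize = 4 * kPointerSize;  // assumed: O32 reserves 4 slots

    int main() {
      int stack_alignment = 8;  // assumed ActivationFrameAlignment()
      if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;

      // __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
      int frame_size = stack_alignment + kCArgsSlotsSize;
      // const int return_address_offset = kCArgsSlotsSize;
      int return_address_offset = kCArgsSlotsSize;  // ra sits above the arg slots

      std::printf("frame: %d bytes, ra at sp+%d, arg slots at sp+0..sp+%d\n",
                  frame_size, return_address_offset, kCArgsSlotsSize - 1);
    }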