Commit dabc5905 authored by sgjesse@chromium.org

ARM: Add type-feedback recording for compare

Change the comparison in the full code generator to use CompareIC instead of the CompareStub so that the operand types are recorded. This also implements patching in the full code generator: the inlined smi code is deactivated by default so that the CompareIC is called at least once, and the inlined smi code is then activated by patching the code.
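In sketch form (mirroring the before/after comments added to PatchInlinedSmiCode in ic-arm.cc below), a "jump if not smi" site is emitted deactivated and later activated by patching; the "jump if smi" variant swaps eq and ne:

  cmp rx, rx              // deactivated (as emitted): operands always equal
  b eq, <slow_case>       // always taken, so the CompareIC is always called

  tst rx, #kSmiTagMask    // activated (after patching)
  b ne, <slow_case>       // taken only for non-smi inputs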

Fixed the smi comparison in the ICCompareStub.

Fixed ToBooleanStub to ensure that the scratch register used is not the same as the input register. Use r9 as the default, as that register will never be an input with Crankshaft.

Implemented lithium instruction CmpTAndBranch.

Make sure that the lithium instruction CmpID has its operands in registers, as the current optimized code expects that.
Review URL: http://codereview.chromium.org/6461017

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6704 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent d5851dcd
......@@ -352,6 +352,11 @@ void Assembler::CodeTargetAlign() {
}
Condition Assembler::GetCondition(Instr instr) {
return Instruction::ConditionField(instr);
}
bool Assembler::IsBranch(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
......@@ -428,6 +433,20 @@ Register Assembler::GetRd(Instr instr) {
}
Register Assembler::GetRn(Instr instr) {
Register reg;
reg.code_ = Instruction::RnValue(instr);
return reg;
}
Register Assembler::GetRm(Instr instr) {
Register reg;
reg.code_ = Instruction::RmValue(instr);
return reg;
}
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
......@@ -465,6 +484,35 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
bool Assembler::IsTstImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | TST | S);
}
bool Assembler::IsCmpRegister(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
(CMP | S);
}
bool Assembler::IsCmpImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | CMP | S);
}
Register Assembler::GetCmpImmediateRegister(Instr instr) {
ASSERT(IsCmpImmediate(instr));
return GetRn(instr);
}
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
ASSERT(IsCmpImmediate(instr));
return instr & kOff12Mask;
}
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
......@@ -1052,6 +1100,13 @@ void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
}
void Assembler::cmp_raw_immediate(
Register src, int raw_immediate, Condition cond) {
ASSERT(is_uint12(raw_immediate));
emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
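A hedged usage sketch with hypothetical values: unlike cmp with an Operand immediate, which goes through ARM's rotated 8-bit immediate encoding, cmp_raw_immediate writes the low 12 bits of the value into the instruction verbatim. JumpPatchSite::EmitPatchInfo below relies on this to use the instruction as a data marker:

__ cmp_raw_immediate(r1, 905);  // emits cmp r1 against raw imm12 = 905;
                                // harmless to execute, since only the flags
                                // are set and they are redefined right after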
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | CMN | S, src1, r0, src2);
}
......@@ -2363,7 +2418,7 @@ void Assembler::nop(int type) {
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
// Check for mov rx, rx where x = type.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
......
......@@ -729,6 +729,7 @@ class Assembler : public Malloced {
void cmp(Register src1, Register src2, Condition cond = al) {
cmp(src1, Operand(src2), cond);
}
void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
void cmn(Register src1, const Operand& src2, Condition cond = al);
......@@ -1099,6 +1100,7 @@ class Assembler : public Malloced {
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
static Condition GetCondition(Instr instr);
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
......@@ -1109,6 +1111,8 @@ class Assembler : public Malloced {
static bool IsAddRegisterImmediate(Instr instr);
static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
static Register GetRd(Instr instr);
static Register GetRn(Instr instr);
static Register GetRm(Instr instr);
static bool IsPush(Instr instr);
static bool IsPop(Instr instr);
static bool IsStrRegFpOffset(Instr instr);
......@@ -1116,6 +1120,11 @@ class Assembler : public Malloced {
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
static bool IsTstImmediate(Instr instr);
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
// Check if is time to emit a constant pool for pending reloc info entries
......
......@@ -1298,7 +1298,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result;
Label not_heap_number;
Register scratch = r7;
Register scratch = r9.is(tos_) ? r7 : r9;
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos_, ip);
......@@ -5809,10 +5809,9 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
// For equality we do not care about the sign of the result.
__ sub(r0, r0, r1, SetCC);
} else {
__ sub(r1, r1, r0, SetCC);
// Correct sign of result in case of overflow.
__ rsb(r1, r1, Operand(0), SetCC, vs);
__ mov(r0, r1);
// Untag before subtracting to avoid handling overflow.
__ SmiUntag(r1);
__ sub(r0, r1, SmiUntagOperand(r0));
}
__ Ret();
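Why untagging first removes the overflow case (a sketch of the reasoning, assuming V8's 31-bit smis with kSmiTagSize == 1): after SmiUntag / SmiUntagOperand, i.e. an arithmetic shift right by one, each operand lies in [-2^30, 2^30 - 1], so the difference lies in [-(2^31 - 1), 2^31 - 1] and always fits in the 32-bit result register. The old code subtracted tagged values, which could overflow and needed the rsb fixup on the V flag.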
......
......@@ -582,6 +582,7 @@ class Instruction {
inline int TypeValue() const { return Bits(27, 25); }
inline int RnValue() const { return Bits(19, 16); }
DECLARE_STATIC_ACCESSOR(RnValue);
inline int RdValue() const { return Bits(15, 12); }
DECLARE_STATIC_ACCESSOR(RdValue);
......@@ -625,6 +626,7 @@ class Instruction {
inline int SValue() const { return Bit(20); }
// with register
inline int RmValue() const { return Bits(3, 0); }
DECLARE_STATIC_ACCESSOR(RmValue);
inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
inline ShiftOp ShiftField() const {
return static_cast<ShiftOp>(BitField(6, 5));
......
......@@ -45,6 +45,67 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
// A patch site is a location in the code that can be patched. This class
// has a number of methods to emit the patchable code and the method
// EmitPatchInfo to record a marker back to the patchable code. This marker
// is a cmp rx, #yyy instruction, where x * 0x00000fff + yyy (the raw 12-bit
// immediate value) is the delta from the pc to the first instruction of the
// patchable code.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
}
~JumpPatchSite() {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
// When this is initially emitted, ensure that a jump is always generated to
// skip the inlined smi code.
void EmitJumpIfNotSmi(Register reg, Label* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
// Don't use b(al, ...) as that might emit the constant pool right after the
// branch. After patching when the branch is no longer unconditional
// execution can continue into the constant pool.
__ b(eq, target); // Always taken before patched.
}
// When this is initially emitted, ensure that a jump is never generated to
// skip the inlined smi code.
void EmitJumpIfSmi(Register reg, Label* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
__ b(ne, target); // Never taken before patched.
}
void EmitPatchInfo() {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
reg.set_code(delta_to_patch_site / kOff12Mask);
__ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
#ifdef DEBUG
info_emitted_ = true;
#endif
}
bool is_bound() const { return patch_site_.is_bound(); }
private:
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
bool info_emitted_;
#endif
};
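A worked example of the EmitPatchInfo marker encoding, with a deliberately large, hypothetical delta and assuming kOff12Mask == (1 << 12) - 1 == 4095:

int delta_to_patch_site = 5000;             // instructions back to the site
int reg_code = delta_to_patch_site / 4095;  // 1   -> marker register is r1
int imm = delta_to_patch_site % 4095;       // 905 -> marker is cmp r1, #905
// PatchInlinedSmiCode later recovers the delta as 1 * 4095 + 905 == 5000.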
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
......@@ -753,24 +814,24 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow_case);
patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
CompareStub stub(eq, true, flags, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site);
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
......@@ -3511,21 +3572,22 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
__ JumpIfNotSmi(r2, &slow_case);
patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
CompareStub stub(cond, strict, flags, r1, r0);
__ CallStub(&stub);
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
}
}
......@@ -3592,6 +3654,16 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
__ Call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
__ nop(); // Signals no inlined code.
}
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
......
......@@ -1700,11 +1700,78 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
Token::Name(op_));
}
#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address());
}
}
void PatchInlinedSmiCode(Address address) {
// Currently there is no smi inlining in the ARM full code generator.
Address cmp_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(cmp_instruction_address);
if (!Assembler::IsCmpImmediate(instr)) {
return;
}
// The delta gives the distance back to the start of the inlined smi check;
// the condition code of the branch found there determines how it is patched.
int delta = Assembler::GetCmpImmediateRawImmediate(instr);
delta +=
Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
// If the delta is 0 the instruction is cmp r0, #0 which also signals that
// nothing was inlined.
if (delta == 0) {
return;
}
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
address, cmp_instruction_address, delta);
}
#endif
Address patch_address =
cmp_instruction_address - delta * Instruction::kInstrSize;
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
// This is patching a "jump if not smi" site to be active.
// Changing
// cmp rx, rx
// b eq, <target>
// to
// tst rx, #kSmiTagMask
// b ne, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
// This is patching a "jump if smi" site to be active.
// Changing
// cmp rx, rx
// b ne, <target>
// to
// tst rx, #kSmiTagMask
// b eq, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(eq);
}
}
......
......@@ -1021,7 +1021,7 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right));
UseRegisterAtStart(right));
} else if (r.IsDouble()) {
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
......@@ -1419,7 +1419,7 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
ASSERT(instr->left()->representation().IsDouble());
......
......@@ -1605,7 +1605,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
__ cmp(ToRegister(left), ToOperand(right));
__ cmp(ToRegister(left), ToRegister(right));
}
......@@ -1619,8 +1619,7 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
__ vmrs(pc);
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to unordered to return false.
__ b(vs, &unordered);
......@@ -1647,8 +1646,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
__ vmrs(pc);
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
__ b(vs, chunk_->GetAssemblyLabel(false_block));
......@@ -2180,12 +2178,12 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
__ cmp(r0, Operand(0));
__ LoadRoot(ToRegister(instr->result()),
Heap::kTrueValueRootIndex,
condition);
......@@ -2196,7 +2194,21 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
Abort("DoCmpTAndBranch unimplemented.");
Token::Value op = instr->op();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// The compare stub expects compare condition and the input operands
// reversed for GT and LTE.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
__ cmp(r0, Operand(0));
EmitBranch(true_block, false_block, condition);
}
......
......@@ -2381,7 +2381,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
......@@ -2404,15 +2403,21 @@ CodePatcher::~CodePatcher() {
}
void CodePatcher::Emit(Instr x) {
masm()->emit(x);
void CodePatcher::Emit(Instr instr) {
masm()->emit(instr);
}
void CodePatcher::Emit(Address addr) {
masm()->emit(reinterpret_cast<Instr>(addr));
}
#endif // ENABLE_DEBUGGER_SUPPORT
void CodePatcher::EmitCondition(Condition cond) {
Instr instr = Assembler::instr_at(masm_.pc_);
instr = (instr & ~kCondMask) | cond;
masm_.emit(instr);
}
} } // namespace v8::internal
......
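A hedged usage sketch of the new CodePatcher::EmitCondition (the address name is hypothetical): it re-emits only the condition field of the instruction already present at the patcher's current position, so a branch can be flipped without recomputing its offset, which is exactly how PatchInlinedSmiCode uses it:

CodePatcher patcher(branch_address, 1);  // branch_address holds "b eq, <target>"
patcher.EmitCondition(ne);               // now reads "b ne, <target>", offset intact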
......@@ -45,6 +45,12 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
}
static inline Operand SmiUntagOperand(Register object) {
return Operand(object, ASR, kSmiTagSize);
}
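As used in ICCompareStub::GenerateSmis above, the new SmiUntagOperand helper folds the untagging shift into the operand of an arithmetic instruction, so one of the two untags comes for free:

__ SmiUntag(r1);                      // r1 = r1 >> 1 (arithmetic shift)
__ sub(r0, r1, SmiUntagOperand(r0));  // r0 = r1 - (r0 >> 1), untag folded in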
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer.
......@@ -886,11 +892,15 @@ class CodePatcher {
MacroAssembler* masm() { return &masm_; }
// Emit an instruction directly.
void Emit(Instr x);
void Emit(Instr instr);
// Emit an address directly.
void Emit(Address addr);
// Emit the condition part of an instruction leaving the rest of the current
// instruction unchanged.
void EmitCondition(Condition cond);
private:
byte* address_; // The address of the code being patched.
int instructions_; // Number of instructions of the expected patch size.
......
......@@ -544,7 +544,8 @@ class FullCodeGenerator: public AstVisitor {
void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
// Calling an IC stub with a patch site. Passing NULL for patch_site, or a
// non-NULL patch_site which is not activated, indicates no inlined smi code
// and emits a nop after the IC call.
void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
// Set fields in the stack frame. Offsets are the frame pointer relative
......
......@@ -47,8 +47,7 @@ namespace internal {
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm)
: masm_(masm) {
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
......@@ -60,7 +59,7 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
__ test(reg, Immediate(kSmiTagMask));
EmitJump(not_carry, target);  // Always taken before patched.
}
void EmitJumpIfSmi(Register reg, NearLabel* target) {
......