Commit 931f0a03 authored by sgjesse@chromium.org

Changed inlined property load detection on ARM

Instead of placing a nop after all non-inlined calls to the load IC, use a different nop (mov r1, r1 instead of mov r0, r0) to detect an inlined load IC.
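As a rough illustration (a standalone sketch, not the V8 sources; the constant names below are mine, mirroring the al/B21/B12 encoding used in the diff), the marker nop is just "mov r<type>, r<type>" under the always condition, so the register index doubles as the marker type that the IC patching code later checks for:

#include <cassert>
#include <cstdint>

typedef uint32_t Instr;

const Instr kConditionAlways = 0xE0000000;  // "al" condition field (bits 28-31)
const Instr kMovOpcode = 13u << 21;         // MOV is opcode 0b1101 in the data-processing group
const int kRdShift = 12;                    // destination register field

// Encode the marker nop "mov r<type>, r<type>"; type 0 is an ordinary nop,
// type 1 marks an inlined named property load (mov r1, r1).
Instr EncodeMarkerNop(int type) {
  assert(0 <= type && type <= 14);  // mov pc, pc is not a nop
  return kConditionAlways | kMovOpcode | (static_cast<Instr>(type) << kRdShift) | static_cast<Instr>(type);
}

// Recognize a marker nop of the given type, as the load IC patching code does.
bool IsMarkerNop(Instr instr, int type) {
  return instr == EncodeMarkerNop(type);
}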

Added more infrastructure to the deferred code handling to make it possible to block constant pool emission in a deferred code block, including the branch instruction that ends the block.
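A minimal sketch of that mechanism (simplified classes, not the V8 ones; StartBlockConstPool, EndBlockConstPool, BeforeGenerate and AfterGenerate mirror the names introduced in the diff below): a nesting counter in the assembler blocks constant pool emission, and the deferred code hooks open the block before Generate() and close it only after the code generator has emitted the jump back to the inlined code:

class AssemblerSketch {
 public:
  AssemblerSketch() : const_pool_blocked_nesting_(0) {}
  // Nested blocking: the constant pool may not be emitted while the count is non-zero.
  void StartBlockConstPool() { const_pool_blocked_nesting_++; }
  void EndBlockConstPool() { const_pool_blocked_nesting_--; }
  bool IsConstPoolBlocked() const { return const_pool_blocked_nesting_ > 0; }

 private:
  int const_pool_blocked_nesting_;
};

// Deferred code for the named load overrides the new hooks so that the blocked
// region covers both its own body and the branch the code generator emits
// after it (BeforeGenerate ... Generate ... jmp(exit) ... AfterGenerate).
class DeferredNamedLoadSketch {
 public:
  explicit DeferredNamedLoadSketch(AssemblerSketch* masm) : masm_(masm) {}
  void BeforeGenerate() { masm_->StartBlockConstPool(); }
  void AfterGenerate() { masm_->EndBlockConstPool(); }

 private:
  AssemblerSketch* masm_;
};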

Addressed a couple of comments on http://codereview.chromium.org/1715003, including adding an assert to make sure that patching an ldr instruction is always possible.
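To illustrate why that assert makes the patch safe (a sketch with made-up sizes, not V8's real JSObject constants): the ldr register-immediate encoding only has a 12-bit offset field, so the patcher asserts that every in-object property offset it might write fits in those 12 bits before rewriting the instruction:

#include <cassert>
#include <cstdint>

typedef uint32_t Instr;

// Illustrative bounds only; the real check compares JSObject::kMaxInstanceSize
// against JSObject::kHeaderSize.
const int kMaxInstanceSize = 1024;
const int kHeaderSize = 12;
const int kOffsetMask = 0xFFF;  // 12-bit immediate field of ldr rd, [rn, #+imm]

// Rewrite the immediate offset of a positive-offset ldr; the asserts guarantee
// the offset is always representable, so patching can never silently fail.
Instr SetLdrImmediateOffset(Instr ldr_instr, int offset) {
  assert((kMaxInstanceSize - kHeaderSize) < (1 << 12));
  assert(0 <= offset && offset <= kOffsetMask);
  return (ldr_instr & ~static_cast<Instr>(kOffsetMask)) | static_cast<Instr>(offset);
}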
Review URL: http://codereview.chromium.org/1758003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4480 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c20fcec3
......@@ -318,6 +318,7 @@ Assembler::Assembler(void* buffer, int buffer_size) {
Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0);
if (own_buffer_) {
if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
spare_buffer_ = buffer_;
......@@ -349,13 +350,20 @@ void Assembler::Align(int m) {
}
bool Assembler::IsB(Instr instr) {
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
bool Assembler::IsBranch(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
int Assembler::GetBOffset(Instr instr) {
ASSERT(IsB(instr));
int Assembler::GetBranchOffset(Instr instr) {
ASSERT(IsBranch(instr));
// Take the jump offset in the lower 24 bits, sign extend it and multiply it
// with 4 to get the offset in bytes.
return ((instr & Imm24Mask) << 8) >> 6;
......@@ -941,6 +949,10 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
if (dst.is(pc)) {
WriteRecordedPositions();
}
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int)
// pseudo instructions.
ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
addrmod1(cond | 13*B21 | s, r0, dst, src);
}
......@@ -1730,6 +1742,13 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Pseudo instructions.
void Assembler::nop(int type) {
// This is mov rx, rx.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
emit(al | 13*B21 | type*B12 | type);
}
void Assembler::lea(Register dst,
const MemOperand& x,
SBit s,
......
......@@ -896,7 +896,7 @@ class Assembler : public Malloced {
const Condition cond = al);
// Pseudo instructions
void nop() { mov(r0, Operand(r0)); }
void nop(int type = 0);
void push(Register src, Condition cond = al) {
str(src, MemOperand(sp, 4, NegPreIndex), cond);
......@@ -929,10 +929,10 @@ class Assembler : public Malloced {
class BlockConstPoolScope {
public:
explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
assem_->const_pool_blocked_nesting_++;
assem_->StartBlockConstPool();
}
~BlockConstPoolScope() {
assem_->const_pool_blocked_nesting_--;
assem_->EndBlockConstPool();
}
private:
......@@ -958,17 +958,26 @@ class Assembler : public Malloced {
int current_position() const { return current_position_; }
int current_statement_position() const { return current_statement_position_; }
void StartBlockConstPool() {
const_pool_blocked_nesting_++;
}
void EndBlockConstPool() {
const_pool_blocked_nesting_--;
}
// Read/patch instructions
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
static bool IsB(Instr instr);
static int GetBOffset(Instr instr);
static bool IsNop(Instr instr, int type = 0);
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
......
......@@ -5229,22 +5229,34 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void BeforeGenerate();
virtual void Generate();
virtual void AfterGenerate();
private:
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::BeforeGenerate() {
__ StartBlockConstPool();
}
void DeferredReferenceGetNamedValue::Generate() {
__ IncrementCounter(&Counters::named_load_inline_miss, 1, r1, r2);
// Setup the name register and call load IC.
__ mov(r2, Operand(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a b instruction to indicate that the inobject
// property case was inlined. Jumping back from the deferred code ensures
// that.
// The call must be followed by a nop(1) instruction to indicate that the
// in-object property load has been inlined.
__ nop(NAMED_PROPERTY_LOAD_INLINED);
}
void DeferredReferenceGetNamedValue::AfterGenerate() {
__ EndBlockConstPool();
}
......
......@@ -153,6 +153,14 @@ enum ArgumentsAllocationMode {
};
// Different nop operations are used by the code generator to detect certain
// states of the generated code.
enum NopMarkerTypes {
NON_MARKING_NOP = 0,
NAMED_PROPERTY_LOAD_INLINED
};
// -------------------------------------------------------------------------
// CodeGenerator
......@@ -305,7 +313,7 @@ class CodeGenerator: public AstVisitor {
void StoreToSlot(Slot* slot, InitState init_state);
// Load a named property, leaving it in r0. The receiver is passed on the
// stack, and remain there.
// stack, and remains there.
void EmitNamedLoad(Handle<String> name, bool is_contextual);
// Load a keyed property, leaving it in r0. The receiver and key are
......
......@@ -700,12 +700,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ push(ip);
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// A B instruction following the call signals that the load was inlined.
// Ensure that there is not a B instruction here.
__ nop();
}
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
DropAndApply(1, context, r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
......@@ -1003,12 +998,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ Call(ic, RelocInfo::CODE_TARGET);
// A B instruction following the call signals that the load was inlined.
// Ensure that there is not a B instruction here.
__ nop();
}
__ Call(ic, RelocInfo::CODE_TARGET);
}
......@@ -1445,12 +1435,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ Call(ic, RelocInfo::CODE_TARGET);
// A B instruction following the call signals that the load was
// inlined. Ensure that there is not a B instruction here.
__ nop();
}
__ Call(ic, RelocInfo::CODE_TARGET);
__ str(r0, MemOperand(sp));
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
......
......@@ -572,22 +572,30 @@ void LoadIC::ClearInlinedVersion(Address address) {
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// If the instruction after the call site is not a B instruction then this is
// not related to an inlined in-object property load. The B instructions is
// located just after the call to the IC in the deferred code handling the
// miss in the inlined code. All other calls to a load IC should ensure there
// in no B instruction directly following the call.
// If the instruction after the call site is not the pseudo instruction nop1
// then this is not related to an inlined in-object property load. The nop1
// instruction is located just after the call to the IC in the deferred code
// handling the miss in the inlined code. After the nop1 instruction there is
// a B instruction for jumping back from the deferred code.
Address address_after_call = address + Assembler::kCallTargetAddressOffset;
Instr instr_after_call = Assembler::instr_at(address_after_call);
if (!Assembler::IsB(instr_after_call)) return false;
if (!Assembler::IsNop(instr_after_call, NAMED_PROPERTY_LOAD_INLINED)) {
return false;
}
ASSERT_EQ(0, RegisterAllocator::kNumRegisters);
Address address_after_nop1 = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop1 = Assembler::instr_at(address_after_nop1);
ASSERT(Assembler::IsBranch(instr_after_nop1));
// Find the end of the inlined code for handling the load.
int b_offset =
Assembler::GetBOffset(instr_after_call) + Assembler::kPcLoadDelta;
Assembler::GetBranchOffset(instr_after_nop1) + Assembler::kPcLoadDelta;
ASSERT(b_offset < 0); // Jumping back from deferred code.
Address inline_end_address = address_after_call + b_offset;
Address inline_end_address = address_after_nop1 + b_offset;
// Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
// The immediate must be representable in 12 bits.
ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
Address ldr_property_instr_address = inline_end_address - 4;
ASSERT(Assembler::IsLdrRegisterImmediate(
Assembler::instr_at(ldr_property_instr_address)));
......
......@@ -299,17 +299,9 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
}
void VirtualFrame::CallLoadIC(RelocInfo::Mode mode, bool load_inlined) {
// If a nop is generated later make sure the it follows the call directly.
Assembler::BlockConstPoolScope block_const_pool(masm());
void VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
CallCodeObject(ic, mode, 0);
if (!load_inlined) {
// A B instruction following the call signals that the load was inlined.
// Ensure that there is not a B instruction here.
__ nop();
}
}
......
......@@ -309,9 +309,8 @@ class VirtualFrame : public ZoneObject {
int arg_count);
// Call load IC. Receiver on stack and property name in r2. Result returned in
// r0. If load_inlined is false the code generated will make sure that the IC
// handling will not see this load as having an inlined counterpart.
void CallLoadIC(RelocInfo::Mode mode, bool load_inlined = false);
// r0.
void CallLoadIC(RelocInfo::Mode mode);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments to the IC stub are implicit,
......
......@@ -77,11 +77,13 @@ void CodeGenerator::ProcessDeferred() {
}
// Generate the code.
Comment cmnt(masm_, code->comment());
code->BeforeGenerate();
masm_->bind(code->entry_label());
code->SaveRegisters();
code->Generate();
code->RestoreRegisters();
masm_->jmp(code->exit_label());
code->AfterGenerate();
}
}
......
......@@ -212,6 +212,9 @@ class DeferredCode: public ZoneObject {
void SaveRegisters();
void RestoreRegisters();
virtual void BeforeGenerate() { }
virtual void AfterGenerate() { }
protected:
MacroAssembler* masm_;
......