Commit 9b5ca375 authored by sgjesse@chromium.org

Inline keyed load on ARM

This uses the same infrastructure as the inlining of named property loads. The patching of the inlined code is simpler, because the key is provided in a register, so the only patching required is of the map check, which directs execution either into the inlined code or to the deferred code block.
Review URL: http://codereview.chromium.org/1735007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4510 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 2e129894
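
The core idea, restated: the inlined keyed load opens with a map check against a map pointer embedded in the generated code, and the IC machinery turns the fast path on and off purely by rewriting that pointer. A minimal C++ sketch of that contract follows; the struct and its members are illustrative stand-ins (the real code patches a constant-pool slot in the instruction stream, not a field):

// Hypothetical stand-in for an inlined load site.
struct InlinedLoadSite {
  const void* embedded_map;  // compared against the receiver's map at runtime

  // IC hit: install the receiver's map so the inlined fast path is taken.
  void Patch(const void* receiver_map) { embedded_map = receiver_map; }

  // ClearInlinedVersion: install the null map, which no heap object can
  // have, so the check always fails and execution reaches the IC stub.
  void Clear(const void* null_map) { embedded_map = null_map; }
};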
@@ -800,9 +800,10 @@ void Assembler::b(int branch_offset, Condition cond) {
ASSERT(is_int24(imm24));
emit(cond | B27 | B25 | (imm24 & Imm24Mask));
if (cond == al)
if (cond == al) {
// Dead code is a good location to emit the constant pool.
CheckConstPool(false, false);
}
}
......
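
A note on the assembler hunk above: ARM code materializes large constants with pc-relative loads from a constant pool embedded in the instruction stream, and the pool has to be dumped somewhere control flow can never reach. The slot just after an unconditional branch (cond == al) is guaranteed dead, hence the CheckConstPool call there. A hedged restatement as a standalone helper (the real change lives inside Assembler::b itself):

void EmitBranchAndPool(Assembler* assm, int branch_offset, Condition cond) {
  assm->b(branch_offset, cond);  // emit the branch
  if (cond == al) {
    // Nothing falls through an unconditional branch, so constants emitted
    // here are pure data and are never decoded as instructions.
    assm->CheckConstPool(false, false);  // same arguments as in the hunk above
  }
}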
@@ -3703,7 +3703,7 @@ void CodeGenerator::VisitCall(Call* node) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
EmitKeyedLoad(false);
EmitKeyedLoad();
frame_->Drop(); // key
// Put the function below the receiver.
if (property->is_synthetic()) {
@@ -5250,7 +5250,38 @@ void DeferredReferenceGetNamedValue::Generate() {
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop(1) instruction to indicate that the
// in-object load has been inlined.
__ nop(NAMED_PROPERTY_LOAD_INLINED);
__ nop(PROPERTY_LOAD_INLINED);
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
__ BlockConstPoolFor(1);
}
}
class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
DeferredReferenceGetKeyedValue() {
set_comment("[ DeferredReferenceGetKeyedValue");
}
virtual void Generate();
};
void DeferredReferenceGetKeyedValue::Generate() {
__ DecrementCounter(&Counters::keyed_load_inline, 1, r1, r2);
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1, r1, r2);
// The rest of the instructions in the deferred code must be emitted together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed load IC. It has all arguments on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// keyed load has been inlined.
__ nop(PROPERTY_LOAD_INLINED);
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
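
The deferred code above establishes a layout contract that the patching code further down (IsInlinedICSite) depends on: the IC call, then the marking nop, then the branch back into the inlined code, with no constant pool entries in between. A condensed sketch of that tail, using the names from the diff:

{ Assembler::BlockConstPoolScope block_const_pool(masm_);
  __ Call(ic, RelocInfo::CODE_TARGET);  // patchable call to the generic IC stub
  __ nop(PROPERTY_LOAD_INLINED);        // marker nop checked by IsInlinedICSite
  // The branch back is emitted by DeferredCode after this scope closes, so
  // the pool must stay blocked for one more instruction:
  __ BlockConstPoolFor(1);
}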
@@ -5269,7 +5300,7 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET);
} else {
// Inline the inobject property case.
// Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
@@ -5304,7 +5335,7 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
__ cmp(r2, r3);
deferred->Branch(ne);
// Use initially use an invalid index. The index will be patched by the
// Initially use an invalid index. The index will be patched by the
// inline cache code.
__ ldr(r0, MemOperand(r1, 0));
@@ -5318,13 +5349,81 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
}
void CodeGenerator::EmitKeyedLoad(bool is_global) {
Comment cmnt(masm_, "[ Load from keyed Property");
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
RelocInfo::Mode rmode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
frame_->CallCodeObject(ic, rmode, 0);
void CodeGenerator::EmitKeyedLoad() {
if (loop_nesting() == 0) {
Comment cmnt(masm_, "[ Load from keyed property");
frame_->CallKeyedLoadIC();
} else {
// Inline the keyed load.
Comment cmnt(masm_, "[ Inlined load from keyed property");
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue();
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
__ IncrementCounter(&Counters::keyed_load_inline, 1,
frame_->scratch0(), frame_->scratch1());
// Load the receiver from the stack.
__ ldr(r0, MemOperand(sp, kPointerSize));
// Check that the receiver is a heap object.
__ tst(r0, Operand(kSmiTagMask));
deferred->Branch(eq);
// The following instructions form the inlined keyed property load. Parts
// of this code are patched, so the exact number of instructions generated
// needs to be fixed. Therefore the constant pool is blocked while generating
// this code.
#ifdef DEBUG
int kInlinedKeyedLoadInstructions = 20;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Factory::null_value()));
__ cmp(r1, r2);
deferred->Branch(ne);
// Load the key from the stack.
__ ldr(r1, MemOperand(sp, 0));
// Check that the key is a smi.
__ tst(r1, Operand(kSmiTagMask));
deferred->Branch(ne);
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(r2, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r3, FieldMemOperand(r2, JSObject::kMapOffset));
__ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
__ cmp(r3, r4);
deferred->Branch(ne);
// Check that key is within bounds.
__ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ cmp(r3, Operand(r1, ASR, kSmiTagSize));
deferred->Branch(ls);  // Unsigned lower or same: length <= key is out of bounds.
// Load and check that the result is not the hole (r1 is a smi).
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r2, r1, LSL,
kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
__ cmp(r0, r3);
deferred->Branch(eq);
// Make sure that the expected number of instructions is generated.
ASSERT_EQ(kInlinedKeyedLoadInstructions,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
deferred->BindExit();
}
}
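
For readers not fluent in ARM assembler, here is a hedged, self-contained C++ restatement of the twenty-instruction fast path generated above. All types are illustrative stand-ins, and each early return corresponds to a deferred->Branch(...) into the deferred code, which falls back to the generic KeyedLoadIC stub:

#include <cstddef>
#include <vector>

struct Map {};
struct Value { bool is_smi; int smi; };   // stand-in for a tagged V8 value

struct Elements {
  const Map* map;                         // fixed-array map or a dictionary map
  std::vector<const Value*> slots;        // backing store
};
struct Receiver { const Map* map; Elements elements; };

const Value* InlinedKeyedLoad(const Receiver* receiver, const Value& key,
                              const Map* cached_map,       // patched by the IC
                              const Map* fixed_array_map,
                              const Value* the_hole) {
  if (receiver == nullptr) return nullptr;                 // smi receiver check
  if (receiver->map != cached_map) return nullptr;         // patched map check
  if (!key.is_smi) return nullptr;                         // key must be a smi
  if (receiver->elements.map != fixed_array_map) return nullptr;  // dictionary
  // The generated code compares unsigned, so a negative key also bails out.
  std::size_t index = static_cast<std::size_t>(key.smi);
  if (index >= receiver->elements.slots.size()) return nullptr;   // bounds
  const Value* result = receiver->elements.slots[index];
  if (result == the_hole) return nullptr;                  // hole: not present
  return result;                                           // fast path hit (r0)
}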
@@ -5383,12 +5482,8 @@ void Reference::GetValue() {
}
case KEYED: {
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
ASSERT(property != NULL);
Variable* var = expression_->AsVariableProxy()->AsVariable();
ASSERT(var == NULL || var->is_global());
cgen_->EmitKeyedLoad(var != NULL);
cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
break;
}
......
@@ -157,7 +157,7 @@ enum ArgumentsAllocationMode {
// states of the generated code.
enum NopMarkerTypes {
NON_MARKING_NOP = 0,
NAMED_PROPERTY_LOAD_INLINED
PROPERTY_LOAD_INLINED
};
@@ -318,7 +318,7 @@ class CodeGenerator: public AstVisitor {
// Load a keyed property, leaving it in r0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad(bool is_global);
void EmitKeyedLoad();
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
......
@@ -563,15 +563,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if present) to
// guarantee failure by holding an invalid map (the null value). The offset
// can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
static inline bool IsInlinedICSite(Address address,
Address* inline_end_address) {
// If the instruction after the call site is not the pseudo instruction nop1
// then this is not related to an inlined in-object property load. The nop1
// instruction is located just after the call to the IC in the deferred code
@@ -579,24 +572,42 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// a branch instruction for jumping back from the deferred code.
Address address_after_call = address + Assembler::kCallTargetAddressOffset;
Instr instr_after_call = Assembler::instr_at(address_after_call);
if (!Assembler::IsNop(instr_after_call, NAMED_PROPERTY_LOAD_INLINED)) {
if (!Assembler::IsNop(instr_after_call, PROPERTY_LOAD_INLINED)) {
return false;
}
ASSERT_EQ(0, RegisterAllocator::kNumRegisters);
Address address_after_nop1 = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop1 = Assembler::instr_at(address_after_nop1);
ASSERT(Assembler::IsBranch(instr_after_nop1));
Address address_after_nop = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop = Assembler::instr_at(address_after_nop);
ASSERT(Assembler::IsBranch(instr_after_nop));
// Find the end of the inlined code for handling the load.
int b_offset =
Assembler::GetBranchOffset(instr_after_nop1) + Assembler::kPcLoadDelta;
Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
ASSERT(b_offset < 0); // Jumping back from deferred code.
Address inline_end_address = address_after_nop1 + b_offset;
*inline_end_address = address_after_nop + b_offset;
return true;
}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined in-object property load (if present) to
// guarantee failure by holding an invalid map (the null value). The offset
// can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// Find the end of the inlined code for handling the load if this is an
// inlined IC call site.
Address inline_end_address;
if (!IsInlinedICSite(address, &inline_end_address)) return false;
// Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
// The immediate must be represenatble in 12 bits.
// The immediate must be representable in 12 bits.
ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
Address ldr_property_instr_address = inline_end_address - 4;
Address ldr_property_instr_address =
inline_end_address - Assembler::kInstrSize;
ASSERT(Assembler::IsLdrRegisterImmediate(
Assembler::instr_at(ldr_property_instr_address)));
Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
@@ -608,18 +619,31 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
// Patch the map check.
Address ldr_map_instr_address = inline_end_address - 16;
Address ldr_map_instr_address =
inline_end_address - 4 * Assembler::kInstrSize;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined keyed load (if present) to
// guarantee failure by holding an invalid map (the null value).
PatchInlinedLoad(address, Heap::null_value());
}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
Address inline_end_address;
if (!IsInlinedICSite(address, &inline_end_address)) return false;
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address - 19 * Assembler::kInstrSize;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
......
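
The constant 19 in KeyedLoadIC::PatchInlinedLoad is coupled to the kInlinedKeyedLoadInstructions == 20 assertion in EmitKeyedLoad: inline_end_address is the branch-back target just past the twenty-instruction inlined sequence, and the patched map-materializing load is that sequence's second instruction. A sketch of the arithmetic, with hypothetical constant names:

const int kInlinedKeyedLoadInstructions = 20;  // asserted in EmitKeyedLoad
const int kMapLoadIndex = 1;                   // 0-based: the second instruction

int KeyedMapPatchDistance() {
  // inline_end_address - distance * Assembler::kInstrSize hits the map load.
  return kInlinedKeyedLoadInstructions - kMapLoadIndex;  // == 19
}

The named-property patch above works the same way, reading its map-check address four instructions back from the end of its shorter inlined sequence.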
@@ -303,6 +303,12 @@ void VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
}
void VirtualFrame::CallKeyedLoadIC() {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
......
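
The new VirtualFrame::CallKeyedLoadIC simply wraps the generic stub call, mirroring CallLoadIC above it. A brief usage sketch matching the non-inlined path of EmitKeyedLoad:

// loop_nesting() == 0, so no inlining: receiver and key are on the stack
// and remain there; the stub leaves its result in r0.
frame_->CallKeyedLoadIC();
frame_->EmitPush(r0);  // push the result, as Reference::GetValue does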
@@ -308,10 +308,14 @@ class VirtualFrame : public ZoneObject {
InvokeJSFlags flag,
int arg_count);
// Call load IC. Receiver on stack and property name in r2. Result returned in
// r0.
// Call load IC. Receiver is on the stack and the property name is in r2.
// Result is returned in r0.
void CallLoadIC(RelocInfo::Mode mode);
// Call keyed load IC. Key and receiver are on the stack. Result is returned
// in r0.
void CallKeyedLoadIC();
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments to the IC stub are implicit,
// and depend on the type of IC stub.
......