Commit 968a5244 authored by sgjesse@chromium.org

Add inlining of property load on ARM

Generate inlined named property loads for in-object properties. This uses the same mechanism as on the Intel platforms: the map check and the load instruction of the inlined code are patched by the inline cache code. The map check is patched through the normal constant pool patching, and the load instruction is patched in place.
Review URL: http://codereview.chromium.org/1715003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4468 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent af63e616
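
As an orientation aid (an editorial sketch inferred from EmitNamedLoad and LoadIC::PatchInlinedLoad in the diff below, not part of the commit), the inlined fast-case sequence and its two patch points look roughly like this — eight instructions, matching the kInlinedNamedLoadInstructions assert in the codegen:

    ldr r1, [sp, #0]       ; load the receiver from the stack
    tst r1, #kSmiTagMask   ; smi check (smis have no in-object properties)
    beq <deferred>         ; miss: bail out to the load IC
    ldr r2, [r1, #map]     ; load the receiver's map
    ldr r3, [pc, #XXX]     ; map constant -- patched via the constant pool
    cmp r2, r3             ; map check
    bne <deferred>         ; miss: bail out to the load IC
    ldr r0, [r1, #+XXX]    ; property load -- offset patched in place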
@@ -349,6 +349,44 @@ void Assembler::Align(int m) {
}
bool Assembler::IsB(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
int Assembler::GetBOffset(Instr instr) {
ASSERT(IsB(instr));
// Take the jump offset in the lower 24 bits, sign-extend it and multiply it
// by 4 to get the offset in bytes.
return ((instr & Imm24Mask) << 8) >> 6;
}
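
A note on the shift pair in GetBOffset: shifting the 24-bit field up by 8 places it at the top of a 32-bit word, and the arithmetic shift down by 6 both sign-extends it and multiplies it by 4. A minimal standalone check (a sketch; the mask value and types are assumptions, not the V8 definitions):

#include <cassert>
#include <cstdint>

int DecodeImm24ToByteOffset(uint32_t instr) {
  const uint32_t kImm24Mask = (1u << 24) - 1;  // assumed value of Imm24Mask
  // << 8 moves imm24 into bits 31..8; the arithmetic >> 6 sign-extends and
  // leaves the value shifted left by 2, i.e. multiplied by 4.
  return static_cast<int32_t>((instr & kImm24Mask) << 8) >> 6;
}

int main() {
  assert(DecodeImm24ToByteOffset(0x00000002) == 8);   // imm24 = 2  -> +8 bytes
  assert(DecodeImm24ToByteOffset(0x00FFFFFF) == -4);  // imm24 = -1 -> -4 bytes
  return 0;
}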
bool Assembler::IsLdrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
int offset = instr & Off12Mask; // Zero extended offset.
return positive ? offset : -offset;
}
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = offset >= 0;
if (!positive) offset = -offset;
ASSERT(is_uint12(offset));
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
return (instr & ~Off12Mask) | offset;
}
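
A round-trip sketch of the sign-magnitude encoding handled by GetLdrRegisterImmediateOffset and SetLdrRegisterImmediateOffset: in the ARM ldr-immediate format the low 12 bits hold the offset magnitude and B23 (the U bit) selects add versus subtract. The constants below are assumptions mirroring that format, not the V8 definitions:

#include <cassert>
#include <cstdint>

typedef uint32_t Instr;
const Instr kB23 = 1u << 23;              // assumed: U bit (add vs. subtract)
const Instr kOff12Mask = (1u << 12) - 1;  // assumed: 12-bit offset field

int GetOffset(Instr instr) {
  int offset = instr & kOff12Mask;        // zero-extended magnitude
  return (instr & kB23) ? offset : -offset;
}

Instr SetOffset(Instr instr, int offset) {
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  assert(offset < (1 << 12));
  instr = (instr & ~kB23) | (positive ? kB23 : 0);
  return (instr & ~kOff12Mask) | static_cast<Instr>(offset);
}

int main() {
  Instr ldr = 0xE5910000;  // ldr r0, [r1, #0]
  assert(GetOffset(SetOffset(ldr, 100)) == 100);
  assert(GetOffset(SetOffset(ldr, -8)) == -8);
  return 0;
}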
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -372,10 +410,10 @@ int Assembler::target_at(int pos) {
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & Imm24Mask) << 8) >> 6;
if ((instr & CondMask) == nv && (instr & B24) != 0)
if ((instr & CondMask) == nv && (instr & B24) != 0) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
return pos + kPcLoadDelta + imm26;
}
......
@@ -958,14 +958,21 @@ class Assembler : public Malloced {
int current_position() const { return current_position_; }
int current_statement_position() const { return current_statement_position_; }
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
void instr_at_put(byte* pc, Instr instr) {
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
static bool IsB(Instr instr);
static int GetBOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
......
@@ -358,12 +358,9 @@ void CodeGenerator::Generate(CompilationInfo* info) {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
{ // NOLINT
// Make sure that the constant pool is not emitted inside of the return
// sequence.
Assembler::BlockConstPoolScope block_const_pool(masm_);
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
@@ -393,6 +390,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Code generation state must be reset.
ASSERT(!has_cc());
ASSERT(state_ == NULL);
ASSERT(loop_nesting() == 0);
ASSERT(!function_return_is_shadowed_);
function_return_.Unuse();
DeleteFrame();
@@ -2940,20 +2938,13 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
__ bind(&fast);
}
// All extension objects were empty and it is safe to use a global
// load IC call.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Load the global object.
LoadGlobal();
// Setup the name register.
// Setup the name register and call load IC.
__ mov(r2, Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
} else {
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
}
frame_->CallLoadIC(typeof_state == INSIDE_TYPEOF
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT);
// Drop the global object. The result is in r0.
frame_->Drop();
}
@@ -4935,6 +4926,85 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void Generate();
private:
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::Generate() {
__ IncrementCounter(&Counters::named_load_inline_miss, 1, r1, r2);
// Setup the name register and call load IC.
__ mov(r2, Operand(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a B instruction to indicate that the
// in-object property case was inlined. Jumping back from the deferred code
// ensures that.
}
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
// Setup the name register and call load IC.
__ mov(r2, Operand(name));
frame_->CallLoadIC(is_contextual
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET);
} else {
// Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(name);
// The following instructions are the inlined load of an in-object property.
// Parts of this code are patched, so the exact instructions generated need
// to be fixed. Therefore the constant pool is blocked while generating
// this code.
#ifdef DEBUG
int kInlinedNamedLoadInstructions = 8;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Load the receiver from the stack.
__ ldr(r1, MemOperand(sp, 0));
// Check that the receiver is a heap object.
__ tst(r1, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ mov(r3, Operand(Factory::null_value()));
__ cmp(r2, r3);
deferred->Branch(ne);
// Initially use an invalid index. The index will be patched by the
// inline cache code.
__ ldr(r0, MemOperand(r1, 0));
}
// Make sure that the expected number of instructions is generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
__ IncrementCounter(&Counters::named_load_inline, 1, r1, r2);
deferred->BindExit();
}
}
void CodeGenerator::EmitKeyedLoad(bool is_global) {
Comment cmnt(masm_, "[ Load from keyed Property");
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -4991,19 +5061,11 @@ void Reference::GetValue() {
}
case NAMED: {
VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
Handle<String> name(GetName());
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
__ mov(r2, Operand(name));
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
frame->CallCodeObject(ic, rmode, 0);
frame->EmitPush(r0);
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->EmitPush(r0);
break;
}
......
@@ -287,6 +287,10 @@ class CodeGenerator: public AstVisitor {
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
// Load a named property, leaving it in r0. The receiver is passed on the
// stack, and remains there.
void EmitNamedLoad(Handle<String> name, bool is_contextual);
// Load a keyed property, leaving it in r0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad(bool is_global);
......
@@ -199,12 +199,9 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
{ // NOLINT
// Make sure that the constant pool is not emitted inside of the return
// sequence.
Assembler::BlockConstPoolScope block_const_pool(masm_);
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Here we use masm_-> instead of the __ macro to prevent the code coverage
// tool from instrumenting, as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
@@ -703,7 +700,12 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ push(ip);
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// A B instruction following the call signals that the load was inlined.
// Ensure that there is not a B instruction here.
__ nop();
}
DropAndApply(1, context, r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1001,7 +1003,12 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ Call(ic, RelocInfo::CODE_TARGET);
// A B instruction following the call signals that the load was inlined.
// Ensure that there is not a B instruction here.
__ nop();
}
}
@@ -1438,7 +1445,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic, RelocInfo::CODE_TARGET);
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ Call(ic, RelocInfo::CODE_TARGET);
// A B instruction following the call signals that the load was
// inlined. Ensure that there is not a B instruction here.
__ nop();
}
__ str(r0, MemOperand(sp));
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
......
@@ -27,7 +27,9 @@
#include "v8.h"
#include "assembler-arm.h"
#include "codegen-inl.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -561,21 +563,64 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
// TODO(181): Implement map patching once loop nesting is tracked on the
// ARM platform so we can generate inlined fast-case code loads in
// loops.
void LoadIC::ClearInlinedVersion(Address address) {}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined in-object property load (if present) to
// guarantee failure by holding an invalid map (the null value). The offset
// can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
return false;
// If the instruction following the call site is not a B instruction, then
// this is not related to an inlined in-object property load. The B
// instruction is located just after the call to the IC in the deferred code
// handling the miss in the inlined code. All other calls to a load IC should
// ensure there is no B instruction directly following the call.
Address address_after_call = address + Assembler::kCallTargetAddressOffset;
Instr instr_after_call = Assembler::instr_at(address_after_call);
if (!Assembler::IsB(instr_after_call)) return false;
// Find the end of the inlined code for handling the load.
int b_offset =
Assembler::GetBOffset(instr_after_call) + Assembler::kPcLoadDelta;
ASSERT(b_offset < 0); // Jumping back from deferred code.
Address inline_end_address = address_after_call + b_offset;
// Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
Address ldr_property_instr_address = inline_end_address - 4;
ASSERT(Assembler::IsLdrRegisterImmediate(
Assembler::instr_at(ldr_property_instr_address)));
Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
ldr_property_instr, offset - kHeapObjectTag);
Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
// Indicate that code has changed.
CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
// Patch the map check.
Address ldr_map_instr_address = inline_end_address - 16;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
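
To make the magic numbers concrete (an editorial reading, counting back from inline_end_address through the tail of the sequence EmitNamedLoad emits above):

    inline_end - 16 : ldr r3, [pc, #XXX]   ; map constant; set_target_address_at
                                           ; rewrites its constant pool entry
    inline_end - 12 : cmp r2, r3
    inline_end -  8 : bne <deferred>
    inline_end -  4 : ldr r0, [r1, #+XXX]  ; property load; its 12-bit offset is
                                           ; rewritten in place, then the icache
                                           ; line is flushed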
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
return false;
}
......
@@ -299,6 +299,20 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
}
void VirtualFrame::CallLoadIC(RelocInfo::Mode mode, bool load_inlined) {
// If a nop is generated later, make sure it follows the call directly.
Assembler::BlockConstPoolScope block_const_pool(masm());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
CallCodeObject(ic, mode, 0);
if (!load_inlined) {
// A B instruction following the call signals that the load was inlined.
// Ensure that there is not a B instruction here.
__ nop();
}
}
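
A minimal sketch of the discrimination this nop buys (a hypothetical helper mirroring the first lines of LoadIC::PatchInlinedLoad above, not code from the commit): the instruction slot directly after a load IC call either holds a B back-branch, meaning the call has an inlined fast case to patch, or anything else — typically this nop — meaning it does not.

static bool CallHasInlinedFastCase(Address call_site) {
  // kCallTargetAddressOffset skips over the call sequence itself.
  Address after_call = call_site + Assembler::kCallTargetAddressOffset;
  return Assembler::IsB(Assembler::instr_at(after_call));
}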
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
......
@@ -308,6 +308,11 @@ class VirtualFrame : public ZoneObject {
InvokeJSFlags flag,
int arg_count);
// Call load IC. Receiver is on the stack and the property name is in r2.
// The result is returned in r0. If load_inlined is false, the generated code
// will make sure that the IC handling does not see this load as having an
// inlined counterpart.
void CallLoadIC(RelocInfo::Mode mode, bool load_inlined = false);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments to the IC stub are implicit,
// and depend on the type of IC stub.
......
@@ -36,7 +36,7 @@ var expected_source_line_text = null;
var expected_function_name = null;
// Simple debug event handler which first time will cause 'step in' action
// to get into g.call and than check that execution is pauesed inside
// to get into g.call and than check that execution is stopped inside
// function 'g'.
function listener(event, exec_state, event_data, data) {
try {
......