Commit 73256b05 authored by erik.corry@gmail.com

Fix jump targets on ARM to merge virtual frames (really this time).

Use the jump targets so that the first piece of deferred code converted
(the inlined named property load) no longer flushes registers.
Review URL: http://codereview.chromium.org/2249002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4745 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 67c0ec6f
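
The heart of the change is that a jump target records the virtual frame expected at its destination, and every branch to it emits merge code from the current frame layout to that expected layout. For a conditional branch the merge instructions must be predicated on the branch condition, and the frame state recorded for the fall-through path must stay unchanged. Below is a minimal standalone sketch of that scheme; all names and encodings are invented for illustration, and none of this is the V8 source.

#include <cstdio>

// Toy model of a jump target's entry frame and of merge-code emission.
enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS };

struct FrameModel {
  TopOfStack tos;  // which registers currently cache the top of stack
};

// Emit (here: just print) the instructions that turn the current frame
// layout into the expected one. For a conditional branch the emitted
// instructions are predicated ("eq" below) and, crucially, the recorded
// fall-through state is left untouched.
void MergeTo(FrameModel* current, const FrameModel& expected, bool conditional) {
  const char* cc = conditional ? "eq" : "";
  if (current->tos == R0_TOS && expected.tos == NO_TOS_REGISTERS) {
    std::printf("  push%s r0\n", cc);
  } else if (current->tos == NO_TOS_REGISTERS && expected.tos == R0_TOS) {
    std::printf("  pop%s r0\n", cc);
  } else if (current->tos == R0_TOS && expected.tos == R1_TOS) {
    std::printf("  mov%s r1, r0\n", cc);
  }  // ...remaining state pairs elided...
  // Only an unconditional merge changes the frame we fall through with;
  // a conditional merge only affects the taken path.
  if (!conditional) current->tos = expected.tos;
}

int main() {
  FrameModel frame = {R0_TOS};            // current frame caches TOS in r0
  FrameModel entry = {NO_TOS_REGISTERS};  // frame recorded at the target
  MergeTo(&frame, entry, true);   // conditional branch: pusheq r0
  MergeTo(&frame, entry, false);  // unconditional: push r0, state updated
  return frame.tos == NO_TOS_REGISTERS ? 0 : 1;
}
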
@@ -66,50 +66,19 @@ static void MultiplyByKnownInt(MacroAssembler* masm,
static bool IsEasyToMultiplyBy(int x);
#define __ ACCESS_MASM(masm)
// -------------------------------------------------------------------------
// Platform-specific FrameRegisterState functions.
void FrameRegisterState::Save(MacroAssembler* masm) const {
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int action = registers_[i];
if (action == kPush) {
__ push(RegisterAllocator::ToRegister(i));
} else if (action != kIgnore && (action & kSyncedFlag) == 0) {
__ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
}
}
}
void FrameRegisterState::Restore(MacroAssembler* masm) const {
// Restore registers in reverse order due to the stack.
for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
int action = registers_[i];
if (action == kPush) {
__ pop(RegisterAllocator::ToRegister(i));
} else if (action != kIgnore) {
action &= ~kSyncedFlag;
__ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
}
}
}
#undef __
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
void DeferredCode::SaveRegisters() {
frame_state_.Save(masm_);
// On ARM you either have a completely spilled frame or you
// handle it yourself, but at the moment there's no automation
// of registers and deferred code.
}
void DeferredCode::RestoreRegisters() {
frame_state_.Restore(masm_);
}
@@ -117,12 +86,11 @@ void DeferredCode::RestoreRegisters() {
// Platform-specific RuntimeCallHelper functions.
void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
frame_state_->Save(masm);
frame_state_->frame()->AssertIsSpilled();
}
void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
frame_state_->Restore(masm);
}
@@ -3455,7 +3423,6 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame_->Dup();
}
EmitNamedLoad(name, var != NULL);
frame_->EmitPush(r0);
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
@@ -5613,11 +5580,19 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
};
// The convention here is that on entry the receiver is in a register that
// is not used by the stack. On exit the answer is found in that same
// register and the stack has the same height.
void DeferredReferenceGetNamedValue::Generate() {
ASSERT(receiver_.is(r0) || receiver_.is(r1));
#ifdef DEBUG
int expected_height = frame_state()->frame()->height();
#endif
VirtualFrame copied_frame(*frame_state()->frame());
copied_frame.SpillAll();
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
__ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
@@ -5633,11 +5608,23 @@ void DeferredReferenceGetNamedValue::Generate() {
// in-object has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
// At this point the answer is in r0. We move it to the expected register
// if necessary.
__ Move(receiver_, r0);
// Now go back to the frame that we entered with. This will not overwrite
// the receiver register since that register was not in use when we came
// in. The instructions emitted by this merge are skipped over by the
// inline load patching mechanism when looking for the branch instruction
// that tells it where the code to patch is.
copied_frame.MergeTo(frame_state()->frame());
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
__ BlockConstPoolFor(1);
}
ASSERT_EQ(expected_height, frame_state()->frame()->height());
}
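
The rewritten Generate above is the template for spill-free deferred code: copy the virtual frame on entry, SpillAll only the copy, perform the slow-case call, then MergeTo back to the frame the inlined code continues with, leaving the fall-through registers untouched. Here is a rough standalone sketch of that shape, with invented stand-in types; it only approximates the real VirtualFrame API.

#include <cassert>
#include <cstdio>

struct FrameModel {
  int height;
  bool spilled;
};

// Sketch of the protocol: snapshot the frame on entry, spill only the
// snapshot, do the slow-case work, then merge the snapshot back so the
// registers of the fall-through frame are never flushed.
void GenerateDeferred(const FrameModel* live_frame) {
  int expected_height = live_frame->height;

  FrameModel copied_frame = *live_frame;  // copy; the live frame is untouched
  copied_frame.spilled = true;            // SpillAll() on the copy only
  std::printf("  ...slow-case IC call with a spilled frame...\n");

  // Merge the copy back to the frame the inlined code continues with.
  // In the real code this emits the reg-reg moves that the inline-load
  // patching mechanism must skip over to find the closing branch.
  if (copied_frame.spilled != live_frame->spilled) {
    std::printf("  ...merge moves back to the unspilled layout...\n");
  }

  assert(expected_height == live_frame->height);  // stack height unchanged
}

int main() {
  FrameModel frame = {3, false};
  GenerateDeferred(&frame);
  return 0;
}
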
@@ -5738,6 +5725,7 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
// Consumes the top of stack (the receiver) and pushes the result instead.
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
@@ -5746,6 +5734,7 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
is_contextual
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET);
frame_->EmitPush(r0); // Push answer.
} else {
// Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
@@ -5762,7 +5751,6 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// Load the receiver from the stack.
Register receiver = frame_->PopToRegister();
VirtualFrame::SpilledScope spilled(frame_);
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(receiver, name);
@@ -5778,16 +5766,19 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
__ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
Register scratch = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ mov(r3, Operand(Factory::null_value()));
__ cmp(r2, r3);
// code. Therefore we can't use a LoadRoot call.
__ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ mov(scratch2, Operand(Factory::null_value()));
__ cmp(scratch, scratch2);
deferred->Branch(ne);
// Initially use an invalid index. The index will be patched by the
// inline cache code.
__ ldr(r0, MemOperand(receiver, 0));
__ ldr(receiver, MemOperand(receiver, 0));
// Make sure that the expected number of instructions is generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
@@ -5795,6 +5786,9 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
}
deferred->BindExit();
// At this point the receiver register has the result, either from the
// deferred code or from the inlined code.
frame_->EmitPush(receiver);
}
}
@@ -6010,6 +6004,27 @@ Handle<String> Reference::GetName() {
}
void Reference::DupIfPersist() {
if (persist_after_get_) {
switch (type_) {
case KEYED:
cgen_->frame()->Dup2();
break;
case NAMED:
cgen_->frame()->Dup();
// Fall through.
case UNLOADED:
case ILLEGAL:
case SLOT:
// Do nothing.
;
}
} else {
set_unloaded();
}
}
void Reference::GetValue() {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
@@ -6025,10 +6040,8 @@ void Reference::GetValue() {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
DupIfPersist();
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
break;
}
@@ -6036,23 +6049,17 @@
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
if (persist_after_get_) {
cgen_->frame()->Dup();
}
cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->EmitPush(r0);
if (!persist_after_get_) set_unloaded();
Handle<String> name = GetName();
DupIfPersist();
cgen_->EmitNamedLoad(name, is_global);
break;
}
case KEYED: {
ASSERT(property != NULL);
if (persist_after_get_) {
cgen_->frame()->Dup2();
}
DupIfPersist();
cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
if (!persist_after_get_) set_unloaded();
break;
}
@@ -101,6 +101,11 @@ class Reference BASE_EMBEDDED {
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
// This is in preparation for something that uses the reference on the stack.
// If we need this reference after the get then dup it now. Otherwise mark
// it as unloaded.
inline void DupIfPersist();
private:
CodeGenerator* cgen_;
Expression* expression_;
@@ -579,7 +579,13 @@ static inline bool IsInlinedICSite(Address address,
}
Address address_after_nop = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop = Assembler::instr_at(address_after_nop);
ASSERT(Assembler::IsBranch(instr_after_nop));
// There may be some reg-reg move and frame merging code to skip over before
// the branch back from the DeferredReferenceGetKeyedValue code to the inlined
// code.
while (!Assembler::IsBranch(instr_after_nop)) {
address_after_nop += Assembler::kInstrSize;
instr_after_nop = Assembler::instr_at(address_after_nop);
}
// Find the end of the inlined code for handling the load.
int b_offset =
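
Because the merge back from deferred code now emits a variable number of register moves before the closing branch, the patcher can no longer assume the branch sits at a fixed offset and instead scans forward one instruction at a time, as in the loop above. A standalone sketch of such a scan, using a toy branch encoding rather than real ARM decoding:

#include <cstdint>
#include <cstdio>

// Toy scan: step forward one fixed-size instruction slot at a time
// until the branch ending the deferred code is found.
bool IsBranch(uint32_t instr) { return (instr >> 24) == 0xEA; }  // invented test

const uint32_t* SkipToBranch(const uint32_t* pc) {
  while (!IsBranch(*pc)) pc++;  // skip merge moves etc.
  return pc;
}

int main() {
  // Two register moves followed by a branch (toy encodings).
  uint32_t code[] = {0xE1A01000, 0xE1A00001, 0xEAFFFFF0};
  std::printf("skipped %d instructions\n",
              static_cast<int>(SkipToBranch(code) - code));  // prints 2
  return 0;
}

This also explains the BlockConstPoolFor(1) in the deferred code: a constant pool dumped between the moves and the branch would be misread as instructions by a scan like this.
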
@@ -69,18 +69,15 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
if (entry_frame_set_) {
// Backward branch. We have an expected frame to merge to on the
// backward edge.
if (cc == al) {
cgen()->frame()->MergeTo(&entry_frame_);
} else {
// We can't do conditional merges yet so you have to ensure that all
// conditional branches to the JumpTarget have the same virtual frame.
ASSERT(cgen()->frame()->Equals(&entry_frame_));
}
cgen()->frame()->MergeTo(&entry_frame_, cc);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
}
__ b(cc, &entry_label_);
if (cc == al) {
cgen()->DeleteFrame();
}
}
@@ -183,15 +183,18 @@ void MacroAssembler::Drop(int count, Condition cond) {
}
void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch,
Condition cond) {
if (scratch.is(no_reg)) {
eor(reg1, reg1, Operand(reg2));
eor(reg2, reg2, Operand(reg1));
eor(reg1, reg1, Operand(reg2));
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
} else {
mov(scratch, reg1);
mov(reg1, reg2);
mov(reg2, scratch);
mov(scratch, reg1, LeaveCC, cond);
mov(reg1, reg2, LeaveCC, cond);
mov(reg2, scratch, LeaveCC, cond);
}
}
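
The scratch-free path of the new conditional Swap uses the classic three-EOR exchange. A minimal demonstration of why three XORs swap two values without a temporary:

#include <cassert>
#include <cstdint>

// Three-XOR swap: after the three steps the values are exchanged,
// with no scratch storage used.
void XorSwap(uint32_t* a, uint32_t* b) {
  *a ^= *b;  // a = a0 ^ b0
  *b ^= *a;  // b = b0 ^ (a0 ^ b0) = a0
  *a ^= *b;  // a = (a0 ^ b0) ^ a0 = b0
}

int main() {
  uint32_t r0 = 0x1234, r1 = 0xABCD;
  XorSwap(&r0, &r1);
  assert(r0 == 0xABCD && r1 == 0x1234);
  return 0;
}

If both operands were the same register the first EOR would zero it, so callers must not pass reg1 equal to reg2; the mov-based form with a scratch register has no such hazard.
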
@@ -88,7 +88,10 @@ class MacroAssembler: public Assembler {
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
void Swap(Register reg1,
Register reg2,
Register scratch = no_reg,
Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
@@ -72,90 +72,90 @@ void VirtualFrame::PopToR0() {
}
void VirtualFrame::MergeTo(VirtualFrame* expected) {
void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
MergeTOSTo(expected->top_of_stack_state_);
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTOSTo(
VirtualFrame::TopOfStack expected_top_of_stack_state) {
VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
__ pop(r0);
__ pop(r0, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
__ pop(r1);
__ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
__ pop(r0);
__ pop(r1);
__ pop(r0, cond);
__ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
__ pop(r1);
__ pop(r0);
__ pop(r1, cond);
__ pop(r0, cond);
break;
case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
__ push(r0);
__ push(r0, cond);
break;
case CASE_NUMBER(R0_TOS, R0_TOS):
break;
case CASE_NUMBER(R0_TOS, R1_TOS):
__ mov(r1, r0);
__ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_TOS, R0_R1_TOS):
__ pop(r1);
__ pop(r1, cond);
break;
case CASE_NUMBER(R0_TOS, R1_R0_TOS):
__ mov(r1, r0);
__ pop(r0);
__ mov(r1, r0, LeaveCC, cond);
__ pop(r0, cond);
break;
case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
__ push(r1);
__ push(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R0_TOS):
__ mov(r0, r1);
__ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_TOS, R1_TOS):
break;
case CASE_NUMBER(R1_TOS, R0_R1_TOS):
__ mov(r0, r1);
__ pop(r1);
__ mov(r0, r1, LeaveCC, cond);
__ pop(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R1_R0_TOS):
__ pop(r0);
__ pop(r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
__ Push(r1, r0);
__ Push(r1, r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_TOS):
__ push(r1);
__ push(r1, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R1_TOS):
__ push(r1);
__ mov(r1, r0);
__ push(r1, cond);
__ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
break;
case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
__ Swap(r0, r1, ip);
__ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
__ Push(r0, r1);
__ Push(r0, r1, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_TOS):
__ push(r0);
__ mov(r0, r1);
__ push(r0, cond);
__ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_TOS):
__ push(r0);
__ push(r0, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
__ Swap(r0, r1, ip);
__ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
break;
@@ -163,7 +163,16 @@ void VirtualFrame::MergeTOSTo(
UNREACHABLE();
#undef CASE_NUMBER
}
top_of_stack_state_ = expected_top_of_stack_state;
// A conditional merge will be followed by a conditional branch and the
// fall-through code will have an unchanged virtual frame state. If the
// merge is unconditional ('al'ways) then it might be followed by a fall
// through. We need to update the virtual frame state to match the code we
// are falling into. The final case is an unconditional merge followed by an
// unconditional branch, in which case it doesn't matter what we do to the
// virtual frame state, because the virtual frame will be invalidated.
if (cond == al) {
top_of_stack_state_ = expected_top_of_stack_state;
}
}
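
The CASE_NUMBER macro in MergeTOSTo flattens the (current, expected) state pair into one integer so a single switch can dispatch on both states at once. The same trick in miniature, with a cut-down state set invented for the example:

#include <cstdio>

enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, TOS_STATES };

// Encode the pair (a, b) as one integer; distinct pairs get distinct
// case labels because a and b are both smaller than TOS_STATES.
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))

const char* MergeCode(TopOfStack current, TopOfStack expected) {
  switch (CASE_NUMBER(current, expected)) {
    case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS): return "pop r0";
    case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS): return "push r0";
    case CASE_NUMBER(R0_TOS, R1_TOS):           return "mov r1, r0";
    case CASE_NUMBER(R1_TOS, R0_TOS):           return "mov r0, r1";
    default:                                    return "";  // states agree
  }
}

int main() {
  std::printf("%s\n", MergeCode(R0_TOS, R1_TOS));  // prints: mov r1, r0
  return 0;
}

The multiplier only needs to be at least the number of states for every pair to map to a distinct case label.
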
@@ -107,14 +107,14 @@ class VirtualFrame : public ZoneObject {
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
inline CodeGenerator* cgen();
inline CodeGenerator* cgen() const;
inline MacroAssembler* masm();
// The number of elements on the virtual frame.
int element_count() { return element_count_; }
int element_count() const { return element_count_; }
// The height of the virtual expression stack.
inline int height();
inline int height() const;
bool is_used(int num) {
switch (num) {
@@ -162,7 +162,7 @@ class VirtualFrame : public ZoneObject {
// Spill all values from the frame to memory.
void SpillAll();
void AssertIsSpilled() {
void AssertIsSpilled() const {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
ASSERT(register_allocation_map_ == 0);
}
@@ -184,7 +184,7 @@
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected);
void MergeTo(const VirtualFrame* expected, Condition cond = al);
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
@@ -426,13 +426,13 @@
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
inline int parameter_count();
inline int local_count();
inline int parameter_count() const;
inline int local_count() const;
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
inline int frame_pointer();
inline int frame_pointer() const;
// The index of the first parameter. The receiver lies below the first
// parameter.
@@ -448,10 +448,10 @@
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
inline int local0_index();
inline int local0_index() const;
// The index of the base of the expression stack.
inline int expression_base_index();
inline int expression_base_index() const;
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
@@ -469,9 +469,9 @@
// Emit instructions to get the top of stack state from where we are to where
// we want to be.
void MergeTOSTo(TopOfStack expected_state);
void MergeTOSTo(TopOfStack expected_state, Condition cond);
inline bool Equals(VirtualFrame* other);
inline bool Equals(const VirtualFrame* other);
friend class JumpTarget;
};
@@ -180,6 +180,8 @@ class CodeGeneratorScope BASE_EMBEDDED {
};
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
class FrameRegisterState {
public:
@@ -203,14 +205,28 @@ class FrameRegisterState {
// it should fit in the low zero bits of a valid offset.
static const int kSyncedFlag = 2;
// C++ doesn't allow zero length arrays, so we make the array length 1 even
// if we don't need it.
static const int kRegistersArrayLength =
(RegisterAllocator::kNumRegisters == 0) ?
1 : RegisterAllocator::kNumRegisters;
int registers_[kRegistersArrayLength];
int registers_[RegisterAllocator::kNumRegisters];
};
#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
class FrameRegisterState {
public:
inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
inline const VirtualFrame* frame() const { return &frame_; }
private:
VirtualFrame frame_;
};
#else
#error Unsupported target architecture.
#endif
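
On ARM the FrameRegisterState is now simply a by-value copy of the whole virtual frame, captured when the DeferredCode object is constructed. A small sketch of that snapshot-by-value pattern, with invented stand-in types:

// Snapshot-by-value: the deferred code keeps its own copy of the frame
// as it was at the branch, so later code generation can mutate the live
// frame freely.
struct FrameModel {
  int element_count;
  unsigned register_allocation_map;
};

class FrameStateSnapshot {
 public:
  explicit FrameStateSnapshot(FrameModel frame) : frame_(frame) {}
  const FrameModel* frame() const { return &frame_; }
 private:
  FrameModel frame_;  // a copy, not a pointer into the live frame
};

int main() {
  FrameModel live = {5, 0};
  FrameStateSnapshot snapshot(live);  // captured at the deferred branch
  live.element_count = 7;             // the live frame moves on...
  return snapshot.frame()->element_count == 5 ? 0 : 1;  // ...copy is stable
}
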
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
@@ -34,17 +34,11 @@ namespace v8 {
namespace internal {
FrameRegisterState::FrameRegisterState(VirtualFrame* frame) {
// Nothing to do when register allocation is not supported.
ASSERT(RegisterAllocator::kNumRegisters == 0);
}
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()),
frame_state_(CodeGeneratorScope::Current()->frame()) {
frame_state_(*CodeGeneratorScope::Current()->frame()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
@@ -52,7 +46,6 @@ DeferredCode::DeferredCode()
#ifdef DEBUG
comment_ = "";
CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
#endif
}
@@ -60,7 +60,7 @@ VirtualFrame::VirtualFrame(VirtualFrame* original)
register_allocation_map_(original->register_allocation_map_) { }
bool VirtualFrame::Equals(VirtualFrame* other) {
bool VirtualFrame::Equals(const VirtualFrame* other) {
ASSERT(element_count() == other->element_count());
if (top_of_stack_state_ != other->top_of_stack_state_) return false;
if (register_allocation_map_ != other->register_allocation_map_) return false;
@@ -99,7 +99,9 @@ VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
}
CodeGenerator* VirtualFrame::cgen() { return CodeGeneratorScope::Current(); }
CodeGenerator* VirtualFrame::cgen() const {
return CodeGeneratorScope::Current();
}
MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
@@ -112,15 +114,17 @@ void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
}
int VirtualFrame::parameter_count() {
int VirtualFrame::parameter_count() const {
return cgen()->scope()->num_parameters();
}
int VirtualFrame::local_count() { return cgen()->scope()->num_stack_slots(); }
int VirtualFrame::local_count() const {
return cgen()->scope()->num_stack_slots();
}
int VirtualFrame::frame_pointer() { return parameter_count() + 3; }
int VirtualFrame::frame_pointer() const { return parameter_count() + 3; }
int VirtualFrame::context_index() { return frame_pointer() - 1; }
@@ -129,7 +133,7 @@ int VirtualFrame::context_index() { return frame_pointer() - 1; }
int VirtualFrame::function_index() { return frame_pointer() - 2; }
int VirtualFrame::local0_index() { return frame_pointer() + 2; }
int VirtualFrame::local0_index() const { return frame_pointer() + 2; }
int VirtualFrame::fp_relative(int index) {
@@ -139,12 +143,12 @@ int VirtualFrame::fp_relative(int index) {
}
int VirtualFrame::expression_base_index() {
int VirtualFrame::expression_base_index() const {
return local0_index() + local_count();
}
int VirtualFrame::height() {
int VirtualFrame::height() const {
return element_count() - expression_base_index();
}