Commit ea1d2ad8 authored by erik.corry@gmail.com

Fix keyed load inlining after my last commit accidentally broke it.

Review URL: http://codereview.chromium.org/1780010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4526 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ef464f0c
@@ -5280,8 +5280,10 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
 void DeferredReferenceGetKeyedValue::Generate() {
-  __ DecrementCounter(&Counters::keyed_load_inline, 1, r1, r2);
-  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, r1, r2);
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
@@ -5375,15 +5377,17 @@ void CodeGenerator::EmitKeyedLoad() {
   __ IncrementCounter(&Counters::keyed_load_inline, 1,
                       frame_->scratch0(), frame_->scratch1());
-  // Load the receiver from the stack.
-  frame_->SpillAllButCopyTOSToR0();
+  // Load the receiver and key from the stack.
+  frame_->SpillAllButCopyTOSToR1R0();
+  Register receiver = r0;
+  Register key = r1;
   VirtualFrame::SpilledScope spilled(frame_);
   DeferredReferenceGetKeyedValue* deferred =
       new DeferredReferenceGetKeyedValue();
   // Check that the receiver is a heap object.
-  __ tst(r0, Operand(kSmiTagMask));
+  __ tst(receiver, Operand(kSmiTagMask));
   deferred->Branch(eq);
   // The following instructions are the inlined load keyed property. Parts
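Note on the smi check above: tst(receiver, Operand(kSmiTagMask)) followed by deferred->Branch(eq) bails out to the deferred code when the receiver is a small integer rather than a heap object. A minimal standalone sketch of the same test, assuming V8's usual 32-bit tagging (smi tag 0 in the low bit, heap objects tagged with 1); the helper and main() below are illustrative, not part of the patch:

#include <cstdint>
#include <cstdio>

// Illustrative constants mirroring the assumed 32-bit tagging scheme:
// smis carry a 0 in the low bit, heap-object pointers carry a 1.
const uintptr_t kSmiTagMask = 1;
const uintptr_t kSmiTag = 0;

bool IsSmi(uintptr_t tagged_value) {
  // Same test as "tst value, #kSmiTagMask": the Z flag is set (eq) for smis.
  return (tagged_value & kSmiTagMask) == kSmiTag;
}

int main() {
  uintptr_t smi_seven = 7u << 1;       // 7 encoded as a smi: value << 1, tag 0
  uintptr_t heap_object = 0x12345679;  // odd address-like value, tag bit 1
  printf("%d %d\n", IsSmi(smi_seven), IsSmi(heap_object));  // prints: 1 0
  return 0;
}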
@@ -5391,44 +5395,49 @@ void CodeGenerator::EmitKeyedLoad() {
   // need to be fixed. Therefore the constant pool is blocked while generating
   // this code.
 #ifdef DEBUG
-  int kInlinedKeyedLoadInstructions = 20;
+  int kInlinedKeyedLoadInstructions = 19;
   Label check_inlined_codesize;
   masm_->bind(&check_inlined_codesize);
 #endif
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    Register scratch1 = VirtualFrame::scratch0();
+    Register scratch2 = VirtualFrame::scratch1();
     // Check the map. The null map used below is patched by the inline cache
     // code.
-    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ mov(r2, Operand(Factory::null_value()));
-    __ cmp(r1, r2);
+    __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+    __ mov(scratch2, Operand(Factory::null_value()));
+    __ cmp(scratch1, scratch2);
     deferred->Branch(ne);
-    // Load the key from the stack.
-    __ ldr(r1, MemOperand(sp, 0));
     // Check that the key is a smi.
-    __ tst(r1, Operand(kSmiTagMask));
+    __ tst(key, Operand(kSmiTagMask));
     deferred->Branch(ne);
     // Get the elements array from the receiver and check that it
     // is not a dictionary.
-    __ ldr(r2, FieldMemOperand(r0, JSObject::kElementsOffset));
-    __ ldr(r3, FieldMemOperand(r2, JSObject::kMapOffset));
-    __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
-    __ cmp(r3, r4);
+    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+    __ cmp(scratch2, ip);
     deferred->Branch(ne);
     // Check that key is within bounds.
-    __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
-    __ cmp(r3, Operand(r1, ASR, kSmiTagSize));
+    __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+    __ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
     deferred->Branch(ls);  // Unsigned less equal.
-    // Load and check that the result is not the hole (r1 is a smi).
-    __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
-    __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ ldr(r0, MemOperand(r2, r1, LSL,
-                          kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-    __ cmp(r0, r3);
+    // Load and check that the result is not the hole (key is a smi).
+    __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+    __ add(scratch1,
+           scratch1,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ ldr(r0,
+           MemOperand(scratch1, key, LSL,
+                      kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+    __ cmp(r0, scratch2);
+    // This is the only branch to deferred where r0 and r1 do not contain the
+    // receiver and key.  We can't just load undefined here because we have to
+    // check the prototype.
     deferred->Branch(eq);
     // Make sure that the expected number of instructions are generated.
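The add/ldr pair at the end of the inlined sequence computes the element slot directly from the still-tagged key: the base is elements + FixedArray::kHeaderSize - kHeapObjectTag, and the smi key only needs a further left shift of kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize) to become a byte offset. A rough standalone sketch of that arithmetic, assuming the usual 32-bit values (4-byte pointers, smi tag size 1, no extra smi shift, 8-byte FixedArray header, heap-object tag 1); the constant values and names below are assumptions for illustration, not taken from the patch:

#include <cassert>
#include <cstdint>

// Assumed 32-bit layout constants (illustrative):
const int kSmiTagSize = 1;            // smi payload is shifted left by 1
const int kSmiShiftSize = 0;          // no extra shift on 32-bit
const int kPointerSizeLog2 = 2;       // 4-byte pointers
const int kHeapObjectTag = 1;         // heap pointers carry a +1 tag
const int kFixedArrayHeaderSize = 8;  // map word + length word

// Address the inlined load reads from, given the tagged elements pointer
// and a smi-encoded key.
uintptr_t ElementSlot(uintptr_t tagged_elements, uintptr_t smi_key) {
  uintptr_t base = tagged_elements + kFixedArrayHeaderSize - kHeapObjectTag;
  // LSL by kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize) == 1:
  // the smi (value << 1) becomes value * 4, a byte offset into the array.
  return base + (smi_key << (kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
}

int main() {
  uintptr_t elements = 0x1000 + kHeapObjectTag;  // pretend tagged pointer
  uintptr_t key_as_smi = 3u << kSmiTagSize;      // key 3 encoded as a smi
  assert(ElementSlot(elements, key_as_smi) == 0x1000 + 8 + 3 * 4);
  return 0;
}

For a key of 3 the slot is the 8-byte header plus 3 * 4 bytes past the untagged elements pointer, which is what the assertion checks.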
@@ -639,7 +639,7 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
   // Patch the map check.
   Address ldr_map_instr_address =
-      inline_end_address - 19 * Assembler::kInstrSize;
+      inline_end_address - 18 * Assembler::kInstrSize;
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
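This offset has to track the length of the inlined sequence emitted by EmitKeyedLoad: the map literal is materialized near the start of that sequence, and the IC finds the instruction to patch by stepping a fixed number of instructions back from the end. Dropping the separate key load shortened the sequence by one instruction, so the distance changes from 19 to 18. A toy sketch of that address arithmetic, assuming ARM's fixed 4-byte instruction size; the function and numbers are illustrative only:

#include <cassert>
#include <cstdint>

// Assumed constant: ARM instructions are a fixed 4 bytes each.
const int kInstrSize = 4;

// The IC knows where the inlined sequence ends; the instruction holding the
// map literal sits a fixed number of instructions before that point.
uintptr_t MapLoadAddress(uintptr_t inline_end_address, int instrs_before_end) {
  return inline_end_address - instrs_before_end * kInstrSize;
}

int main() {
  uintptr_t end = 0x40000100;
  // Before this patch the literal sat 19 instructions back; after dropping
  // the separate "load the key from the stack" instruction it is 18 back.
  assert(MapLoadAddress(end, 18) - MapLoadAddress(end, 19) == kInstrSize);
  return 0;
}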
@@ -88,7 +88,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
       break;
     case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
       __ pop(r1);
-      __ pop(r1);
+      __ pop(r0);
       break;
     case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
       __ push(r0);
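This one-register change is the fix the commit message refers to: the NO_TOS_REGISTERS to R1_R0_TOS transition is supposed to leave the top of stack in r1 and the next value in r0, but the previous commit popped both values into r1, clobbering the first and never writing r0. A tiny model of the difference (plain C++, not V8 code):

#include <cassert>
#include <vector>

// Toy model of the transition NO_TOS_REGISTERS -> R1_R0_TOS: the two topmost
// stack slots must end up with the top of stack in r1 and the next value in r0.
struct Regs { int r0 = 0; int r1 = 0; };

int Pop(std::vector<int>* stack) {
  int value = stack->back();
  stack->pop_back();
  return value;
}

int main() {
  std::vector<int> stack = {/* next */ 10, /* top */ 20};

  // Broken sequence from the previous commit: both pops target r1, so the
  // top-of-stack value is overwritten and r0 is never written.
  Regs broken;
  std::vector<int> s1 = stack;
  broken.r1 = Pop(&s1);
  broken.r1 = Pop(&s1);  // clobbers the value that belonged in r1
  assert(broken.r1 == 10 && broken.r0 == 0);

  // Fixed sequence: pop into r1 first (top of stack), then into r0 (next).
  Regs fixed;
  std::vector<int> s2 = stack;
  fixed.r1 = Pop(&s2);
  fixed.r0 = Pop(&s2);
  assert(fixed.r1 == 20 && fixed.r0 == 10);
  return 0;
}

With the old sequence the fast path saw a stale value in r0, which is presumably how the previous commit broke the inlined keyed load.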
@@ -429,6 +429,35 @@ void VirtualFrame::SpillAllButCopyTOSToR0() {
 }

+void VirtualFrame::SpillAllButCopyTOSToR1R0() {
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+      __ ldr(r1, MemOperand(sp, 0));
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R0_TOS:
+      __ push(r0);
+      __ mov(r1, r0);
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R1_TOS:
+      __ push(r1);
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R0_R1_TOS:
+      __ Push(r1, r0);
+      __ Swap(r0, r1, ip);
+      break;
+    case R1_R0_TOS:
+      __ Push(r0, r1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
 Register VirtualFrame::Peek() {
   AssertIsNotSpilled();
   if (top_of_stack_state_ == NO_TOS_REGISTERS) {
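The new helper has to cope with every cache state of the virtual frame: values currently cached in r0/r1 are first written back to memory, and the routine then leaves the top of stack in r1 and the next value in r0, loading from memory where a value was never cached. A toy model of that contract, assuming the first register named in a state (for example the r1 in R1_R0_TOS) is the cached top of stack; this is an illustration of the case analysis, not the V8 implementation:

#include <cassert>
#include <utility>
#include <vector>

enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS };

struct Frame {
  std::vector<int> memory;  // memory.back() models the slot at sp
  int r0 = 0, r1 = 0;
  TopOfStack state = NO_TOS_REGISTERS;

  // Postcondition: memory holds the whole stack, r1 = top, r0 = next value.
  void SpillAllButCopyTOSToR1R0() {
    switch (state) {
      case NO_TOS_REGISTERS:                 // both values already in memory
        r1 = memory[memory.size() - 1];
        r0 = memory[memory.size() - 2];
        break;
      case R0_TOS:                           // r0 caches the top of stack
        memory.push_back(r0);
        r1 = r0;
        r0 = memory[memory.size() - 2];
        break;
      case R1_TOS:                           // r1 caches the top of stack
        memory.push_back(r1);
        r0 = memory[memory.size() - 2];
        break;
      case R0_R1_TOS:                        // r0 = top, r1 = next
        memory.push_back(r1);
        memory.push_back(r0);
        std::swap(r0, r1);
        break;
      case R1_R0_TOS:                        // r1 = top, r0 = next
        memory.push_back(r0);
        memory.push_back(r1);
        break;
    }
    state = NO_TOS_REGISTERS;
  }
};

int main() {
  Frame f;
  f.memory = {10};        // one value already spilled to memory
  f.r0 = 30; f.r1 = 20;   // two more cached: r0 = top, r1 = next
  f.state = R0_R1_TOS;
  f.SpillAllButCopyTOSToR1R0();
  assert(f.r1 == 30 && f.r0 == 20);                     // r1 = top, r0 = next
  assert(f.memory == (std::vector<int>{10, 20, 30}));   // memory is complete
  return 0;
}

The R0_R1_TOS case mirrors the Push-then-Swap pair in the patch: the two values go to memory in stack order first, and the registers are then exchanged so the top of stack ends up in r1.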
@@ -344,10 +344,13 @@ class VirtualFrame : public ZoneObject {
   // must be copied to a scratch register before modification.
   Register Peek();

-  // A little specialized, this one.  It flushes all registers, but it puts a
-  // copy of the top-of-stack in R0.
+  // Flushes all registers, but it puts a copy of the top-of-stack in r0.
   void SpillAllButCopyTOSToR0();

+  // Flushes all registers, but it puts a copy of the top-of-stack in r1
+  // and the next value on the stack in r0.
+  void SpillAllButCopyTOSToR1R0();
+
   // Pop and save an element from the top of the expression stack and
   // emit a corresponding pop instruction.
   void EmitPop(Register reg);