Commit 4fdf57ac authored by lrn@chromium.org

X64: Faster push/pop implementation.

Also snuck in an intended optimization for fast api call preparation and a few indentation fixes.

Review URL: http://codereview.chromium.org/1689010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4579 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 54353967
@@ -1179,7 +1179,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
   __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kSmiTag == 0);
-  __ add(Operand(eax), Immediate(argc << 1));
+  __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
   // Get the element's length into ecx.
   __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
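A note for context (an editor's aside, not part of the commit): with kSmiTagSize == 1 and kSmiTag == 0, an ia32 smi is just the integer shifted left one bit, so the old immediate argc << 1 and the new Smi::FromInt(argc) encode the same value; the new spelling states the intent and stays correct if the tag layout ever changes. A minimal sketch of the encoding, using stand-in constants mirroring the two STATIC_ASSERTs above:

#include <cassert>
#include <cstdint>

// Stand-ins for V8's smi-tagging constants on ia32.
const int kSmiTag = 0;      // The tag bit of a smi is 0.
const int kSmiTagSize = 1;  // One tag bit; the value occupies the upper 31 bits.

// Mirrors Smi::FromInt on a 32-bit target: shift the value past the tag bit.
int32_t SmiFromInt(int32_t value) {
  return (value << kSmiTagSize) | kSmiTag;
}

int main() {
  int argc = 3;
  // The old immediate (argc << 1) and the new one agree bit for bit.
  assert(SmiFromInt(argc) == (argc << 1));
  return 0;
}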
@@ -1232,7 +1232,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
   __ j(not_equal, &call_builtin);
   __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
   __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
-  __ j(greater, &call_builtin);
+  __ j(above, &call_builtin);
   // We fit and could grow elements.
   __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
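The greater → above change here (and the matching less → below change in RecordWriteHelper further down) swaps a signed jump for its unsigned twin. The operands are new-space addresses, and an address with its top bit set compares as negative under signed flags, so the unsigned condition is the correct one. A small sketch of how the two orderings diverge once a pointer crosses the sign bit:

#include <cstdint>
#include <cstdio>

int main() {
  // Two addresses; the second has the top bit set, as heap pointers may.
  uint32_t limit = 0x7fffff00u;
  uint32_t top   = 0x80000100u;

  // Unsigned comparison ("above"): top really is past the limit.
  bool above = top > limit;  // true
  // Signed comparison ("greater"): the same bits order the wrong way round.
  bool greater = static_cast<int32_t>(top) > static_cast<int32_t>(limit);  // false

  printf("above=%d greater=%d\n", above, greater);  // above=1 greater=0
  return 0;
}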
@@ -1298,7 +1298,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
     return Heap::undefined_value();
   }

-  Label miss, empty_array, call_builtin;
+  Label miss, return_undefined, call_builtin;

   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -1307,7 +1307,6 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
   // Check that the receiver isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &miss);
-
   CheckPrototypes(JSObject::cast(object), edx,
                   holder, ebx,
                   eax, name, &miss);
@@ -1323,7 +1322,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
   // Get the array's length into ecx and calculate new length.
   __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
   __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
-  __ j(negative, &empty_array);
+  __ j(negative, &return_undefined);

   // Get the last element.
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -1344,12 +1343,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
              Immediate(Factory::the_hole_value()));
   __ ret((argc + 1) * kPointerSize);

-  __ bind(&empty_array);
+  __ bind(&return_undefined);
   __ mov(eax, Immediate(Factory::undefined_value()));
   __ ret((argc + 1) * kPointerSize);

   __ bind(&call_builtin);
-
   __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
                                argc + 1,
                                1);
...
@@ -8516,6 +8516,7 @@ static int NegativeComparisonResult(Condition cc) {
   return (cc == greater || cc == greater_equal) ? LESS : GREATER;
 }

+
 void CompareStub::Generate(MacroAssembler* masm) {
   Label call_builtin, done;
   // The compare stub returns a positive, negative, or zero 64-bit integer
...
@@ -101,15 +101,17 @@ void MacroAssembler::RecordWriteHelper(Register object,
   // If the bit offset lies beyond the normal remembered set range, it is in
   // the extra remembered set area of a large object.
   cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
-  j(less, &fast);
+  j(below, &fast);
+
+  // We have a large object containing pointers. It must be a FixedArray.

   // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
   // extra remembered set after the large object.

   // Load the array length into 'scratch'.
   movl(scratch,
        Operand(page_start,
                Page::kObjectStartOffset + FixedArray::kLengthOffset));
   Register array_length = scratch;

   // Extra remembered set starts right after the large object (a FixedArray), at
@@ -119,9 +121,9 @@ void MacroAssembler::RecordWriteHelper(Register object,
   // extra RSet to 'page_start', so that addressing the bit using
   // 'pointer_offset' hits the extra RSet words.
   lea(page_start,
       Operand(page_start, array_length, times_pointer_size,
               Page::kObjectStartOffset + FixedArray::kHeaderSize
               - Page::kRSetEndOffset));

   // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
   // to limit code size. We should probably evaluate this decision by
@@ -132,22 +134,6 @@ void MacroAssembler::RecordWriteHelper(Register object,
 }


-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (!scratch.is(object)) {
-    movq(scratch, object);
-  }
-  ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
-  and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
-  movq(kScratchRegister, ExternalReference::new_space_start());
-  cmpq(scratch, kScratchRegister);
-  j(cc, branch);
-}
-
-
 // Set the remembered set bit for [object+offset].
 // object is the object being stored into, value is the object being stored.
 // If offset is zero, then the smi_index register contains the array index into
@@ -213,11 +199,11 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
   // We make sure that an offset is inside the right limits whether it is
   // tagged or untagged.
   if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
-    // Compute the bit offset in the remembered set, leave it in 'value'.
+    // Compute the bit offset in the remembered set, leave it in 'scratch'.
     lea(scratch, Operand(object, offset));
     ASSERT(is_int32(Page::kPageAlignmentMask));
     and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
-    shr(scratch, Immediate(kObjectAlignmentBits));
+    shr(scratch, Immediate(kPointerSizeLog2));

     // Compute the page address from the heap object pointer, leave it in
     // 'object' (immediate value is sign extended).
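A note on the shr change (my reading, with assumed constants): the remembered set keeps one bit per pointer-sized slot, so the byte offset within the page should be divided by the pointer size, and kPointerSizeLog2 names that directly. On x64 both constants evaluate to 3, so this looks like a readability fix rather than a behavior change. A sketch of the bit-offset computation, with stand-in values for the page constants:

#include <cassert>
#include <cstdint>

// Assumed stand-ins for the x64 constants used above.
const uintptr_t kPageAlignmentMask = (1 << 13) - 1;  // pretend 8KB pages
const int kPointerSizeLog2 = 3;                      // 8-byte pointers

// Bit index of the remembered-set entry covering the slot at 'addr':
// the offset within the page, divided by the pointer size.
uintptr_t RSetBitOffset(uintptr_t addr) {
  return (addr & kPageAlignmentMask) >> kPointerSizeLog2;
}

int main() {
  // Slots two pointers apart map to remembered-set bits two apart.
  assert(RSetBitOffset(0x1000) + 2 == RSetBitOffset(0x1010));
  return 0;
}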
@@ -236,10 +222,10 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
     // array access: calculate the destination address in the same manner as
     // KeyedStoreIC::GenerateGeneric.
     SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
-    lea(dst, Operand(object,
-                     index.reg,
-                     index.scale,
-                     FixedArray::kHeaderSize - kHeapObjectTag));
+    lea(dst, FieldOperand(object,
+                          index.reg,
+                          index.scale,
+                          FixedArray::kHeaderSize));
   }

   // If we are already generating a shared stub, not inlining the
   // record write code isn't going to save us any memory.
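On the lea rewrite just above (a gloss, not the commit's words): FieldOperand folds the -kHeapObjectTag adjustment into itself, so the new form computes exactly the same address while making the "field of a tagged object" intent explicit. A simplified model of the equivalence, reducing the operand to base + displacement:

#include <cassert>
#include <cstdint>

const int kHeapObjectTag = 1;  // Heap pointers carry a 1 tag.

// Simplified address computations: just base + displacement here.
uintptr_t OperandAddr(uintptr_t base, intptr_t disp) { return base + disp; }
// FieldOperand(obj, off) expands to Operand(obj, off - kHeapObjectTag).
uintptr_t FieldOperandAddr(uintptr_t obj, intptr_t off) {
  return OperandAddr(obj, off - kHeapObjectTag);
}

int main() {
  uintptr_t tagged_object = 0x2001;  // some tagged heap pointer
  const int kHeaderSize = 8;         // stand-in for FixedArray::kHeaderSize
  // Old spelling: Operand(object, kHeaderSize - kHeapObjectTag).
  // New spelling: FieldOperand(object, kHeaderSize). Same address.
  assert(OperandAddr(tagged_object, kHeaderSize - kHeapObjectTag) ==
         FieldOperandAddr(tagged_object, kHeaderSize));
  return 0;
}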
@@ -263,6 +249,41 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
 }


+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch) {
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    // The mask isn't really an address. We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    if (scratch.is(object)) {
+      movq(kScratchRegister, ExternalReference::new_space_mask());
+      and_(scratch, kScratchRegister);
+    } else {
+      movq(scratch, ExternalReference::new_space_mask());
+      and_(scratch, object);
+    }
+    movq(kScratchRegister, ExternalReference::new_space_start());
+    cmpq(scratch, kScratchRegister);
+    j(cc, branch);
+  } else {
+    ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+    intptr_t new_space_start =
+        reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+    if (scratch.is(object)) {
+      addq(scratch, kScratchRegister);
+    } else {
+      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+    }
+    and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+    j(cc, branch);
+  }
+}
+
+
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (FLAG_debug_code) Check(cc, msg);
 }
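A gloss on the relocated helper (my reading of the non-serializer path): instead of loading new_space_start and comparing, it adds -new_space_start to the pointer, masks with NewSpaceMask, and branches on the zero flag; the result is zero exactly when the pointer lies inside the new-space region. A sketch of that predicate, assuming a power-of-two region size as the mask construction implies:

#include <cassert>
#include <cstdint>

// Assumed new-space layout for the sketch: a power-of-two-sized region.
const uintptr_t kNewSpaceStart = 0x40000000;
const uintptr_t kNewSpaceSize  = 1 << 20;               // 1MB, power of two
const uintptr_t kNewSpaceMask  = ~(kNewSpaceSize - 1);  // mirrors Heap::NewSpaceMask()

// The fast path above: (object - start) & mask, then branch on zero.
bool InNewSpace(uintptr_t object) {
  return ((object - kNewSpaceStart) & kNewSpaceMask) == 0;
}

int main() {
  assert(InNewSpace(kNewSpaceStart));                      // first byte: inside
  assert(InNewSpace(kNewSpaceStart + kNewSpaceSize - 1));  // last byte: inside
  assert(!InNewSpace(kNewSpaceStart + kNewSpaceSize));     // one past: outside
  assert(!InNewSpace(kNewSpaceStart - 1));                 // one before: outside
  return 0;
}

The serializer branch exists because a snapshot may be built with a different new-space size than the running VM uses, so the mask and start must be loaded as external references instead of baked-in constants; that is what the comment in the new code says.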
@@ -2161,7 +2182,7 @@ Register MacroAssembler::CheckMaps(JSObject* object,
   int depth = 0;

   if (save_at_depth == depth) {
-    movq(Operand(rsp, kPointerSize), reg);
+    movq(Operand(rsp, kPointerSize), object_reg);
   }

   // Check the maps in the prototype chain.
...
@@ -102,7 +102,6 @@ class MacroAssembler: public Assembler {
                    Register value,
                    Register scratch);

-
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
   // Debugger Support
...
@@ -58,6 +58,29 @@
   assertEquals(undefined, a.pop(1, 2, 3), "9th pop");
   assertEquals(0, a.length, "length 9th pop");
 }
+
+// Check that pop works on inherited properties.
+for (var i = 0; i < 10 ;i++) {  // Ensure ICs are stabilized.
+  Array.prototype[1] = 1;
+  Array.prototype[3] = 3;
+  Array.prototype[5] = 5;
+  Array.prototype[7] = 7;
+  Array.prototype[9] = 9;
+  a = [0,1,2,,4,,6,7,8,,];
+  assertEquals(10, a.length, "inherit-initial-length");
+  for (var j = 9; j >= 0; j--) {
+    assertEquals(j + 1, a.length, "inherit-pre-length-" + j);
+    assertTrue(j in a, "has property " + j);
+    var own = a.hasOwnProperty(j);
+    var inherited = Array.prototype.hasOwnProperty(j);
+    assertEquals(j, a.pop(), "inherit-pop");
+    assertEquals(j, a.length, "inherit-post-length");
+    assertFalse(a.hasOwnProperty(j), "inherit-deleted-own-" + j);
+    assertEquals(inherited, Array.prototype.hasOwnProperty(j),
+                 "inherit-not-deleted-inherited" + j);
+  }
+  Array.prototype.length = 0;  // Clean-up.
+}
 })();

 // Test the case of not JSArray receiver.
...