Commit 9f64f43a authored by verwaest@chromium.org

Turn ArrayPush into a stub specialized on the elements kind and argc.

BUG=
R=mvstanton@chromium.org

Review URL: https://codereview.chromium.org/143213003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18696 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c970c1cb
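In outline, this change deletes the three per-architecture copies of CallStubCompiler::CompileArrayPushCall and has the shared stub compiler tail-call a single ArrayPushStub that is specialized, and cached, per elements kind and argument count. A condensed sketch of the new call-site code, paraphrasing the platform-independent hunk further down rather than quoting it verbatim:

    Handle<Map> map(Handle<JSArray>::cast(object)->map());
    ArrayPushStub stub(map->elements_kind(), arguments().immediate());
    StubCompiler::GenerateTailCall(masm(), stub.GetCode(isolate()));

The rest of the diff moves the former inline fast paths (store, elements-kind transition, grow-in-place) into ArrayPushStub::Generate for each architecture.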
@@ -4196,6 +4196,211 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ArrayPushStub::Generate(MacroAssembler* masm) {
Register receiver = r0;
Register scratch = r1;
int argc = arguments_count();
if (argc == 0) {
// Nothing to do, just return the length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Drop(argc + 1);
__ Ret();
return;
}
Isolate* isolate = masm->isolate();
if (argc != 1) {
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
Label call_builtin, attempt_to_grow_elements, with_write_barrier;
Register elements = r6;
Register end_elements = r5;
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
scratch,
Heap::kFixedArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
}
// Get the array's length into scratch and calculate new length.
__ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ add(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(scratch, r4);
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
// Store the value.
// We may need a register containing the address end_elements below, so
// write back the value in end_elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
} else {
// Check if we could survive without allocation.
__ cmp(scratch, r4);
__ b(gt, &call_builtin);
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0,
&call_builtin, argc * kDoubleSize);
}
// Save new length.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
if (IsFastDoubleElementsKind(elements_kind())) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ bind(&with_write_barrier);
if (IsFastSmiElementsKind(elements_kind())) {
if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
__ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r9, ip);
__ b(eq, &call_builtin);
ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
__ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::JS_ARRAY_MAPS_INDEX));
const int header_size = FixedArrayBase::kHeaderSize;
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ ldr(r2, FieldMemOperand(receiver, origin_offset));
__ ldr(ip, FieldMemOperand(r3, HeapObject::kMapOffset));
__ cmp(r2, ip);
__ b(ne, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
__ ldr(r3, FieldMemOperand(r3, target_offset));
__ mov(r2, receiver);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, DONT_TRACK_ALLOCATION_SITE, NULL);
}
// Save new length.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below, so write
// back the value in end_elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
end_elements,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
__ bind(&attempt_to_grow_elements);
// scratch: array's length + 1.
if (!FLAG_inline_new) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
// Growing elements that are SMI-only requires special handling in case the
// new element is non-Smi. For now, delegate to the builtin.
if (IsFastSmiElementsKind(elements_kind())) {
__ JumpIfNotSmi(r2, &call_builtin);
}
// We could be lucky and the elements array could be at the top of new-space.
// In this case we can just grow it in place by moving the allocation pointer
// up.
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
ASSERT(kAllocationDelta >= argc);
// Load top and check if it is the end of elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
__ mov(r4, Operand(new_space_allocation_top));
__ ldr(r3, MemOperand(r4));
__ cmp(end_elements, r3);
__ b(ne, &call_builtin);
__ mov(r9, Operand(new_space_allocation_limit));
__ ldr(r9, MemOperand(r9));
__ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
__ cmp(r3, r9);
__ b(hi, &call_builtin);
// We fit and could grow elements.
// Update new_space_allocation_top.
__ str(r3, MemOperand(r4));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ str(r3, MemOperand(end_elements, i * kPointerSize));
}
// Update elements' and array's sizes.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
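The &attempt_to_grow_elements path above exploits the fact that a FixedArray lying directly beneath the new-space allocation top can be extended in place by bumping the top, with no copy. A toy, self-contained C++ model of that trick, using plain stand-ins for V8's heap types (an illustration under that assumption, not V8 code):

    #include <cassert>
    #include <cstddef>

    const size_t kSpaceSize = 1024;

    // A bump-pointer allocation space, standing in for V8's new space.
    struct BumpSpace {
      int slots[kSpaceSize];
      size_t top = 0;  // index of the next free slot ("allocation top")

      int* Allocate(size_t n) {
        if (top + n > kSpaceSize) return nullptr;
        int* result = slots + top;
        top += n;
        return result;
      }
    };

    // Stand-in for a JSArray with a separately allocated elements store.
    struct Array {
      int* elements;
      size_t length;
      size_t capacity;
    };

    // Returns true if the value could be pushed on a fast path.
    bool FastPush(BumpSpace* space, Array* a, int value) {
      if (a->length < a->capacity) {  // room left: plain store
        a->elements[a->length++] = value;
        return true;
      }
      // Grow in place only if the elements store ends exactly at the allocation
      // top; then reserving kAllocationDelta more slots just bumps the top.
      const size_t kAllocationDelta = 4;
      if (a->elements + a->capacity == space->slots + space->top &&
          space->Allocate(kAllocationDelta) != nullptr) {
        a->capacity += kAllocationDelta;  // the real stub also fills the new slots with the hole
        a->elements[a->length++] = value;
        return true;
      }
      return false;  // no luck: fall back to the generic builtin
    }

    int main() {
      BumpSpace space;
      Array a = { space.Allocate(2), 0, 2 };
      assert(FastPush(&space, &a, 1) && FastPush(&space, &a, 2));
      assert(FastPush(&space, &a, 3));  // elements abut the top, so it grows in place
      assert(a.capacity == 6 && a.length == 3);
      return 0;
    }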
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : left
......
@@ -1571,251 +1571,6 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If object is not an array or is observed or sealed, bail out to regular
// call.
if (!object->IsJSArray() ||
!cell.is_null() ||
Handle<JSArray>::cast(object)->map()->is_observed() ||
!Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
Register receiver = r0;
Register scratch = r1;
const int argc = arguments().immediate();
if (argc == 0) {
// Nothing to do, just return the length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Drop(argc + 1);
__ Ret();
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = r6;
Register end_elements = r5;
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
scratch,
Heap::kFixedArrayMapRootIndex,
&check_double,
DONT_DO_SMI_CHECK);
// Get the array's length into scratch and calculate new length.
__ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ add(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(scratch, r4);
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
// Save new length.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
__ bind(&check_double);
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
scratch,
Heap::kFixedDoubleArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
// Get the array's length into scratch and calculate new length.
__ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ add(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(scratch, r4);
__ b(gt, &call_builtin);
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0,
&call_builtin, argc * kDoubleSize);
// Save new length.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
__ bind(&with_write_barrier);
__ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(r3, r9, &not_fast_object);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(r3, r9, &call_builtin);
__ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r9, ip);
__ b(eq, &call_builtin);
// r0: receiver
// r3: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
r9,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
r9,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
}
// Save new length.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
end_elements,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
__ bind(&attempt_to_grow_elements);
// scratch: array's length + 1.
if (!FLAG_inline_new) {
__ b(&call_builtin);
}
__ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
__ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastObjectElements(r9, r9, &call_builtin);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top and check if it is the end of elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
__ mov(r4, Operand(new_space_allocation_top));
__ ldr(r3, MemOperand(r4));
__ cmp(end_elements, r3);
__ b(ne, &call_builtin);
__ mov(r9, Operand(new_space_allocation_limit));
__ ldr(r9, MemOperand(r9));
__ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
__ cmp(r3, r9);
__ b(hi, &call_builtin);
// We fit and could grow elements.
// Update new_space_allocation_top.
__ str(r3, MemOperand(r4));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ str(r3, MemOperand(end_elements, i * kPointerSize));
}
// Update elements' and array's sizes.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
}
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
}
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
......
@@ -78,6 +78,7 @@ namespace internal {
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
V(ArrayPush) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -1151,6 +1152,30 @@ class BinaryOpICStub : public HydrogenCodeStub {
};
class ArrayPushStub: public PlatformCodeStub {
public:
ArrayPushStub(ElementsKind kind, int argc) {
bit_field_ = ElementsKindBits::encode(kind) | ArgcBits::encode(argc);
}
void Generate(MacroAssembler* masm);
private:
int arguments_count() { return ArgcBits::decode(bit_field_); }
ElementsKind elements_kind() {
return ElementsKindBits::decode(bit_field_);
}
virtual CodeStub::Major MajorKey() { return ArrayPush; }
virtual int MinorKey() { return bit_field_; }
class ElementsKindBits: public BitField<ElementsKind, 0, 3> {};
class ArgcBits: public BitField<int, 3, Code::kArgumentsBits> {};
int bit_field_;
};
// TODO(bmeurer): Merge this into the BinaryOpICStub once we have proper tail
// call support for stubs in Hydrogen.
class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
......
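The MinorKey for the new stub is just the bit field above, so generated code objects are cached per (elements kind, argument count) pair. A standalone sketch of that encoding with hand-rolled shifts (an illustration, not V8 code; the real stub uses the BitField helpers, and the argc field width is Code::kArgumentsBits rather than "whatever bits remain"):

    #include <cassert>
    #include <cstdint>

    // Illustrative subset of V8's fast elements kinds; the only requirement
    // here is that each value fits in the 3-bit field.
    enum ElementsKind {
      FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
      FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
      FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS
    };

    const int kElementsKindBits = 3;   // mirrors BitField<ElementsKind, 0, 3>
    const int kArgcShift = kElementsKindBits;

    uint32_t EncodeMinorKey(ElementsKind kind, int argc) {
      assert(static_cast<int>(kind) < (1 << kElementsKindBits));
      return static_cast<uint32_t>(kind) |
             (static_cast<uint32_t>(argc) << kArgcShift);
    }

    ElementsKind DecodeElementsKind(uint32_t key) {
      return static_cast<ElementsKind>(key & ((1u << kElementsKindBits) - 1));
    }

    int DecodeArgc(uint32_t key) { return static_cast<int>(key >> kArgcShift); }

    int main() {
      uint32_t key = EncodeMinorKey(FAST_HOLEY_ELEMENTS, 1);
      assert(DecodeElementsKind(key) == FAST_HOLEY_ELEMENTS);
      assert(DecodeArgc(key) == 1);
      return 0;
    }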
@@ -4413,6 +4413,198 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ArrayPushStub::Generate(MacroAssembler* masm) {
int argc = arguments_count();
if (argc == 0) {
// Noop, return the length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
return;
}
Isolate* isolate = masm->isolate();
if (argc != 1) {
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
Label call_builtin, attempt_to_grow_elements, with_write_barrier;
// Get the elements array of the object.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
isolate->factory()->fixed_array_map());
__ j(not_equal, &call_builtin);
}
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(eax, Immediate(Smi::FromInt(argc)));
// Get the elements' length into ecx.
__ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(eax, ecx);
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ JumpIfNotSmi(ecx, &with_write_barrier);
// Store the value.
__ mov(FieldOperand(edi, eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize),
ecx);
} else {
__ j(greater, &call_builtin);
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ StoreNumberToDoubleElements(
ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
}
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ ret((argc + 1) * kPointerSize);
if (IsFastDoubleElementsKind(elements_kind())) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ bind(&with_write_barrier);
if (IsFastSmiElementsKind(elements_kind())) {
if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
isolate->factory()->heap_number_map());
__ j(equal, &call_builtin);
ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
__ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
__ mov(ebx, ContextOperand(ebx, Context::JS_ARRAY_MAPS_INDEX));
const int header_size = FixedArrayBase::kHeaderSize;
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ mov(edi, FieldOperand(ebx, origin_offset));
__ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
__ mov(ebx, FieldOperand(ebx, target_offset));
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, DONT_TRACK_ALLOCATION_SITE, NULL);
// Restore edi, which the write barrier used as a scratch register while
// setting the map.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
}
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
// Store the value.
__ lea(edx, FieldOperand(edi, eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
__ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
if (!FLAG_inline_new) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ mov(ebx, Operand(esp, argc * kPointerSize));
// Growing elements that are SMI-only requires special handling in case the
// new element is non-Smi. For now, delegate to the builtin.
if (IsFastSmiElementsKind(elements_kind())) {
__ JumpIfNotSmi(ebx, &call_builtin);
}
// We could be lucky and the elements array could be at the top of new-space.
// In this case we can just grow it in place by moving the allocation pointer
// up.
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
ASSERT(kAllocationDelta >= argc);
// Load top.
__ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
// Check if it's the end of elements.
__ lea(edx, FieldOperand(edi, eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmp(edx, ecx);
__ j(not_equal, &call_builtin);
__ add(ecx, Immediate(kAllocationDelta * kPointerSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(above, &call_builtin);
// We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
// Push the argument...
__ mov(Operand(edx, 0), ebx);
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
isolate->factory()->the_hole_value());
}
if (IsFastObjectElementsKind(elements_kind())) {
// We know the elements array is in new space so we don't need the
// remembered set, but we just pushed a value onto it so we may have to tell
// the incremental marker to rescan the object that we just grew. We don't
// need to worry about the holes because they are in old space and already
// marked black.
__ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
}
// Restore receiver to edx as finish sequence assumes it's here.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Increment elements' and array's sizes.
__ add(FieldOperand(edi, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(kAllocationDelta)));
// NOTE: This only happens in new-space, where we don't care about the
// black-byte-count on pages. Otherwise we should update that too if the
// object is black.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : left
......
@@ -1666,251 +1666,6 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If object is not an array or is observed or sealed, bail out to regular
// call.
if (!object->IsJSArray() ||
!cell.is_null() ||
Handle<JSArray>::cast(object)->map()->is_observed() ||
!Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
const int argc = arguments().immediate();
if (argc == 0) {
// Noop, return the length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
__ j(not_equal, &check_double);
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(eax, Immediate(Smi::FromInt(argc)));
// Get the elements' length into ecx.
__ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(eax, ecx);
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ JumpIfNotSmi(ecx, &with_write_barrier);
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
// Store the value.
__ mov(FieldOperand(edi,
eax,
times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize),
ecx);
__ ret((argc + 1) * kPointerSize);
__ bind(&check_double);
// Check that the elements are in double mode.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_double_array_map()));
__ j(not_equal, &call_builtin);
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(eax, Immediate(Smi::FromInt(argc)));
// Get the elements' length into ecx.
__ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(eax, ecx);
__ j(greater, &call_builtin);
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ StoreNumberToDoubleElements(
ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(ebx, &call_builtin);
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(factory()->heap_number_map()));
__ j(equal, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
&try_holey_map);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
ebx,
edi,
&call_builtin);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(ebx, &call_builtin);
}
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
// Store the value.
__ lea(edx, FieldOperand(edi,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
__ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
if (!FLAG_inline_new) {
__ jmp(&call_builtin);
}
__ mov(ebx, Operand(esp, argc * kPointerSize));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(ebx, &no_fast_elements_check);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
__ bind(&no_fast_elements_check);
// We could be lucky and the elements array could be at the top of
// new-space. In this case we can just grow it in place by moving the
// allocation pointer up.
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top.
__ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
// Check if it's the end of elements.
__ lea(edx, FieldOperand(edi,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmp(edx, ecx);
__ j(not_equal, &call_builtin);
__ add(ecx, Immediate(kAllocationDelta * kPointerSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(above, &call_builtin);
// We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
// Push the argument...
__ mov(Operand(edx, 0), ebx);
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
Immediate(factory()->the_hole_value()));
}
// We know the elements array is in new space so we don't need the
// remembered set, but we just pushed a value onto it so we may have to
// tell the incremental marker to rescan the object that we just grew. We
// don't need to worry about the holes because they are in old space and
// already marked black.
__ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to edx as finish sequence assumes it's here.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Increment elements' and array's sizes.
__ add(FieldOperand(edi, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(kAllocationDelta)));
// NOTE: This only happens in new-space, where we don't
// care about the black-byte-count on pages. Otherwise we should
// update that too if the object is black.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ ret((argc + 1) * kPointerSize);
}
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate()),
argc + 1,
1);
}
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
......
@@ -1282,6 +1282,41 @@ void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
}
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If object is not an array or is observed or sealed, bail out to regular
// call.
if (!object->IsJSArray() ||
!cell.is_null() ||
Handle<JSArray>::cast(object)->map()->is_observed() ||
!Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
Handle<Map> map(Handle<JSArray>::cast(object)->map());
ElementsKind elements_kind = map->elements_kind();
const int argc = arguments().immediate();
ArrayPushStub stub(elements_kind, argc);
Handle<Code> code = stub.GetCode(isolate());
StubCompiler::GenerateTailCall(masm(), code);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<Object> object,
Handle<JSObject> holder,
......
@@ -4257,6 +4257,202 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ArrayPushStub::Generate(MacroAssembler* masm) {
int argc = arguments_count();
StackArgumentsAccessor args(rsp, argc);
if (argc == 0) {
// Noop, return the length.
__ movp(rax, FieldOperand(rdx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
return;
}
Isolate* isolate = masm->isolate();
if (argc != 1) {
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
Label call_builtin, attempt_to_grow_elements, with_write_barrier;
// Get the elements array of the object.
__ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
isolate->factory()->fixed_array_map());
__ j(not_equal, &call_builtin);
}
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
__ addl(rax, Immediate(argc));
// Get the elements' length into rcx.
__ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmpl(rax, rcx);
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
__ movp(rcx, args.GetArgumentOperand(1));
__ JumpIfNotSmi(rcx, &with_write_barrier);
// Store the value.
__ movp(FieldOperand(rdi,
rax,
times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize),
rcx);
} else {
__ j(greater, &call_builtin);
__ movp(rcx, args.GetArgumentOperand(1));
__ StoreNumberToDoubleElements(
rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
}
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
if (IsFastDoubleElementsKind(elements_kind())) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ bind(&with_write_barrier);
if (IsFastSmiElementsKind(elements_kind())) {
if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
__ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
isolate->factory()->heap_number_map());
__ j(equal, &call_builtin);
ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
__ movp(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
__ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
__ movp(rbx, ContextOperand(rbx, Context::JS_ARRAY_MAPS_INDEX));
const int header_size = FixedArrayBase::kHeaderSize;
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ movp(rdi, FieldOperand(rbx, origin_offset));
__ cmpq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
__ movp(rbx, FieldOperand(rbx, target_offset));
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, DONT_TRACK_ALLOCATION_SITE, NULL);
__ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
}
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Store the value.
__ lea(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movp(Operand(rdx, 0), rcx);
__ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
if (!FLAG_inline_new) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ movp(rbx, args.GetArgumentOperand(1));
// Growing elements that are SMI-only requires special handling in case the
// new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(rbx, &no_fast_elements_check);
__ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
__ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
ASSERT(kAllocationDelta >= argc);
// Load top.
__ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
__ lea(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmpq(rdx, rcx);
__ j(not_equal, &call_builtin);
__ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
__ cmpq(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
__ Store(new_space_allocation_top, rcx);
// Push the argument...
__ movp(Operand(rdx, 0), rbx);
// ... and fill the rest with holes.
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ movp(Operand(rdx, i * kPointerSize), kScratchRegister);
}
if (IsFastObjectElementsKind(elements_kind())) {
// We know the elements array is in new space so we don't need the
// remembered set, but we just pushed a value onto it so we may have to tell
// the incremental marker to rescan the object that we just grew. We don't
// need to worry about the holes because they are in old space and already
// marked black.
__ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
}
// Restore receiver to rdx as finish sequence assumes it's here.
__ movp(rdx, args.GetReceiverOperand());
// Increment elements' and array's sizes.
__ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
// Make new length a smi before returning it.
__ Integer32ToSmi(rax, rax);
__ movp(FieldOperand(rdx, JSArray::kLengthOffset), rax);
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : left
......
@@ -1593,247 +1593,6 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If object is not an array or is observed or sealed, bail out to regular
// call.
if (!object->IsJSArray() ||
!cell.is_null() ||
Handle<JSArray>::cast(object)->map()->is_observed() ||
!Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
const int argc = arguments().immediate();
StackArgumentsAccessor args(rsp, argc);
if (argc == 0) {
// Noop, return the length.
__ movp(rax, FieldOperand(rdx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_array_map());
__ j(not_equal, &check_double);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
__ addl(rax, Immediate(argc));
// Get the elements' length into rcx.
__ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmpl(rax, rcx);
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
__ movp(rcx, args.GetArgumentOperand(1));
__ JumpIfNotSmi(rcx, &with_write_barrier);
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Store the value.
__ movp(FieldOperand(rdi,
rax,
times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize),
rcx);
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&check_double);
// Check that the elements are in double mode.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_double_array_map());
__ j(not_equal, &call_builtin);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
__ addl(rax, Immediate(argc));
// Get the elements' length into rcx.
__ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmpl(rax, rcx);
__ j(greater, &call_builtin);
__ movp(rcx, args.GetArgumentOperand(1));
__ StoreNumberToDoubleElements(
rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
__ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(rbx, &call_builtin);
__ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(equal, &call_builtin);
// rdx: receiver
// rbx: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
rbx,
rdi,
&try_holey_map);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
// Restore rdi.
__ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
rbx,
rdi,
&call_builtin);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
__ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(rbx, &call_builtin);
}
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Store the value.
__ lea(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movp(Operand(rdx, 0), rcx);
__ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
if (!FLAG_inline_new) {
__ jmp(&call_builtin);
}
__ movp(rbx, args.GetArgumentOperand(1));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(rbx, &no_fast_elements_check);
__ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
__ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top.
__ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
__ lea(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmpq(rdx, rcx);
__ j(not_equal, &call_builtin);
__ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
Operand limit_operand =
masm()->ExternalOperand(new_space_allocation_limit);
__ cmpq(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
__ Store(new_space_allocation_top, rcx);
// Push the argument...
__ movp(Operand(rdx, 0), rbx);
// ... and fill the rest with holes.
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ movp(Operand(rdx, i * kPointerSize), kScratchRegister);
}
// We know the elements array is in new space so we don't need the
// remembered set, but we just pushed a value onto it so we may have to
// tell the incremental marker to rescan the object that we just grew. We
// don't need to worry about the holes because they are in old space and
// already marked black.
__ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to rdx as finish sequence assumes it's here.
__ movp(rdx, args.GetReceiverOperand());
// Increment elements' and array's sizes.
__ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
// Make new length a smi before returning it.
__ Integer32ToSmi(rax, rax);
__ movp(FieldOperand(rdx, JSArray::kLengthOffset), rax);
__ ret((argc + 1) * kPointerSize);
}
__ bind(&call_builtin);
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
isolate()),
argc + 1,
1);
}
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
......
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
// Check pushes with various numbers of arguments.
(function() {
var a = [];
@@ -113,3 +115,34 @@
assertEquals(i + 1, x.length, i + 'th iteration');
}
})();
(function() {
function f(a, i) {
a.push(i);
}
var a = [1,2,3];
a.f = function() { return 10; }
f(a, 4);
f(a, 5);
f(a, 6);
f(a, 7);
f(a, {});
assertEquals(10, a.f());
})();
(function() {
function f(a, i) {
a.push(i);
}
var a = [1,2,3];
a.f = function() { return 10; }
f(a, 4);
f(a, 5);
f(a, 6);
%OptimizeFunctionOnNextCall(f);
f(a, 7);
f(a, {});
assertEquals(10, a.f());
})();