Commit e1d9313c authored by palfia@homejinni.com

MIPS: Turn ArrayPush into a stub specialized on the elements kind and argc.

Port r18696 (6e4b51b4)

BUG=
R=gergely@homejinni.com

Review URL: https://codereview.chromium.org/143663002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18699 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 3be45008
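
For orientation before the diff: the new stub is generated per (elements kind, argc) pair, and its overall shape depends on argc exactly as the code below shows — argc == 0 just returns the length, argc != 1 tail-calls the generic C++ builtin, and only the single-argument case gets the hand-written fast path. A minimal sketch of that control flow (illustrative only, written against a plain std::vector rather than V8's internal JSArray/FixedArray types):

```cpp
#include <cstddef>
#include <vector>

// Illustrative model of the stub's argc-based dispatch; the real stub emits
// MIPS machine code via MacroAssembler and works on JSArray backing stores.
template <typename T>
size_t ArrayPushModel(std::vector<T>* elements, const std::vector<T>& args) {
  size_t argc = args.size();
  if (argc == 0) {
    // Nothing to do, just return the length.
    return elements->size();
  }
  if (argc != 1) {
    // The generated stub tail-calls the generic ArrayPush builtin here;
    // this model simply falls back to a plain loop.
    for (const T& arg : args) elements->push_back(arg);
    return elements->size();
  }
  // Single-argument fast path: store the value and bump the length.
  elements->push_back(args[0]);
  return elements->size();
}
```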
@@ -4332,6 +4332,206 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}


void ArrayPushStub::Generate(MacroAssembler* masm) {
  Register receiver = a0;
  Register scratch = a1;

  int argc = arguments_count();

  if (argc == 0) {
    // Nothing to do, just return the length.
    __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
    __ DropAndRet(argc + 1);
    return;
  }

  Isolate* isolate = masm->isolate();

  if (argc != 1) {
    __ TailCallExternalReference(
        ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
    return;
  }
  Label call_builtin, attempt_to_grow_elements, with_write_barrier;

  Register elements = t2;
  Register end_elements = t1;

  // Get the elements array of the object.
  __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));

  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
    // Check that the elements are in fast mode and writable.
    __ CheckMap(elements,
                scratch,
                Heap::kFixedArrayMapRootIndex,
                &call_builtin,
                DONT_DO_SMI_CHECK);
  }

  // Get the array's length into scratch and calculate new length.
  __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));

  // Get the elements' length.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));

  const int kEndElementsOffset =
      FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;

  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
    // Check if we could survive without allocation.
    __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));

    // Check if value is a smi.
    __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
    __ JumpIfNotSmi(t0, &with_write_barrier);

    // Store the value.
    // We may need a register containing the address end_elements below,
    // so write back the value in end_elements.
    __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
    __ Addu(end_elements, elements, end_elements);
    __ Addu(end_elements, end_elements, kEndElementsOffset);
    __ sw(t0, MemOperand(end_elements));
  } else {
    // Check if we could survive without allocation.
    __ Branch(&call_builtin, gt, scratch, Operand(t0));

    __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
    __ StoreNumberToDoubleElements(t0, scratch, elements, a3, t1, a2,
                                   &call_builtin, argc * kDoubleSize);
  }

  // Save new length.
  __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ mov(v0, scratch);
  __ DropAndRet(argc + 1);
  if (IsFastDoubleElementsKind(elements_kind())) {
    __ bind(&call_builtin);
    __ TailCallExternalReference(
        ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
    return;
  }

  __ bind(&with_write_barrier);

  if (IsFastSmiElementsKind(elements_kind())) {
    if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);

    __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    __ Branch(&call_builtin, eq, t3, Operand(at));

    ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
        ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
    __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
    __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
    __ lw(a3, ContextOperand(a3, Context::JS_ARRAY_MAPS_INDEX));
    const int header_size = FixedArrayBase::kHeaderSize;
    // Verify that the object can be transitioned in place.
    const int origin_offset = header_size + elements_kind() * kPointerSize;
    __ lw(a2, FieldMemOperand(receiver, origin_offset));
    __ lw(at, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ Branch(&call_builtin, ne, a2, Operand(at));

    const int target_offset = header_size + target_kind * kPointerSize;
    __ lw(a3, FieldMemOperand(a3, target_offset));
    __ mov(a2, receiver);
    ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
        masm, DONT_TRACK_ALLOCATION_SITE, NULL);
  }

  // Save new length.
  __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));

  // Store the value.
  // We may need a register containing the address end_elements below, so write
  // back the value in end_elements.
  __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(end_elements, elements, end_elements);
  __ Addu(end_elements, end_elements, kEndElementsOffset);
  __ sw(t0, MemOperand(end_elements));

  __ RecordWrite(elements,
                 end_elements,
                 t0,
                 kRAHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ mov(v0, scratch);
  __ DropAndRet(argc + 1);
  __ bind(&attempt_to_grow_elements);
  // scratch: array's length + 1.

  if (!FLAG_inline_new) {
    __ bind(&call_builtin);
    __ TailCallExternalReference(
        ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
    return;
  }

  __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));

  // Growing elements that are SMI-only requires special handling in case the
  // new element is non-Smi. For now, delegate to the builtin.
  if (IsFastSmiElementsKind(elements_kind())) {
    __ JumpIfNotSmi(a2, &call_builtin);
  }

  // We could be lucky and the elements array could be at the top of new-space.
  // In this case we can just grow it in place by moving the allocation pointer
  // up.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate);
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate);

  const int kAllocationDelta = 4;
  ASSERT(kAllocationDelta >= argc);

  // Load top and check if it is the end of elements.
  __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(end_elements, elements, end_elements);
  __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
  __ li(t0, Operand(new_space_allocation_top));
  __ lw(a3, MemOperand(t0));
  __ Branch(&call_builtin, ne, a3, Operand(end_elements));

  __ li(t3, Operand(new_space_allocation_limit));
  __ lw(t3, MemOperand(t3));
  __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
  __ Branch(&call_builtin, hi, a3, Operand(t3));

  // We fit and could grow elements.
  // Update new_space_allocation_top.
  __ sw(a3, MemOperand(t0));
  // Push the argument.
  __ sw(a2, MemOperand(end_elements));
  // Fill the rest with holes.
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  for (int i = 1; i < kAllocationDelta; i++) {
    __ sw(a3, MemOperand(end_elements, i * kPointerSize));
  }

  // Update elements' and array's sizes.
  __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
  __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));

  // Elements are in new space, so write barrier is not required.
  __ mov(v0, scratch);
  __ DropAndRet(argc + 1);

  __ bind(&call_builtin);
  __ TailCallExternalReference(
      ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}


void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a1    : left
......
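
The most subtle part of the new stub above is the attempt_to_grow_elements path: if the elements array happens to end exactly at the new-space allocation top, it can be extended in place by bumping the top pointer, with no copy and no write barrier. A rough C++ model of that check, under assumed plain-pointer semantics (the real code reads new_space_allocation_top/limit through ExternalReference and works with Smi-tagged lengths):

```cpp
#include <cstdint>

// Hypothetical model of growing an elements backing store in place when it
// abuts the new-space allocation top. kAllocationDelta mirrors the stub.
constexpr int kPointerSize = 4;      // MIPS32 word size
constexpr int kAllocationDelta = 4;  // extra slots reserved per grow

bool TryGrowInPlace(uint8_t* elements_end,     // address just past the elements
                    uint8_t** allocation_top,  // current new-space top
                    uint8_t* allocation_limit) {
  if (*allocation_top != elements_end) return false;  // something was allocated after us
  uint8_t* new_top = *allocation_top + kAllocationDelta * kPointerSize;
  if (new_top > allocation_limit) return false;       // would overflow new-space
  *allocation_top = new_top;  // bump-pointer "allocation" extends the array in place
  return true;                // caller stores the pushed value and fills the rest with holes
}
```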
@@ -1556,250 +1556,6 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}


Handle<Code> CallStubCompiler::CompileArrayPushCall(
    Handle<Object> object,
    Handle<JSObject> holder,
    Handle<Cell> cell,
    Handle<JSFunction> function,
    Handle<String> name,
    Code::StubType type) {
  // If object is not an array or is observed or sealed, bail out to regular
  // call.
  if (!object->IsJSArray() ||
      !cell.is_null() ||
      Handle<JSArray>::cast(object)->map()->is_observed() ||
      !Handle<JSArray>::cast(object)->map()->is_extensible()) {
    return Handle<Code>::null();
  }

  Label miss;

  HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);

  Register receiver = a0;
  Register scratch = a1;

  const int argc = arguments().immediate();

  if (argc == 0) {
    // Nothing to do, just return the length.
    __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
    __ DropAndRet(argc + 1);
  } else {
    Label call_builtin;
    if (argc == 1) {  // Otherwise fall through to call the builtin.
      Label attempt_to_grow_elements, with_write_barrier, check_double;

      Register elements = t2;
      Register end_elements = t1;
      // Get the elements array of the object.
      __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
      // Check that the elements are in fast mode and writable.
      __ CheckMap(elements,
                  scratch,
                  Heap::kFixedArrayMapRootIndex,
                  &check_double,
                  DONT_DO_SMI_CHECK);

      // Get the array's length into scratch and calculate new length.
      __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
      STATIC_ASSERT(kSmiTagSize == 1);
      STATIC_ASSERT(kSmiTag == 0);
      __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));

      // Get the elements' length.
      __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));

      // Check if we could survive without allocation.
      __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));

      // Check if value is a smi.
      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
      __ JumpIfNotSmi(t0, &with_write_barrier);

      // Save new length.
      __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));

      // Store the value.
      // We may need a register containing the address end_elements below,
      // so write back the value in end_elements.
      __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
      __ Addu(end_elements, elements, end_elements);
      const int kEndElementsOffset =
          FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
      __ Addu(end_elements, end_elements, kEndElementsOffset);
      __ sw(t0, MemOperand(end_elements));

      // Check for a smi.
      __ mov(v0, scratch);
      __ DropAndRet(argc + 1);

      __ bind(&check_double);

      // Check that the elements are in fast mode and writable.
      __ CheckMap(elements,
                  scratch,
                  Heap::kFixedDoubleArrayMapRootIndex,
                  &call_builtin,
                  DONT_DO_SMI_CHECK);

      // Get the array's length into scratch and calculate new length.
      __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
      STATIC_ASSERT(kSmiTagSize == 1);
      STATIC_ASSERT(kSmiTag == 0);
      __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));

      // Get the elements' length.
      __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));

      // Check if we could survive without allocation.
      __ Branch(&call_builtin, gt, scratch, Operand(t0));

      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
      __ StoreNumberToDoubleElements(
          t0, scratch, elements, a3, t1, a2,
          &call_builtin, argc * kDoubleSize);

      // Save new length.
      __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
      __ mov(v0, scratch);
      __ DropAndRet(argc + 1);
      __ bind(&with_write_barrier);

      __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));

      if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
        Label fast_object, not_fast_object;
        __ CheckFastObjectElements(a3, t3, &not_fast_object);
        __ jmp(&fast_object);
        // In case of fast smi-only, convert to fast object, otherwise bail out.
        __ bind(&not_fast_object);
        __ CheckFastSmiElements(a3, t3, &call_builtin);

        __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&call_builtin, eq, t3, Operand(at));

        // a0: receiver
        // a3: map
        Label try_holey_map;
        __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                               FAST_ELEMENTS,
                                               a3,
                                               t3,
                                               &try_holey_map);
        __ mov(a2, receiver);
        ElementsTransitionGenerator::
            GenerateMapChangeElementsTransition(masm(),
                                                DONT_TRACK_ALLOCATION_SITE,
                                                NULL);
        __ jmp(&fast_object);
        __ bind(&try_holey_map);
        __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
                                               FAST_HOLEY_ELEMENTS,
                                               a3,
                                               t3,
                                               &call_builtin);
        __ mov(a2, receiver);
        ElementsTransitionGenerator::
            GenerateMapChangeElementsTransition(masm(),
                                                DONT_TRACK_ALLOCATION_SITE,
                                                NULL);
        __ bind(&fast_object);
      } else {
        __ CheckFastObjectElements(a3, a3, &call_builtin);
      }
      // Save new length.
      __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));

      // Store the value.
      // We may need a register containing the address end_elements below,
      // so write back the value in end_elements.
      __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
      __ Addu(end_elements, elements, end_elements);
      __ Addu(end_elements, end_elements, kEndElementsOffset);
      __ sw(t0, MemOperand(end_elements));

      __ RecordWrite(elements,
                     end_elements,
                     t0,
                     kRAHasNotBeenSaved,
                     kDontSaveFPRegs,
                     EMIT_REMEMBERED_SET,
                     OMIT_SMI_CHECK);
      __ mov(v0, scratch);
      __ DropAndRet(argc + 1);

      __ bind(&attempt_to_grow_elements);
      // scratch: array's length + 1.
      // t0: elements' length.

      if (!FLAG_inline_new) {
        __ Branch(&call_builtin);
      }

      __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));

      // Growing elements that are SMI-only requires special handling in case
      // the new element is non-Smi. For now, delegate to the builtin.
      Label no_fast_elements_check;
      __ JumpIfSmi(a2, &no_fast_elements_check);
      __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
      __ CheckFastObjectElements(t3, t3, &call_builtin);
      __ bind(&no_fast_elements_check);

      ExternalReference new_space_allocation_top =
          ExternalReference::new_space_allocation_top_address(isolate());
      ExternalReference new_space_allocation_limit =
          ExternalReference::new_space_allocation_limit_address(isolate());

      const int kAllocationDelta = 4;
      // Load top and check if it is the end of elements.
      __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
      __ Addu(end_elements, elements, end_elements);
      __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
      __ li(t3, Operand(new_space_allocation_top));
      __ lw(a3, MemOperand(t3));
      __ Branch(&call_builtin, ne, end_elements, Operand(a3));

      __ li(t5, Operand(new_space_allocation_limit));
      __ lw(t5, MemOperand(t5));
      __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
      __ Branch(&call_builtin, hi, a3, Operand(t5));

      // We fit and could grow elements.
      // Update new_space_allocation_top.
      __ sw(a3, MemOperand(t3));
      // Push the argument.
      __ sw(a2, MemOperand(end_elements));
      // Fill the rest with holes.
      __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
      for (int i = 1; i < kAllocationDelta; i++) {
        __ sw(a3, MemOperand(end_elements, i * kPointerSize));
      }

      // Update elements' and array's sizes.
      __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
      __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
      __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));

      // Elements are in new space, so write barrier is not required.
      __ mov(v0, scratch);
      __ DropAndRet(argc + 1);
    }
    __ bind(&call_builtin);
    __ TailCallExternalReference(
        ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
  }

  HandlerFrontendFooter(&miss);

  // Return the generated code.
  return GetCode(type, name);
}


Handle<Code> CallStubCompiler::CompileArrayPopCall(
    Handle<Object> object,
    Handle<JSObject> holder,
......
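
A closing note on a pattern that appears in both hunks: `sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize)` turns a Smi-tagged length directly into a byte offset. On 32-bit MIPS a Smi stores its value shifted left by kSmiTagSize == 1 (per the STATIC_ASSERTs in the removed code) and kPointerSizeLog2 == 2, so one extra left shift yields length * kPointerSize. A small standalone check of that arithmetic (illustrative constants, not V8 headers):

```cpp
#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;       // Smi = value << 1 on 32-bit V8
constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers on MIPS32

int32_t SmiLengthToByteOffset(int32_t smi_tagged_length) {
  // Mirrors: sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize)
  return smi_tagged_length << (kPointerSizeLog2 - kSmiTagSize);
}

int main() {
  int32_t length = 7;
  int32_t smi = length << kSmiTagSize;                // Smi-tag the length
  assert(SmiLengthToByteOffset(smi) == length * 4);   // length * kPointerSize
  return 0;
}
```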