Commit d714d851 authored by yangguo@chromium.org

Convert fast smi-only to fast object in generated code for array push.

Review URL: https://chromiumcodereview.appspot.com/9365020

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10648 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 96baf602
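For context: V8 tracks an "elements kind" per array. An array created from small integers starts as FAST_SMI_ONLY_ELEMENTS, and pushing a non-smi value requires a one-way transition to FAST_ELEMENTS before the store. Previously the generated array-push stub bailed out to the builtin in that case; with this change it performs the map transition inline. A minimal JavaScript sketch of the situation (illustrative only, not part of the patch):

    var a = [1, 2, 3];  // backing store starts as FAST_SMI_ONLY_ELEMENTS
    a.push(4);          // still smi-only; already on the fast path
    a.push({});         // non-smi value: needs the FAST_SMI_ONLY_ELEMENTS ->
                        // FAST_ELEMENTS transition, now generated inline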
@@ -1475,7 +1475,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
     __ Ret();
   } else {
     Label call_builtin;
-    Register elements = r3;
+
+    if (argc == 1) {  // Otherwise fall through to call the builtin.
+      Label attempt_to_grow_elements;
+
+      Register elements = r6;
       Register end_elements = r5;
       // Get the elements array of the object.
       __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1487,8 +1491,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
                   &call_builtin,
                   DONT_DO_SMI_CHECK);
 
-    if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label attempt_to_grow_elements;
       // Get the array's length into r0 and calculate new length.
       __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
@@ -1496,7 +1498,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       STATIC_ASSERT(kSmiTag == 0);
       __ add(r0, r0, Operand(Smi::FromInt(argc)));
 
-      // Get the element's length.
+      // Get the elements' length.
       __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
@@ -1511,7 +1513,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // Save new length.
       __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
-      // Push the element.
+      // Store the value.
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ add(end_elements, elements,
@@ -1526,13 +1528,33 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ bind(&with_write_barrier);
 
-      __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(r6, r6, &call_builtin);
+      __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+      if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+        Label fast_object, not_fast_object;
+        __ CheckFastObjectElements(r3, r7, &not_fast_object);
+        __ jmp(&fast_object);
+        // In case of fast smi-only, convert to fast object, otherwise bail out.
+        __ bind(&not_fast_object);
+        __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
+        // r3: map
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                               FAST_ELEMENTS,
+                                               r3,
+                                               r7,
+                                               &call_builtin);
+        __ mov(r2, receiver);
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+        __ bind(&fast_object);
+      } else {
+        __ CheckFastObjectElements(r3, r3, &call_builtin);
+      }
 
       // Save new length.
       __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
-      // Push the element.
+      // Store the value.
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ add(end_elements, elements,
@@ -1578,25 +1600,25 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
              Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
       __ add(end_elements, end_elements, Operand(kEndElementsOffset));
       __ mov(r7, Operand(new_space_allocation_top));
-      __ ldr(r6, MemOperand(r7));
-      __ cmp(end_elements, r6);
+      __ ldr(r3, MemOperand(r7));
+      __ cmp(end_elements, r3);
       __ b(ne, &call_builtin);
 
       __ mov(r9, Operand(new_space_allocation_limit));
       __ ldr(r9, MemOperand(r9));
-      __ add(r6, r6, Operand(kAllocationDelta * kPointerSize));
-      __ cmp(r6, r9);
+      __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
+      __ cmp(r3, r9);
       __ b(hi, &call_builtin);
 
       // We fit and could grow elements.
       // Update new_space_allocation_top.
-      __ str(r6, MemOperand(r7));
+      __ str(r3, MemOperand(r7));
       // Push the argument.
       __ str(r2, MemOperand(end_elements));
       // Fill the rest with holes.
-      __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
+      __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
-        __ str(r6, MemOperand(end_elements, i * kPointerSize));
+        __ str(r3, MemOperand(end_elements, i * kPointerSize));
       }
 
       // Update elements' and array's sizes.
...
@@ -1345,25 +1345,25 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
   } else {
     Label call_builtin;
 
+    if (argc == 1) {  // Otherwise fall through to call builtin.
+      Label attempt_to_grow_elements, with_write_barrier;
+
       // Get the elements array of the object.
-      __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
+      __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
 
       // Check that the elements are in fast mode and writable.
-      __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+      __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
              Immediate(factory()->fixed_array_map()));
       __ j(not_equal, &call_builtin);
 
-    if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label attempt_to_grow_elements, with_write_barrier;
-
       // Get the array's length into eax and calculate new length.
       __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
       STATIC_ASSERT(kSmiTagSize == 1);
       STATIC_ASSERT(kSmiTag == 0);
       __ add(eax, Immediate(Smi::FromInt(argc)));
 
-      // Get the element's length into ecx.
-      __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+      // Get the elements' length into ecx.
+      __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
       __ cmp(eax, ecx);
@@ -1376,29 +1376,52 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // Save new length.
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
-      // Push the element.
-      __ lea(edx, FieldOperand(ebx,
-                               eax, times_half_pointer_size,
-                               FixedArray::kHeaderSize - argc * kPointerSize));
-      __ mov(Operand(edx, 0), ecx);
+      // Store the value.
+      __ mov(FieldOperand(edi,
+                          eax,
+                          times_half_pointer_size,
+                          FixedArray::kHeaderSize - argc * kPointerSize),
+             ecx);
 
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(edi, &call_builtin);
+      __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+      if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+        Label fast_object, not_fast_object;
+        __ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
+        __ jmp(&fast_object);
+        // In case of fast smi-only, convert to fast object, otherwise bail out.
+        __ bind(&not_fast_object);
+        __ CheckFastSmiOnlyElements(ebx, &call_builtin);
+        // edi: elements array
+        // edx: receiver
+        // ebx: map
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                               FAST_ELEMENTS,
+                                               ebx,
+                                               edi,
+                                               &call_builtin);
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+        // Restore edi.
+        __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
+        __ bind(&fast_object);
+      } else {
+        __ CheckFastObjectElements(ebx, &call_builtin);
+      }
 
       // Save new length.
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
-      // Push the element.
-      __ lea(edx, FieldOperand(ebx,
+      // Store the value.
+      __ lea(edx, FieldOperand(edi,
                                eax, times_half_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ mov(Operand(edx, 0), ecx);
 
-      __ RecordWrite(ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+      __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
 
       __ ret((argc + 1) * kPointerSize);
@@ -1408,11 +1431,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
         __ jmp(&call_builtin);
       }
 
-      __ mov(edi, Operand(esp, argc * kPointerSize));
+      __ mov(ebx, Operand(esp, argc * kPointerSize));
       // Growing elements that are SMI-only requires special handling in case
       // the new element is non-Smi. For now, delegate to the builtin.
       Label no_fast_elements_check;
-      __ JumpIfSmi(edi, &no_fast_elements_check);
+      __ JumpIfSmi(ebx, &no_fast_elements_check);
       __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
       __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
       __ bind(&no_fast_elements_check);
@@ -1431,7 +1454,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
 
       // Check if it's the end of elements.
-      __ lea(edx, FieldOperand(ebx,
+      __ lea(edx, FieldOperand(edi,
                                eax, times_half_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ cmp(edx, ecx);
@@ -1444,7 +1467,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
 
       // Push the argument...
-      __ mov(Operand(edx, 0), edi);
+      __ mov(Operand(edx, 0), ebx);
       // ... and fill the rest with holes.
       for (int i = 1; i < kAllocationDelta; i++) {
         __ mov(Operand(edx, i * kPointerSize),
@@ -1456,13 +1479,13 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // tell the incremental marker to rescan the object that we just grew.  We
       // don't need to worry about the holes because they are in old space and
       // already marked black.
-      __ RecordWrite(ebx, edx, edi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+      __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
 
       // Restore receiver to edx as finish sequence assumes it's here.
       __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
       // Increment elements' and array's sizes.
-      __ add(FieldOperand(ebx, FixedArray::kLengthOffset),
+      __ add(FieldOperand(edi, FixedArray::kLengthOffset),
              Immediate(Smi::FromInt(kAllocationDelta)));
 
       // NOTE: This only happens in new-space, where we don't
...
@@ -1331,24 +1331,24 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
   } else {
     Label call_builtin;
 
+    if (argc == 1) {  // Otherwise fall through to call builtin.
+      Label attempt_to_grow_elements, with_write_barrier;
+
       // Get the elements array of the object.
-      __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
+      __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
 
       // Check that the elements are in fast mode and writable.
-      __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+      __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
              factory()->fixed_array_map());
       __ j(not_equal, &call_builtin);
 
-    if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label attempt_to_grow_elements, with_write_barrier;
-
       // Get the array's length into rax and calculate new length.
       __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
       STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
       __ addl(rax, Immediate(argc));
 
-      // Get the element's length into rcx.
-      __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+      // Get the elements' length into rcx.
+      __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
       __ cmpl(rax, rcx);
@@ -1361,30 +1361,52 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // Save new length.
       __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
-      // Push the element.
-      __ lea(rdx, FieldOperand(rbx,
-                               rax, times_pointer_size,
-                               FixedArray::kHeaderSize - argc * kPointerSize));
-      __ movq(Operand(rdx, 0), rcx);
+      // Store the value.
+      __ movq(FieldOperand(rdi,
+                           rax,
+                           times_pointer_size,
+                           FixedArray::kHeaderSize - argc * kPointerSize),
+              rcx);
 
       __ Integer32ToSmi(rax, rax);  // Return new length as smi.
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(rdi, &call_builtin);
+      __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+
+      if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+        Label fast_object, not_fast_object;
+        __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
+        __ jmp(&fast_object);
+        // In case of fast smi-only, convert to fast object, otherwise bail out.
+        __ bind(&not_fast_object);
+        __ CheckFastSmiOnlyElements(rbx, &call_builtin);
+        // rdx: receiver
+        // rbx: map
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                               FAST_ELEMENTS,
+                                               rbx,
+                                               r10,
+                                               &call_builtin);
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+        __ bind(&fast_object);
+      } else {
+        __ CheckFastObjectElements(rbx, &call_builtin);
+      }
 
       // Save new length.
       __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
-      // Push the element.
-      __ lea(rdx, FieldOperand(rbx,
+      // Store the value.
+      __ lea(rdx, FieldOperand(rdi,
                                rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ movq(Operand(rdx, 0), rcx);
 
-      __ RecordWrite(rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+      __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
 
       __ Integer32ToSmi(rax, rax);  // Return new length as smi.
@@ -1395,11 +1417,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
         __ jmp(&call_builtin);
       }
 
-      __ movq(rdi, Operand(rsp, argc * kPointerSize));
+      __ movq(rbx, Operand(rsp, argc * kPointerSize));
       // Growing elements that are SMI-only requires special handling in case
       // the new element is non-Smi. For now, delegate to the builtin.
       Label no_fast_elements_check;
-      __ JumpIfSmi(rdi, &no_fast_elements_check);
+      __ JumpIfSmi(rbx, &no_fast_elements_check);
       __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
       __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
       __ bind(&no_fast_elements_check);
@@ -1414,7 +1436,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ Load(rcx, new_space_allocation_top);
 
       // Check if it's the end of elements.
-      __ lea(rdx, FieldOperand(rbx,
+      __ lea(rdx, FieldOperand(rdi,
                                rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ cmpq(rdx, rcx);
@@ -1429,7 +1451,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ Store(new_space_allocation_top, rcx);
 
       // Push the argument...
-      __ movq(Operand(rdx, 0), rdi);
+      __ movq(Operand(rdx, 0), rbx);
       // ... and fill the rest with holes.
       __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
@@ -1441,13 +1463,13 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // tell the incremental marker to rescan the object that we just grew.  We
       // don't need to worry about the holes because they are in old space and
      // already marked black.
-      __ RecordWrite(rbx, rdx, rdi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+      __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
 
       // Restore receiver to rdx as finish sequence assumes it's here.
       __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
       // Increment elements' and array's sizes.
-      __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
+      __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
                         Smi::FromInt(kAllocationDelta));
 
       // Make new length a smi before returning it.
...