Use register parameters in ElementsTransitionGenerator.

R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/389283002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22384 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 6466ff39
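
The change threads the receiver, key, value and target map through the ElementsTransitionGenerator entry points as explicit Register parameters instead of relying on fixed registers (r0-r4 on ARM, eax/ebx/ecx/edx on ia32, rax/rbx/rcx/rdx on x64). A sketch of the interface change, taken from the codegen.h hunk further down:

    // Old declaration: the registers were implied by the stub's calling convention.
    static void GenerateSmiToDouble(MacroAssembler* masm,
                                    AllocationSiteMode mode,
                                    Label* fail);

    // New declaration: callers pass the registers explicitly.
    static void GenerateSmiToDouble(MacroAssembler* masm,
                                    Register receiver,
                                    Register key,
                                    Register value,
                                    Register target_map,
                                    AllocationSiteMode mode,
                                    Label* fail);

On ARM and ARM64 the generators only ASSERT that the incoming registers do not alias their local scratch registers; the ia32 and x64 GenerateSmiToDouble/GenerateDoubleToObject still pin the old registers with ASSERTs (receiver.is(edx), key.is(ecx), value.is(eax), target_map.is(ebx), and the rdx/rcx/rax/rbx equivalents).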
@@ -374,26 +374,28 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
Register scratch_elements = r4;
ASSERT(!AreAliased(receiver, key, value, target_map,
scratch_elements));
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
__ JumpIfJSArrayHasAllocationMemento(
receiver, scratch_elements, allocation_memento_found);
}
// Set transitioned map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
r3,
target_map,
r9,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@@ -403,87 +405,103 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Register lr contains the return address.
Label loop, entry, convert_hole, gc_required, only_change_map, done;
Register elements = r4;
Register length = r5;
Register array = r6;
Register array_end = array;
// target_map parameter can be clobbered.
Register scratch1 = target_map;
Register scratch2 = r9;
// Verify input registers don't conflict with locals.
ASSERT(!AreAliased(receiver, key, value, target_map,
elements, length, array, scratch2));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r5: number of elements (smi-tagged)
__ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
__ mov(lr, Operand(length, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
// array: destination FixedDoubleArray, not tagged as heap object.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
__ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
// Update receiver's map.
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
__ str(scratch2, MemOperand(array, HeapObject::kMapOffset));
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
r3,
r9,
target_map,
scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ add(r3, r6, Operand(kHeapObjectTag));
__ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
__ add(scratch1, array, Operand(kHeapObjectTag));
__ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
r3,
r9,
scratch1,
scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
__ add(r6, r9, Operand(r5, LSL, 2));
__ mov(r4, Operand(kHoleNanLower32));
__ mov(r5, Operand(kHoleNanUpper32));
// r3: begin of source FixedArray element fields, not tagged
// r4: kHoleNanLower32
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r9: begin of FixedDoubleArray element fields, not tagged
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
__ add(array_end, scratch2, Operand(length, LSL, 2));
// Repurpose registers no longer in use.
Register hole_lower = elements;
Register hole_upper = length;
__ mov(hole_lower, Operand(kHoleNanLower32));
__ mov(hole_upper, Operand(kHoleNanUpper32));
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32
// hole_upper: kHoleNanUpper32
// array_end: end of destination FixedDoubleArray, not tagged
// scratch2: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
__ bind(&only_change_map);
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
r3,
r9,
target_map,
scratch2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -497,15 +515,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Convert and copy elements.
__ bind(&loop);
__ ldr(lr, MemOperand(r3, 4, PostIndex));
__ ldr(lr, MemOperand(scratch1, 4, PostIndex));
// lr: current element
__ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
__ vmov(s0, lr);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r9, 0);
__ add(r9, r9, Operand(8));
__ vstr(d0, scratch2, 0);
__ add(scratch2, scratch2, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
@@ -517,10 +535,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
__ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
__ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));
__ bind(&entry);
__ cmp(r9, r6);
__ cmp(scratch2, array_end);
__ b(lt, &loop);
__ pop(lr);
@@ -529,80 +547,104 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Register lr contains the return address.
Label entry, loop, convert_hole, gc_required, only_change_map;
Register elements = r4;
Register array = r6;
Register length = r5;
Register scratch = r9;
// Verify input registers don't conflict with locals.
ASSERT(!AreAliased(receiver, key, value, target_map,
elements, array, length, scratch));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
__ Push(r3, r2, r1, r0);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedDoubleArray
// r5: number of elements (smi-tagged)
__ Push(target_map, receiver, key, value);
__ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// elements: source FixedDoubleArray
// length: number of elements (smi-tagged)
// Allocate new FixedArray.
__ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
__ add(r0, r0, Operand(r5, LSL, 1));
__ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
// Re-use value and target_map registers, as they have been saved on the
// stack.
Register array_size = value;
Register allocate_scratch = target_map;
__ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
__ add(array_size, array_size, Operand(length, LSL, 1));
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
// array: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ str(scratch, MemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
__ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
__ add(r3, r6, Operand(FixedArray::kHeaderSize));
__ add(r6, r6, Operand(kHeapObjectTag));
__ add(r5, r3, Operand(r5, LSL, 1));
__ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in r4 to fully take advantage of post-indexing.
// r3: begin of destination FixedArray element fields, not tagged
// r4: begin of source FixedDoubleArray element fields, not tagged, +4
// r5: end of destination FixedArray, not tagged
// r6: destination FixedArray
// r9: heap number map
Register src_elements = elements;
Register dst_elements = target_map;
Register dst_end = length;
Register heap_number_map = scratch;
__ add(src_elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
__ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ add(array, array, Operand(kHeapObjectTag));
__ add(dst_end, dst_elements, Operand(length, LSL, 1));
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in src_elements to fully take advantage of
// post-indexing.
// dst_elements: begin of destination FixedArray element fields, not tagged
// src_elements: begin of source FixedDoubleArray element fields,
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
// heap_number_map: heap number map
__ b(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(r3, r2, r1, r0);
__ Pop(target_map, receiver, key, value);
__ pop(lr);
__ b(fail);
__ bind(&loop);
__ ldr(r1, MemOperand(r4, 8, PostIndex));
// r1: current element's upper 32 bit
// r4: address of next element's upper 32 bit
__ cmp(r1, Operand(kHoleNanUpper32));
Register upper_bits = key;
__ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
// upper_bits: current element's upper 32 bit
// src_elements: address of next element's upper 32 bit
__ cmp(upper_bits, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
// r2: new heap number
__ ldr(r0, MemOperand(r4, 12, NegOffset));
__ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
__ mov(r0, r3);
__ str(r2, MemOperand(r3, 4, PostIndex));
__ RecordWrite(r6,
r0,
r2,
Register heap_number = receiver;
Register scratch2 = value;
__ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
&gc_required);
// heap_number: new heap number
__ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
__ Strd(scratch2, upper_bits,
FieldMemOperand(heap_number, HeapNumber::kValueOffset));
__ mov(scratch2, dst_elements);
__ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
__ RecordWrite(array,
scratch2,
heap_number,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@@ -611,20 +653,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ str(r0, MemOperand(r3, 4, PostIndex));
__ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
__ str(scratch2, MemOperand(dst_elements, 4, PostIndex));
__ bind(&entry);
__ cmp(r3, r5);
__ cmp(dst_elements, dst_end);
__ b(lt, &loop);
__ Pop(r3, r2, r1, r0);
__ Pop(target_map, receiver, key, value);
// Replace receiver's backing store with newly created and filled FixedArray.
__ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
__ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
r6,
r9,
array,
scratch,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@@ -633,11 +675,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&only_change_map);
// Update receiver's map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
r3,
r9,
target_map,
scratch,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -974,10 +974,10 @@ static void KeyedStoreGenerateGenericHelper(
receiver_map,
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
ElementsTransitionGenerator::GenerateSmiToDouble(
masm, receiver, key, value, receiver_map, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -988,10 +988,9 @@ static void KeyedStoreGenerateGenericHelper(
receiver_map,
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1004,9 +1003,9 @@ static void KeyedStoreGenerateGenericHelper(
receiver_map,
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -4050,9 +4050,12 @@ bool AreAliased(Register reg1,
Register reg3,
Register reg4,
Register reg5,
Register reg6) {
Register reg6,
Register reg7,
Register reg8) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
reg7.is_valid() + reg8.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -4061,6 +4064,8 @@ bool AreAliased(Register reg1,
if (reg4.is_valid()) regs |= reg4.bit();
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
@@ -58,7 +58,9 @@ bool AreAliased(Register reg1,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
Register reg6 = no_reg,
Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
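
To support the new ASSERT(!AreAliased(...)) checks, AreAliased grows to eight optional registers on ARM and is added under #ifdef DEBUG on ia32 and x64. It reports aliasing whenever the number of valid arguments differs from the number of distinct register bits. A minimal standalone sketch of that counting trick, using hypothetical stand-ins for V8's Register and RegList types rather than the real ones:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for V8's Register/RegList, just to show the idea.
    struct Reg {
      int code;  // -1 plays the role of no_reg (an unused argument slot)
      bool is_valid() const { return code >= 0; }
      uint32_t bit() const { return is_valid() ? (1u << code) : 0u; }
    };
    const Reg no_reg = {-1};

    // Number of set bits, i.e. the number of distinct registers in the mask.
    static int NumRegs(uint32_t mask) {
      int n = 0;
      for (; mask != 0; mask &= mask - 1) n++;
      return n;
    }

    // Registers alias iff the count of valid arguments exceeds the count of
    // distinct register bits; this is the same check the eight-register
    // overload in the diff performs.
    bool AreAliased(Reg r1, Reg r2, Reg r3 = no_reg, Reg r4 = no_reg) {
      int n_of_valid_regs =
          r1.is_valid() + r2.is_valid() + r3.is_valid() + r4.is_valid();
      uint32_t regs = r1.bit() | r2.bit() | r3.bit() | r4.bit();
      return n_of_valid_regs != NumRegs(regs);
    }

    int main() {
      Reg r0 = {0}, r1 = {1}, r2 = {2};
      assert(!AreAliased(r0, r1, r2));  // all distinct -> no aliasing
      assert(AreAliased(r0, r1, r0));   // r0 passed twice -> aliasing
      return 0;
    }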
@@ -102,14 +102,16 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- x2 : receiver
// -- x3 : target map
// -----------------------------------
Register receiver = x2;
Register map = x3;
ASM_LOCATION(
"ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
ASSERT(!AreAliased(receiver, key, value, target_map));
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
@@ -118,10 +120,10 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
// Set transitioned map.
__ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
map,
target_map,
x10,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@@ -131,19 +133,25 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
// ----------- S t a t e -------------
// -- lr : return address
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- x3 : target map, scratch for subsequent call
// -----------------------------------
Register receiver = x2;
Register target_map = x3;
Label gc_required, only_change_map;
Register elements = x4;
Register length = x5;
Register array_size = x6;
Register array = x7;
Register scratch = x6;
// Verify input registers don't conflict with locals.
ASSERT(!AreAliased(receiver, key, value, target_map,
elements, length, array_size, array));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
@@ -151,32 +159,28 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
Register array_size = x6;
Register array = x7;
__ Lsl(array_size, length, kDoubleSizeLog2);
__ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
// Register array is non-tagged heap object.
// Set the destination FixedDoubleArray's length and map.
Register map_root = x6;
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -184,7 +188,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Add(x10, array, kHeapObjectTag);
__ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
x6, kLRHasBeenSaved, kDontSaveFPRegs,
scratch, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Prepare for conversion loop.
@@ -203,7 +207,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ B(&done);
@@ -235,20 +239,22 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
// -- x3 : target map, scratch for subsequent call
// -- x4 : scratch (elements)
// -----------------------------------
Register value = x0;
Register key = x1;
Register receiver = x2;
Register target_map = x3;
Register elements = x4;
Register array_size = x6;
Register array = x7;
Register length = x5;
// Verify input registers don't conflict with locals.
ASSERT(!AreAliased(receiver, key, value, target_map,
elements, array_size, array, length));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
@@ -257,7 +263,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Label only_change_map;
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
@@ -265,20 +271,16 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// TODO(all): These registers may not need to be pushed. Examine
// RecordWriteStub and check whether it's needed.
__ Push(target_map, receiver, key, value);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedArray.
Register array_size = x6;
Register array = x7;
Label gc_required;
__ Mov(array_size, FixedDoubleArray::kHeaderSize);
__ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
__ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
// Set destination FixedDoubleArray's length and map.
Register map_root = x6;
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
@@ -316,8 +318,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ B(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_num = x5;
__ AllocateHeapNumber(heap_num, &gc_required, x6, x4,
Register heap_num = length;
Register scratch = array_size;
Register scratch2 = elements;
__ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
x13, heap_num_map);
__ Mov(x13, dst_elements);
__ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
@@ -1013,10 +1013,10 @@ static void KeyedStoreGenerateGenericHelper(
x10,
x11,
slow);
ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
ElementsTransitionGenerator::GenerateSmiToDouble(
masm, receiver, key, value, receiver_map, mode, slow);
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ B(&fast_double_without_map_check);
@@ -1028,10 +1028,11 @@ static void KeyedStoreGenerateGenericHelper(
x10,
x11,
slow);
ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ B(&finish_store);
@@ -1045,9 +1046,9 @@ static void KeyedStoreGenerateGenericHelper(
x10,
x11,
slow);
ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ B(&finish_store);
}
@@ -117,15 +117,30 @@ class ElementsTransitionGenerator : public AllStatic {
public:
// If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
// |allocation_memento_found| may be NULL.
static void GenerateMapChangeElementsTransition(MacroAssembler* masm,
static void GenerateMapChangeElementsTransition(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found);
static void GenerateSmiToDouble(MacroAssembler* masm,
AllocationSiteMode mode,
Label* fail);
static void GenerateDoubleToObject(MacroAssembler* masm,
AllocationSiteMode mode,
Label* fail);
static void GenerateSmiToDouble(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail);
static void GenerateDoubleToObject(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail);
private:
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
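
With these declarations, the KeyedStore helpers below simply forward their own register assignments; for example (mirroring the ARM ic-arm.cc hunk earlier, where receiver_map serves as the target_map argument):

    AllocationSiteMode mode =
        AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
    ElementsTransitionGenerator::GenerateSmiToDouble(
        masm, receiver, key, value, receiver_map, mode, slow);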
@@ -522,26 +522,28 @@ MemMoveFunction CreateMemMoveFunction() {
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Register scratch = edi;
ASSERT(!AreAliased(receiver, key, value, target_map, scratch));
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
__ JumpIfJSArrayHasAllocationMemento(
receiver, scratch, allocation_memento_found);
}
// Set transitioned map.
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
__ RecordWriteField(edx,
__ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
ebx,
edi,
target_map,
scratch,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -549,14 +551,19 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Return address is on the stack.
ASSERT(receiver.is(edx));
ASSERT(key.is(ecx));
ASSERT(value.is(eax));
ASSERT(target_map.is(ebx));
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
@@ -670,14 +677,19 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Return address is on the stack.
ASSERT(receiver.is(edx));
ASSERT(key.is(ecx));
ASSERT(value.is(eax));
ASSERT(target_map.is(ebx));
Label loop, entry, convert_hole, gc_required, only_change_map, success;
if (mode == TRACK_ALLOCATION_SITE) {
@@ -804,7 +804,8 @@ static void KeyedStoreGenerateGenericHelper(
slow);
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
ElementsTransitionGenerator::GenerateSmiToDouble(
masm, receiver, key, value, ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -816,8 +817,8 @@ static void KeyedStoreGenerateGenericHelper(
edi,
slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -832,7 +833,8 @@ static void KeyedStoreGenerateGenericHelper(
edi,
slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -3043,15 +3043,33 @@ void MacroAssembler::CallCFunction(Register function,
}
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
if (r1.is(r4)) return true;
if (r2.is(r3)) return true;
if (r2.is(r4)) return true;
if (r3.is(r4)) return true;
return false;
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register reg6,
Register reg7,
Register reg8) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
reg7.is_valid() + reg8.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
if (reg2.is_valid()) regs |= reg2.bit();
if (reg3.is_valid()) regs |= reg3.bit();
if (reg4.is_valid()) regs |= reg4.bit();
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif
CodePatcher::CodePatcher(byte* address, int size)
......
@@ -30,7 +30,16 @@ enum RegisterValueType {
};
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg,
Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
// MacroAssembler implements a collection of frequently used macros.
@@ -184,26 +184,29 @@ ModuloFunction CreateModuloFunction() {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
// Return address is on the stack.
Register scratch = rdi;
ASSERT(!AreAliased(receiver, key, value, target_map, scratch));
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
__ JumpIfJSArrayHasAllocationMemento(
receiver, scratch, allocation_memento_found);
}
// Set transitioned map.
__ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
__ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
rbx,
rdi,
target_map,
scratch,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -211,14 +214,19 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Return address is on the stack.
ASSERT(receiver.is(rdx));
ASSERT(key.is(rcx));
ASSERT(value.is(rax));
ASSERT(target_map.is(rbx));
// The fail label is not actually used since we do not allocate.
Label allocated, new_backing_store, only_change_map, done;
@@ -345,14 +353,19 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Return address is on the stack.
ASSERT(receiver.is(rdx));
ASSERT(key.is(rcx));
ASSERT(value.is(rax));
ASSERT(target_map.is(rbx));
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
@@ -673,7 +673,8 @@ static void KeyedStoreGenerateGenericHelper(
slow);
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
ElementsTransitionGenerator::GenerateSmiToDouble(
masm, receiver, key, value, rbx, mode, slow);
__ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -685,8 +686,8 @@ static void KeyedStoreGenerateGenericHelper(
rdi,
slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, rbx, mode, slow);
__ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -701,7 +702,8 @@ static void KeyedStoreGenerateGenericHelper(
rdi,
slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, rbx, mode, slow);
__ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -5020,15 +5020,33 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
}
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
if (r1.is(r4)) return true;
if (r2.is(r3)) return true;
if (r2.is(r4)) return true;
if (r3.is(r4)) return true;
return false;
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register reg6,
Register reg7,
Register reg8) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
reg7.is_valid() + reg8.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
if (reg2.is_valid()) regs |= reg2.bit();
if (reg3.is_valid()) regs |= reg3.bit();
if (reg4.is_valid()) regs |= reg4.bit();
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif
CodePatcher::CodePatcher(byte* address, int size)
@@ -50,7 +50,16 @@ class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
: EnumSet<SmiOperationConstraint, byte>(bits) { }
};
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg,
Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
// Forward declaration.
class JumpTarget;