Commit d6fcb81a authored by akos.palfi@imgtec.com

MIPS: Use register parameters in ElementsTransitionGenerator.

Port r22384 (52caca20)

BUG=
R=mvstanton@chromium.org

Review URL: https://codereview.chromium.org/393693003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22396 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 23887bf9
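
For orientation, the generators' new interface as it lands in this patch, collected from the hunks below. Callers now name the registers explicitly instead of relying on the fixed a0..a3 calling convention; the trailing comments noting the old implicit registers are editorial:

    void ElementsTransitionGenerator::GenerateSmiToDouble(
        MacroAssembler* masm,
        Register receiver,        // previously fixed to a2
        Register key,             // previously fixed to a1
        Register value,           // previously fixed to a0
        Register target_map,      // previously fixed to a3
        AllocationSiteMode mode,
        Label* fail);

GenerateMapChangeElementsTransition and GenerateDoubleToObject gain the same four register parameters.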
@@ -655,26 +655,28 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 #define __ ACCESS_MASM(masm)
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
+  Register scratch_elements = t0;
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     scratch_elements));
+
   if (mode == TRACK_ALLOCATION_SITE) {
     ASSERT(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch_elements, allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
+                      target_map,
                       t5,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
@@ -684,62 +686,74 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
   Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  Register elements = t0;
+  Register length = t1;
+  Register array = t2;
+  Register array_end = array;
+
+  // target_map parameter can be clobbered.
+  Register scratch1 = target_map;
+  Register scratch2 = t5;
+  Register scratch3 = t3;
+
+  // Verify input registers don't conflict with locals.
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     elements, length, array, scratch2));
+
   Register scratch = t6;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(t0));
+  __ Branch(&only_change_map, eq, at, Operand(elements));
 
   __ push(ra);
-  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  // t0: source FixedArray
-  // t1: number of elements (smi-tagged)
+  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedDoubleArray.
-  __ sll(scratch, t1, 2);
+  __ sll(scratch, length, 2);
   __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
-  __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
-  // t2: destination FixedDoubleArray, not tagged as heap object
+  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+  // array: destination FixedDoubleArray, not tagged as heap object
 
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
-  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+  __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
   // Update receiver's map.
+  __ sw(scratch2, MemOperand(array, HeapObject::kMapOffset));
 
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t5,
+                      target_map,
+                      scratch2,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Addu(a3, t2, Operand(kHeapObjectTag));
-  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
+  __ Addu(scratch1, array, Operand(kHeapObjectTag));
+  __ sw(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      a3,
-                      t5,
+                      scratch1,
+                      scratch2,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
@@ -747,25 +761,31 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
 
   // Prepare for conversion loop.
-  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
-  __ sll(t2, t1, 2);
-  __ Addu(t2, t2, t3);
-  __ li(t0, Operand(kHoleNanLower32));
-  __ li(t1, Operand(kHoleNanUpper32));
-  // t0: kHoleNanLower32
-  // t1: kHoleNanUpper32
-  // t2: end of destination FixedDoubleArray, not tagged
-  // t3: begin of FixedDoubleArray element fields, not tagged
+  __ Addu(scratch1, elements,
+      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ sll(at, length, 2);
+  __ Addu(array_end, scratch3, at);
+
+  // Repurpose registers no longer in use.
+  Register hole_lower = elements;
+  Register hole_upper = length;
+
+  __ li(hole_lower, Operand(kHoleNanLower32));
+  // scratch1: begin of source FixedArray element fields, not tagged
+  // hole_lower: kHoleNanLower32
+  // hole_upper: kHoleNanUpper32
+  // array_end: end of destination FixedDoubleArray, not tagged
+  // scratch3: begin of FixedDoubleArray element fields, not tagged
 
-  __ Branch(&entry);
+  __ Branch(USE_DELAY_SLOT, &entry);
+  __ li(hole_upper, Operand(kHoleNanUpper32));  // In delay slot.
 
   __ bind(&only_change_map);
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t5,
+                      target_map,
+                      scratch2,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
@@ -774,39 +794,40 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
+  __ Branch(USE_DELAY_SLOT, fail);
   __ pop(ra);
-  __ Branch(fail);
 
   // Convert and copy elements.
   __ bind(&loop);
-  __ lw(t5, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
-  // t5: current element
-  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
+  __ lw(scratch2, MemOperand(scratch1));
+  __ Addu(scratch1, scratch1, kIntSize);
+  // scratch2: current element
+  __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);
 
   // Normal smi, convert to double and store.
-  __ mtc1(t5, f0);
+  __ mtc1(scratch2, f0);
   __ cvt_d_w(f0, f0);
-  __ sdc1(f0, MemOperand(t3));
-  __ Addu(t3, t3, kDoubleSize);
-  __ Branch(&entry);
+  __ sdc1(f0, MemOperand(scratch3));
+  __ Branch(USE_DELAY_SLOT, &entry);
+  __ addiu(scratch3, scratch3, kDoubleSize);  // In delay slot.
 
   // Hole found, store the-hole NaN.
   __ bind(&convert_hole);
   if (FLAG_debug_code) {
     // Restore a "smi-untagged" heap object.
-    __ SmiTag(t5);
-    __ Or(t5, t5, Operand(1));
+    __ SmiTag(scratch2);
+    __ Or(scratch2, scratch2, Operand(1));
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
+    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
   }
-  __ sw(t0, MemOperand(t3, Register::kMantissaOffset));  // mantissa
-  __ sw(t1, MemOperand(t3, Register::kExponentOffset));  // exponent
-  __ Addu(t3, t3, kDoubleSize);
+  // mantissa
+  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
+  // exponent
+  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
+  __ addiu(scratch3, scratch3, kDoubleSize);
 
   __ bind(&entry);
-  __ Branch(&loop, lt, t3, Operand(t2));
+  __ Branch(&loop, lt, scratch3, Operand(array_end));
 
   __ pop(ra);
   __ bind(&done);
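
A side effect of this rewrite on the 32-bit path is that the conversion loop now schedules useful work into MIPS branch delay slots instead of letting the assembler pad them with a nop. A minimal sketch of the pattern, using the same macro-assembler calls as the hunk above (USE_DELAY_SLOT tells Branch() to leave the delay slot open so the next emitted instruction fills it):

    __ sdc1(f0, MemOperand(scratch3));          // store the converted double
    __ Branch(USE_DELAY_SLOT, &entry);          // jump to the loop condition...
    __ addiu(scratch3, scratch3, kDoubleSize);  // ...emitted into the delay slot

The delay-slot instruction executes before control reaches the branch target, so the pointer bump comes for free on every iteration.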
@@ -814,90 +835,112 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
   Label entry, loop, convert_hole, gc_required, only_change_map;
+  Register elements = t0;
+  Register array = t2;
+  Register length = t1;
+  Register scratch = t5;
+
+  // Verify input registers don't conflict with locals.
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     elements, array, length, scratch));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(t0));
+  __ Branch(&only_change_map, eq, at, Operand(elements));
 
-  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  __ MultiPush(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
 
-  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  // t0: source FixedArray
-  // t1: number of elements (smi-tagged)
+  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedArray.
-  __ sll(a0, t1, 1);
-  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
-  __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
-  // t2: destination FixedArray, not tagged as heap object
+  // Re-use value and target_map registers, as they have been saved on the
+  // stack.
+  Register array_size = value;
+  Register allocate_scratch = target_map;
+  __ sll(array_size, length, 1);
+  __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize);
+  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+              NO_ALLOCATION_FLAGS);
+  // array: destination FixedArray, not tagged as heap object
 
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
-  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
-  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+  __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ sw(scratch, MemOperand(array, HeapObject::kMapOffset));
 
   // Prepare for conversion loop.
-  __ Addu(t0, t0, Operand(
+  Register src_elements = elements;
+  Register dst_elements = target_map;
+  Register dst_end = length;
+  Register heap_number_map = scratch;
+  __ Addu(src_elements, src_elements, Operand(
       FixedDoubleArray::kHeaderSize - kHeapObjectTag
       + Register::kExponentOffset));
-  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
-  __ Addu(t2, t2, Operand(kHeapObjectTag));
-  __ sll(t1, t1, 1);
-  __ Addu(t1, a3, t1);
-  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
+  __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
+  __ Addu(array, array, Operand(kHeapObjectTag));
+  __ sll(dst_end, dst_end, 1);
+  __ Addu(dst_end, dst_elements, dst_end);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
   // Using offsetted addresses.
-  // a3: begin of destination FixedArray element fields, not tagged
-  // t0: begin of source FixedDoubleArray element fields, not tagged,
-  //     points to the exponent
-  // t1: end of destination FixedArray, not tagged
-  // t2: destination FixedArray
-  // t3: the-hole pointer
-  // t5: heap number map
+  // dst_elements: begin of destination FixedArray element fields, not tagged
+  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
+  //     points to the exponent
+  // dst_end: end of destination FixedArray, not tagged
+  // array: destination FixedArray
+  // heap_number_map: heap number map
   __ Branch(&entry);
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
-  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  __ MultiPop(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
 
   __ Branch(fail);
 
   __ bind(&loop);
-  __ lw(a1, MemOperand(t0));
-  __ Addu(t0, t0, kDoubleSize);
-  // a1: current element's upper 32 bit
-  // t0: address of next element's upper 32 bit
+  Register upper_bits = key;
+  __ lw(upper_bits, MemOperand(src_elements));
+  __ Addu(src_elements, src_elements, kDoubleSize);
+  // upper_bits: current element's upper 32 bit
+  // src_elements: address of next element's upper 32 bit
   __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
 
   // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
-  // a2: new heap number
-  // Load mantissa of current element, t0 point to exponent of next element.
-  __ lw(a0, MemOperand(t0, (Register::kMantissaOffset
+  Register heap_number = receiver;
+  Register scratch2 = value;
+  Register scratch3 = t6;
+  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
+                        &gc_required);
+  // heap_number: new heap number
+  // Load mantissa of current element, src_elements
+  // point to exponent of next element.
+  __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
       - Register::kExponentOffset - kDoubleSize)));
-  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
-  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
-  __ mov(a0, a3);
-  __ sw(a2, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
-  __ RecordWrite(t2,
-                 a0,
-                 a2,
+  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+  __ mov(scratch2, dst_elements);
+  __ sw(heap_number, MemOperand(dst_elements));
+  __ Addu(dst_elements, dst_elements, kIntSize);
+  __ RecordWrite(array,
+                 scratch2,
+                 heap_number,
                  kRAHasBeenSaved,
                  kDontSaveFPRegs,
                  EMIT_REMEMBERED_SET,
@@ -906,19 +949,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
 
   // Replace the-hole NaN with the-hole pointer.
   __ bind(&convert_hole);
-  __ sw(t3, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
+  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  __ sw(scratch2, MemOperand(dst_elements));
+  __ Addu(dst_elements, dst_elements, kIntSize);
 
   __ bind(&entry);
-  __ Branch(&loop, lt, a3, Operand(t1));
+  __ Branch(&loop, lt, dst_elements, Operand(dst_end));
 
-  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
   // Replace receiver's backing store with newly created and filled FixedArray.
-  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
+  __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      t2,
-                      t5,
+                      array,
+                      scratch,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
@@ -927,11 +971,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
 
   __ bind(&only_change_map);
   // Update receiver's map.
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t5,
+                      target_map,
+                      scratch,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
......
@@ -927,10 +927,10 @@ static void KeyedStoreGenerateGenericHelper(
                                          receiver_map,
                                          t0,
                                          slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
   AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                     FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+  ElementsTransitionGenerator::GenerateSmiToDouble(
+      masm, receiver, key, value, receiver_map, mode, slow);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&fast_double_without_map_check);
@@ -941,10 +941,9 @@ static void KeyedStoreGenerateGenericHelper(
                                          receiver_map,
                                          t0,
                                          slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
   mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
-                                                                   slow);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, receiver_map, mode, slow);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
@@ -957,9 +956,9 @@ static void KeyedStoreGenerateGenericHelper(
                                          receiver_map,
                                          t0,
                                          slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
   mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+  ElementsTransitionGenerator::GenerateDoubleToObject(
+      masm, receiver, key, value, receiver_map, mode, slow);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
 }
......
@@ -5674,14 +5674,30 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
 }
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6,
+                Register reg7,
+                Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
 }
......
@@ -84,7 +84,14 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
                                    Register reg5 = no_reg,
                                    Register reg6 = no_reg);
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3 = no_reg,
+                Register reg4 = no_reg,
+                Register reg5 = no_reg,
+                Register reg6 = no_reg,
+                Register reg7 = no_reg,
+                Register reg8 = no_reg);
 
 // -----------------------------------------------------------------------------
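
All but the first two parameters now default to no_reg, so the widened declaration stays source-compatible while allowing up to eight registers per check. A sketch (register names illustrative):

    bool four  = AreAliased(a0, a1, a2, a3);          // old-style call, still valid
    bool eight = AreAliased(a0, a1, a2, a3, t0, t1);  // wider check, new capability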
......
@@ -551,26 +551,27 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 #define __ ACCESS_MASM(masm)
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- a4    : scratch (elements)
-  // -----------------------------------
+  Register scratch_elements = a4;
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     scratch_elements));
+
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(a2, a4, allocation_memento_found);
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch_elements, allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ RecordWriteField(a2,
                       HeapObject::kMapOffset,
-                      a3,
+                      target_map,
                       t1,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
@@ -580,61 +581,73 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- a4    : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
   Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  Register elements = a4;
+  Register length = a5;
+  Register array = a6;
+  Register array_end = array;
+
+  // target_map parameter can be clobbered.
+  Register scratch1 = target_map;
+  Register scratch2 = t1;
+  Register scratch3 = a7;
+
+  // Verify input registers don't conflict with locals.
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     elements, length, array, scratch2));
+
   Register scratch = t2;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(a2, a4, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ ld(a4, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(a4));
+  __ Branch(&only_change_map, eq, at, Operand(elements));
 
   __ push(ra);
-  __ ld(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
-  // a4: source FixedArray
-  // a5: number of elements (smi-tagged)
+  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedDoubleArray.
-  __ SmiScale(scratch, a5, kDoubleSizeLog2);
+  __ SmiScale(scratch, length, kDoubleSizeLog2);
   __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
-  __ Allocate(scratch, a6, a7, t1, &gc_required, DOUBLE_ALIGNMENT);
-  // a6: destination FixedDoubleArray, not tagged as heap object
+  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+  // array: destination FixedDoubleArray, not tagged as heap object
 
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t1, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sd(a5, MemOperand(a6, FixedDoubleArray::kLengthOffset));
-  __ sd(t1, MemOperand(a6, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
   // Update receiver's map.
+  __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
 
-  __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t1,
+                      target_map,
+                      scratch2,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Daddu(a3, a6, Operand(kHeapObjectTag));
-  __ sd(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
+  __ Daddu(scratch1, array, Operand(kHeapObjectTag));
+  __ sd(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      a3,
-                      t1,
+                      scratch1,
+                      scratch2,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
@@ -642,25 +655,30 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
 
   // Prepare for conversion loop.
-  __ Daddu(a3, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Daddu(a7, a6, Operand(FixedDoubleArray::kHeaderSize));
-  __ SmiScale(a6, a5, kDoubleSizeLog2);
-  __ Daddu(a6, a6, a7);
-  __ li(a4, Operand(kHoleNanLower32));
-  __ li(a5, Operand(kHoleNanUpper32));
-  // a4: kHoleNanLower32
-  // a5: kHoleNanUpper32
-  // a6: end of destination FixedDoubleArray, not tagged
-  // a7: begin of FixedDoubleArray element fields, not tagged
+  __ Daddu(scratch1, elements,
+      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ SmiScale(array_end, length, kDoubleSizeLog2);
+  __ Daddu(array_end, array_end, scratch3);
+
+  // Repurpose registers no longer in use.
+  Register hole_lower = elements;
+  Register hole_upper = length;
+  __ li(hole_lower, Operand(kHoleNanLower32));
+  __ li(hole_upper, Operand(kHoleNanUpper32));
+  // scratch1: begin of source FixedArray element fields, not tagged
+  // hole_lower: kHoleNanLower32
+  // hole_upper: kHoleNanUpper32
+  // array_end: end of destination FixedDoubleArray, not tagged
+  // scratch3: begin of FixedDoubleArray element fields, not tagged
 
   __ Branch(&entry);
 
   __ bind(&only_change_map);
-  __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t1,
+                      target_map,
+                      scratch2,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
@@ -674,17 +692,17 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
 
   // Convert and copy elements.
   __ bind(&loop);
-  __ ld(t1, MemOperand(a3));
-  __ Daddu(a3, a3, kIntSize);
-  // t1: current element
-  __ JumpIfNotSmi(t1, &convert_hole);
-  __ SmiUntag(t1);
+  __ ld(scratch2, MemOperand(scratch1));
+  __ Daddu(scratch1, scratch1, kIntSize);
+  // scratch2: current element
+  __ JumpIfNotSmi(scratch2, &convert_hole);
+  __ SmiUntag(scratch2);
 
   // Normal smi, convert to double and store.
-  __ mtc1(t1, f0);
+  __ mtc1(scratch2, f0);
   __ cvt_d_w(f0, f0);
-  __ sdc1(f0, MemOperand(a7));
-  __ Daddu(a7, a7, kDoubleSize);
+  __ sdc1(f0, MemOperand(scratch3));
+  __ Daddu(scratch3, scratch3, kDoubleSize);
 
   __ Branch(&entry);
@@ -692,16 +710,18 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
 
   __ bind(&convert_hole);
   if (FLAG_debug_code) {
     // Restore a "smi-untagged" heap object.
-    __ Or(t1, t1, Operand(1));
+    __ Or(scratch2, scratch2, Operand(1));
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t1));
+    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
   }
-  __ sw(a4, MemOperand(a7));  // mantissa
-  __ sw(a5, MemOperand(a7, kIntSize));  // exponent
-  __ Daddu(a7, a7, kDoubleSize);
+  // mantissa
+  __ sw(hole_lower, MemOperand(scratch3));
+  // exponent
+  __ sw(hole_upper, MemOperand(scratch3, kIntSize));
+  __ Daddu(scratch3, scratch3, kDoubleSize);
 
   __ bind(&entry);
-  __ Branch(&loop, lt, a7, Operand(a6));
+  __ Branch(&loop, lt, scratch3, Operand(array_end));
 
   __ pop(ra);
   __ bind(&done);
@@ -709,84 +729,109 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- a4    : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
   Label entry, loop, convert_hole, gc_required, only_change_map;
+  Register elements = a4;
+  Register array = a6;
+  Register length = a5;
+  Register scratch = t1;
+
+  // Verify input registers don't conflict with locals.
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     elements, array, length, scratch));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(a2, a4, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ ld(a4, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(a4));
+  __ Branch(&only_change_map, eq, at, Operand(elements));
 
-  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  __ MultiPush(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
 
-  __ ld(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
-  // a4: source FixedArray
-  // a5: number of elements (smi-tagged)
+  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedArray.
-  __ SmiScale(a0, a5, kPointerSizeLog2);
-  __ Daddu(a0, a0, FixedDoubleArray::kHeaderSize);
-  __ Allocate(a0, a6, a7, t1, &gc_required, NO_ALLOCATION_FLAGS);
-  // a6: destination FixedArray, not tagged as heap object
+  // Re-use value and target_map registers, as they have been saved on the
+  // stack.
+  Register array_size = value;
+  Register allocate_scratch = target_map;
+  __ SmiScale(array_size, length, kPointerSizeLog2);
+  __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
+  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+              NO_ALLOCATION_FLAGS);
+  // array: destination FixedArray, not tagged as heap object
 
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
-  __ sd(a5, MemOperand(a6, FixedDoubleArray::kLengthOffset));
-  __ sd(t1, MemOperand(a6, HeapObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
 
   // Prepare for conversion loop.
-  __ Daddu(a4, a4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
-  __ Daddu(a3, a6, Operand(FixedArray::kHeaderSize));
-  __ Daddu(a6, a6, Operand(kHeapObjectTag));
-  __ SmiScale(a5, a5, kPointerSizeLog2);
-  __ Daddu(a5, a3, a5);
-  __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+  Register src_elements = elements;
+  Register dst_elements = target_map;
+  Register dst_end = length;
+  Register heap_number_map = scratch;
+  __ Daddu(src_elements, src_elements,
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
+  __ Daddu(array, array, Operand(kHeapObjectTag));
+  __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
+  __ Daddu(dst_end, dst_elements, dst_end);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
   // Using offsetted addresses.
-  // a3: begin of destination FixedArray element fields, not tagged
-  // a4: begin of source FixedDoubleArray element fields, not tagged, +4
-  // a5: end of destination FixedArray, not tagged
-  // a6: destination FixedArray
-  // a7: the-hole pointer
-  // t1: heap number map
+  // dst_elements: begin of destination FixedArray element fields, not tagged
+  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
+  //     points to the exponent
+  // dst_end: end of destination FixedArray, not tagged
+  // array: destination FixedArray
+  // heap_number_map: heap number map
   __ Branch(&entry);
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
-  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  __ MultiPop(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
 
   __ Branch(fail);
 
   __ bind(&loop);
-  __ lw(a1, MemOperand(a4));
-  __ Daddu(a4, a4, kDoubleSize);
-  // a1: current element's upper 32 bit
-  // a4: address of next element's upper 32 bit
+  Register upper_bits = key;
+  __ lw(upper_bits, MemOperand(src_elements));
+  __ Daddu(src_elements, src_elements, kDoubleSize);
+  // upper_bits: current element's upper 32 bit
+  // src_elements: address of next element's upper 32 bit
   __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
 
   // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(a2, a0, t2, t1, &gc_required);
-  // a2: new heap number
-  __ lw(a0, MemOperand(a4, -12));
-  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
-  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
-  __ mov(a0, a3);
-  __ sd(a2, MemOperand(a3));
-  __ Daddu(a3, a3, kPointerSize);
-  __ RecordWrite(a6,
-                 a0,
-                 a2,
+  Register heap_number = receiver;
+  Register scratch2 = value;
+  Register scratch3 = t2;
+  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
+                        &gc_required);
+  // heap_number: new heap number
+  // Load mantissa of current element, src_elements
+  // point to exponent of next element.
+  __ lw(scratch2, MemOperand(src_elements, -12));
+  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+  __ mov(scratch2, dst_elements);
+  __ sd(heap_number, MemOperand(dst_elements));
+  __ Daddu(dst_elements, dst_elements, kPointerSize);
+  __ RecordWrite(array,
+                 scratch2,
+                 heap_number,
                  kRAHasBeenSaved,
                  kDontSaveFPRegs,
                  EMIT_REMEMBERED_SET,
@@ -795,19 +840,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
 
   // Replace the-hole NaN with the-hole pointer.
   __ bind(&convert_hole);
-  __ sd(a7, MemOperand(a3));
-  __ Daddu(a3, a3, kPointerSize);
+  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  __ sd(scratch2, MemOperand(dst_elements));
+  __ Daddu(dst_elements, dst_elements, kPointerSize);
 
   __ bind(&entry);
-  __ Branch(&loop, lt, a3, Operand(a5));
+  __ Branch(&loop, lt, dst_elements, Operand(dst_end));
 
-  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
   // Replace receiver's backing store with newly created and filled FixedArray.
-  __ sd(a6, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
+  __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      a6,
-                      t1,
+                      array,
+                      scratch,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
@@ -816,11 +862,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
 
   __ bind(&only_change_map);
   // Update receiver's map.
-  __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t1,
+                      target_map,
+                      scratch,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
......
@@ -938,10 +938,10 @@ static void KeyedStoreGenerateGenericHelper(
                                          receiver_map,
                                          a4,
                                          slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
   AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                     FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+  ElementsTransitionGenerator::GenerateSmiToDouble(
+      masm, receiver, key, value, receiver_map, mode, slow);
   __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&fast_double_without_map_check);
@@ -952,10 +952,9 @@ static void KeyedStoreGenerateGenericHelper(
                                          receiver_map,
                                          a4,
                                          slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
   mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
-                                                                   slow);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, receiver_map, mode, slow);
   __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
@@ -968,9 +967,9 @@ static void KeyedStoreGenerateGenericHelper(
                                          receiver_map,
                                          a4,
                                          slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
   mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+  ElementsTransitionGenerator::GenerateDoubleToObject(
+      masm, receiver, key, value, receiver_map, mode, slow);
   __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
 }
......
@@ -5862,14 +5862,30 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
 }
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6,
+                Register reg7,
+                Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
 }
......
@@ -90,7 +90,14 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
                                    Register reg5 = no_reg,
                                    Register reg6 = no_reg);
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3 = no_reg,
+                Register reg4 = no_reg,
+                Register reg5 = no_reg,
+                Register reg6 = no_reg,
+                Register reg7 = no_reg,
+                Register reg8 = no_reg);
 
 // -----------------------------------------------------------------------------
......