Commit 5ed752de authored by yangguo@chromium.org's avatar yangguo@chromium.org

Porting r9456 to x64 (Optimize KeyedStoreGeneric for Smi arrays).

Review URL: http://codereview.chromium.org/8054043

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9486 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 008f7ab3
...@@ -765,7 +765,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -765,7 +765,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// ecx: key (a smi) // ecx: key (a smi)
// edi: receiver map // edi: receiver map
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable. // Check array bounds. Both the key and the length of FixedArray are smis.
__ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ j(below, &fast_object_with_map_check); __ j(below, &fast_object_with_map_check);
...@@ -787,15 +787,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -787,15 +787,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ j(not_equal, &slow); __ j(not_equal, &slow);
__ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); __ j(above_equal, &slow);
__ CheckMap(ebx, FACTORY->fixed_array_map(), __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
&check_if_double_array, DONT_DO_SMI_CHECK); __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, &check_if_double_array);
// Add 1 to receiver->length, and go to common element store code for Objects. // Add 1 to receiver->length, and go to common element store code for Objects.
__ add(FieldOperand(edx, JSArray::kLengthOffset), __ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1))); Immediate(Smi::FromInt(1)));
__ jmp(&fast_object_without_map_check); __ jmp(&fast_object_without_map_check);
__ bind(&check_if_double_array); __ bind(&check_if_double_array);
__ CheckMap(ebx, FACTORY->fixed_double_array_map(), &slow, DONT_DO_SMI_CHECK); __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
__ j(not_equal, &slow);
// Add 1 to receiver->length, and go to common element store code for doubles. // Add 1 to receiver->length, and go to common element store code for doubles.
__ add(FieldOperand(edx, JSArray::kLengthOffset), __ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1))); Immediate(Smi::FromInt(1)));
...@@ -823,8 +825,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -823,8 +825,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// edx: receiver // edx: receiver
// ebx: FixedArray receiver->elements // ebx: FixedArray receiver->elements
// edi: receiver map // edi: receiver map
__ CheckMap(ebx, FACTORY->fixed_array_map(), __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
&fast_double_with_map_check, DONT_DO_SMI_CHECK); __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, &fast_double_with_map_check);
__ bind(&fast_object_without_map_check); __ bind(&fast_object_without_map_check);
// Smi stores don't require further checks. // Smi stores don't require further checks.
Label non_smi_value; Label non_smi_value;
...@@ -836,6 +839,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -836,6 +839,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&non_smi_value); __ bind(&non_smi_value);
if (FLAG_smi_only_arrays) { if (FLAG_smi_only_arrays) {
// Escape to slow case when writing non-smi into smi-only array. // Escape to slow case when writing non-smi into smi-only array.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ CheckFastObjectElements(edi, &slow, Label::kNear); __ CheckFastObjectElements(edi, &slow, Label::kNear);
} }
...@@ -850,17 +854,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -850,17 +854,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&fast_double_with_map_check); __ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the // Check for fast double array case. If this fails, call through to the
// runtime. // runtime.
__ CheckMap(ebx, FACTORY->fixed_double_array_map(), &slow, DONT_DO_SMI_CHECK); __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
__ j(not_equal, &slow);
__ bind(&fast_double_without_map_check); __ bind(&fast_double_without_map_check);
// If the value is a number, store it as a double in the FastDoubleElements // If the value is a number, store it as a double in the FastDoubleElements
// array. // array.
__ StoreNumberToDoubleElements(eax, __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
ebx,
ecx,
edx,
xmm0,
&slow,
false);
__ ret(0); __ ret(0);
} }
......
...@@ -606,29 +606,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -606,29 +606,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- rdx : receiver // -- rdx : receiver
// -- rsp[0] : return address // -- rsp[0] : return address
// ----------------------------------- // -----------------------------------
Label slow, slow_with_tagged_index, fast, array, extra; Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
Label fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index); __ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver. // Get the map from the receiver.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need // Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks. // to do this because this generic stub does not perform map checks.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset), __ testb(FieldOperand(r9, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded)); Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow_with_tagged_index); __ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi. // Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow_with_tagged_index); __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
__ SmiToInteger32(rcx, rcx); __ SmiToInteger32(rcx, rcx);
__ CmpInstanceType(rbx, JS_ARRAY_TYPE); __ CmpInstanceType(r9, JS_ARRAY_TYPE);
__ j(equal, &array); __ j(equal, &array);
// Check that the object is some kind of JSObject. // Check that the object is some kind of JSObject.
__ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE); __ CmpInstanceType(r9, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow); __ j(below, &slow);
__ CmpInstanceType(rbx, JS_PROXY_TYPE); __ CmpInstanceType(r9, JS_PROXY_TYPE);
__ j(equal, &slow); __ j(equal, &slow);
__ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE); __ CmpInstanceType(r9, JS_FUNCTION_PROXY_TYPE);
__ j(equal, &slow); __ j(equal, &slow);
// Object case: Check key against length in the elements array. // Object case: Check key against length in the elements array.
...@@ -636,15 +638,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -636,15 +638,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rdx: JSObject // rdx: JSObject
// rcx: index // rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable. // Check array bounds.
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &slow);
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx); __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value // rax: value
// rbx: FixedArray // rbx: FixedArray
// rcx: index // rcx: index
__ j(above, &fast); __ j(above, &fast_object_with_map_check);
// Slow case: call runtime. // Slow case: call runtime.
__ bind(&slow); __ bind(&slow);
...@@ -666,9 +665,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -666,9 +665,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx); __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow); __ j(below_equal, &slow);
// Increment index to get new length. // Increment index to get new length.
__ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_extra_double);
__ leal(rdi, Operand(rcx, 1)); __ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi); __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast); __ jmp(&fast_object_without_map_check);
__ bind(&check_extra_double);
// rdi: elements array's map
__ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ j(not_equal, &slow);
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS // Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it // array. Check that the array is in fast mode (and writable); if it
...@@ -678,9 +688,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -678,9 +688,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rdx: receiver (a JSArray) // rdx: receiver (a JSArray)
// rcx: index // rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &slow);
// Check the key against the length in the array, compute the // Check the key against the length in the array, compute the
// address to store into and fall through to fast case. // address to store into and fall through to fast case.
...@@ -688,11 +695,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -688,11 +695,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ j(below_equal, &extra); __ j(below_equal, &extra);
// Fast case: Do the store. // Fast case: Do the store.
__ bind(&fast); __ bind(&fast_object_with_map_check);
// rax: value // rax: value
// rbx: receiver's elements array (a FixedArray) // rbx: receiver's elements array (a FixedArray)
// rcx: index // rcx: index
// rdx: receiver (a JSArray)
__ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &fast_double_with_map_check);
__ bind(&fast_object_without_map_check);
// Smi stores don't require further checks.
Label non_smi_value; Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value); __ JumpIfNotSmi(rax, &non_smi_value);
// It's irrelevant whether array is smi-only or not when writing a smi. // It's irrelevant whether array is smi-only or not when writing a smi.
...@@ -703,17 +715,28 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, ...@@ -703,17 +715,28 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&non_smi_value); __ bind(&non_smi_value);
if (FLAG_smi_only_arrays) { if (FLAG_smi_only_arrays) {
// Writing a non-smi, check whether array allows non-smi elements. // Writing a non-smi, check whether array allows non-smi elements.
__ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset)); // r9: receiver's map
__ CheckFastObjectElements(rdi, &slow, Label::kNear); __ CheckFastObjectElements(r9, &slow, Label::kNear);
} }
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ movq(rdx, rax);
__ lea(rcx, __ lea(rcx,
FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize)); FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize));
__ movq(Operand(rcx, 0), rax);
__ movq(rdx, rax);
__ RecordWrite( __ RecordWrite(
rbx, rcx, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); rbx, rcx, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0); __ ret(0);
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
// rdi: elements array's map
__ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ j(not_equal, &slow);
__ bind(&fast_double_without_map_check);
// If the value is a number, store it as a double in the FastDoubleElements
// array.
__ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, &slow);
__ ret(0);
} }
......
...@@ -2680,6 +2680,59 @@ void MacroAssembler::CheckFastSmiOnlyElements(Register map, ...@@ -2680,6 +2680,59 @@ void MacroAssembler::CheckFastSmiOnlyElements(Register map,
} }
// Stores |maybe_number| into the FastDoubleElements array |elements| at
// index |key|. Per the header declaration, |key| must not be smi-tagged.
// Smis are converted to doubles before storing; heap numbers are stored as
// their double value with NaNs canonicalized. Jumps to |fail| if
// |maybe_number| is neither a smi nor a heap number.
// Clobbers kScratchRegister and |xmm_scratch|.
void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register key,
    XMMRegister xmm_scratch,
    Label* fail) {
  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;

  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  // Not a smi: it must be a heap number, otherwise bail out to |fail|.
  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, canonicalize NaN.
  // Compare the upper 32 bits of the value against the smallest upper word
  // that can belong to a NaN or Infinity; values below it are ordinary
  // doubles and take the fast path.
  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
  cmpl(FieldOperand(maybe_number, offset),
       Immediate(kNaNOrInfinityLowerBoundUpper32));
  j(greater_equal, &maybe_nan, Label::kNear);

  bind(&not_nan);
  // Ordinary double (or Infinity): load it and store it unchanged.
  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
  bind(&have_double_value);
  movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
        xmm_scratch);
  jmp(&done);

  bind(&maybe_nan);
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
  // it's an Infinity, and the non-NaN code path applies.
  j(greater, &is_nan, Label::kNear);
  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
  j(zero, &not_nan);
  bind(&is_nan);
  // Convert all NaNs to the same canonical NaN value when they are stored in
  // the double array.
  Set(kScratchRegister, BitCast<uint64_t>(
      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
  movq(xmm_scratch, kScratchRegister);
  jmp(&have_double_value, Label::kNear);

  bind(&smi_value);
  // Value is a smi. convert to a double and store.
  // Preserve original value.
  SmiToInteger32(kScratchRegister, maybe_number);
  cvtlsi2sd(xmm_scratch, kScratchRegister);
  movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
        xmm_scratch);
  bind(&done);
}
void MacroAssembler::CheckMap(Register obj, void MacroAssembler::CheckMap(Register obj,
Handle<Map> map, Handle<Map> map,
Label* fail, Label* fail,
......
...@@ -859,6 +859,16 @@ class MacroAssembler: public Assembler { ...@@ -859,6 +859,16 @@ class MacroAssembler: public Assembler {
Label* fail, Label* fail,
Label::Distance distance = Label::kFar); Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
// Note that key must not be smi-tagged.
void StoreNumberToDoubleElements(Register maybe_number,
Register elements,
Register key,
XMMRegister xmm_scratch,
Label* fail);
// Check if the map of an object is equal to a specified map and // Check if the map of an object is equal to a specified map and
// branch to label if not. Skip the smi check if not required // branch to label if not. Skip the smi check if not required
// (object is known to be a heap object) // (object is known to be a heap object)
......
...@@ -3758,8 +3758,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( ...@@ -3758,8 +3758,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- rdx : receiver // -- rdx : receiver
// -- rsp[0] : return address // -- rsp[0] : return address
// ----------------------------------- // -----------------------------------
Label miss_force_generic, smi_value, is_nan, maybe_nan; Label miss_force_generic;
Label have_double_value, not_nan;
// This stub is meant to be tail-jumped to, the receiver must already // This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi. // have been verified by the caller to not be a smi.
...@@ -3780,50 +3779,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( ...@@ -3780,50 +3779,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ j(above_equal, &miss_force_generic); __ j(above_equal, &miss_force_generic);
// Handle smi values specially // Handle smi values specially
__ JumpIfSmi(rax, &smi_value, Label::kNear);
__ CheckMap(rax,
masm->isolate()->factory()->heap_number_map(),
&miss_force_generic,
DONT_DO_SMI_CHECK);
// Double value, canonicalize NaN.
uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
__ cmpl(FieldOperand(rax, offset),
Immediate(kNaNOrInfinityLowerBoundUpper32));
__ j(greater_equal, &maybe_nan, Label::kNear);
__ bind(&not_nan);
__ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ bind(&have_double_value);
__ SmiToInteger32(rcx, rcx);
__ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
xmm0);
__ ret(0);
__ bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
__ j(greater, &is_nan, Label::kNear);
__ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
__ j(zero, &not_nan);
__ bind(&is_nan);
// Convert all NaNs to the same canonical NaN value when they are stored in
// the double array.
__ Set(kScratchRegister, BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
__ movq(xmm0, kScratchRegister);
__ jmp(&have_double_value, Label::kNear);
__ bind(&smi_value);
// Value is a smi. convert to a double and store.
// Preserve original value.
__ SmiToInteger32(rdx, rax);
__ push(rdx);
__ fild_s(Operand(rsp, 0));
__ pop(rdx);
__ SmiToInteger32(rcx, rcx); __ SmiToInteger32(rcx, rcx);
__ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize)); __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0, &miss_force_generic);
__ ret(0); __ ret(0);
// Handle store cache miss, replacing the ic with the generic stub. // Handle store cache miss, replacing the ic with the generic stub.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment