Commit ca9a3c71 authored by danno@chromium.org's avatar danno@chromium.org

Remove platform-specific dead code for KeyedStores

R=jkummerow@chromium.org

Review URL: https://chromiumcodereview.appspot.com/22745003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16227 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent bbf9018c
......@@ -1150,21 +1150,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
// Convert and store int passed in register ival to IEEE 754 single precision
// floating point value at memory location (dst + 4 * wordoffset)
// If VFP3 is available use it for conversion.
static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
Register scratch1) {
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
}
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
......@@ -3190,509 +3175,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
DwVfpRegister double_scratch0,
LowDwVfpRegister double_scratch1,
Label* fail) {
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
// range.
__ JumpIfSmi(key, &key_ok);
__ CheckMap(key,
scratch0,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
__ b(ne, fail);
__ TrySmiTag(key, scratch0, fail);
__ bind(&key_ok);
}
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Label slow, check_heap_number, miss_force_generic;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
// r3 mostly holds the elements array or the destination external array.
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the index is in range
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(key, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &miss_force_generic);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r3: external array.
if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
__ UntagAndJumpIfNotSmi(r5, value, &slow);
} else {
__ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
}
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r5: value (integer).
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
// Clamp the value to [0..255].
__ Usat(r5, 8, Operand(r5));
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
__ SmiUntag(r4, key);
StoreIntAsFloat(masm, r3, r4, r5, r7);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ vmov(s2, r5);
__ vcvt_f64_s32(d0, s2);
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
__ vstr(d0, r3, 0);
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
// r3: external array.
__ bind(&check_heap_number);
__ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 1));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 2));
__ vstr(d0, r5, 0);
} else {
// Hoisted load. vldr requires offset to be a multiple of 4 so we can
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ ECMAToInt32(r5, d0, r6, r7, r9, d1);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
}
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(
masm->isolate()->counters()->keyed_load_external_array_slow(),
1, r2, r3);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
// Miss case, call the runtime.
__ bind(&miss_force_generic);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
Register scratch = r4;
Register elements_reg = r3;
Register length_reg = r5;
Register scratch2 = r6;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
// Check that the key is within bounds.
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
if (is_js_array) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis.
__ cmp(key_reg, scratch);
if (is_js_array && IsGrowStoreMode(store_mode)) {
__ b(hs, &grow);
} else {
__ b(hs, &miss_force_generic);
}
// Make sure elements is a fast element array, not 'cow'.
__ CheckMap(elements_reg,
scratch,
Heap::kFixedArrayMapRootIndex,
&miss_force_generic,
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
if (IsFastSmiElementsKind(elements_kind)) {
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(IsFastObjectElementsKind(elements_kind));
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
__ str(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
__ RecordWrite(elements_reg, // Object.
scratch, // Address.
receiver_reg, // Value.
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
// value_reg (r0) is preserved.
// Done.
__ Ret();
__ bind(&miss_force_generic);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
__ bind(&transition_elements_kind);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
if (is_js_array && IsGrowStoreMode(store_mode)) {
// Grow the array by a single element if possible.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime. Flags already set by previous compare.
__ b(ne, &miss_force_generic);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ ldr(length_reg,
FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
__ b(ne, &check_capacity);
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
__ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
}
// Store the element at index zero.
__ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ mov(length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ Ret();
__ bind(&check_capacity);
// Check for cow elements, in general they are not handled by this stub
__ CheckMap(elements_reg,
scratch,
Heap::kFixedCOWArrayMapRootIndex,
&miss_force_generic,
DONT_DO_SMI_CHECK);
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ cmp(length_reg, scratch);
__ b(hs, &slow);
// Grow the array and finish the store.
__ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&slow);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
}
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
bool is_js_array,
KeyedAccessStoreMode store_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : scratch (elements backing store)
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
Register elements_reg = r3;
Register scratch1 = r4;
Register scratch2 = r5;
Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
// Check that the key is within bounds.
if (is_js_array) {
__ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
__ ldr(scratch1,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch1);
if (IsGrowStoreMode(store_mode)) {
__ b(hs, &grow);
} else {
__ b(hs, &miss_force_generic);
}
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg,
scratch1, d0, &transition_elements_kind);
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
__ bind(&miss_force_generic);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
__ bind(&transition_elements_kind);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
if (is_js_array && IsGrowStoreMode(store_mode)) {
// Grow the array by a single element if possible.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime. Flags already set by previous compare.
__ b(ne, &miss_force_generic);
// Transition on values that can't be stored in a FixedDoubleArray.
Label value_is_smi;
__ JumpIfSmi(value_reg, &value_is_smi);
__ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
__ b(ne, &transition_elements_kind);
__ bind(&value_is_smi);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ ldr(length_reg,
FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
__ b(ne, &check_capacity);
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
// Initialize the new FixedDoubleArray.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch1,
Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ str(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
__ mov(scratch1, elements_reg);
__ StoreNumberToDoubleElements(value_reg, key_reg, scratch1,
scratch2, d0, &transition_elements_kind);
__ mov(scratch1, Operand(kHoleNanLower32));
__ mov(scratch2, Operand(kHoleNanUpper32));
for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
int offset = FixedDoubleArray::OffsetOfElementAt(i);
__ str(scratch1, FieldMemOperand(elements_reg, offset));
__ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
}
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ mov(length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ Ret();
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
__ ldr(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
__ cmp(length_reg, scratch1);
__ b(hs, &slow);
// Grow the array and finish the store.
__ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&slow);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
}
}
#undef __
} } // namespace v8::internal
......
......@@ -596,19 +596,9 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_,
store_mode_);
}
break;
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_,
store_mode_);
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
......@@ -618,7 +608,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
UNREACHABLE();
break;
case DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
......
......@@ -194,8 +194,6 @@ DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
"generate keyed store stubs")
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
......
......@@ -3256,520 +3256,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch,
XMMRegister xmm_scratch0,
XMMRegister xmm_scratch1,
Label* fail) {
// Check that key is a smi and if SSE2 is available a heap number
// containing a smi and branch if the check fails.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
Label key_ok;
__ JumpIfSmi(key, &key_ok);
__ cmp(FieldOperand(key, HeapObject::kMapOffset),
Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
__ j(not_equal, fail);
__ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
__ cvttsd2si(scratch, Operand(xmm_scratch0));
__ cvtsi2sd(xmm_scratch1, scratch);
__ ucomisd(xmm_scratch1, xmm_scratch0);
__ j(not_equal, fail);
__ j(parity_even, fail); // NaN.
// Check if the key fits in the smi range.
__ cmp(scratch, 0xc0000000);
__ j(sign, fail);
__ SmiTag(scratch);
__ mov(key, scratch);
__ bind(&key_ok);
} else {
__ JumpIfNotSmi(key, fail);
}
}
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss_force_generic, slow, check_heap_number;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
// Check that the index is in range.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(ecx, FieldOperand(edi, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
__ JumpIfNotSmi(eax, &slow);
} else {
__ JumpIfNotSmi(eax, &check_heap_number);
}
// smi case
__ mov(ebx, eax); // Preserve the value in eax as the return value.
__ SmiUntag(ebx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// edi: base pointer of external storage
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
__ ClampUint8(ebx);
__ SmiUntag(ecx);
__ mov_b(Operand(edi, ecx, times_1, 0), ebx);
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ SmiUntag(ecx);
__ mov_b(Operand(edi, ecx, times_1, 0), ebx);
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(Operand(edi, ecx, times_1, 0), ebx);
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(Operand(edi, ecx, times_2, 0), ebx);
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
// Need to perform int-to-float conversion.
__ push(ebx);
__ fild_s(Operand(esp, 0));
__ pop(ebx);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ fstp_s(Operand(edi, ecx, times_2, 0));
} else { // elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
__ fstp_d(Operand(edi, ecx, times_4, 0));
}
break;
default:
UNREACHABLE();
break;
}
__ ret(0); // Return the original value.
// TODO(danno): handle heap number -> pixel array conversion
if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
__ bind(&check_heap_number);
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->heap_number_map()));
__ j(not_equal, &slow);
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// edi: base pointer of external storage
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ fstp_s(Operand(edi, ecx, times_2, 0));
__ ret(0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ fstp_d(Operand(edi, ecx, times_4, 0));
__ ret(0);
} else {
// Perform float-to-int conversion with truncation (round-to-zero)
// behavior.
// For the moment we make the slow call to the runtime on
// processors that don't support SSE2. The code in IntegerConvert
// (code-stubs-ia32.cc) is roughly what is needed here though the
// conversion failure case does not need to be handled.
if (CpuFeatures::IsSupported(SSE2)) {
if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
CpuFeatures::IsSupported(SSE3)) {
CpuFeatureScope scope(masm, SSE3);
// fisttp stores values as signed integers. To represent the
// entire range of int and unsigned int arrays, store as a
// 64-bit int and discard the high 32 bits.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ sub(esp, Immediate(2 * kPointerSize));
__ fisttp_d(Operand(esp, 0));
// If conversion failed (NaN, infinity, or a number outside
// signed int64 range), the result is 0x8000000000000000, and
// we must handle this case in the runtime.
Label ok;
__ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
__ j(not_equal, &ok);
__ cmp(Operand(esp, 0), Immediate(0));
__ j(not_equal, &ok);
__ add(esp, Immediate(2 * kPointerSize)); // Restore the stack.
__ jmp(&slow);
__ bind(&ok);
__ pop(ebx);
__ add(esp, Immediate(kPointerSize));
__ mov(Operand(edi, ecx, times_2, 0), ebx);
} else {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatureScope scope(masm, SSE2);
__ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
__ cmp(ebx, 0x80000000u);
__ j(equal, &slow);
// ebx: untagged integer value
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
__ ClampUint8(ebx);
// Fall through.
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ SmiUntag(ecx);
__ mov_b(Operand(edi, ecx, times_1, 0), ebx);
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(Operand(edi, ecx, times_1, 0), ebx);
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(Operand(edi, ecx, times_2, 0), ebx);
break;
default:
UNREACHABLE();
break;
}
}
__ ret(0); // Return original value.
}
}
}
// Slow case: call runtime.
__ bind(&slow);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ bind(&miss_force_generic);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss_force_generic, grow, slow, transition_elements_kind;
Label check_capacity, prepare_slow, finish_store, commit_backing_store;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(eax, &transition_elements_kind);
}
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
if (is_js_array) {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
if (IsGrowStoreMode(store_mode)) {
__ j(above_equal, &grow);
} else {
__ j(above_equal, &miss_force_generic);
}
} else {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
__ j(above_equal, &miss_force_generic);
}
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_array_map()));
__ j(not_equal, &miss_force_generic);
__ bind(&finish_store);
if (IsFastSmiElementsKind(elements_kind)) {
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
__ mov(FieldOperand(edi,
ecx,
times_half_pointer_size,
FixedArray::kHeaderSize), eax);
} else {
ASSERT(IsFastObjectElementsKind(elements_kind));
// Do the store and update the write barrier.
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
__ lea(ecx, FieldOperand(edi,
ecx,
times_half_pointer_size,
FixedArray::kHeaderSize));
__ mov(Operand(ecx, 0), eax);
// Make sure to preserve the value in register eax.
__ mov(ebx, eax);
__ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
}
// Done.
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
__ bind(&miss_force_generic);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
// Handle transition to other elements kinds without using the generic stub.
__ bind(&transition_elements_kind);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
if (is_js_array && IsGrowStoreMode(store_mode)) {
// Handle transition requiring the array to grow.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime. Flags are already set by previous
// compare.
__ j(not_equal, &miss_force_generic);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
__ j(not_equal, &check_capacity);
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
// Restore the key, which is known to be the array length.
// eax: value
// ecx: key
// edx: receiver
// edi: elements
// Make sure that the backing store can hold additional elements.
__ mov(FieldOperand(edi, JSObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
__ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
}
// Store the element at index zero.
__ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
// Install the new backing store in the JSArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
__ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ mov(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ ret(0);
__ bind(&check_capacity);
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
__ j(equal, &miss_force_generic);
// eax: value
// ecx: key
// edx: receiver
// edi: elements
// Make sure that the backing store can hold additional elements.
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Grow the array and finish the store.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ jmp(&finish_store);
__ bind(&prepare_slow);
// Restore the key, which is known to be the array length.
__ mov(ecx, Immediate(0));
__ bind(&slow);
TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
}
}
// Generates the ia32 stub for a keyed store into a FAST_DOUBLE_ELEMENTS
// backing store.  Smi and heap-number values are stored unboxed; any other
// value transitions the elements kind.  When |is_js_array| and the store
// mode allows growing, a store exactly one past the current length grows
// the array in place by a single element.
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
    MacroAssembler* masm,
    bool is_js_array,
    KeyedAccessStoreMode store_mode) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label miss_force_generic, transition_elements_kind, grow, slow;
  Label check_capacity, prepare_slow, finish_store, commit_backing_store;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);

  // Get the elements array.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ AssertFastElements(edi);

  if (is_js_array) {
    // Check that the key is within bounds of the JSArray length.
    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
    if (IsGrowStoreMode(store_mode)) {
      // An out-of-bounds store may still grow the array by one element.
      __ j(above_equal, &grow);
    } else {
      __ j(above_equal, &miss_force_generic);
    }
  } else {
    // Check that the key is within bounds of the backing store length.
    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
    __ j(above_equal, &miss_force_generic);
  }

  __ bind(&finish_store);
  // Store the number; values that cannot be represented in a
  // FixedDoubleArray jump to the transition label instead.
  __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
                                 &transition_elements_kind, true);
  __ ret(0);

  // Handle store cache miss, replacing the ic with the generic stub.
  __ bind(&miss_force_generic);
  TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);

  // Handle transition to other elements kinds without using the generic stub.
  __ bind(&transition_elements_kind);
  TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);

  if (is_js_array && IsGrowStoreMode(store_mode)) {
    // Handle transition requiring the array to grow.
    __ bind(&grow);

    // Make sure the array is only growing by a single element, anything else
    // must be handled by the runtime. Flags are already set by previous
    // compare.
    __ j(not_equal, &miss_force_generic);

    // Transition on values that can't be stored in a FixedDoubleArray.
    Label value_is_smi;
    __ JumpIfSmi(eax, &value_is_smi);
    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
           Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
    __ j(not_equal, &transition_elements_kind);
    __ bind(&value_is_smi);

    // Check for the empty array, and preallocate a small backing store if
    // possible.
    __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
    __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
    __ j(not_equal, &check_capacity);

    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
    __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);

    // Restore the key, which is known to be the array length.
    __ mov(ecx, Immediate(0));

    // eax: value
    // ecx: key
    // edx: receiver
    // edi: elements
    // Initialize the new FixedDoubleArray's map and length.
    __ mov(FieldOperand(edi, JSObject::kMapOffset),
           Immediate(masm->isolate()->factory()->fixed_double_array_map()));
    __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
           Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));

    // Store the value being written as element zero of the new store.
    __ StoreNumberToDoubleElements(eax, edi, ecx, ebx, xmm0,
                                   &transition_elements_kind, true);

    // Fill the remaining preallocated slots with the hole NaN bit pattern
    // (written as two 32-bit halves on ia32).
    for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
      int offset = FixedDoubleArray::OffsetOfElementAt(i);
      __ mov(FieldOperand(edi, offset), Immediate(kHoleNanLower32));
      __ mov(FieldOperand(edi, offset + kPointerSize),
             Immediate(kHoleNanUpper32));
    }

    // Install the new backing store in the JSArray.
    __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
    __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    // Increment the length of the array.
    __ add(FieldOperand(edx, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
    // NOTE(review): this reload of the elements pointer right before the
    // return appears redundant — confirm against callers.
    __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
    __ ret(0);

    __ bind(&check_capacity);
    // eax: value
    // ecx: key
    // edx: receiver
    // edi: elements
    // Make sure that the backing store can hold additional elements.
    __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
    __ j(above_equal, &slow);

    // Grow the array and finish the store.
    __ add(FieldOperand(edx, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
    __ jmp(&finish_store);

    __ bind(&prepare_slow);
    // Restore the key, which is known to be the array length.
    __ mov(ecx, Immediate(0));

    __ bind(&slow);
    TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
  }
}
#undef __
} } // namespace v8::internal
......
......@@ -1847,18 +1847,6 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
}
if (!FLAG_compiled_keyed_stores &&
(store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS)) {
// TODO(danno): We'll soon handle MONOMORPHIC ICs that also support
// copying COW arrays and silently ignoring some OOB stores into external
// arrays, but for now use the generic.
TRACE_GENERIC_IC(isolate(), "KeyedIC", "COW/OOB external array");
return strict_mode == kStrictMode
? generic_stub_strict()
: generic_stub();
}
State ic_state = target()->ic_state();
Handle<Map> receiver_map(receiver->map(), isolate());
if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
......@@ -2139,8 +2127,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
bool key_is_smi_like = key->IsSmi() ||
(FLAG_compiled_keyed_stores && !key->ToSmi()->IsFailure());
bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
......
......@@ -1855,9 +1855,8 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
if (FLAG_compiled_keyed_stores &&
(receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements())) {
if (receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements()) {
stub = KeyedStoreFastElementStub(
is_jsarray,
elements_kind,
......@@ -1998,9 +1997,8 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
is_js_array,
store_mode_).GetCode(isolate());
} else {
if (FLAG_compiled_keyed_stores &&
(receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements())) {
if (receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements()) {
cached_stub = KeyedStoreFastElementStub(
is_js_array,
elements_kind,
......
......@@ -984,18 +984,6 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
Handle<Code> CompileStoreElementPolymorphic(MapHandleList* receiver_maps);
static void GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array,
ElementsKind element_kind,
KeyedAccessStoreMode store_mode);
static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
bool is_js_array,
KeyedAccessStoreMode store_mode);
static void GenerateStoreExternalArray(MacroAssembler* masm,
ElementsKind elements_kind);
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
protected:
......
......@@ -152,12 +152,8 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
bool standard_store = FLAG_compiled_keyed_stores ||
(Code::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
STANDARD_STORE);
bool preliminary_checks =
code->is_keyed_store_stub() &&
standard_store &&
code->ic_state() == MONOMORPHIC &&
Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
if (!preliminary_checks) return false;
......@@ -174,10 +170,7 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
bool standard_store = FLAG_compiled_keyed_stores ||
(Code::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
STANDARD_STORE);
return code->is_keyed_store_stub() && standard_store &&
return code->is_keyed_store_stub() &&
code->ic_state() == POLYMORPHIC;
}
return false;
......
......@@ -3037,484 +3037,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
// Converts |key| in place into a smi, or jumps to |fail|.  Accepts either a
// smi or a heap number whose value is exactly representable as a 32-bit
// integer; exactness is verified by truncating to int32 and converting back,
// then comparing against the original double.
static void GenerateSmiKeyCheck(MacroAssembler* masm,
                                Register key,
                                Register scratch,
                                XMMRegister xmm_scratch0,
                                XMMRegister xmm_scratch1,
                                Label* fail) {
  // Check that key is a smi or a heap number containing a smi and branch
  // if the check fails.
  Label key_ok;
  __ JumpIfSmi(key, &key_ok);
  __ CheckMap(key,
              masm->isolate()->factory()->heap_number_map(),
              fail,
              DONT_DO_SMI_CHECK);
  __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
  // Round-trip through int32: any inexact value compares not-equal below.
  __ cvttsd2si(scratch, xmm_scratch0);
  __ cvtlsi2sd(xmm_scratch1, scratch);
  __ ucomisd(xmm_scratch1, xmm_scratch0);
  __ j(not_equal, fail);
  __ j(parity_even, fail);  // NaN (ucomisd sets PF for unordered operands).
  __ Integer32ToSmi(key, scratch);
  __ bind(&key_ok);
}
// Generates the x64 stub that stores into an external (typed) array of the
// given |elements_kind|.  Smis are converted and stored directly; heap
// numbers are converted to the element type in the fast path.  Any other
// value, and conversions this stub cannot perform (e.g. float-to-pixel,
// out-of-range double-to-int), fall through to the runtime.
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
    MacroAssembler* masm,
    ElementsKind elements_kind) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label slow, miss_force_generic;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);

  // Check that the index is in range.
  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(rdi, rcx);  // Untag the index.
  __ cmpq(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
  // Unsigned comparison catches both negative and too-large values.
  __ j(above_equal, &miss_force_generic);

  // Handle both smis and HeapNumbers in the fast path. Go to the
  // runtime for all other kinds of values.
  // rax: value
  // rcx: key (a smi)
  // rdx: receiver (a JSObject)
  // rbx: elements array
  // rdi: untagged key
  Label check_heap_number;
  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
    // Float to pixel conversion is only implemented in the runtime for now.
    __ JumpIfNotSmi(rax, &slow);
  } else {
    __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
  }

  // No more branches to slow case on this path. Key and receiver not needed.
  __ SmiToInteger32(rdx, rax);
  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
  // rbx: base pointer of external storage
  switch (elements_kind) {
    case EXTERNAL_PIXEL_ELEMENTS:
      { // Clamp the value to [0..255].
        Label done;
        __ testl(rdx, Immediate(0xFFFFFF00));
        __ j(zero, &done, Label::kNear);
        // setcc/decb produces 0 for negative values, 255 for values > 255.
        __ setcc(negative, rdx);  // 1 if negative, 0 if positive.
        __ decb(rdx);  // 0 if negative, 255 if positive.
        __ bind(&done);
      }
      __ movb(Operand(rbx, rdi, times_1, 0), rdx);
      break;
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      __ movb(Operand(rbx, rdi, times_1, 0), rdx);
      break;
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
      __ movw(Operand(rbx, rdi, times_2, 0), rdx);
      break;
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
      __ movl(Operand(rbx, rdi, times_4, 0), rdx);
      break;
    case EXTERNAL_FLOAT_ELEMENTS:
      // Need to perform int-to-float conversion.
      __ cvtlsi2ss(xmm0, rdx);
      __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
      break;
    case EXTERNAL_DOUBLE_ELEMENTS:
      // Need to perform int-to-double conversion.
      __ cvtlsi2sd(xmm0, rdx);
      __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
      break;
    case FAST_ELEMENTS:
    case FAST_SMI_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case FAST_HOLEY_ELEMENTS:
    case FAST_HOLEY_SMI_ELEMENTS:
    case FAST_HOLEY_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      break;
  }
  __ ret(0);

  // TODO(danno): handle heap number -> pixel array conversion
  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
    __ bind(&check_heap_number);
    // rax: value
    // rcx: key (a smi)
    // rdx: receiver (a JSObject)
    // rbx: elements array
    // rdi: untagged key
    __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
    __ j(not_equal, &slow);
    // No more branches to slow case on this path.

    // The WebGL specification leaves the behavior of storing NaN and
    // +/-Infinity into integer arrays basically undefined. For more
    // reproducible behavior, convert these to zero.
    __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
    __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
    // rdi: untagged index
    // rbx: base pointer of external storage
    // xmm0: value as a double
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ cvtsd2ss(xmm0, xmm0);
      __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
      __ ret(0);
    } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
      __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
      __ ret(0);
    } else {
      // Perform float-to-int conversion with truncation (round-to-zero)
      // behavior.

      // Fast path: use machine instruction to convert to int64. If that
      // fails (out-of-range), go into the runtime.
      __ cvttsd2siq(r8, xmm0);
      // cvttsd2siq yields 0x8000000000000000 for out-of-range and NaN
      // inputs; detect that sentinel and bail out to the runtime.
      __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
      __ cmpq(r8, kScratchRegister);
      __ j(equal, &slow);

      // r8: value (converted to an untagged integer)
      // rdi: untagged index
      // rbx: base pointer of external storage
      switch (elements_kind) {
        case EXTERNAL_BYTE_ELEMENTS:
        case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
          __ movb(Operand(rbx, rdi, times_1, 0), r8);
          break;
        case EXTERNAL_SHORT_ELEMENTS:
        case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
          __ movw(Operand(rbx, rdi, times_2, 0), r8);
          break;
        case EXTERNAL_INT_ELEMENTS:
        case EXTERNAL_UNSIGNED_INT_ELEMENTS:
          __ movl(Operand(rbx, rdi, times_4, 0), r8);
          break;
        case EXTERNAL_PIXEL_ELEMENTS:
        case EXTERNAL_FLOAT_ELEMENTS:
        case EXTERNAL_DOUBLE_ELEMENTS:
        case FAST_ELEMENTS:
        case FAST_SMI_ELEMENTS:
        case FAST_DOUBLE_ELEMENTS:
        case FAST_HOLEY_ELEMENTS:
        case FAST_HOLEY_SMI_ELEMENTS:
        case FAST_HOLEY_DOUBLE_ELEMENTS:
        case DICTIONARY_ELEMENTS:
        case NON_STRICT_ARGUMENTS_ELEMENTS:
          UNREACHABLE();
          break;
      }
      __ ret(0);
    }
  }

  // Slow case: call runtime.
  __ bind(&slow);

  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);

  // Miss case: call runtime.
  __ bind(&miss_force_generic);

  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
}
// Generates the x64 stub for a keyed store into a fast (smi or object)
// elements backing store.  Copy-on-write arrays and kind mismatches fall
// back to the generic/miss stubs.  When |is_js_array| and the store mode
// allows growing, a store exactly one past the current length grows the
// array in place by a single element.
void KeyedStoreStubCompiler::GenerateStoreFastElement(
    MacroAssembler* masm,
    bool is_js_array,
    ElementsKind elements_kind,
    KeyedAccessStoreMode store_mode) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label miss_force_generic, transition_elements_kind, finish_store, grow;
  Label check_capacity, slow;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);

  if (IsFastSmiElementsKind(elements_kind)) {
    // Only smis may be stored into a smi-elements array; anything else
    // needs an elements-kind transition.
    __ JumpIfNotSmi(rax, &transition_elements_kind);
  }

  // Get the elements array and make sure it is a fast element array, not 'cow'.
  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
  // Check that the key is within bounds.
  if (is_js_array) {
    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
    if (IsGrowStoreMode(store_mode)) {
      __ j(above_equal, &grow);
    } else {
      __ j(above_equal, &miss_force_generic);
    }
  } else {
    __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
    __ j(above_equal, &miss_force_generic);
  }

  // COW arrays have a different map; send those to the generic stub.
  __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &miss_force_generic);

  __ bind(&finish_store);
  if (IsFastSmiElementsKind(elements_kind)) {
    // Smi stores need no write barrier (the value is not a heap pointer).
    __ SmiToInteger32(rcx, rcx);
    __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
            rax);
  } else {
    // Do the store and update the write barrier.
    ASSERT(IsFastObjectElementsKind(elements_kind));
    __ SmiToInteger32(rcx, rcx);
    __ lea(rcx,
           FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
    __ movq(Operand(rcx, 0), rax);
    // Make sure to preserve the value in register rax.
    __ movq(rbx, rax);
    __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
  }

  // Done.
  __ ret(0);

  // Handle store cache miss.
  __ bind(&miss_force_generic);
  TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);

  __ bind(&transition_elements_kind);
  TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);

  if (is_js_array && IsGrowStoreMode(store_mode)) {
    // Grow the array by a single element if possible.
    __ bind(&grow);

    // Make sure the array is only growing by a single element, anything else
    // must be handled by the runtime. Flags are already set by previous
    // compare.
    __ j(not_equal, &miss_force_generic);

    // Check for the empty array, and preallocate a small backing store if
    // possible.
    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
    __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
    __ j(not_equal, &check_capacity);

    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
    __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);

    // rax: value
    // rcx: key
    // rdx: receiver
    // rdi: elements
    // Initialize the new FixedArray's map and length.
    __ Move(FieldOperand(rdi, JSObject::kMapOffset),
            masm->isolate()->factory()->fixed_array_map());
    __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
            Smi::FromInt(JSArray::kPreallocatedArrayElements));

    // Fill the preallocated slots past index zero with the hole.
    __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
      __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
    }

    // Store the element at index zero.
    __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);

    // Install the new backing store in the JSArray.
    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
    __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    // Set the array length to 1 (it was the empty array before this store).
    __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
    __ ret(0);

    __ bind(&check_capacity);
    // Check for cow elements, in general they are not handled by this stub.
    __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &miss_force_generic);

    // rax: value
    // rcx: key
    // rdx: receiver
    // rdi: elements
    // Make sure that the backing store can hold additional elements.
    __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
    __ j(above_equal, &slow);

    // Grow the array and finish the store.
    __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
                      Smi::FromInt(1));
    __ jmp(&finish_store);

    __ bind(&slow);
    TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
  }
}
// Generates the x64 stub for a keyed store into a FAST_DOUBLE_ELEMENTS
// backing store.  Smi and heap-number values are stored unboxed; any other
// value transitions the elements kind.  When |is_js_array| and the store
// mode allows growing, a store exactly one past the current length grows
// the array in place by a single element.
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
    MacroAssembler* masm,
    bool is_js_array,
    KeyedAccessStoreMode store_mode) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label miss_force_generic, transition_elements_kind, finish_store;
  Label grow, slow, check_capacity, restore_key_transition_elements_kind;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);

  // Get the elements array.
  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
  __ AssertFastElements(rdi);

  // Check that the key is within bounds.
  if (is_js_array) {
    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
    if (IsGrowStoreMode(store_mode)) {
      __ j(above_equal, &grow);
    } else {
      __ j(above_equal, &miss_force_generic);
    }
  } else {
    __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
    __ j(above_equal, &miss_force_generic);
  }

  // Handle smi values specially
  __ bind(&finish_store);
  __ SmiToInteger32(rcx, rcx);
  // Values that cannot be stored in a FixedDoubleArray jump out with the key
  // already untagged, so the failure label re-tags it before the miss call.
  __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
                                 &restore_key_transition_elements_kind);
  __ ret(0);

  // Handle store cache miss, replacing the ic with the generic stub.
  __ bind(&miss_force_generic);
  TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);

  __ bind(&restore_key_transition_elements_kind);
  // Restore smi-tagging of rcx.
  __ Integer32ToSmi(rcx, rcx);
  __ bind(&transition_elements_kind);
  TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);

  if (is_js_array && IsGrowStoreMode(store_mode)) {
    // Grow the array by a single element if possible.
    __ bind(&grow);

    // Make sure the array is only growing by a single element, anything else
    // must be handled by the runtime. Flags are already set by previous
    // compare.
    __ j(not_equal, &miss_force_generic);

    // Transition on values that can't be stored in a FixedDoubleArray.
    Label value_is_smi;
    __ JumpIfSmi(rax, &value_is_smi);
    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &transition_elements_kind);
    __ bind(&value_is_smi);

    // Check for the empty array, and preallocate a small backing store if
    // possible.
    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
    __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
    __ j(not_equal, &check_capacity);

    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
    __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);

    // rax: value
    // rcx: key
    // rdx: receiver
    // rdi: elements
    // Initialize the new FixedDoubleArray's map and length.
    __ Move(FieldOperand(rdi, JSObject::kMapOffset),
            masm->isolate()->factory()->fixed_double_array_map());
    __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
            Smi::FromInt(JSArray::kPreallocatedArrayElements));

    // Untag the key and store the value as element zero.
    __ SmiToInteger32(rcx, rcx);
    __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
                                   &restore_key_transition_elements_kind);

    // Fill the remaining preallocated slots with the hole NaN bit pattern.
    __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
    for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
      __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
    }

    // Install the new backing store in the JSArray.
    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
    __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    // Set the array length to 1 (it was the empty array before this store).
    __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
    // NOTE(review): this reload of the elements pointer right before the
    // return appears redundant — confirm against callers.
    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
    __ ret(0);

    __ bind(&check_capacity);
    // rax: value
    // rcx: key
    // rdx: receiver
    // rdi: elements
    // Make sure that the backing store can hold additional elements.
    __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
    __ j(above_equal, &slow);

    // Grow the array and finish the store.
    __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
                      Smi::FromInt(1));
    __ jmp(&finish_store);

    __ bind(&slow);
    TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
  }
}
#undef __
} } // namespace v8::internal
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment