Commit 6b4d2976 authored by ager@chromium.org

Revert r6376 and r6373, which changed external array support. The ARM
version did not work.

TBR=kbr@chromium.org
Review URL: http://codereview.chromium.org/6295013

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6381 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 49144ee6
@@ -1337,6 +1337,311 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
__ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
__ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
__ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
__ mov(loword, Operand(0, RelocInfo::NONE));
__ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
}
// If the least significant bit of the biased exponent was not 1, it was
// corrupted by the most significant bit of the mantissa, so we fix that here.
if (!(biased_exponent & 1)) {
__ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
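// For reference, a standalone C++ model of the conversion emitted above
// (a hypothetical helper for illustration only, not part of this patch).
// The constants mirror V8's HeapNumber layout; the callers below only pass
// leading_zeroes of 0 or 1, so the hi-word shift is always positive here.
#include <cassert>
#include <cstdint>

static void UInt2DoubleModel(uint32_t value, int leading_zeroes,
                             uint32_t* hiword, uint32_t* loword) {
  const int kExponentBias = 1023;            // HeapNumber::kExponentBias
  const int kExponentShift = 20;             // exponent LSB slot in hi word
  const int kMantissaBitsInTopWord = 20;
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const int biased_exponent = kExponentBias + meaningful_bits;
  const int hi_shift = meaningful_bits - kMantissaBitsInTopWord;
  assert(value != 0 && hi_shift > 0);
  *loword = value << (32 - hi_shift);
  uint32_t hi = static_cast<uint32_t>(biased_exponent) << kExponentShift;
  // The top set bit of value lands exactly on the exponent's LSB...
  hi |= value >> hi_shift;
  // ...so clear it again when the true biased exponent has a 0 there,
  // which is what the bic instruction above does.
  if (!(biased_exponent & 1)) hi &= ~(1u << kExponentShift);
  *hiword = hi;
}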
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label slow, failed_allocation;
Register key = r0;
Register receiver = r1;
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(ip, Operand(key, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
__ b(lo, &slow);
// r3: elements array
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage
// We do not untag the smi key; instead we work with it
// as if it were premultiplied by 2.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
switch (array_type) {
case kExternalByteArray:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalUnsignedByteArray:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalShortArray:
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalUnsignedShortArray:
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case kExternalFloatArray:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// r2: value
// For the floating-point array type:
// s0: value (if VFP3 is supported)
// r2: value (if VFP3 is not supported)
if (array_type == kExternalIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
__ cmp(value, Operand(0xC0000000));
__ b(mi, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't touch r0 or r1 as they are needed if allocation
// fails.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
} else {
WriteInt32ToHeapNumberStub stub(value, r0, r3);
__ TailCallStub(&stub);
}
} else if (array_type == kExternalUnsignedIntArray) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ vmov(s0, value);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ tst(value, Operand(0x80000000));
__ b(ne, &box_int_0);
__ tst(value, Operand(0x40000000));
__ b(ne, &box_int_1);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
Register hiword = value; // r2.
Register loword = r3;
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm, hiword, loword, r4, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm, hiword, loword, r4, 1);
__ bind(&done);
// Integer was converted to double in registers hiword:loword.
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ mov(r0, r4);
__ Ret();
}
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
// r3: heap number for result
// Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
// the slow case from here.
__ and_(r0, value, Operand(kBinary32MantissaMask));
// Extract exponent to r1. OK to clobber r1 now as there are no jumps to
// the slow case from here.
__ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
__ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r1, Operand(0x00, RelocInfo::NONE));
__ b(eq, &exponent_rebiased);
__ teq(r1, Operand(0xff));
__ mov(r1, Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(r1,
r1,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(r2, value, Operand(kBinary32SignMask));
value = no_reg;
__ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
__ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
__ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ mov(r0, r3);
__ Ret();
}
} else {
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
}
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
GenerateRuntimeGetProperty(masm);
}
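// A small self-checking C++ model (hypothetical helpers written for
// illustration, not part of this patch) of the signed smi range test
// above: a 32-bit value is smi-representable iff its top two bits are
// equal, so it survives the 1-bit tag shift. The stub tests this with a
// single compare against 0xC0000000 and a 'mi' branch.
#include <cassert>
#include <cstdint>

static bool FitsInSmi(uint32_t value) {
  // Top two bits equal: 00 (small non-negative) or 11 (small negative).
  return (value >> 30) == 0u || (value >> 30) == 3u;
}

static bool StubWouldBox(uint32_t value) {
  // value - 0xC0000000 has its sign bit set (ARM condition 'mi') exactly
  // when the top two bits of value differ.
  return ((value - 0xC0000000u) & 0x80000000u) != 0u;
}

int main() {
  const uint32_t samples[] = {0u, 1u, 0x3FFFFFFFu, 0x40000000u, 0x7FFFFFFFu,
                              0x80000000u, 0xBFFFFFFFu, 0xC0000000u,
                              0xFFFFFFFFu};
  for (uint32_t u : samples) assert(StubWouldBox(u) == !FitsInSmi(u));
  return 0;
}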
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
@@ -1533,6 +1838,384 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
// Convert the int passed in register ival to an IEEE 754 single-precision
// floating-point value and store it at memory location (dst + 4 * wordoffset).
// If VFP3 is available, use it for the conversion.
static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
Register fval,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
} else {
Label not_special, done;
// Move the sign bit from the source to the destination. This works because
// the sign bit of a binary32 value has the same position and polarity as
// the 2's complement sign bit of a 32-bit integer.
ASSERT(kBinary32SignMask == 0x80000000u);
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
__ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(ival, Operand(1));
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased).
static const uint32_t exponent_word_for_1 =
kBinary32ExponentBias << kBinary32ExponentShift;
__ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
__ b(&done);
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
__ CountLeadingZeros(zeros, ival, scratch1);
// Compute exponent and or it into the exponent register.
__ rsb(scratch1,
zeros,
Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
__ orr(fval,
fval,
Operand(scratch1, LSL, kBinary32ExponentShift));
// Shift up the source chopping the top bit off.
__ add(zeros, zeros, Operand(1));
// This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
__ mov(ival, Operand(ival, LSL, zeros));
// Or in the top bits of the mantissa (the top 23 bits of the shifted value).
__ orr(fval,
fval,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
__ str(fval, MemOperand(dst, wordoffset, LSL, 2));
}
}
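// A standalone C++ rendering (hypothetical, for illustration only) of the
// non-VFP path above: build an IEEE 754 binary32 bit pattern from a 32-bit
// int by hand. Like the stub, it truncates low mantissa bits of large
// magnitudes rather than rounding to nearest. __builtin_clz is the
// GCC/Clang counterpart of the CountLeadingZeros macro instruction.
#include <cstdint>

static uint32_t IntToFloatBits(int32_t ival) {
  uint32_t fval = static_cast<uint32_t>(ival) & 0x80000000u;   // sign bit
  uint32_t mag = fval ? 0u - static_cast<uint32_t>(ival)
                      : static_cast<uint32_t>(ival);           // |ival|
  if (mag == 0) return fval;                  // zero
  if (mag == 1) return fval | (127u << 23);   // +/-1.0 (unbiased exponent 0)
  int zeros = __builtin_clz(mag);
  uint32_t exponent = static_cast<uint32_t>((31 - zeros) + 127);
  fval |= exponent << 23;
  mag <<= zeros + 1;            // shift up, chopping off the implicit 1
  fval |= mag >> (32 - 23);     // top 23 bits become the mantissa
  return fval;
}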
static bool IsElementTypeSigned(ExternalArrayType array_type) {
switch (array_type) {
case kExternalByteArray:
case kExternalShortArray:
case kExternalIntArray:
return true;
case kExternalUnsignedByteArray:
case kExternalUnsignedShortArray:
case kExternalUnsignedIntArray:
return false;
default:
UNREACHABLE();
return false;
}
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Label slow, check_heap_number;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
// r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow);
// Check that the object is a JS object. Load map into r3.
__ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
__ b(le, &slow);
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Check that the elements array is the appropriate type of ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r4, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(r4, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
__ BranchOnNotSmi(value, &check_heap_number);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
case kExternalFloatArray:
// Perform int-to-float conversion and store to memory.
StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
break;
default:
UNREACHABLE();
break;
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
// r3: external array.
// r4: index (integer).
__ bind(&check_heap_number);
__ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (array_type == kExternalFloatArray) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(r4, LSL, 2));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else {
// Need to perform float-to-int conversion.
// Test for NaN or infinity (both give zero).
__ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
// Hoisted load. vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// NaNs and Infinities have all-one exponents so they sign extend to -1.
__ cmp(r6, Operand(-1));
__ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
// Not infinity or NaN: simply convert to int.
if (IsElementTypeSigned(array_type)) {
__ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
} else {
__ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
}
__ vmov(r5, s0, ne);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
} else {
// VFP3 is not available, do manual conversions.
__ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
if (array_type == kExternalFloatArray) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaInLoWordShift =
kBitsPerInt - kMantissaInHiWordShift;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
__ teq(r9, Operand(r7));
__ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ add(r9,
r9,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
__ cmp(r9, Operand(kBinary32MaxExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
__ cmp(r9, Operand(kBinary32MinExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
__ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
__ bind(&done);
__ str(r5, MemOperand(r3, r4, LSL, 2));
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
__ bind(&nan_or_infinity_or_zero);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r9, r9, r7);
__ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
} else {
bool is_signed_type = IsElementTypeSigned(array_type);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
__ teq(r9, Operand(r7));
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If the exponent is negative, the result is 0.
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
__ b(mi, &done);
// If the exponent is too big, the result is the minimal value.
__ cmp(r9, Operand(meaningfull_bits - 1));
__ mov(r5, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
__ rsb(r9, r9, Operand(0, RelocInfo::NONE));
__ mov(r5, Operand(r5, LSL, r9));
__ rsb(r9, r9, Operand(meaningfull_bits));
__ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
__ teq(r7, Operand(0, RelocInfo::NONE));
__ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
}
// Slow case: call runtime.
__ bind(&slow);
// Entry registers are intact.
// r0: value
// r1: key
// r2: receiver
GenerateRuntimeSetProperty(masm);
}
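// A rough C++ model (an assumption for illustration, not part of the
// patch) of the VFP store path above for integer external arrays: NaN and
// +/-Infinity become 0, as the WebGL-motivated comment requires, and
// finite doubles are converted with round-to-zero semantics before the
// element-sized strb/strh/str truncates them further.
#include <cmath>
#include <cstdint>

static int32_t DoubleToInt32ForStore(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  // vcvt_s32_f64 with RoundToZero saturates out-of-range inputs.
  if (d >= 2147483647.0) return INT32_MAX;
  if (d <= -2147483648.0) return INT32_MIN;
  return static_cast<int32_t>(d);  // truncation toward zero
}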
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
...
@@ -902,111 +902,6 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
}
// Convert the int passed in register ival to an IEEE 754 single-precision
// floating-point value and store it at memory location (dst + 4 * wordoffset).
// If VFP3 is available, use it for the conversion.
static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
Register fval,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
} else {
Label not_special, done;
// Move the sign bit from the source to the destination. This works because
// the sign bit of a binary32 value has the same position and polarity as
// the 2's complement sign bit of a 32-bit integer.
ASSERT(kBinary32SignMask == 0x80000000u);
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
__ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(ival, Operand(1));
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased).
static const uint32_t exponent_word_for_1 =
kBinary32ExponentBias << kBinary32ExponentShift;
__ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
__ b(&done);
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
__ CountLeadingZeros(zeros, ival, scratch1);
// Compute exponent and or it into the exponent register.
__ rsb(scratch1,
zeros,
Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
__ orr(fval,
fval,
Operand(scratch1, LSL, kBinary32ExponentShift));
// Shift up the source chopping the top bit off.
__ add(zeros, zeros, Operand(1));
// This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
__ mov(ival, Operand(ival, LSL, zeros));
// Or in the top bits of the mantissa (the top 23 bits of the shifted value).
__ orr(fval,
fval,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
__ str(fval, MemOperand(dst, wordoffset, LSL, 2));
}
}
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
__ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
__ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
__ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
__ mov(loword, Operand(0, RelocInfo::NONE));
__ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
}
// If the least significant bit of the biased exponent was not 1, it was
// corrupted by the most significant bit of the mantissa, so we fix that here.
if (!(biased_exponent & 1)) {
__ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
#undef __
#define __ ACCESS_MASM(masm())
@@ -3329,603 +3224,6 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
static bool IsElementTypeSigned(ExternalArrayType array_type) {
switch (array_type) {
case kExternalByteArray:
case kExternalShortArray:
case kExternalIntArray:
return true;
case kExternalUnsignedByteArray:
case kExternalUnsignedShortArray:
case kExternalUnsignedIntArray:
return false;
default:
UNREACHABLE();
return false;
}
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label slow, failed_allocation;
Register key = r0;
Register receiver = r1;
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(ip, Operand(key, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
__ b(lo, &slow);
// r3: elements array
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage
// We do not untag the smi key; instead we work with it
// as if it were premultiplied by 2.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
switch (array_type) {
case kExternalByteArray:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalUnsignedByteArray:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalShortArray:
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalUnsignedShortArray:
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case kExternalFloatArray:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// r2: value
// For the floating-point array type:
// s0: value (if VFP3 is supported)
// r2: value (if VFP3 is not supported)
if (array_type == kExternalIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
__ cmp(value, Operand(0xC0000000));
__ b(mi, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't touch r0 or r1 as they are needed if allocation
// fails.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
} else {
WriteInt32ToHeapNumberStub stub(value, r0, r3);
__ TailCallStub(&stub);
}
} else if (array_type == kExternalUnsignedIntArray) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ vmov(s0, value);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ tst(value, Operand(0x80000000));
__ b(ne, &box_int_0);
__ tst(value, Operand(0x40000000));
__ b(ne, &box_int_1);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
Register hiword = value; // r2.
Register loword = r3;
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm(), hiword, loword, r4, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm(), hiword, loword, r4, 1);
__ bind(&done);
// Integer was converted to double in registers hiword:loword.
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ mov(r0, r4);
__ Ret();
}
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
// r3: heap number for result
// Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
// the slow case from here.
__ and_(r0, value, Operand(kBinary32MantissaMask));
// Extract exponent to r1. OK to clobber r1 now as there are no jumps to
// the slow case from here.
__ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
__ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r1, Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(r1, Operand(0xff));
__ mov(r1, Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(r1,
r1,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(r2, value, Operand(kBinary32SignMask));
value = no_reg;
__ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
__ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
__ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ mov(r0, r3);
__ Ret();
}
} else {
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
}
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
__ Push(r1, r0);
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
return GetCode(flags);
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Label slow, check_heap_number;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
// r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow);
// Check that the object is a JS object. Load map into r3.
__ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
__ b(le, &slow);
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Check that the elements array is the appropriate type of ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r4, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(r4, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
__ BranchOnNotSmi(value, &check_heap_number);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
case kExternalFloatArray:
// Perform int-to-float conversion and store to memory.
StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
break;
default:
UNREACHABLE();
break;
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
// r3: external array.
// r4: index (integer).
__ bind(&check_heap_number);
__ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (array_type == kExternalFloatArray) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(r4, LSL, 2));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else {
// Need to perform float-to-int conversion.
// Test for NaN or infinity (both give zero).
__ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
// Hoisted load. vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// NaNs and Infinities have all-one exponents so they sign extend to -1.
__ cmp(r6, Operand(-1));
__ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
__ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
} else {
__ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
}
__ vmov(r5, s0, ne);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
} else {
// VFP3 is not available, do manual conversions.
__ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
if (array_type == kExternalFloatArray) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaInLoWordShift =
kBitsPerInt - kMantissaInHiWordShift;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
__ teq(r9, Operand(r7));
__ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ add(r9,
r9,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
__ cmp(r9, Operand(kBinary32MaxExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
__ cmp(r9, Operand(kBinary32MinExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
__ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
__ bind(&done);
__ str(r5, MemOperand(r3, r4, LSL, 2));
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
__ bind(&nan_or_infinity_or_zero);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r9, r9, r7);
__ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
} else {
bool is_signed_type = IsElementTypeSigned(array_type);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
__ teq(r9, Operand(r7));
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If the exponent is negative, the result is 0.
__ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
__ b(mi, &done);
// If the exponent is too big, the result is the minimal value.
__ cmp(r9, Operand(meaningfull_bits - 1));
__ mov(r5, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
__ rsb(r9, r9, Operand(0, RelocInfo::NONE));
__ mov(r5, Operand(r5, LSL, r9));
__ rsb(r9, r9, Operand(meaningfull_bits));
__ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
__ teq(r7, Operand(0, RelocInfo::NONE));
__ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
}
// Slow case: call runtime.
__ bind(&slow);
// Entry registers are intact.
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
return GetCode(flags);
}
#undef __
} }  // namespace v8::internal
...
@@ -1282,6 +1282,44 @@ static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
}
static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
}
static void Generate_KeyedLoadIC_ExternalUnsignedByteArray(
MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
}
static void Generate_KeyedLoadIC_ExternalShortArray(MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalShortArray);
}
static void Generate_KeyedLoadIC_ExternalUnsignedShortArray(
MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
}
static void Generate_KeyedLoadIC_ExternalIntArray(MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalIntArray);
}
static void Generate_KeyedLoadIC_ExternalUnsignedIntArray(
MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
}
static void Generate_KeyedLoadIC_ExternalFloatArray(MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalFloatArray);
}
static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
@@ -1326,6 +1364,44 @@ static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
}
static void Generate_KeyedStoreIC_ExternalByteArray(MacroAssembler* masm) {
KeyedStoreIC::GenerateExternalArray(masm, kExternalByteArray);
}
static void Generate_KeyedStoreIC_ExternalUnsignedByteArray(
MacroAssembler* masm) {
KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
}
static void Generate_KeyedStoreIC_ExternalShortArray(MacroAssembler* masm) {
KeyedStoreIC::GenerateExternalArray(masm, kExternalShortArray);
}
static void Generate_KeyedStoreIC_ExternalUnsignedShortArray(
MacroAssembler* masm) {
KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
}
static void Generate_KeyedStoreIC_ExternalIntArray(MacroAssembler* masm) {
KeyedStoreIC::GenerateExternalArray(masm, kExternalIntArray);
}
static void Generate_KeyedStoreIC_ExternalUnsignedIntArray(
MacroAssembler* masm) {
KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
}
static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
KeyedStoreIC::GenerateExternalArray(masm, kExternalFloatArray);
}
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
KeyedStoreIC::GenerateMiss(masm);
}
...
@@ -93,6 +93,13 @@ enum BuiltinExtraArguments {
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalUnsignedShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalUnsignedIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalFloatArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
@@ -103,6 +110,13 @@ enum BuiltinExtraArguments {
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_ExternalByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_ExternalUnsignedByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_ExternalShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_ExternalUnsignedShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_ExternalIntArray, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_ExternalUnsignedIntArray, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_ExternalFloatArray, KEYED_STORE_IC, MEGAMORPHIC) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED) \
...
@@ -203,9 +203,7 @@ namespace internal {
V(zero_symbol, "0") \
V(global_eval_symbol, "GlobalEval") \
V(identity_hash_symbol, "v8::IdentityHash") \
V(closure_symbol, "(closure)") \
V(KeyedLoadExternalArray_symbol, "KeyedLoadExternalArray") \
V(KeyedStoreExternalArray_symbol, "KeyedStoreExternalArray")
// Forward declarations.
...
@@ -718,6 +718,160 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, failed_allocation;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
// Get the map of the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
__ CmpInstanceType(ecx, JS_OBJECT_TYPE);
__ j(not_equal, &slow, not_taken);
// Check that the elements array is the appropriate type of
// ExternalArray.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
Handle<Map> map(Heap::MapForExternalArrayType(array_type));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(map));
__ j(not_equal, &slow, not_taken);
// eax: key, known to be a smi.
// edx: receiver, known to be a JSObject.
// ebx: elements object, known to be an external array.
// Check that the index is in range.
__ mov(ecx, eax);
__ SmiUntag(ecx); // Untag the index.
__ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalShortArray:
__ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(ecx, Operand(ebx, ecx, times_4, 0));
break;
case kExternalFloatArray:
__ fld_s(Operand(ebx, ecx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// ecx: value
// For floating-point array type:
// FP(0): value
if (array_type == kExternalIntArray ||
array_type == kExternalUnsignedIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
__ cmp(ecx, 0xC0000000);
__ j(sign, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
__ test(ecx, Immediate(0xC0000000));
__ j(not_zero, &box_int);
}
__ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
if (array_type == kExternalIntArray) {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
__ push(Immediate(0));
__ push(ecx);
__ fild_d(Operand(esp, 0));
__ pop(ecx);
__ pop(ecx);
}
// FP(0): value
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
__ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
}
// If we fail allocation of the HeapNumber, we still have a value on
// top of the FPU stack. Remove it.
__ bind(&failed_allocation);
__ ffree();
__ fincstp();
// Fall through to slow case.
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
}
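// The x87 fild instruction only loads signed integers, which is why the
// stub above pushes a zero high word and uses fild_d for the unsigned int
// case. A one-line C++ equivalent of that zero-extension trick (an
// illustrative helper, not part of the patch):
#include <cstdint>

static double UInt32ToDouble(uint32_t value) {
  // Widening to a signed 64-bit integer preserves the unsigned value,
  // exactly like loading the two pushed words {0, value} with fild_d.
  return static_cast<double>(static_cast<int64_t>(value));
}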
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -877,6 +1031,194 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, check_heap_number;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
// Get the instance type from the map of the receiver.
__ CmpInstanceType(edi, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// eax: value
// edx: receiver, a JSObject
// ecx: key, a smi
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
&slow, true);
// Check that the index is in range.
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ test(eax, Immediate(kSmiTagMask));
__ j(not_equal, &check_heap_number);
// Smi case.
__ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
__ SmiUntag(ecx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// edi: base pointer of external storage; ecx: untagged value.
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
__ fstp_s(Operand(edi, ebx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
__ ret(0); // Return the original value.
__ bind(&check_heap_number);
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(not_equal, &slow);
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
// edi: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
__ fstp_s(Operand(edi, ebx, times_4, 0));
__ ret(0);
} else {
// Need to perform float-to-int conversion.
// Test the top of the FP stack for NaN.
Label is_nan;
__ fucomi(0);
__ j(parity_even, &is_nan);
if (array_type != kExternalUnsignedIntArray) {
__ push(ecx); // Make room on stack
__ fistp_s(Operand(esp, 0));
__ pop(ecx);
} else {
// fistp stores values as signed integers.
// To represent the entire range, we need to store as a 64-bit
// int and discard the high 32 bits.
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ fistp_d(Operand(esp, 0));
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
}
// ecx: untagged integer value
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
// We also need to explicitly check for +/-Infinity. These are
// converted to MIN_INT, but we need to be careful not to
// confuse with legal uses of MIN_INT.
Label not_infinity;
// This test detects both NaN and Infinity, but we have already
// handled NaN with the FPU check above, so a match here means
// Infinity.
__ mov_w(edx, FieldOperand(eax, HeapNumber::kValueOffset + 6));
__ and_(edx, 0x7FF0);
__ cmp(edx, 0x7FF0);
__ j(not_equal, &not_infinity);
__ mov(ecx, 0);
__ bind(&not_infinity);
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
}
default:
UNREACHABLE();
break;
}
__ ret(0); // Return original value.
__ bind(&is_nan);
__ ffree();
__ fincstp();
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), 0);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ Set(ecx, Immediate(0));
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(edi, ebx, times_4, 0), Immediate(0));
break;
default:
UNREACHABLE();
break;
}
__ ret(0); // Return the original value.
}
// Slow case: call runtime.
__ bind(&slow);
GenerateRuntimeSetProperty(masm);
}
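The 0x7FF0 mask above isolates the 11 exponent bits from the top 16 bits of the double (loaded from HeapNumber::kValueOffset + 6, the high half on little-endian ia32). An all-ones exponent means NaN or +/-Infinity; since NaN was already filtered by the fucomi check, a match here can only be Infinity. A self-contained C++ sketch of the same bit test, assuming a little-endian double layout:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// True when the 11-bit IEEE 754 exponent is all ones (NaN or +/-Infinity).
// Mirrors: and edx, 0x7FF0; cmp edx, 0x7FF0 on the high 16 bits.
bool HasMaxExponent(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));                      // assumes little-endian
  uint16_t high16 = static_cast<uint16_t>(bits >> 48);  // like the "+ 6" load
  return (high16 & 0x7FF0) == 0x7FF0;
}

int main() {
  printf("%d %d %d\n",
         HasMaxExponent(1.0),         // 0
         HasMaxExponent(INFINITY),    // 1
         HasMaxExponent(-INFINITY));  // 1
  return 0;
}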
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
...
...@@ -3306,395 +3306,6 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, failed_allocation;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
// Get the map of the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
__ CmpInstanceType(ecx, JS_OBJECT_TYPE);
__ j(not_equal, &slow, not_taken);
// Check that the elements array is the appropriate type of
// ExternalArray.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
Handle<Map> map(Heap::MapForExternalArrayType(array_type));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(map));
__ j(not_equal, &slow, not_taken);
// eax: key, known to be a smi.
// edx: receiver, known to be a JSObject.
// ebx: elements object, known to be an external array.
// Check that the index is in range.
__ mov(ecx, eax);
__ SmiUntag(ecx); // Untag the index.
__ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalShortArray:
__ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(ecx, Operand(ebx, ecx, times_4, 0));
break;
case kExternalFloatArray:
__ fld_s(Operand(ebx, ecx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// ecx: value
// For floating-point array type:
// FP(0): value
if (array_type == kExternalIntArray ||
array_type == kExternalUnsignedIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
__ cmp(ecx, 0xC0000000);
__ j(sign, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
__ test(ecx, Immediate(0xC0000000));
__ j(not_zero, &box_int);
}
__ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
if (array_type == kExternalIntArray) {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
__ push(Immediate(0));
__ push(ecx);
__ fild_d(Operand(esp, 0));
__ pop(ecx);
__ pop(ecx);
}
// FP(0): value
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
__ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
}
// If we fail allocation of the HeapNumber, we still have a value on
// top of the FPU stack. Remove it.
__ bind(&failed_allocation);
__ ffree();
__ fincstp();
// Fall through to slow case.
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ pop(ebx);
__ push(edx); // receiver
__ push(eax); // name
__ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
// Return the generated code.
return GetCode(flags);
}
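The smi range checks above compress "fits in a 31-bit payload" into one instruction each. For signed values, cmp ecx, 0xC0000000 effectively adds 0x40000000 and inspects the sign flag, which flips exactly the out-of-range inputs negative; for unsigned values, both top bits must be clear. A C++ sketch of both predicates, assuming the 32-bit smi layout (31-bit payload, one tag bit) and two's-complement wraparound:

#include <cstdint>
#include <cstdio>

// A signed element fits in a smi iff it lies in [-2^30, 2^30). Adding
// 0x40000000 maps exactly the out-of-range values onto the negative
// half -- the single-instruction test cmp ecx, 0xC0000000; j(sign, ...).
bool Int32FitsSmi(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}

// An unsigned element must leave both top bits clear (v < 2^30), hence
// test ecx, 0xC0000000.
bool Uint32FitsSmi(uint32_t v) {
  return (v & 0xC0000000u) == 0;
}

int main() {
  printf("%d %d\n", Int32FitsSmi(0x3FFFFFFF), Int32FitsSmi(0x40000000));     // 1 0
  printf("%d %d\n", Uint32FitsSmi(0x3FFFFFFFu), Uint32FitsSmi(0xC0000000u)); // 1 0
  return 0;
}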
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, check_heap_number;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
// Get the instance type from the map of the receiver.
__ CmpInstanceType(edi, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// eax: value
// edx: receiver, a JSObject
// ecx: key, a smi
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
&slow, true);
// Check that the index is in range.
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ test(eax, Immediate(kSmiTagMask));
__ j(not_equal, &check_heap_number);
// smi case
__ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
__ SmiUntag(ecx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ecx: untagged value
// edi: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
__ fstp_s(Operand(edi, ebx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
__ ret(0); // Return the original value.
__ bind(&check_heap_number);
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(not_equal, &slow);
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
// edi: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
__ fstp_s(Operand(edi, ebx, times_4, 0));
__ ret(0);
} else {
// Perform float-to-int conversion with truncation (round-to-zero)
// behavior.
if (array_type != kExternalIntArray &&
array_type != kExternalUnsignedIntArray) {
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
__ push(ecx); // Make room on stack
__ fisttp_s(Operand(esp, 0));
__ pop(ecx);
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
// Free the top of the FP stack, which we don't use in this code
// path.
__ ffree();
__ fincstp();
__ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
} else {
// TODO(kbr): consider supporting non-SSE2 processors properly.
// The code in IntegerConvert (code-stubs-ia32.cc) is roughly what
// is needed though the conversion failure case does not need to be
// handled. The code below is not correct; it doesn't truncate, it
// rounds.
__ push(ecx); // Make room on stack
__ fistp_s(Operand(esp, 0));
__ pop(ecx);
}
} else {
bool have_sse3 = CpuFeatures::IsSupported(SSE3);
if (have_sse3 || !CpuFeatures::IsSupported(SSE2)) {
// fisttp stores values as signed integers. To represent the
// entire range of unsigned int arrays, store as a 64-bit
// int and discard the high 32 bits.
// If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
__ sub(Operand(esp), Immediate(2 * kPointerSize));
if (have_sse3) {
CpuFeatures::Scope scope(SSE3);
__ fisttp_d(Operand(esp, 0));
} else {
// TODO(kbr): consider supporting non-SSE2 processors properly.
__ fistp_d(Operand(esp, 0));
}
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
} else {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
// We can easily implement the correct rounding behavior for the
// range [0, 2^31-1]. For the time being, to keep this code simple,
// use the wrong rounding behavior for values outside this range.
__ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
__ LoadPowerOf2(xmm1, ecx, 31);
Label is_outside_range;
Label continuation_point;
__ ucomisd(xmm0, xmm1);
__ j(above_equal, &is_outside_range);
// Free the top of the FP stack, which we don't use in this code
// path.
__ ffree();
__ fincstp();
__ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&continuation_point);
__ bind(&is_outside_range);
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ fistp_d(Operand(esp, 0));
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
__ bind(&continuation_point);
}
}
// ecx: untagged integer value
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
}
default:
UNREACHABLE();
break;
}
__ ret(0); // Return original value.
}
// Slow case: call runtime.
__ bind(&slow);
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ pop(ebx);
__ push(edx); // receiver
__ push(ecx); // key
__ push(eax); // value
__ push(ebx); // return address
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
return GetCode(flags);
}
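On processors without SSE3's fisttp, round-to-zero conversion comes from SSE2's cvttsd2si, which the code above only trusts for [0, 2^31): it splits on 2^31 with ucomisd and knowingly accepts x87 fistp's round-to-nearest outside that range. The sketch below implements the truncation-mod-2^32 the stub is approximating, with explicit guards where the hardware conversions would produce the 0x80000000... "integer indefinite" pattern (the plain C++ casts would be undefined there):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Round-to-zero conversion for a 32-bit external array store, mod 2^32.
uint32_t TruncateForIntArray(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;  // the stubs store zero here
  if (d >= 0.0 && d < 2147483648.0) {            // cvttsd2si's easy range
    return static_cast<uint32_t>(static_cast<int32_t>(d));
  }
  // 64-bit truncation, keeping the low 32 bits; out-of-range doubles map
  // to 0x8000000000000000 in hardware, whose low half is zero.
  if (d >= 9223372036854775808.0 || d < -9223372036854775808.0) return 0;
  return static_cast<uint32_t>(static_cast<int64_t>(d));
}

int main() {
  printf("%u %u %u\n",
         TruncateForIntArray(3.9),            // 3
         TruncateForIntArray(-1.5),           // 4294967295 (-1 mod 2^32)
         TruncateForIntArray(4294967296.5));  // 0 (2^32 wraps to zero)
  return 0;
}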
#undef __
} } // namespace v8::internal
...
...@@ -367,6 +367,55 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
}
Code* KeyedLoadIC::external_array_stub(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalByteArray);
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedByteArray);
case JSObject::EXTERNAL_SHORT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalShortArray);
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedLoadIC_ExternalUnsignedShortArray);
case JSObject::EXTERNAL_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalIntArray);
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedIntArray);
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalFloatArray);
default:
UNREACHABLE();
return NULL;
}
}
Code* KeyedStoreIC::external_array_stub(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalByteArray);
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedStoreIC_ExternalUnsignedByteArray);
case JSObject::EXTERNAL_SHORT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalShortArray);
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedStoreIC_ExternalUnsignedShortArray);
case JSObject::EXTERNAL_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalIntArray);
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalUnsignedIntArray);
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalFloatArray);
default:
UNREACHABLE();
return NULL;
}
}
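Both helpers above are one-to-one maps from an elements kind to its per-type builtin. The same dispatch could be table-driven; a sketch with hypothetical stand-in enum values and stub names, assuming (in the sketch only) that the external kinds are contiguous:

#include <cstdio>

// Hypothetical stand-ins for JSObject::ElementsKind values and builtin
// names, purely to illustrate the shape of the dispatch.
enum ElementsKind {
  EXTERNAL_BYTE_ELEMENTS,
  EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
  EXTERNAL_SHORT_ELEMENTS,
  EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
  EXTERNAL_INT_ELEMENTS,
  EXTERNAL_UNSIGNED_INT_ELEMENTS,
  EXTERNAL_FLOAT_ELEMENTS
};

const char* kKeyedLoadStubNames[] = {
  "KeyedLoadIC_ExternalByteArray",
  "KeyedLoadIC_ExternalUnsignedByteArray",
  "KeyedLoadIC_ExternalShortArray",
  "KeyedLoadIC_ExternalUnsignedShortArray",
  "KeyedLoadIC_ExternalIntArray",
  "KeyedLoadIC_ExternalUnsignedIntArray",
  "KeyedLoadIC_ExternalFloatArray",
};

// One bounds check replaces the switch's default: UNREACHABLE() arm.
const char* LoadStubFor(ElementsKind kind) {
  if (kind > EXTERNAL_FLOAT_ELEMENTS) return 0;
  return kKeyedLoadStubNames[kind];
}

int main() {
  printf("%s\n", LoadStubFor(EXTERNAL_INT_ELEMENTS));
  return 0;
}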
static bool HasInterceptorGetter(JSObject* object) {
return !object->GetNamedInterceptor()->getter()->IsUndefined();
}
...@@ -1194,10 +1243,7 @@ MaybeObject* KeyedLoadIC::Load(State state,
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, false);
stub =
probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
stub = external_array_stub(receiver->GetElementsKind());
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
} else if (state == UNINITIALIZED &&
...@@ -1590,10 +1636,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
stub =
probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
stub = external_array_stub(receiver->GetElementsKind());
} else if (state == UNINITIALIZED &&
key->IsSmi() &&
receiver->map()->has_fast_elements()) {
...
...@@ -345,6 +345,12 @@ class KeyedLoadIC: public IC {
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
// operating upon external array types but fall back to the runtime
// for all other types.
static void GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type);
static void GenerateIndexedInterceptor(MacroAssembler* masm);
// Clear the use of the inlined version.
...@@ -380,6 +386,7 @@ class KeyedLoadIC: public IC {
static Code* string_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_String);
}
static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static Code* indexed_interceptor_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
...@@ -463,6 +470,13 @@ class KeyedStoreIC: public IC {
static void GenerateRuntimeSetProperty(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
// operating upon external array types but fall back to the runtime
// for all other types.
static void GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type);
// Clear the inlined version so the IC is always hit.
static void ClearInlinedVersion(Address address);
...@@ -487,6 +501,7 @@ class KeyedStoreIC: public IC {
static Code* generic_stub() {
return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
}
static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static void Clear(Address address, Code* target);
...
...@@ -172,11 +172,23 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
...
...@@ -397,20 +397,6 @@ Object* ConstructStubCompiler::CompileConstructStub(
}
Object* ExternalArrayStubCompiler::CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* ExternalArrayStubCompiler::CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
#undef __
} } // namespace v8::internal
...
...@@ -507,74 +507,6 @@ MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) {
}
namespace {
ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
switch (kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
return kExternalByteArray;
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return kExternalUnsignedByteArray;
case JSObject::EXTERNAL_SHORT_ELEMENTS:
return kExternalShortArray;
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return kExternalUnsignedShortArray;
case JSObject::EXTERNAL_INT_ELEMENTS:
return kExternalIntArray;
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
return kExternalUnsignedIntArray;
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return kExternalFloatArray;
default:
UNREACHABLE();
return static_cast<ExternalArrayType>(0);
}
}
} // anonymous namespace
MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
JSObject* receiver,
bool is_store) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(
is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC,
NORMAL);
ExternalArrayType array_type =
ElementsKindToExternalArrayType(receiver->GetElementsKind());
String* name =
is_store ? Heap::KeyedStoreExternalArray_symbol()
: Heap::KeyedLoadExternalArray_symbol();
// Use the global maps for the particular external array types,
// rather than the receiver's map, when looking up the cached code,
// so that we actually canonicalize these stubs.
Map* map = Heap::MapForExternalArrayType(array_type);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
ExternalArrayStubCompiler compiler;
{ MaybeObject* maybe_code =
is_store ? compiler.CompileKeyedStoreStub(array_type, flags) :
compiler.CompileKeyedLoadStub(array_type, flags);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
if (is_store) {
PROFILE(
CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
} else {
PROFILE(
CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
}
Object* result;
{ MaybeObject* maybe_result =
map->UpdateCodeCache(name, Code::cast(code));
if (!maybe_result->ToObject(&result)) return maybe_result;
}
}
return code;
}
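The removed lookup above compiles at most one stub per external array type: the code cache key is the type's canonical map plus a fixed symbol, never the receiver's own map. The same compile-once canonicalization pattern, sketched with std::map and strings standing in for maps and Code objects:

#include <cstdio>
#include <map>
#include <string>
#include <utility>

enum ExternalArrayType { kByte, kShort, kInt, kFloat };

static std::map<std::pair<ExternalArrayType, bool>, std::string> stub_cache;
static int compilations = 0;

// Key on (array type, load/store flavor) only, so repeated requests for
// different receivers of the same type reuse one canonical stub.
std::string GetStub(ExternalArrayType type, bool is_store) {
  std::pair<ExternalArrayType, bool> key(type, is_store);
  std::map<std::pair<ExternalArrayType, bool>, std::string>::iterator it =
      stub_cache.find(key);
  if (it != stub_cache.end()) return it->second;  // canonical stub exists
  ++compilations;                                 // stands in for compiling
  std::string code = std::string(is_store ? "store" : "load") + "_stub";
  stub_cache[key] = code;
  return code;
}

int main() {
  GetStub(kInt, false);
  GetStub(kInt, false);          // cache hit: no second compilation
  printf("%d\n", compilations);  // 1
  return 0;
}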
MaybeObject* StubCache::ComputeStoreNormal() {
return Builtins::builtin(Builtins::StoreIC_Normal);
}
...@@ -1777,16 +1709,4 @@ void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
}
MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) {
Object* result;
{ MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
return result;
}
} } // namespace v8::internal
...@@ -167,10 +167,6 @@ class StubCache : public AllStatic {
MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
JSObject* receiver);
MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
JSObject* receiver,
bool is_store);
// ---
MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc,
...@@ -801,20 +797,6 @@ class CallOptimization BASE_EMBEDDED {
CallHandlerInfo* api_call_info_;
};
class ExternalArrayStubCompiler: public StubCompiler {
public:
explicit ExternalArrayStubCompiler() {}
MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags);
private:
MaybeObject* GetCode(Code::Flags flags);
};
} } // namespace v8::internal
#endif // V8_STUB_CACHE_H_
...@@ -2721,17 +2721,6 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
}
void Assembler::cvttss2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2C);
emit_sse_operand(dst, src);
}
void Assembler::cvttsd2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
...@@ -2743,17 +2732,6 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
}
void Assembler::cvttsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2C);
emit_sse_operand(dst, src);
}
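Both removed overloads hand-assemble their truncating conversion: a mandatory prefix (F3 for scalar single, F2 for scalar double), the 0F escape byte, opcode 2C, then a ModRM byte. A sketch of the fixed part of that encoding for the register-register form, with REX handling omitted so only registers 0-7 are covered:

#include <cstdint>
#include <cstdio>
#include <vector>

// Emit cvttss2si/cvttsd2si dst, xmm (32-bit operand size): prefix,
// escape 0F, opcode 2C, then ModRM with mod = 11b for a register source.
void EmitCvttToInt32(std::vector<uint8_t>* buf, bool is_double,
                     int dst_reg, int src_xmm) {
  buf->push_back(is_double ? 0xF2 : 0xF3);
  buf->push_back(0x0F);
  buf->push_back(0x2C);
  buf->push_back(static_cast<uint8_t>(0xC0 | (dst_reg << 3) | src_xmm));
}

int main() {
  std::vector<uint8_t> buf;
  EmitCvttToInt32(&buf, true, 1, 0);  // cvttsd2si ecx, xmm0
  for (size_t i = 0; i < buf.size(); i++) printf("%02X ", buf[i]);
  printf("\n");                       // F2 0F 2C C8
  return 0;
}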
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
...
...@@ -1205,9 +1205,7 @@ class Assembler : public Malloced {
void movss(const Operand& dst, XMMRegister src);
void cvttss2si(Register dst, const Operand& src);
void cvttss2si(Register dst, XMMRegister src);
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
void cvtlsi2sd(XMMRegister dst, const Operand& src);
...
...@@ -1113,11 +1113,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x2C) {
// CVTTSS2SI:
// Convert with truncation scalar single-precision FP to dword integer.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvttss2si%c %s,",
operand_size_code(), NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
// Assert that mod is not 3, so source is memory, not an XMM register.
ASSERT_NE(0xC0, *current & 0xC0);
current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
} else if (opcode == 0x5A) {
// CVTSS2SD:
// Convert scalar single-precision FP to scalar double-precision FP.
...
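The disassembler change above swaps an explicit get_modrm decode for an assertion that mod is not 3 (*current & 0xC0 must not be 0xC0), i.e. the source is a memory operand rather than an XMM register. A small sketch of the ModRM field split both versions rely on:

#include <cstdint>
#include <cstdio>

// Split a ModRM byte into its three fields. mod == 3 means the r/m field
// names a register directly; anything else selects a memory operand,
// which is what ASSERT_NE(0xC0, *current & 0xC0) enforces.
void GetModRM(uint8_t modrm, int* mod, int* regop, int* rm) {
  *mod = (modrm >> 6) & 3;
  *regop = (modrm >> 3) & 7;
  *rm = modrm & 7;
}

int main() {
  int mod, regop, rm;
  GetModRM(0xC8, &mod, &regop, &rm);
  printf("mod=%d reg=%d rm=%d\n", mod, regop, rm);  // mod=3 reg=1 rm=0
  return 0;
}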
...@@ -727,6 +727,131 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Check that the object is a JS object.
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks. The map is already in rdx.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: index (as a smi)
// rdx: JSObject
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
__ SmiToInteger32(rcx, rax);
__ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// rax: index (as a smi)
// rdx: receiver (JSObject)
// rcx: untagged index
// rbx: elements array
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalShortArray:
__ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalIntArray:
__ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalUnsignedIntArray:
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalFloatArray:
__ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// rax: index
// rdx: receiver
// For integer array types:
// rcx: value
// For floating-point array type:
// xmm0: value as double.
ASSERT(kSmiValueSize == 32);
if (array_type == kExternalUnsignedIntArray) {
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
NearLabel box_int;
__ JumpIfUIntNotValidSmiValue(rcx, &box_int);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
// The value is zero-extended since we loaded the value from memory
// with movl.
__ cvtqsi2sd(xmm0, rcx);
__ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else {
__ Integer32ToSmi(rax, rcx);
__ ret(0);
}
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
}
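The ASSERT(kSmiValueSize == 32) above is what makes the x64 load path simpler than ia32's: the smi payload occupies the upper 32 bits of the word, so every signed int32 element fits and only unsigned values with the top bit set need boxing. A sketch of the tagging and of the JumpIfUIntNotValidSmiValue test, assuming that layout:

#include <cstdint>
#include <cstdio>

// x64 smi layout (32-bit payload in the upper half, tag bits zero).
int64_t Integer32ToSmi(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}

// An unsigned 32-bit element is a valid smi payload only if it also fits
// in a *signed* 32-bit value, i.e. its top bit is clear; otherwise the
// stub boxes it as a HeapNumber.
bool UIntIsValidSmiValue(uint32_t value) {
  return (value >> 31) == 0;
}

int main() {
  printf("%llx\n", static_cast<unsigned long long>(Integer32ToSmi(7)));  // 700000000
  printf("%d %d\n", UIntIsValidSmiValue(5u),          // 1
         UIntIsValidSmiValue(0x80000000u));           // 0
  return 0;
}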
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
...@@ -898,6 +1023,149 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
// Get the map from the receiver.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow);
// Check that the object is a JS object.
__ CmpInstanceType(rbx, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
__ SmiToInteger32(rdi, rcx); // Untag the index.
__ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
NearLabel check_heap_number;
__ JumpIfNotSmi(rax, &check_heap_number);
// No more branches to slow case on this path. Key and receiver not needed.
__ SmiToInteger32(rdx, rax);
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ cvtlsi2ss(xmm0, rdx);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
break;
default:
UNREACHABLE();
break;
}
__ ret(0);
__ bind(&check_heap_number);
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
__ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
__ j(not_equal, &slow);
// No more branches to slow case on this path.
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rdi: untagged index
// rbx: base pointer of external storage
// xmm0: value as double.
if (array_type == kExternalFloatArray) {
__ cvtsd2ss(xmm0, xmm0);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
__ ret(0);
} else {
// Need to perform float-to-int conversion.
// Convert to int32 and store the low byte/word.
// If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
// rdx: value (converted to an untagged integer)
// rdi: untagged index
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ cvtsd2si(rdx, xmm0);
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ cvtsd2si(rdx, xmm0);
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
// Convert to int64, so that NaN and infinities become
// 0x8000000000000000, which is zero mod 2^32.
__ cvtsd2siq(rdx, xmm0);
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
}
default:
UNREACHABLE();
break;
}
__ ret(0);
}
// Slow case: call runtime.
__ bind(&slow);
GenerateRuntimeSetProperty(masm);
}
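On x64 the integer store path converts through 64 bits (cvtsd2siq above; the truncating cvttsd2siq in the stub-compiler version further down) and stores only the low 32 bits. NaN and +/-Infinity come out of the hardware as 0x8000000000000000, whose low half is zero, so the zero store the WebGL comment asks for falls out of the store width. A sketch modeling the truncating variant, with the hardware special case written out because the plain C++ cast is undefined for those inputs:

#include <cmath>
#include <cstdint>
#include <cstdio>

// 64-bit truncation followed by a 32-bit store, as in cvttsd2siq + movl.
uint32_t StoreAsInt32(double d) {
  int64_t wide;
  if (std::isnan(d) || std::isinf(d) ||
      d >= 9223372036854775808.0 || d < -9223372036854775808.0) {
    wide = INT64_MIN;                // hardware "integer indefinite" result
  } else {
    wide = static_cast<int64_t>(d);  // round toward zero
  }
  return static_cast<uint32_t>(wide);  // keep only the low 32 bits
}

int main() {
  printf("%u %u %u\n",
         StoreAsInt32(-2.7),       // 4294967294 (-2 mod 2^32)
         StoreAsInt32(NAN),        // 0
         StoreAsInt32(INFINITY));  // 0
  return 0;
}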
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
...
...@@ -3144,306 +3144,6 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
ExternalArrayType array_type, Code::Flags flags) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Check that the object is a JS object.
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks. The map is already in rdx.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: index (as a smi)
// rdx: JSObject
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
__ SmiToInteger32(rcx, rax);
__ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// rax: index (as a smi)
// rdx: receiver (JSObject)
// rcx: untagged index
// rbx: elements array
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalShortArray:
__ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalIntArray:
__ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalUnsignedIntArray:
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalFloatArray:
__ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// rax: index
// rdx: receiver
// For integer array types:
// rcx: value
// For floating-point array type:
// xmm0: value as double.
ASSERT(kSmiValueSize == 32);
if (array_type == kExternalUnsignedIntArray) {
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
NearLabel box_int;
__ JumpIfUIntNotValidSmiValue(rcx, &box_int);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
// The value is zero-extended since we loaded the value from memory
// with movl.
__ cvtqsi2sd(xmm0, rcx);
__ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else {
__ Integer32ToSmi(rax, rcx);
__ ret(0);
}
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
__ pop(rbx);
__ push(rdx); // receiver
__ push(rax); // name
__ push(rbx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
// Return the generated code.
return GetCode(flags);
}
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
ExternalArrayType array_type, Code::Flags flags) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
// Get the map from the receiver.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow);
// Check that the object is a JS object.
__ CmpInstanceType(rbx, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
__ SmiToInteger32(rdi, rcx); // Untag the index.
__ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
NearLabel check_heap_number;
__ JumpIfNotSmi(rax, &check_heap_number);
// No more branches to slow case on this path. Key and receiver not needed.
__ SmiToInteger32(rdx, rax);
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ cvtlsi2ss(xmm0, rdx);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
break;
default:
UNREACHABLE();
break;
}
__ ret(0);
__ bind(&check_heap_number);
// rax: value
// rcx: key (a smi)
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
__ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
__ j(not_equal, &slow);
// No more branches to slow case on this path.
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rdi: untagged index
// rbx: base pointer of external storage
// xmm0: value as double.
if (array_type == kExternalFloatArray) {
__ cvtsd2ss(xmm0, xmm0);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
__ ret(0);
} else {
// Perform float-to-int conversion with truncation (round-to-zero)
// behavior.
// Convert to int32 and store the low byte/word.
// If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
// rdx: value (converted to an untagged integer)
// rdi: untagged index
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ cvttsd2si(rdx, xmm0);
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ cvttsd2si(rdx, xmm0);
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
// Convert to int64, so that NaN and infinities become
// 0x8000000000000000, which is zero mod 2^32.
__ cvttsd2siq(rdx, xmm0);
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
}
default:
UNREACHABLE();
break;
}
__ ret(0);
}
// Slow case: call runtime.
__ bind(&slow);
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
__ pop(rbx);
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
__ push(rbx); // return address
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
return GetCode(flags);
}
#undef __
} } // namespace v8::internal
...