Commit 391ef3be authored by danno@chromium.org

Implement ICs for FastDoubleArray loads and stores

Implemented on ia32, x64, ARM. Stubbed out with UNIMPLEMENTED on MIPS.

BUG=none
TEST=unbox-double-arrays.js

Review URL: http://codereview.chromium.org/7307030

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8637 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 907065c1
@@ -2548,6 +2548,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
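The assert above now accepts a third backing-store map. A minimal sketch of the predicate it encodes, with an illustrative enum standing in for the real Map pointers V8 loads from its root list:

// Sketch only, not V8 types: AssertFastElements accepts exactly these three.
enum BackingStoreMap {
  kFixedArrayMap,        // Heap::kFixedArrayMapRootIndex
  kFixedDoubleArrayMap,  // Heap::kFixedDoubleArrayMapRootIndex (new in this CL)
  kFixedCOWArrayMap,     // Heap::kFixedCOWArrayMapRootIndex
  kOtherMap
};

bool IsFastElementsBackingStore(BackingStoreMap map) {
  return map == kFixedArrayMap ||
         map == kFixedDoubleArrayMap ||
         map == kFixedCOWArrayMap;
}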
@@ -4168,6 +4168,77 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
}
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss_force_generic, slow_allocate_heapnumber;
Register key_reg = r0;
Register receiver_reg = r1;
Register elements_reg = r2;
Register heap_number_reg = r2;
Register indexed_double_offset = r3;
Register scratch = r4;
Register scratch2 = r5;
Register scratch3 = r6;
Register heap_number_map = r7;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
// Get the elements array.
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
// Check that the key is within bounds.
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ cmp(key_reg, Operand(scratch));
__ b(hs, &miss_force_generic);
// Load the upper word of the double in the fixed array and test for NaN.
__ add(indexed_double_offset, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
__ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
__ cmp(scratch, Operand(kHoleNanUpper32));
__ b(&miss_force_generic, eq);
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
heap_number_map, &slow_allocate_heapnumber);
// Don't need to reload the upper 32 bits of the double, it's already in
// scratch.
__ str(scratch, FieldMemOperand(heap_number_reg,
HeapNumber::kExponentOffset));
__ ldr(scratch, FieldMemOperand(indexed_double_offset,
FixedArray::kHeaderSize));
__ str(scratch, FieldMemOperand(heap_number_reg,
HeapNumber::kMantissaOffset));
__ mov(r0, heap_number_reg);
__ Ret();
__ bind(&slow_allocate_heapnumber);
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ Jump(slow_ic, RelocInfo::CODE_TARGET);
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
}
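For readers following the load path: the check above works because a FixedDoubleArray marks missing entries with one specific NaN bit pattern, so inspecting the upper 32 bits of the element is enough to detect a hole. A minimal C++ sketch of the same test, assuming the constants from v8globals.h further down; LoadFastDoubleElement and storage are illustrative names, not V8 API:

#include <cstdint>
#include <cstring>

static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;  // value from v8globals.h

// Returns false for a hole (where the stub jumps to miss_force_generic);
// otherwise produces the raw double, which the stub then boxes into a newly
// allocated HeapNumber before returning.
bool LoadFastDoubleElement(const double* storage, int index, double* out) {
  uint64_t bits;
  std::memcpy(&bits, &storage[index], sizeof(bits));
  if (static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32) {
    return false;
  }
  *out = storage[index];
  return true;
}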
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
@@ -4231,6 +4302,123 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : scratch
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
Register scratch = r3;
Register elements_reg = r4;
Register mantissa_reg = r5;
Register exponent_reg = r6;
Register scratch4 = r7;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
// Check that the key is within bounds.
if (is_js_array) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
__ ldr(scratch,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
// Handle smi values specially.
__ JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
__ CheckMap(value_reg,
scratch,
masm->isolate()->factory()->heap_number_map(),
&miss_force_generic,
DONT_DO_SMI_CHECK);
// Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
__ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
__ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
__ cmp(exponent_reg, scratch);
__ b(ge, &maybe_nan);
__ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
__ bind(&have_double_value);
__ add(scratch, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
__ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ str(exponent_reg, FieldMemOperand(scratch, offset));
__ Ret();
__ bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
__ b(gt, &is_nan);
__ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
__ cmp(mantissa_reg, Operand(0));
__ b(eq, &have_double_value);
__ bind(&is_nan);
// Load canonical NaN for storing into the double array.
__ mov(mantissa_reg, Operand(kCanonicalNonHoleNanLower32));
__ mov(exponent_reg, Operand(kCanonicalNonHoleNanUpper32));
__ jmp(&have_double_value);
__ bind(&smi_value);
__ add(scratch, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
__ SmiUntag(value_reg, value_reg);
FloatingPointHelper::ConvertIntToDouble(
masm, value_reg, destination,
d0, mantissa_reg, exponent_reg, // These are: double_dst, dst1, dst2.
scratch4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
__ vstr(d0, scratch, 0);
} else {
__ str(mantissa_reg, MemOperand(scratch, 0));
__ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
}
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
__ bind(&miss_force_generic);
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
}
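The maybe_nan/is_nan dance above exists to keep arbitrary NaN payloads out of the backing store, since a stray NaN bit pattern could otherwise alias the hole marker. A C++ sketch of the same decision, assuming the v8globals.h constants below; the helper name is illustrative, and it uses the same signed compare on the upper word that the stub does:

#include <cstdint>
#include <cstring>

static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
static const uint64_t kCanonicalNonHoleNanInt64 =
    (static_cast<uint64_t>(0x7FF10000) << 32) | 0xFFFFFFFFu;

double CanonicalizeForDoubleArray(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int32_t upper = static_cast<int32_t>(bits >> 32);
  uint32_t lower = static_cast<uint32_t>(bits);
  int32_t bound = static_cast<int32_t>(kNaNOrInfinityLowerBoundUpper32);
  // Above the bound means NaN; exactly at the bound with a zero mantissa is
  // +Infinity and passes through. Values with the sign bit set land on the
  // negative side of the signed compare and also pass through; they can never
  // alias the hole, whose sign bit is clear.
  if (upper > bound || (upper == bound && lower != 0)) {
    std::memcpy(&value, &kCanonicalNonHoleNanInt64, sizeof(value));
  }
  return value;
}

The smi path skips this check entirely: FloatingPointHelper::ConvertIntToDouble can never produce a NaN from an untagged integer.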
#undef __
} } // namespace v8::internal
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// Copyright (c) 2011 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -71,7 +71,9 @@ const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::uint8_max_value = 255;
const double DoubleConstant::zero = 0.0;
-const double DoubleConstant::nan = OS::nan_value();
+const double DoubleConstant::canonical_non_hole_nan =
+BitCast<double>(kCanonicalNonHoleNanInt64);
+const double DoubleConstant::the_hole_nan = BitCast<double>(kHoleNanInt64);
const double DoubleConstant::negative_infinity = -V8_INFINITY;
const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
@@ -921,9 +923,15 @@ ExternalReference ExternalReference::address_of_negative_infinity() {
}
-ExternalReference ExternalReference::address_of_nan() {
+ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
return ExternalReference(reinterpret_cast<void*>(
-const_cast<double*>(&DoubleConstant::nan)));
+const_cast<double*>(&DoubleConstant::canonical_non_hole_nan)));
}
+ExternalReference ExternalReference::address_of_the_hole_nan() {
+return ExternalReference(reinterpret_cast<void*>(
+const_cast<double*>(&DoubleConstant::the_hole_nan)));
+}
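DoubleConstant pins these bit patterns in C++ objects whose addresses generated code can then load through an ExternalReference. A rough sketch of the BitCast utility being used, assuming the real one in utils.h behaves like a size-checked memcpy, which is the standard aliasing-safe way to type-pun:

#include <cstdint>
#include <cstring>

// Not the V8 definition; a memcpy-based equivalent for illustration.
template <class Dest, class Source>
Dest BitCast(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source),
                "BitCast requires same-sized types");  // C++11 spelling;
                                                       // V8 used its own
                                                       // compile-time assert
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}

const uint64_t kCanonicalNonHoleNanInt64 =
    (static_cast<uint64_t>(0x7FF10000) << 32) | 0xFFFFFFFFu;
const double canonical_non_hole_nan =
    BitCast<double>(kCanonicalNonHoleNanInt64);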
@@ -70,7 +70,8 @@ class DoubleConstant: public AllStatic {
static const double zero;
static const double uint8_max_value;
static const double negative_infinity;
-static const double nan;
+static const double canonical_non_hole_nan;
+static const double the_hole_nan;
};
@@ -629,7 +630,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_zero();
static ExternalReference address_of_uint8_max_value();
static ExternalReference address_of_negative_infinity();
-static ExternalReference address_of_nan();
+static ExternalReference address_of_canonical_non_hole_nan();
+static ExternalReference address_of_the_hole_nan();
static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);
@@ -250,7 +250,7 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case JSObject::FAST_DOUBLE_ELEMENTS:
-UNIMPLEMENTED();
+KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
break;
case JSObject::EXTERNAL_BYTE_ELEMENTS:
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -279,7 +279,8 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
break;
case JSObject::FAST_DOUBLE_ELEMENTS:
-UNIMPLEMENTED();
+KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
+is_js_array_);
break;
case JSObject::EXTERNAL_BYTE_ELEMENTS:
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -199,6 +199,8 @@ const int kDoubleSize = sizeof(double); // NOLINT
const int kIntptrSize = sizeof(intptr_t); // NOLINT
const int kPointerSize = sizeof(void*); // NOLINT
const int kDoubleSizeLog2 = 3;
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
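kDoubleSizeLog2 above is what lets the stubs scale a key to a byte offset with one shift. A smi on 32-bit targets is the integer shifted left by kSmiTagSize, so the ARM stubs shift by kDoubleSizeLog2 - kSmiTagSize instead of untagging first. A sketch with an illustrative helper:

#include <cstdint>

const int kSmiTagSizeSketch = 1;      // 32-bit V8: smi == value << 1
const int kDoubleSizeLog2Sketch = 3;  // log2(sizeof(double))

// Mirrors: add(scratch, elements,
//              Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize))
uintptr_t DoubleElementAddress(uintptr_t elements, intptr_t smi_key,
                               int header_size) {
  return elements + header_size +
         (static_cast<uintptr_t>(smi_key)
          << (kDoubleSizeLog2Sketch - kSmiTagSizeSketch));
}

The ia32 stubs get the same effect from a times_4 scale factor: the smi already carries a factor of two, and 2 * 4 gives the 8 bytes per element.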
@@ -2790,7 +2790,8 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
__ ucomisd(input_reg, xmm0);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
-ExternalReference nan = ExternalReference::address_of_nan();
+ExternalReference nan =
+ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
@@ -3451,7 +3452,8 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_equal, env);
// Convert undefined to NaN.
-ExternalReference nan = ExternalReference::address_of_nan();
+ExternalReference nan =
+ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
@@ -2043,6 +2043,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(factory->fixed_array_map()));
j(equal, &ok);
cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(factory->fixed_double_array_map()));
j(equal, &ok);
cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(factory->fixed_cow_array_map()));
j(equal, &ok);
@@ -3790,6 +3790,71 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
}
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss_force_generic, slow_allocate_heapnumber;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(eax, &miss_force_generic);
// Get the elements array.
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
__ AssertFastElements(ecx);
// Check that the key is within bounds.
__ cmp(eax, FieldOperand(ecx, FixedDoubleArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
// Check for the hole
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ cmp(FieldOperand(ecx, eax, times_4, offset), Immediate(kHoleNanUpper32));
__ j(equal, &miss_force_generic);
// Always allocate a heap number for the result.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(xmm0, FieldOperand(ecx, eax, times_4,
FixedDoubleArray::kHeaderSize));
} else {
__ fld_d(FieldOperand(ecx, eax, times_4, FixedDoubleArray::kHeaderSize));
}
__ AllocateHeapNumber(ecx, ebx, edi, &slow_allocate_heapnumber);
// Set the value.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
}
__ mov(eax, ecx);
__ ret(0);
__ bind(&slow_allocate_heapnumber);
// A value was pushed on the floating point stack before the allocation, if
// the allocation fails it needs to be removed.
if (!CpuFeatures::IsSupported(SSE2)) {
__ ffree();
__ fincstp();
}
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ jmp(slow_ic, RelocInfo::CODE_TARGET);
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
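Unlike the smi-elements stub, this load must always box its result, because a raw double is not a tagged value the IC machinery can hand back. A sketch of that contract with stand-in types; HeapNumber here is not the V8 class:

#include <cstddef>
#include <new>

struct HeapNumber { double value; };  // stand-in for the boxed V8 object

HeapNumber* BoxDouble(double value) {
  HeapNumber* result = new (std::nothrow) HeapNumber;
  if (result != NULL) result->value = value;
  // On failure the stub tail-calls KeyedLoadIC_Slow, but on the non-SSE2
  // path it must first pop the x87 stack (the ffree/fincstp above), since
  // fld_d pushed the element before AllocateHeapNumber ran.
  return result;
}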
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
@@ -3839,6 +3904,98 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss_force_generic, smi_value, is_nan, maybe_nan;
Label have_double_value, not_nan;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(ecx, &miss_force_generic);
// Get the elements array.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ AssertFastElements(edi);
if (is_js_array) {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
} else {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
}
__ j(above_equal, &miss_force_generic);
__ JumpIfSmi(eax, &smi_value, Label::kNear);
__ CheckMap(eax,
masm->isolate()->factory()->heap_number_map(),
&miss_force_generic,
DONT_DO_SMI_CHECK);
// Double value, canonicalize NaN.
uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
__ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
__ j(greater_equal, &maybe_nan, Label::kNear);
__ bind(&not_nan);
ExternalReference canonical_nan_reference =
ExternalReference::address_of_canonical_non_hole_nan();
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&have_double_value);
__ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
__ ret(0);
} else {
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&have_double_value);
__ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
__ ret(0);
}
__ bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
__ j(greater, &is_nan, Label::kNear);
__ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
__ j(zero, &not_nan);
__ bind(&is_nan);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
} else {
__ fld_d(Operand::StaticVariable(canonical_nan_reference));
}
__ jmp(&have_double_value, Label::kNear);
__ bind(&smi_value);
// Value is a smi. convert to a double and store.
__ SmiUntag(eax);
__ push(eax);
__ fild_s(Operand(esp, 0));
__ pop(eax);
__ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
__ bind(&miss_force_generic);
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
}
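The smi path above is the one store that cannot fail: untag, convert, write. A C++ rendering, assuming 32-bit smi tagging; StoreSmiAsDouble is an illustrative name:

#include <cstdint>

void StoreSmiAsDouble(double* elements, int index, intptr_t smi_value) {
  int untagged = static_cast<int>(smi_value >> 1);  // SmiUntag on ia32
  // fild_s loads the 32-bit int onto the x87 stack; fstp_d pops it back out
  // as a 64-bit double, i.e. a plain int-to-double conversion.
  elements[index] = static_cast<double>(untagged);
}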
#undef __
} } // namespace v8::internal
@@ -4229,6 +4229,12 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
}
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
UNIMPLEMENTED();
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
@@ -4292,6 +4298,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
bool is_js_array) {
UNIMPLEMENTED();
}
#undef __
} } // namespace v8::internal
@@ -1610,6 +1610,21 @@ void FixedArray::set(int index, Object* value) {
}
inline bool FixedDoubleArray::is_the_hole_nan(double value) {
return BitCast<uint64_t, double>(value) == kHoleNanInt64;
}
inline double FixedDoubleArray::hole_nan_as_double() {
return BitCast<double, uint64_t>(kHoleNanInt64);
}
inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
return BitCast<double, uint64_t>(kCanonicalNonHoleNanInt64);
}
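Note that is_the_hole_nan compares raw bits rather than doubles: a floating-point equality test against the hole would always be false, because a NaN never compares equal to anything, itself included. A standalone rendering of the helper, with the constant inlined for illustration:

#include <cstdint>
#include <cstring>

const uint64_t kHoleNanInt64Sketch =
    (static_cast<uint64_t>(0x7FFFFFFF) << 32) | 0xFFFFFFFFu;

bool IsTheHoleNan(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits == kHoleNanInt64Sketch;  // exact bit-pattern match
}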
double FixedDoubleArray::get(int index) {
ASSERT(map() != HEAP->fixed_cow_array_map() &&
map() != HEAP->fixed_array_map());
@@ -58,11 +58,6 @@ namespace internal {
const int kGetterIndex = 0;
const int kSetterIndex = 1;
-uint64_t FixedDoubleArray::kHoleNanInt64 = -1;
-uint64_t FixedDoubleArray::kCanonicalNonHoleNanLower32 = 0x7FF00000;
-uint64_t FixedDoubleArray::kCanonicalNonHoleNanInt64 =
-kCanonicalNonHoleNanLower32 << 32;
MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
Object* value) {
Object* result;
@@ -2811,16 +2806,18 @@ MaybeObject* JSObject::NormalizeElements() {
ASSERT(!HasExternalArrayElements());
// Find the backing store.
-FixedArray* array = FixedArray::cast(elements());
+FixedArrayBase* array = FixedArrayBase::cast(elements());
Map* old_map = array->map();
bool is_arguments =
(old_map == old_map->heap()->non_strict_arguments_elements_map());
if (is_arguments) {
-array = FixedArray::cast(array->get(1));
+array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
}
if (array->IsDictionary()) return array;
-ASSERT(HasFastElements() || HasFastArgumentsElements());
+ASSERT(HasFastElements() ||
+HasFastDoubleElements() ||
+HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
@@ -2833,7 +2830,7 @@ MaybeObject* JSObject::NormalizeElements() {
}
// Copy the elements to the new backing store.
-bool has_double_elements = old_map->has_fast_double_elements();
+bool has_double_elements = array->IsFixedDoubleArray();
for (int i = 0; i < length; i++) {
Object* value = NULL;
if (has_double_elements) {
@@ -2851,7 +2848,7 @@ MaybeObject* JSObject::NormalizeElements() {
}
} else {
ASSERT(old_map->has_fast_elements());
-value = array->get(i);
+value = FixedArray::cast(array)->get(i);
}
PropertyDetails details = PropertyDetails(NONE, NORMAL);
if (!value->IsTheHole()) {
@@ -7317,22 +7314,28 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
new_map = Map::cast(object);
}
AssertNoAllocation no_gc;
WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
switch (GetElementsKind()) {
case FAST_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
set_map(new_map);
set_elements(new_elements);
break;
case DICTIONARY_ELEMENTS:
}
case DICTIONARY_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
CopySlowElementsToFast(NumberDictionary::cast(elements()),
new_elements,
mode);
set_map(new_map);
set_elements(new_elements);
break;
}
case NON_STRICT_ARGUMENTS_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
// The object's map and the parameter map are unchanged, the unaliased
// arguments are copied to the new backing store.
FixedArray* parameter_map = FixedArray::cast(elements());
@@ -7368,6 +7371,8 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
new_elements->set(i, obj, UPDATE_WRITE_BARRIER);
}
}
set_map(new_map);
set_elements(new_elements);
break;
}
case EXTERNAL_BYTE_ELEMENTS:
@@ -7430,7 +7435,9 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
break;
}
ASSERT(new_map->has_fast_double_elements());
set_map(new_map);
ASSERT(elems->IsFixedDoubleArray());
set_elements(elems);
if (IsJSArray()) {
@@ -8407,8 +8414,9 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
} else {
new_length = dictionary->max_number_key() + 1;
}
-MaybeObject* result =
-SetFastElementsCapacityAndLength(new_length, new_length);
+MaybeObject* result = ShouldConvertToFastDoubleElements()
+? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
+: SetFastElementsCapacityAndLength(new_length, new_length);
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -8495,6 +8503,9 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
}
// Otherwise default to slow case.
ASSERT(HasFastDoubleElements());
ASSERT(map()->has_fast_double_elements());
ASSERT(elements()->IsFixedDoubleArray());
Object* obj;
{ MaybeObject* maybe_obj = NormalizeElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -8948,10 +8959,13 @@ bool JSObject::HasDenseElements() {
int capacity = 0;
int number_of_elements = 0;
-FixedArray* backing_store = FixedArray::cast(elements());
+FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
+FixedArray* backing_store = NULL;
switch (GetElementsKind()) {
case NON_STRICT_ARGUMENTS_ELEMENTS:
-backing_store = FixedArray::cast(backing_store->get(1));
+backing_store_base =
+FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
+backing_store = FixedArray::cast(backing_store_base);
if (backing_store->IsDictionary()) {
NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
capacity = dictionary->Capacity();
@@ -8960,13 +8974,15 @@ bool JSObject::HasDenseElements() {
}
// Fall through.
case FAST_ELEMENTS:
+backing_store = FixedArray::cast(backing_store_base);
capacity = backing_store->length();
for (int i = 0; i < capacity; ++i) {
if (!backing_store->get(i)->IsTheHole()) ++number_of_elements;
}
break;
case DICTIONARY_ELEMENTS: {
-NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
+NumberDictionary* dictionary =
+NumberDictionary::cast(FixedArray::cast(elements()));
capacity = dictionary->Capacity();
number_of_elements = dictionary->NumberOfElements();
break;
@@ -2173,23 +2173,9 @@ class FixedDoubleArray: public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
-// The following can't be declared inline as const static
-// because they're 64-bit.
-static uint64_t kCanonicalNonHoleNanLower32;
-static uint64_t kCanonicalNonHoleNanInt64;
-static uint64_t kHoleNanInt64;
-inline static bool is_the_hole_nan(double value) {
-return BitCast<uint64_t, double>(value) == kHoleNanInt64;
-}
-inline static double hole_nan_as_double() {
-return BitCast<double, uint64_t>(kHoleNanInt64);
-}
-inline static double canonical_not_the_hole_nan_as_double() {
-return BitCast<double, uint64_t>(kCanonicalNonHoleNanInt64);
-}
+inline static bool is_the_hole_nan(double value);
+inline static double hole_nan_as_double();
+inline static double canonical_not_the_hole_nan_as_double();
// Casting.
static inline FixedDoubleArray* cast(Object* obj);
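The motivation for this move is the C++ limitation the removed comment names: an in-class initializer is only valid for a static const of integral type, and 64-bit initializers were not portable across the toolchains V8 supported, so the old members were mutable statics defined out of line in objects.cc. As namespace-level consts in v8globals.h they become genuine compile-time constants. Sketch of the before/after shape, with illustrative names:

#include <cstdint>

// Before: a static member needing a separate definition in a .cc file.
struct OldStyle {
  static uint64_t kHoleNanInt64;  // defined (and mutable) in objects.cc
};
uint64_t OldStyle::kHoleNanInt64 = -1;

// After: a namespace-scope constant, usable from any translation unit and
// foldable at compile time.
const uint64_t kHoleNanInt64Sketch =
    (static_cast<uint64_t>(0x7FFFFFFF) << 32) | 0xFFFFFFFFu;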
@@ -662,6 +662,8 @@ class KeyedLoadStubCompiler: public StubCompiler {
static void GenerateLoadFastElement(MacroAssembler* masm);
static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
private:
@@ -717,6 +719,9 @@ class KeyedStoreStubCompiler: public StubCompiler {
static void GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array);
static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
bool is_js_array);
static void GenerateStoreExternalArray(MacroAssembler* masm,
JSObject::ElementsKind elements_kind);
@@ -506,6 +506,21 @@ enum CallKind {
CALL_AS_FUNCTION
};
static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
static const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
static const uint32_t kCanonicalNonHoleNanUpper32 = 0x7FF10000;
static const uint32_t kCanonicalNonHoleNanLower32 = 0xFFFFFFFF;
static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
const uint64_t kHoleNanInt64 =
(static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
const uint64_t kCanonicalNonHoleNanInt64 =
(static_cast<uint64_t>(kCanonicalNonHoleNanUpper32) << 32) |
kCanonicalNonHoleNanLower32;
const uint64_t kLastNonNaNInt64 =
(static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
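A quick standalone check of the invariants these constants encode, runnable outside V8; the NaN-ness test uses self-inequality, the one comparison a NaN always fails:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t hole_bits =
      (static_cast<uint64_t>(0x7FFFFFFF) << 32) | 0xFFFFFFFFu;
  const uint64_t canonical_bits =
      (static_cast<uint64_t>(0x7FF10000) << 32) | 0xFFFFFFFFu;

  double hole, canonical;
  std::memcpy(&hole, &hole_bits, sizeof(hole));
  std::memcpy(&canonical, &canonical_bits, sizeof(canonical));

  assert(hole != hole);                 // the hole is a NaN
  assert(canonical != canonical);       // the canonical NaN is a NaN
  assert(hole_bits != canonical_bits);  // and they can never be confused
  return 0;
}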
@@ -384,6 +384,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
j(equal, &ok, Label::kNear);
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedDoubleArrayMapRootIndex);
j(equal, &ok, Label::kNear);
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
j(equal, &ok, Label::kNear);
@@ -3584,6 +3584,57 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
}
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss_force_generic, slow_allocate_heapnumber;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &miss_force_generic);
// Get the elements array.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
__ AssertFastElements(rcx);
// Check that the key is within bounds.
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
// Check for the hole
__ SmiToInteger32(kScratchRegister, rax);
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
Immediate(kHoleNanUpper32));
__ j(equal, &miss_force_generic);
// Always allocate a heap number for the result.
__ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
FixedDoubleArray::kHeaderSize));
__ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
// Set the value.
__ movq(rax, rcx);
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ ret(0);
__ bind(&slow_allocate_heapnumber);
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ jmp(slow_ic, RelocInfo::CODE_TARGET);
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
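All of these stubs share one bounds-check idiom: key and length are both smis, and a single unsigned compare (j(above_equal) here, b(hs) on ARM) rejects negative keys for free, since a negative value reinterpreted as unsigned is enormous. In C++ terms, with an illustrative helper:

#include <cstdint>

bool KeyInBounds(intptr_t smi_key, intptr_t smi_length) {
  // Tagged smis compare in the same order as the integers they encode,
  // so there is no need to untag before the comparison.
  return static_cast<uintptr_t>(smi_key) < static_cast<uintptr_t>(smi_length);
}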
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
@@ -3634,6 +3685,91 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss_force_generic, smi_value, is_nan, maybe_nan;
Label have_double_value, not_nan;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &miss_force_generic);
// Get the elements array.
__ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
__ AssertFastElements(rdi);
// Check that the key is within bounds.
if (is_js_array) {
__ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
} else {
__ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
}
__ j(above_equal, &miss_force_generic);
// Handle smi values specially
__ JumpIfSmi(rax, &smi_value, Label::kNear);
__ CheckMap(rax,
masm->isolate()->factory()->heap_number_map(),
&miss_force_generic,
DONT_DO_SMI_CHECK);
// Double value, canonicalize NaN.
uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
__ cmpl(FieldOperand(rax, offset),
Immediate(kNaNOrInfinityLowerBoundUpper32));
__ j(greater_equal, &maybe_nan, Label::kNear);
__ bind(&not_nan);
__ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ bind(&have_double_value);
__ SmiToInteger32(rcx, rcx);
__ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
xmm0);
__ ret(0);
__ bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
__ j(greater, &is_nan, Label::kNear);
__ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
__ j(zero, &not_nan);
__ bind(&is_nan);
// Convert all NaNs to the same canonical NaN value when they are stored in
// the double array.
ExternalReference canonical_nan_reference =
ExternalReference::address_of_canonical_non_hole_nan();
__ Set(kScratchRegister, kCanonicalNonHoleNanInt64);
__ movq(xmm0, kScratchRegister);
__ jmp(&have_double_value, Label::kNear);
__ bind(&smi_value);
// Value is a smi. convert to a double and store.
__ SmiToInteger32(rax, rax);
__ push(rax);
__ fild_s(Operand(rsp, 0));
__ pop(rax);
__ SmiToInteger32(rcx, rcx);
__ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
__ bind(&miss_force_generic);
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
}
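One x64-specific wrinkle worth noting: the canonical NaN fits in a single 64-bit immediate, so the stub materializes it with Set(kScratchRegister, ...) plus a movq into xmm0, rather than loading it from memory through canonical_nan_reference the way the ia32 stub does. The equivalent in C++, with an illustrative function name:

#include <cstdint>
#include <cstring>

double MaterializeCanonicalNan() {
  const uint64_t kCanonicalNonHoleNanInt64 =
      (static_cast<uint64_t>(0x7FF10000) << 32) | 0xFFFFFFFFu;
  double result;
  std::memcpy(&result, &kCanonicalNonHoleNanInt64, sizeof(result));  // movq
  return result;
}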
#undef __
} } // namespace v8::internal