Commit 85d7140d authored by ishell@chromium.org's avatar ishell@chromium.org

Hydrogenize (and share) part of StoreTransition handler as a StoreTransitionStub and StoreField handler simplification.

Hydrogenize (and share) part of StoreTransition handler as a StoreTransitionStub and StoreField handler simplification.

R=yangguo@chromium.org

Review URL: https://codereview.chromium.org/609463003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24333 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 95525a22
......@@ -29,7 +29,7 @@ const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
const Register ExtendStorageDescriptor::MapRegister() { return r3; }
// ARM: StoreTransition handlers receive the transition map in r3.
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
......
......@@ -29,7 +29,7 @@ const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
const Register ExtendStorageDescriptor::MapRegister() { return x3; }
// ARM64: StoreTransition handlers receive the transition map in x3.
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
......
......@@ -776,44 +776,56 @@ Handle<Code> StoreFieldStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<ExtendStorageStub>::BuildCodeStub() {
HValue* object = GetParameter(ExtendStorageDescriptor::kReceiverIndex);
HValue* properties =
Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
HObjectAccess::ForPropertiesPointer());
HValue* length = AddLoadFixedArrayLength(properties);
HValue* delta = Add<HConstant>(static_cast<int32_t>(JSObject::kFieldsAdded));
HValue* new_capacity = AddUncasted<HAdd>(length, delta);
// Grow properties array.
ElementsKind kind = FAST_ELEMENTS;
Add<HBoundsCheck>(new_capacity,
Add<HConstant>((Page::kMaxRegularHeapObjectSize -
FixedArray::kHeaderSize) >>
ElementsKindToShiftSize(kind)));
// Reuse this code for properties backing store allocation.
HValue* new_properties = BuildAllocateAndInitializeArray(kind, new_capacity);
BuildCopyProperties(properties, new_properties, length, new_capacity);
// Store the new value into the "extended" object.
Add<HStoreNamedField>(object, HObjectAccess::ForPropertiesPointer(),
new_properties);
BuildStoreNamedField(
object, GetParameter(ExtendStorageDescriptor::kValueIndex),
casted_stub()->index(), casted_stub()->representation(), true);
// And finally update the map after the new field is added.
Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
GetParameter(ExtendStorageDescriptor::kMapIndex));
return GetParameter(ExtendStorageDescriptor::kValueIndex);
// Builds the Hydrogen graph for StoreTransitionStub.  The switch cases
// cascade on purpose (see the "Fall through" comments): the most expensive
// store mode first grows the out-of-object properties backing store, then
// shares the value-store and map-update steps with the cheaper modes.
HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
HValue* object = GetParameter(StoreTransitionDescriptor::kReceiverIndex);
switch (casted_stub()->store_mode()) {
case StoreTransitionStub::ExtendStorageAndStoreMapAndValue: {
// Load the current properties backing store and compute the grown
// capacity: old length + JSObject::kFieldsAdded.
HValue* properties =
Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
HObjectAccess::ForPropertiesPointer());
HValue* length = AddLoadFixedArrayLength(properties);
HValue* delta =
Add<HConstant>(static_cast<int32_t>(JSObject::kFieldsAdded));
HValue* new_capacity = AddUncasted<HAdd>(length, delta);
// Grow properties array.
ElementsKind kind = FAST_ELEMENTS;
// Guard: the grown backing store must still fit in a regular
// (non-large-object-space) heap object.
Add<HBoundsCheck>(new_capacity,
Add<HConstant>((Page::kMaxRegularHeapObjectSize -
FixedArray::kHeaderSize) >>
ElementsKindToShiftSize(kind)));
// Reuse this code for properties backing store allocation.
HValue* new_properties =
BuildAllocateAndInitializeArray(kind, new_capacity);
BuildCopyProperties(properties, new_properties, length, new_capacity);
// Store the new value into the "extended" object.
Add<HStoreNamedField>(object, HObjectAccess::ForPropertiesPointer(),
new_properties);
}
// Fall through.
case StoreTransitionStub::StoreMapAndValue:
// Store the value into the field identified by the stub's FieldIndex,
// using the stub's recorded representation.
BuildStoreNamedField(
object, GetParameter(StoreTransitionDescriptor::kValueIndex),
casted_stub()->index(), casted_stub()->representation(), true);
// Fall through.
case StoreTransitionStub::StoreMapOnly:
// And finally update the map.
Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
GetParameter(StoreTransitionDescriptor::kMapIndex));
break;
}
// Store handlers return the stored value.
return GetParameter(StoreTransitionDescriptor::kValueIndex);
}
Handle<Code> ExtendStorageStub::GenerateCode() { return DoGenerateCode(this); }
// Generates the stub's code object via the shared Hydrogen code-stub
// pipeline (see BuildCodeStub above for the actual graph construction).
Handle<Code> StoreTransitionStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
......
......@@ -616,8 +616,8 @@ void ElementsTransitionAndStoreStub::InitializeDescriptor(
}
CallInterfaceDescriptor ExtendStorageStub::GetCallInterfaceDescriptor() {
return ExtendStorageDescriptor(isolate());
// The stub's calling convention: receiver/name/value/map registers are
// defined per-platform by StoreTransitionDescriptor.
CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor() {
return StoreTransitionDescriptor(isolate());
}
......
......@@ -80,12 +80,12 @@ namespace internal {
V(VectorKeyedLoad) \
V(VectorLoad) \
/* IC Handler stubs */ \
V(ExtendStorage) \
V(LoadConstant) \
V(LoadField) \
V(KeyedLoadSloppyArguments) \
V(StoreField) \
V(StoreGlobal) \
V(StoreTransition) \
V(StringLength)
// List of code stubs only used on ARM 32 bits platforms.
......@@ -999,30 +999,45 @@ class StoreFieldStub : public HandlerStub {
};
// Extend storage is called in a store inline cache when
// it is necessary to extend the properties array of a
// JSObject.
class ExtendStorageStub : public HandlerStub {
class StoreTransitionStub : public HandlerStub {
public:
ExtendStorageStub(Isolate* isolate, FieldIndex index,
Representation representation)
enum StoreMode {
StoreMapOnly,
StoreMapAndValue,
ExtendStorageAndStoreMapAndValue
};
explicit StoreTransitionStub(Isolate* isolate) : HandlerStub(isolate) {
set_sub_minor_key(StoreModeBits::encode(StoreMapOnly));
}
StoreTransitionStub(Isolate* isolate, FieldIndex index,
Representation representation, StoreMode store_mode)
: HandlerStub(isolate) {
DCHECK(store_mode != StoreMapOnly);
int property_index_key = index.GetFieldAccessStubKey();
uint8_t repr = PropertyDetails::EncodeRepresentation(representation);
set_sub_minor_key(StoreFieldByIndexBits::encode(property_index_key) |
RepresentationBits::encode(repr));
RepresentationBits::encode(repr) |
StoreModeBits::encode(store_mode));
}
FieldIndex index() const {
DCHECK(store_mode() != StoreMapOnly);
int property_index_key = StoreFieldByIndexBits::decode(sub_minor_key());
return FieldIndex::FromFieldAccessStubKey(property_index_key);
}
Representation representation() {
DCHECK(store_mode() != StoreMapOnly);
uint8_t repr = RepresentationBits::decode(sub_minor_key());
return PropertyDetails::DecodeRepresentation(repr);
}
StoreMode store_mode() const {
return StoreModeBits::decode(sub_minor_key());
}
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
protected:
......@@ -1032,8 +1047,9 @@ class ExtendStorageStub : public HandlerStub {
private:
class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
class RepresentationBits : public BitField<uint8_t, 13, 4> {};
class StoreModeBits : public BitField<StoreMode, 17, 2> {};
DEFINE_HANDLER_CODE_STUB(ExtendStorage, HandlerStub);
DEFINE_HANDLER_CODE_STUB(StoreTransition, HandlerStub);
};
......
......@@ -29,7 +29,7 @@ const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
const Register ExtendStorageDescriptor::MapRegister() { return ebx; }
// IA32: StoreTransition handlers receive the transition map in ebx.
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() {
......
......@@ -326,182 +326,41 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
// Generate StoreTransition code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void NamedStoreHandlerCompiler::GenerateStoreTransition(
Handle<Map> transition, Handle<Name> name, Register receiver_reg,
Register storage_reg, Register value_reg, Register scratch1,
Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
// r0 : value
Label exit;
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
__ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
} else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
__ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
__ CompareMap(scratch1, it.Current(), &do_store);
it.Advance();
if (it.Done()) {
__ b(ne, miss_label);
break;
}
__ b(eq, &do_store);
}
__ bind(&do_store);
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
TAG_RESULT, MUTABLE);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch1, value_reg);
__ vmov(s0, scratch1);
__ vcvt_f64_s32(d0, s0);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
DONT_DO_SMI_CHECK);
__ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
}
// Stub never generated for objects that require access checks.
DCHECK(!transition->is_access_check_needed());
// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
__ mov(ExtendStorageDescriptor::NameRegister(), Operand(name));
__ mov(ExtendStorageDescriptor::MapRegister(), Operand(transition));
ExtendStorageStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation);
GenerateTailCall(masm(), stub.GetCode());
return;
}
// Update the map of the object.
__ mov(scratch1, Operand(transition));
__ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(r0));
__ Ret();
return;
}
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check =
representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ str(value_reg, FieldMemOperand(scratch1, offset));
}
// ARM: materializes the property name into the name register and the
// transition map into StoreTransitionDescriptor's map register, setting up
// the calling convention for a tail call to a StoreTransitionStub.
void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
Handle<Name> name, Handle<Map> transition) {
__ mov(this->name(), Operand(name));
__ mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
}
// Return the value (register r0).
DCHECK(value_reg.is(r0));
__ bind(&exit);
__ Ret();
// ARM: compares value_reg against the expected constant (loaded into
// scratch1()) and branches to miss_label when they differ.  Used for
// CONSTANT-type transitions, where only one specific value may be stored.
void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
Register value_reg,
Label* miss_label) {
__ Move(scratch1(), handle(constant, isolate()));
__ cmp(value_reg, scratch1());
__ b(ne, miss_label);
}
void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
__ ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
__ CompareMap(scratch1(), it.Current(), &do_store);
it.Advance();
if (it.Done()) {
__ b(ne, miss_label);
break;
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
__ ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
__ CompareMap(scratch1(), it.Current(), &do_store);
it.Advance();
if (it.Done()) {
__ b(ne, miss_label);
break;
}
__ b(eq, &do_store);
}
__ b(eq, &do_store);
__ bind(&do_store);
}
__ bind(&do_store);
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
lookup->representation());
GenerateTailCall(masm(), stub.GetCode());
}
......
......@@ -370,176 +370,41 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
// Generate StoreTransition code, value is passed in x0 register.
// When leaving generated code after success, the receiver_reg and storage_reg
// may be clobbered. Upon branch to miss_label, the receiver and name registers
// have their original values.
void NamedStoreHandlerCompiler::GenerateStoreTransition(
Handle<Map> transition, Handle<Name> name, Register receiver_reg,
Register storage_reg, Register value_reg, Register scratch1,
Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
Label exit;
DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg, scratch1, scratch2,
scratch3));
// We don't need scratch3.
scratch3 = NoReg;
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
__ LoadObject(scratch1, constant);
__ Cmp(value_reg, scratch1);
__ B(ne, miss_label);
} else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
__ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
__ CompareMap(scratch1, it.Current());
it.Advance();
if (it.Done()) {
__ B(ne, miss_label);
break;
}
__ B(eq, &do_store);
}
__ Bind(&do_store);
}
} else if (representation.IsDouble()) {
UseScratchRegisterScope temps(masm());
DoubleRegister temp_double = temps.AcquireD();
__ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
Label do_store;
__ JumpIfSmi(value_reg, &do_store);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
DONT_DO_SMI_CHECK);
__ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ Bind(&do_store);
__ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double,
NoReg, MUTABLE);
}
// Stub never generated for objects that require access checks.
DCHECK(!transition->is_access_check_needed());
// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
__ Mov(ExtendStorageDescriptor::NameRegister(), Operand(name));
__ Mov(ExtendStorageDescriptor::MapRegister(), Operand(transition));
ExtendStorageStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation);
GenerateTailCall(masm(), stub.GetCode());
return;
}
// Update the map of the object.
__ Mov(scratch1, Operand(transition));
__ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(x0));
__ Ret();
return;
}
// ARM64: materializes the property name into the name register and the
// transition map into StoreTransitionDescriptor's map register, setting up
// the calling convention for a tail call to a StoreTransitionStub.
void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
Handle<Name> name, Handle<Map> transition) {
__ Mov(this->name(), Operand(name));
__ Mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check =
representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
__ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ Mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ Ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ Str(prop_reg, FieldMemOperand(scratch1, offset));
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ Mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
}
__ Bind(&exit);
// Return the value (register x0).
DCHECK(value_reg.is(x0));
__ Ret();
// ARM64: compares value_reg against the expected constant (loaded into
// scratch1()) and branches to miss_label when they differ.  Used for
// CONSTANT-type transitions, where only one specific value may be stored.
void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
Register value_reg,
Label* miss_label) {
__ LoadObject(scratch1(), handle(constant, isolate()));
__ Cmp(value_reg, scratch1());
__ B(ne, miss_label);
}
void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
__ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
__ CompareMap(scratch1(), it.Current());
it.Advance();
if (it.Done()) {
__ B(ne, miss_label);
break;
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
__ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
__ CompareMap(scratch1(), it.Current());
it.Advance();
if (it.Done()) {
__ B(ne, miss_label);
break;
}
__ B(eq, &do_store);
}
__ B(eq, &do_store);
__ Bind(&do_store);
}
__ Bind(&do_store);
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
lookup->representation());
GenerateTailCall(masm(), stub.GetCode());
}
......
......@@ -309,7 +309,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
// TODO(verwaest): Cleanup. holder() is actually the receiver.
Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
Handle<Map> transition, Handle<Name> name) {
Label miss, slow;
Label miss;
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1(), &miss);
......@@ -331,21 +331,55 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
DCHECK(holder()->HasFastProperties());
}
GenerateStoreTransition(transition, name, receiver(), this->name(), value(),
scratch1(), scratch2(), scratch3(), &miss, &slow);
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
// Stub is never generated for objects that require access checks.
DCHECK(!transition->is_access_check_needed());
// Call to respective StoreTransitionStub.
if (details.type() == CONSTANT) {
GenerateConstantCheck(descriptors->GetValue(descriptor), value(), &miss);
GenerateRestoreNameAndMap(name, transition);
StoreTransitionStub stub(isolate());
GenerateTailCall(masm(), stub.GetCode());
} else {
if (representation.IsHeapObject()) {
GenerateFieldTypeChecks(descriptors->GetFieldType(descriptor), value(),
&miss);
}
StoreTransitionStub::StoreMode store_mode =
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0
? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
: StoreTransitionStub::StoreMapAndValue;
GenerateRestoreNameAndMap(name, transition);
StoreTransitionStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation, store_mode);
GenerateTailCall(masm(), stub.GetCode());
}
GenerateRestoreName(&miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
GenerateRestoreName(&slow, name);
TailCallBuiltin(masm(), SlowBuiltin(kind()));
return GetCode(kind(), Code::FAST, name);
}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
Label miss;
GenerateStoreField(it, value(), &miss);
DCHECK(it->representation().IsHeapObject());
GenerateFieldTypeChecks(*it->GetFieldType(), value(), &miss);
StoreFieldStub stub(isolate(), it->GetFieldIndex(), it->representation());
GenerateTailCall(masm(), stub.GetCode());
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
return GetCode(kind(), Code::FAST, it->name());
......
......@@ -230,14 +230,13 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
void GenerateRestoreName(Label* label, Handle<Name> name);
private:
void GenerateStoreTransition(Handle<Map> transition, Handle<Name> name,
Register receiver_reg, Register name_reg,
Register value_reg, Register scratch1,
Register scratch2, Register scratch3,
Label* miss_label, Label* slow);
void GenerateStoreField(LookupIterator* lookup, Register value_reg,
Label* miss_label);
void GenerateRestoreNameAndMap(Handle<Name> name, Handle<Map> transition);
void GenerateConstantCheck(Object* constant, Register value_reg,
Label* miss_label);
void GenerateFieldTypeChecks(HeapType* field_type, Register value_reg,
Label* miss_label);
static Builtins::Name SlowBuiltin(Code::Kind kind) {
switch (kind) {
......
......@@ -329,167 +329,39 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
void NamedStoreHandlerCompiler::GenerateStoreTransition(
Handle<Map> transition, Handle<Name> name, Register receiver_reg,
Register storage_reg, Register value_reg, Register scratch1,
Register scratch2, Register unused, Label* miss_label, Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
__ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
} else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
}
__ j(equal, &do_store, Label::kNear);
}
__ bind(&do_store);
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(value_reg);
__ Cvtsi2sd(xmm0, value_reg);
__ SmiTag(value_reg);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
DONT_DO_SMI_CHECK);
__ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
}
// Stub never generated for objects that require access checks.
DCHECK(!transition->is_access_check_needed());
// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
__ mov(ExtendStorageDescriptor::NameRegister(), Immediate(name));
__ mov(ExtendStorageDescriptor::MapRegister(), Immediate(transition));
ExtendStorageStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation);
GenerateTailCall(masm(), stub.GetCode());
return;
}
// Update the map of the object.
__ mov(scratch1, Immediate(transition));
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(eax));
__ ret(0);
return;
}
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
SmiCheck smi_check =
representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
if (representation.IsDouble()) {
__ mov(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ mov(FieldOperand(receiver_reg, offset), value_reg);
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
if (representation.IsDouble()) {
__ mov(FieldOperand(scratch1, offset), storage_reg);
} else {
__ mov(FieldOperand(scratch1, offset), value_reg);
}
// IA32: materializes the property name into the name register and the
// transition map into StoreTransitionDescriptor's map register, setting up
// the calling convention for a tail call to a StoreTransitionStub.
void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
Handle<Name> name, Handle<Map> transition) {
__ mov(this->name(), Immediate(name));
__ mov(StoreTransitionDescriptor::MapRegister(), Immediate(transition));
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
}
// Return the value (register eax).
DCHECK(value_reg.is(eax));
__ ret(0);
// IA32: compares value_reg directly against the expected constant and jumps
// to miss_label when they differ (no scratch register needed, unlike the
// ARM/ARM64 variants).  Used for CONSTANT-type transitions.
void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
Register value_reg,
Label* miss_label) {
__ CmpObject(value_reg, handle(constant, isolate()));
__ j(not_equal, miss_label);
}
void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
}
__ j(equal, &do_store, Label::kNear);
}
__ j(equal, &do_store, Label::kNear);
__ bind(&do_store);
}
__ bind(&do_store);
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
lookup->representation());
GenerateTailCall(masm(), stub.GetCode());
}
......
......@@ -321,186 +321,42 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
// Generate StoreTransition code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void NamedStoreHandlerCompiler::GenerateStoreTransition(
Handle<Map> transition, Handle<Name> name, Register receiver_reg,
Register storage_reg, Register value_reg, Register scratch1,
Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
// a0 : value.
Label exit;
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
__ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
} else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
HeapType::Iterator<Map> it = field_type->Classes();
Handle<Map> current;
if (!it.Done()) {
__ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
// Do the CompareMap() directly within the Branch() functions.
current = it.Current();
it.Advance();
if (it.Done()) {
__ Branch(miss_label, ne, scratch1, Operand(current));
break;
}
__ Branch(&do_store, eq, scratch1, Operand(current));
}
__ bind(&do_store);
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
TAG_RESULT, MUTABLE);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch1, value_reg);
__ mtc1(scratch1, f6);
__ cvt_d_w(f4, f6);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
}
// Stub never generated for objects that require access checks.
DCHECK(!transition->is_access_check_needed());
// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
__ li(ExtendStorageDescriptor::NameRegister(), Operand(name));
__ li(ExtendStorageDescriptor::MapRegister(), Operand(transition));
ExtendStorageStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation);
GenerateTailCall(masm(), stub.GetCode());
return;
}
// Update the map of the object.
__ li(scratch1, Operand(transition));
__ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(a0));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
return;
}
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check =
representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
}
void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
Handle<Name> name, Handle<Map> transition) {
__ li(this->name(), Operand(name));
__ li(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ lw(scratch1, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ sw(value_reg, FieldMemOperand(scratch1, offset));
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
}
// Return the value (register v0).
DCHECK(value_reg.is(a0));
__ bind(&exit);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
Register value_reg,
Label* miss_label) {
__ li(scratch1(), handle(constant, isolate()));
__ Branch(miss_label, ne, value_reg, Operand(scratch1()));
}
void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
__ lw(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
Handle<Map> current;
while (true) {
// Do the CompareMap() directly within the Branch() functions.
current = it.Current();
it.Advance();
if (it.Done()) {
__ Branch(miss_label, ne, scratch1(), Operand(current));
break;
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
__ lw(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
Handle<Map> current;
while (true) {
// Do the CompareMap() directly within the Branch() functions.
current = it.Current();
it.Advance();
if (it.Done()) {
__ Branch(miss_label, ne, scratch1(), Operand(current));
break;
}
__ Branch(&do_store, eq, scratch1(), Operand(current));
}
__ Branch(&do_store, eq, scratch1(), Operand(current));
__ bind(&do_store);
}
__ bind(&do_store);
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
lookup->representation());
GenerateTailCall(masm(), stub.GetCode());
}
......
......@@ -321,185 +321,42 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
// Generate StoreTransition code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void NamedStoreHandlerCompiler::GenerateStoreTransition(
Handle<Map> transition, Handle<Name> name, Register receiver_reg,
Register storage_reg, Register value_reg, Register scratch1,
Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
// a0 : value.
Label exit;
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
__ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
} else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
HeapType::Iterator<Map> it = field_type->Classes();
Handle<Map> current;
if (!it.Done()) {
__ ld(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
// Do the CompareMap() directly within the Branch() functions.
current = it.Current();
it.Advance();
if (it.Done()) {
__ Branch(miss_label, ne, scratch1, Operand(current));
break;
}
__ Branch(&do_store, eq, scratch1, Operand(current));
}
__ bind(&do_store);
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
TAG_RESULT, MUTABLE);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch1, value_reg);
__ mtc1(scratch1, f6);
__ cvt_d_w(f4, f6);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
}
// Stub never generated for objects that require access checks.
DCHECK(!transition->is_access_check_needed());
// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
__ li(ExtendStorageDescriptor::NameRegister(), Operand(name));
__ li(ExtendStorageDescriptor::MapRegister(), Operand(transition));
ExtendStorageStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation);
GenerateTailCall(masm(), stub.GetCode());
return;
}
// Update the map of the object.
__ li(scratch1, Operand(transition));
__ sd(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(a0));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
return;
}
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check =
representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
if (representation.IsDouble()) {
__ sd(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ sd(value_reg, FieldMemOperand(receiver_reg, offset));
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ ld(scratch1, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (representation.IsDouble()) {
__ sd(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ sd(value_reg, FieldMemOperand(scratch1, offset));
}
void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
Handle<Name> name, Handle<Map> transition) {
__ li(this->name(), Operand(name));
__ li(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
}
// Return the value (register v0).
DCHECK(value_reg.is(a0));
__ bind(&exit);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
Register value_reg,
Label* miss_label) {
__ li(scratch1(), handle(constant, isolate()));
__ Branch(miss_label, ne, value_reg, Operand(scratch1()));
}
void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
__ ld(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
Handle<Map> current;
while (true) {
// Do the CompareMap() directly within the Branch() functions.
current = it.Current();
it.Advance();
if (it.Done()) {
__ Branch(miss_label, ne, scratch1(), Operand(current));
break;
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
__ ld(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
Handle<Map> current;
while (true) {
// Do the CompareMap() directly within the Branch() functions.
current = it.Current();
it.Advance();
if (it.Done()) {
__ Branch(miss_label, ne, scratch1(), Operand(current));
break;
}
__ Branch(&do_store, eq, scratch1(), Operand(current));
}
__ Branch(&do_store, eq, scratch1(), Operand(current));
__ bind(&do_store);
}
__ bind(&do_store);
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
lookup->representation());
GenerateTailCall(masm(), stub.GetCode());
}
......
......@@ -324,166 +324,39 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
void NamedStoreHandlerCompiler::GenerateStoreTransition(
Handle<Map> transition, Handle<Name> name, Register receiver_reg,
Register storage_reg, Register value_reg, Register scratch1,
Register scratch2, Register unused, Label* miss_label, Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
__ Cmp(value_reg, constant);
__ j(not_equal, miss_label);
} else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
}
__ j(equal, &do_store, Label::kNear);
}
__ bind(&do_store);
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow, MUTABLE);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiToInteger32(scratch1, value_reg);
__ Cvtlsi2sd(xmm0, scratch1);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
DONT_DO_SMI_CHECK);
__ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
}
// Stub never generated for objects that require access checks.
DCHECK(!transition->is_access_check_needed());
// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
__ Move(ExtendStorageDescriptor::NameRegister(), name);
__ Move(ExtendStorageDescriptor::MapRegister(), transition);
ExtendStorageStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation);
GenerateTailCall(masm(), stub.GetCode());
return;
}
// Update the map of the object.
__ Move(scratch1, transition);
__ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(rax));
__ ret(0);
return;
}
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check =
representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
if (representation.IsDouble()) {
__ movp(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ movp(FieldOperand(receiver_reg, offset), value_reg);
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
if (representation.IsDouble()) {
__ movp(FieldOperand(scratch1, offset), storage_reg);
} else {
__ movp(FieldOperand(scratch1, offset), value_reg);
}
void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
Handle<Name> name, Handle<Map> transition) {
__ Move(this->name(), name);
__ Move(StoreTransitionDescriptor::MapRegister(), transition);
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
__ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
}
// Return the value (register rax).
DCHECK(value_reg.is(rax));
__ ret(0);
void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
Register value_reg,
Label* miss_label) {
__ Cmp(value_reg, handle(constant, isolate()));
__ j(not_equal, miss_label);
}
void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
}
__ j(equal, &do_store, Label::kNear);
}
__ j(equal, &do_store, Label::kNear);
__ bind(&do_store);
}
__ bind(&do_store);
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
lookup->representation());
GenerateTailCall(masm(), stub.GetCode());
}
......
......@@ -329,169 +329,39 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
void NamedStoreHandlerCompiler::GenerateStoreTransition(
Handle<Map> transition, Handle<Name> name, Register receiver_reg,
Register storage_reg, Register value_reg, Register scratch1,
Register scratch2, Register unused, Label* miss_label, Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
__ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
} else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
}
__ j(equal, &do_store, Label::kNear);
}
__ bind(&do_store);
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(value_reg);
__ push(value_reg);
__ fild_s(Operand(esp, 0));
__ pop(value_reg);
__ SmiTag(value_reg);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
DONT_DO_SMI_CHECK);
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
}
// Stub never generated for objects that require access checks.
DCHECK(!transition->is_access_check_needed());
// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
__ mov(ExtendStorageDescriptor::NameRegister(), Immediate(name));
__ mov(ExtendStorageDescriptor::MapRegister(), Immediate(transition));
ExtendStorageStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation);
GenerateTailCall(masm(), stub.GetCode());
return;
}
// Update the map of the object.
__ mov(scratch1, Immediate(transition));
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(eax));
__ ret(0);
return;
}
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
SmiCheck smi_check =
representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
if (representation.IsDouble()) {
__ mov(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ mov(FieldOperand(receiver_reg, offset), value_reg);
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
if (representation.IsDouble()) {
__ mov(FieldOperand(scratch1, offset), storage_reg);
} else {
__ mov(FieldOperand(scratch1, offset), value_reg);
}
void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
Handle<Name> name, Handle<Map> transition) {
__ mov(this->name(), Immediate(name));
__ mov(StoreTransitionDescriptor::MapRegister(), Immediate(transition));
}
if (!representation.IsSmi()) {
// Update the write barrier for the array address.
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
}
// Return the value (register eax).
DCHECK(value_reg.is(eax));
__ ret(0);
void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
Register value_reg,
Label* miss_label) {
__ CmpObject(value_reg, handle(constant, isolate()));
__ j(not_equal, miss_label);
}
void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
while (true) {
__ CompareMap(value_reg, it.Current());
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
break;
}
__ j(equal, &do_store, Label::kNear);
}
__ j(equal, &do_store, Label::kNear);
__ bind(&do_store);
}
__ bind(&do_store);
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
lookup->representation());
GenerateTailCall(masm(), stub.GetCode());
}
......
......@@ -74,7 +74,7 @@ void StoreDescriptor::Initialize(CallInterfaceDescriptorData* data) {
}
void ExtendStorageDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void StoreTransitionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
ValueRegister(), MapRegister()};
data->Initialize(arraysize(registers), registers, NULL);
......
......@@ -16,7 +16,7 @@ class PlatformInterfaceDescriptor;
#define INTERFACE_DESCRIPTOR_LIST(V) \
V(Load) \
V(Store) \
V(ExtendStorage) \
V(StoreTransition) \
V(ElementTransitionAndStore) \
V(Instanceof) \
V(VectorLoadICTrampoline) \
......@@ -214,9 +214,9 @@ class StoreDescriptor : public CallInterfaceDescriptor {
};
class ExtendStorageDescriptor : public StoreDescriptor {
class StoreTransitionDescriptor : public StoreDescriptor {
public:
DECLARE_DESCRIPTOR(ExtendStorageDescriptor, StoreDescriptor)
DECLARE_DESCRIPTOR(StoreTransitionDescriptor, StoreDescriptor)
// Extends StoreDescriptor with Map parameter.
enum ParameterIndices {
......
......@@ -29,7 +29,7 @@ const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
const Register ExtendStorageDescriptor::MapRegister() { return a3; }
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return a3; }
......
......@@ -29,7 +29,7 @@ const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
const Register ExtendStorageDescriptor::MapRegister() { return a3; }
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return a3; }
......
......@@ -29,7 +29,7 @@ const Register StoreDescriptor::NameRegister() { return rcx; }
const Register StoreDescriptor::ValueRegister() { return rax; }
const Register ExtendStorageDescriptor::MapRegister() { return rbx; }
const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() {
......
......@@ -29,7 +29,7 @@ const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
const Register ExtendStorageDescriptor::MapRegister() { return ebx; }
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment