Commit 1f5f0889 authored by ishell, committed by Commit bot

Reland of "Enable inobject double fields unboxing for 64-bit archs."

Review URL: https://codereview.chromium.org/751643005

Cr-Commit-Position: refs/heads/master@{#25483}
parent 25b8efff
......@@ -84,7 +84,7 @@ namespace internal {
// Determine whether double field unboxing feature is enabled.
#if (V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64)
#define V8_DOUBLE_FIELDS_UNBOXING 0
#define V8_DOUBLE_FIELDS_UNBOXING 1
#else
#define V8_DOUBLE_FIELDS_UNBOXING 0
#endif
......
......@@ -2764,7 +2764,8 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
#if V8_DOUBLE_FIELDS_UNBOXING
if (!may_contain_raw_values &&
(has_only_tagged_fields || helper.IsTagged(src_slot - src_addr)))
(has_only_tagged_fields ||
helper.IsTagged(static_cast<int>(src_slot - src_addr))))
#else
if (!may_contain_raw_values)
#endif
......
......@@ -111,9 +111,9 @@ class StaticVisitorBase : public AllStatic {
// Determine which specialized visitor should be used for given map.
static VisitorId GetVisitorId(Map* map) {
return GetVisitorId(map->instance_type(), map->instance_size(),
FLAG_unbox_double_fields &&
!map->layout_descriptor()->IsFastPointerLayout());
return GetVisitorId(
map->instance_type(), map->instance_size(),
FLAG_unbox_double_fields && !map->HasFastPointerLayout());
}
// For visitors that allow specialization by size calculate VisitorId based
......@@ -198,15 +198,13 @@ class BodyVisitorBase : public AllStatic {
public:
INLINE(static void IteratePointers(Heap* heap, HeapObject* object,
int start_offset, int end_offset)) {
DCHECK(!FLAG_unbox_double_fields ||
object->map()->layout_descriptor()->IsFastPointerLayout());
DCHECK(!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout());
IterateRawPointers(heap, object, start_offset, end_offset);
}
INLINE(static void IterateBody(Heap* heap, HeapObject* object,
int start_offset, int end_offset)) {
if (!FLAG_unbox_double_fields ||
object->map()->layout_descriptor()->IsFastPointerLayout()) {
if (!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout()) {
IterateRawPointers(heap, object, start_offset, end_offset);
} else {
IterateBodyUsingLayoutDescriptor(heap, object, start_offset, end_offset);
......
......@@ -519,7 +519,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
if (!has_only_tagged_fields) {
for (Address slot = start_address; slot < end_address;
slot += kPointerSize) {
if (helper.IsTagged(slot - obj_address)) {
if (helper.IsTagged(static_cast<int>(slot - obj_address))) {
// TODO(ishell): call this once for contiguous region
// of tagged fields.
FindPointersToNewSpaceInRegion(slot, slot + kPointerSize,
......
......@@ -120,7 +120,12 @@ bool LayoutDescriptor::IsTagged(int field_index) {
bool LayoutDescriptor::IsFastPointerLayout() {
return IsSmi() && (Smi::cast(this)->value() == 0);
return this == FastPointerLayout();
}
// Static variant: reports whether |layout_descriptor| is the canonical
// fast-pointer layout (all fields tagged). Compares against the singleton
// returned by FastPointerLayout() by identity, so it is safe to call on a
// raw Object* without first casting to LayoutDescriptor* — useful from GC
// paths that must not dereference the value.
bool LayoutDescriptor::IsFastPointerLayout(Object* layout_descriptor) {
  return layout_descriptor == FastPointerLayout();
}
......
......@@ -28,6 +28,7 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
// Returns true if this is a layout of the object having only tagged fields.
V8_INLINE bool IsFastPointerLayout();
V8_INLINE static bool IsFastPointerLayout(Object* layout_descriptor);
// Returns true if the layout descriptor is in non-Smi form.
V8_INLINE bool IsSlowLayout();
......
......@@ -5241,6 +5241,12 @@ LayoutDescriptor* Map::layout_descriptor_gc_safe() {
}
// Returns true if all in-object fields of instances of this map are tagged
// pointers (i.e. no unboxed doubles), by checking the map's layout
// descriptor against the canonical fast-pointer layout.
// Reads the field raw and uses the static identity check instead of
// layout_descriptor()->IsFastPointerLayout() — presumably so this is safe
// to call during GC without a typed load; confirm against the GC callers.
bool Map::HasFastPointerLayout() const {
Object* layout_desc = READ_FIELD(this, kLayoutDecriptorOffset);
return LayoutDescriptor::IsFastPointerLayout(layout_desc);
}
void Map::UpdateDescriptors(DescriptorArray* descriptors,
LayoutDescriptor* layout_desc) {
set_instance_descriptors(descriptors);
......@@ -7378,8 +7384,7 @@ template<int start_offset, int end_offset, int size>
void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
HeapObject* obj,
ObjectVisitor* v) {
if (!FLAG_unbox_double_fields ||
obj->map()->layout_descriptor()->IsFastPointerLayout()) {
if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
v->VisitPointers(HeapObject::RawField(obj, start_offset),
HeapObject::RawField(obj, end_offset));
} else {
......@@ -7392,8 +7397,7 @@ template<int start_offset>
void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
int object_size,
ObjectVisitor* v) {
if (!FLAG_unbox_double_fields ||
obj->map()->layout_descriptor()->IsFastPointerLayout()) {
if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
v->VisitPointers(HeapObject::RawField(obj, start_offset),
HeapObject::RawField(obj, object_size));
} else {
......
......@@ -5889,6 +5889,7 @@ class Map: public HeapObject {
DECL_ACCESSORS(layout_descriptor, LayoutDescriptor)
// |layout descriptor| accessor which can be used from GC.
inline LayoutDescriptor* layout_descriptor_gc_safe();
inline bool HasFastPointerLayout() const;
// |layout descriptor| accessor that is safe to call even when
// FLAG_unbox_double_fields is disabled (in this case Map does not contain
......
......@@ -37,7 +37,7 @@ enum PropertyKind {
PROP_SMI,
PROP_DOUBLE,
PROP_TAGGED,
PROP_KIND_NUMBER,
PROP_KIND_NUMBER
};
static Representation representations[PROP_KIND_NUMBER] = {
......@@ -653,8 +653,8 @@ TEST(StoreBufferScanOnScavenge) {
double boom_value = bit_cast<double>(fake_object);
FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
obj->FastPropertyAtPut(field_index,
*factory->NewHeapNumber(boom_value, MUTABLE));
Handle<HeapNumber> boom_number = factory->NewHeapNumber(boom_value, MUTABLE);
obj->FastPropertyAtPut(field_index, *boom_number);
// Enforce scan on scavenge for the obj's page.
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment