Commit 17448030 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Prepare for changing kTaggedSize, pt.3

This CL also gives up on keeping double and system-word fields at
naturally aligned addresses, because this alignment is currently not
always maintained (v8:8875) and Torque object definitions do not
support padding fields (v8:8863).

Given that both platforms where pointer compression is going to be
enabled (x64 and arm64) support loading doubles and full words from
4-byte-aligned addresses, this is fine.
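
As an illustration (not part of this CL; the function name and buffer
layout below are made up for the example), loading a double from an
address that is only kTaggedSize (4-byte) aligned can be expressed with
memcpy, which x64 and arm64 compilers lower to an ordinary load:

  #include <cstdint>
  #include <cstring>

  // Illustrative sketch only: read a double that is merely 4-byte aligned.
  // memcpy keeps the unaligned access well defined; on x64/arm64 it
  // compiles down to a single load instruction.
  double ReadDoubleAt(const uint8_t* base, std::size_t byte_offset) {
    double result;
    std::memcpy(&result, base + byte_offset, sizeof(result));
    return result;
  }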

Bug: v8:7703
Change-Id: I99fc6da5a0927f4db9b8fb24c7cc0bfc416523bc
Reviewed-on: https://chromium-review.googlesource.com/c/1496974
Auto-Submit: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60013}
parent 6422aa92
@@ -31,7 +31,7 @@ static const Address kNullAddress = 0;
const int kApiSystemPointerSize = sizeof(void*);
const int kApiTaggedSize = kApiSystemPointerSize;
const int kApiDoubleSize = sizeof(double);
const int kApiIntSize = sizeof(int);
const int kApiInt32Size = sizeof(int32_t);
const int kApiInt64Size = sizeof(int64_t);
// Tag information for HeapObject.
@@ -88,16 +88,16 @@ struct SmiTagging<8> {
}
};
#if defined(V8_COMPRESS_POINTERS)
#ifdef V8_COMPRESS_POINTERS
static_assert(
kApiSystemPointerSize == kApiInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
#endif
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
typedef SmiTagging<kApiIntSize> PlatformSmiTagging;
#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
typedef SmiTagging<kApiInt32Size> PlatformSmiTagging;
#else
typedef SmiTagging<kApiSystemPointerSize> PlatformSmiTagging;
typedef SmiTagging<kApiTaggedSize> PlatformSmiTagging;
#endif
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
@@ -122,15 +122,13 @@ class Internals {
// These values match non-compiler-dependent values defined within
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiIntSize;
static const int kStringResourceOffset = 1 * kApiTaggedSize + 2 * kApiIntSize;
static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiInt32Size;
static const int kStringResourceOffset =
1 * kApiTaggedSize + 2 * kApiInt32Size;
static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
static const int kForeignAddressOffset = kApiTaggedSize;
static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
static const int kJSObjectHeaderSizeForEmbedderFields =
(kJSObjectHeaderSize + kApiSystemPointerSize - 1) &
-kApiSystemPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataSlotSize =
@@ -148,7 +146,7 @@ class Internals {
static const int kIsolateEmbedderDataOffset = 0;
static const int kExternalMemoryOffset =
kNumIsolateDataSlots * kApiTaggedSize;
kNumIsolateDataSlots * kApiSystemPointerSize;
static const int kExternalMemoryLimitOffset =
kExternalMemoryOffset + kApiInt64Size;
static const int kExternalMemoryAtLastMarkCompactOffset =
@@ -163,8 +161,8 @@ class Internals {
static const int kFalseValueRootIndex = 8;
static const int kEmptyStringRootIndex = 9;
static const int kNodeClassIdOffset = 1 * kApiTaggedSize;
static const int kNodeFlagsOffset = 1 * kApiTaggedSize + 3;
static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize;
static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3;
static const int kNodeStateMask = 0x7;
static const int kNodeStateIsWeakValue = 2;
static const int kNodeStateIsPendingValue = 3;
@@ -10323,8 +10323,7 @@ Local<Value> Object::GetInternalField(int index) {
if (instance_type == I::kJSObjectType ||
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType) {
int offset = I::kJSObjectHeaderSizeForEmbedderFields +
(I::kEmbedderDataSlotSize * index);
int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
A value = I::ReadTaggedAnyField(obj, offset);
internal::Isolate* isolate =
internal::IsolateFromNeverReadOnlySpaceObject(obj);
@@ -10347,8 +10346,7 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
if (V8_LIKELY(instance_type == I::kJSObjectType ||
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType)) {
int offset = I::kJSObjectHeaderSizeForEmbedderFields +
(I::kEmbedderDataSlotSize * index);
int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
return I::ReadRawField<void*>(obj, offset);
}
#endif
@@ -1202,6 +1202,12 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
limit_address);
#elif defined(V8_HOST_ARCH_64_BIT)
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
// Allocation on 64 bit machine is naturally double aligned
return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
limit_address);
@@ -4826,8 +4832,9 @@ void CodeStubAssembler::CopyFixedArrayElements(
doubles_to_objects_conversion ||
(barrier_mode == UPDATE_WRITE_BARRIER && IsObjectElementsKind(to_kind));
bool element_offset_matches =
!needs_write_barrier && (Is64() || IsDoubleElementsKind(from_kind) ==
IsDoubleElementsKind(to_kind));
!needs_write_barrier &&
(kTaggedSize == kDoubleSize ||
IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind));
Node* double_hole =
Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
: ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
@@ -291,9 +291,18 @@ static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
from_address += kDoubleSize * from_start;
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): we use CopyTagged() in order to avoid unaligned
// access to double values in the arrays. This will no longer be necessary
// once the allocation alignment issue is fixed.
int words_per_double = (kDoubleSize / kTaggedSize);
CopyTagged(to_address, from_address,
static_cast<size_t>(words_per_double * copy_size));
#else
int words_per_double = (kDoubleSize / kSystemPointerSize);
CopyWords(to_address, from_address,
static_cast<size_t>(words_per_double * copy_size));
#endif
}
static void CopySmiToDoubleElements(FixedArrayBase from_base,
@@ -459,8 +468,6 @@ static void SortIndices(
AtomicSlot start(indices->GetFirstElementAddress());
std::sort(start, start + sort_size,
[isolate](Tagged_t elementA, Tagged_t elementB) {
// TODO(ishell): revisit the code below
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
#ifdef V8_COMPRESS_POINTERS
Object a(DecompressTaggedAny(isolate->isolate_root(), elementA));
Object b(DecompressTaggedAny(isolate->isolate_root(), elementB));
@@ -2447,13 +2447,20 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
external_string_table_.IterateAll(&external_string_table_visitor);
}
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
0); // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
0); // NOLINT
STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment));
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
// is only kTaggedSize aligned but we can keep using unaligned access since
// both x64 and arm64 architectures (where pointer compression is supported)
// allow unaligned access to doubles.
STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kTaggedSize));
#else
STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kDoubleAlignment));
#endif
#ifdef V8_HOST_ARCH_32_BIT
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
0); // NOLINT
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
#endif
@@ -510,8 +510,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
ALLOCATE_MAP(JS_OBJECT_TYPE,
JSObject::kHeaderSizeForEmbedderFields + kEmbedderDataSlotSize,
ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kEmbedderDataSlotSize,
external)
external_map()->set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
@@ -517,6 +517,12 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
? AllocateRawAligned(size_in_bytes, alignment)
: AllocateRawUnaligned(size_in_bytes);
#else
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
return AllocateRawUnaligned(size_in_bytes);
#endif
}
@@ -83,11 +83,8 @@ void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
// There are embedder fields.
IteratePointers(obj, start_offset, header_size, v);
// Iterate only tagged payload of the embedder slots and skip raw payload.
int embedder_fields_offset = RoundUp(header_size, kSystemPointerSize);
DCHECK_EQ(embedder_fields_offset,
JSObject::GetEmbedderFieldsStartOffset(map));
for (int offset =
embedder_fields_offset + EmbedderDataSlot::kTaggedPayloadOffset;
DCHECK_EQ(header_size, JSObject::GetEmbedderFieldsStartOffset(map));
for (int offset = header_size + EmbedderDataSlot::kTaggedPayloadOffset;
offset < inobject_fields_offset; offset += kEmbedderDataSlotSize) {
IteratePointer(obj, offset, v);
}
@@ -823,6 +823,12 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
}
AllocationAlignment HeapObject::RequiredAlignment(Map map) {
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
#ifdef V8_HOST_ARCH_32_BIT
int instance_type = map->instance_type();
if (instance_type == FIXED_FLOAT64_ARRAY_TYPE ||
@@ -37,10 +37,12 @@ class EmbedderDataSlot
V8_INLINE EmbedderDataSlot(JSObject object, int embedder_field_index);
// TODO(ishell): these offsets are currently little-endian specific.
// The less significant part contains the tagged value and the other part
// contains the raw value.
static constexpr int kTaggedPayloadOffset = 0;
#ifdef V8_COMPRESS_POINTERS
static constexpr int kRawPayloadOffset = kTaggedSize;
#endif
static constexpr int kTaggedPayloadOffset = 0;
static constexpr int kRequiredPtrAlignment = kSmiTagSize;
// Opaque type used for storing raw embedder data.
@@ -594,7 +594,15 @@ class FixedTypedArrayBase : public FixedArrayBase {
TORQUE_GENERATED_FIXED_TYPED_ARRAY_BASE_FIELDS)
static const int kHeaderSize = kSize;
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
// is only kTaggedSize aligned but we can keep using unaligned access since
// both x64 and arm64 architectures (where pointer compression is supported)
// allow unaligned access to doubles.
STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
#else
STATIC_ASSERT(IsAligned(kHeaderSize, kDoubleAlignment));
#endif
static const int kDataOffset = kHeaderSize;
@@ -31,7 +31,16 @@ class Foreign : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_FOREIGN_FIELDS)
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): When pointer compression is enabled the
// kForeignAddressOffset is only kTaggedSize aligned but we can keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to full words.
STATIC_ASSERT(IsAligned(kForeignAddressOffset, kTaggedSize));
#else
STATIC_ASSERT(IsAligned(kForeignAddressOffset, kSystemPointerSize));
#endif
STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
class BodyDescriptor;
@@ -269,16 +269,8 @@ int JSObject::GetHeaderSize(const Map map) {
// static
int JSObject::GetEmbedderFieldsStartOffset(const Map map) {
// Embedder fields are located after the header size rounded up to the
// kSystemPointerSize, whereas in-object properties are at the end of the
// object.
int header_size = GetHeaderSize(map);
if (kTaggedSize == kSystemPointerSize) {
DCHECK(IsAligned(header_size, kSystemPointerSize));
return header_size;
} else {
return RoundUp(header_size, kSystemPointerSize);
}
// Embedder fields are located after the object header.
return GetHeaderSize(map);
}
int JSObject::GetEmbedderFieldsStartOffset() {
@@ -289,12 +281,13 @@ int JSObject::GetEmbedderFieldsStartOffset() {
int JSObject::GetEmbedderFieldCount(const Map map) {
int instance_size = map->instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
// Embedder fields are located after the header size rounded up to the
// kSystemPointerSize, whereas in-object properties are at the end of the
// object. We don't have to round up the header size here because division by
// kEmbedderDataSlotSizeInTaggedSlots will swallow potential padding in case
// of (kTaggedSize != kSystemPointerSize) anyway.
return (((instance_size - GetHeaderSize(map)) >> kTaggedSizeLog2) -
// Embedder fields are located after the object header, whereas in-object
// properties are located at the end of the object. We don't have to round up
// the header size here because division by kEmbedderDataSlotSizeInTaggedSlots
// will swallow potential padding in case of (kTaggedSize !=
// kSystemPointerSize) anyway.
return (((instance_size - GetEmbedderFieldsStartOffset(map)) >>
kTaggedSizeLog2) -
map->GetInObjectProperties()) /
kEmbedderDataSlotSizeInTaggedSlots;
}
@@ -761,14 +761,10 @@ class JSObject : public JSReceiver {
PropertyArray::kMaxLength);
// Layout description.
#define JS_OBJECT_FIELDS(V) \
V(kElementsOffset, kTaggedSize) \
/* Header size. */ \
V(kHeaderSize, 0) \
V(kOptionalEmbedderFieldPadding, \
POINTER_SIZE_PADDING(kOptionalEmbedderFieldPadding)) \
/* Header size aligned to kSystemPointerSize. */ \
V(kHeaderSizeForEmbedderFields, 0)
#define JS_OBJECT_FIELDS(V) \
V(kElementsOffset, kTaggedSize) \
/* Header size. */ \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize, JS_OBJECT_FIELDS)
#undef JS_OBJECT_FIELDS
@@ -778,14 +774,11 @@ class JSObject : public JSReceiver {
(kMaxInstanceSize - kHeaderSize) >> kTaggedSizeLog2;
STATIC_ASSERT(kMaxInObjectProperties <= kMaxNumberOfDescriptors);
STATIC_ASSERT(kHeaderSizeForEmbedderFields ==
Internals::kJSObjectHeaderSizeForEmbedderFields);
static const int kMaxFirstInobjectPropertyOffset =
(1 << kFirstInobjectPropertyOffsetBitCount) - 1;
static const int kMaxEmbedderFields =
(kMaxFirstInobjectPropertyOffset - kHeaderSizeForEmbedderFields) /
kEmbedderDataSlotSize;
STATIC_ASSERT(kHeaderSizeForEmbedderFields +
(kMaxFirstInobjectPropertyOffset - kHeaderSize) / kEmbedderDataSlotSize;
STATIC_ASSERT(kHeaderSize +
kMaxEmbedderFields * kEmbedderDataSlotSizeInTaggedSlots <=
kMaxInstanceSize);