Commit 392316dd authored by Igor Sheludko, committed by Commit Bot

[ptr-compr][x64] Define kTaggedSize as kInt32Size

... when pointer compression is enabled, plus a number of cleanups.

Bug: v8:7703
Change-Id: If7344abf68a1c4d54e4a79d066dc185f25055d7d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1477737
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Auto-Submit: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60056}
parent 6eb397c4
@@ -29,7 +29,6 @@ static const Address kNullAddress = 0;
  * Configuration of tagging scheme.
  */
 const int kApiSystemPointerSize = sizeof(void*);
-const int kApiTaggedSize = kApiSystemPointerSize;
 const int kApiDoubleSize = sizeof(double);
 const int kApiInt32Size = sizeof(int32_t);
 const int kApiInt64Size = sizeof(int64_t);
@@ -92,6 +91,9 @@ struct SmiTagging<8> {
 static_assert(
     kApiSystemPointerSize == kApiInt64Size,
     "Pointer compression can be enabled only for 64-bit architectures");
+const int kApiTaggedSize = kApiInt32Size;
+#else
+const int kApiTaggedSize = kApiSystemPointerSize;
 #endif

 #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
@@ -131,11 +133,7 @@ class Internals {
   static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
   static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
   static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
-  static const int kEmbedderDataSlotSize =
-#ifdef V8_COMPRESS_POINTERS
-      2 *
-#endif
-      kApiSystemPointerSize;
+  static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
   static const int kNativeContextEmbedderDataOffset = 7 * kApiTaggedSize;
   static const int kFullStringRepresentationMask = 0x0f;
   static const int kStringEncodingMask = 0x8;
@@ -301,22 +299,8 @@ class Internals {
 #endif
   }

-  V8_INLINE static internal::Address ReadTaggedAnyField(
-      internal::Address heap_object_ptr, int offset) {
-#ifdef V8_COMPRESS_POINTERS
-    int32_t value = ReadRawField<int32_t>(heap_object_ptr, offset);
-    internal::Address root_mask = static_cast<internal::Address>(
-        -static_cast<intptr_t>(value & kSmiTagMask));
-    internal::Address root_or_zero =
-        root_mask & GetRootFromOnHeapAddress(heap_object_ptr);
-    return root_or_zero +
-           static_cast<internal::Address>(static_cast<intptr_t>(value));
-#else
-    return ReadRawField<internal::Address>(heap_object_ptr, offset);
-#endif
-  }
-
 #ifdef V8_COMPRESS_POINTERS
+  // See v8:7703 or src/ptr-compr.* for details about pointer compression.
   static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
   static constexpr size_t kPtrComprIsolateRootBias =
       kPtrComprHeapReservationSize / 2;
@@ -328,18 +312,14 @@ class Internals {
         -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
   }
-#else
-
-  template <typename T>
-  V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
-    typedef internal::Address A;
-    typedef internal::Internals I;
-    A ctx = *reinterpret_cast<const A*>(context);
-    A embedder_data =
-        I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
-    int value_offset =
-        I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
-    return I::ReadRawField<T>(embedder_data, value_offset);
+
+  V8_INLINE static internal::Address DecompressTaggedAnyField(
+      internal::Address heap_object_ptr, int32_t value) {
+    internal::Address root_mask = static_cast<internal::Address>(
+        -static_cast<intptr_t>(value & kSmiTagMask));
+    internal::Address root_or_zero =
+        root_mask & GetRootFromOnHeapAddress(heap_object_ptr);
+    return root_or_zero +
+           static_cast<internal::Address>(static_cast<intptr_t>(value));
   }
 #endif  // V8_COMPRESS_POINTERS
 };
...
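The DecompressTaggedAnyField() helper above selects between smi and heap-object decompression without branching: kSmiTagMask is 1 and heap objects have the low bit set, so -(value & kSmiTagMask) is all-ones for heap objects and zero for smis, and masking the isolate root with it adds the root only in the heap-object case. A minimal standalone sketch of the same arithmetic (names and types are illustrative, not V8's API):

    #include <cstdint>

    // Branchless decompression: add the isolate root only when the low
    // (heap-object) tag bit of the compressed value is set.
    uintptr_t Decompress(uintptr_t isolate_root, int32_t compressed) {
      // 0 for smis, all-ones for heap objects.
      uintptr_t root_mask =
          static_cast<uintptr_t>(-static_cast<intptr_t>(compressed & 1));
      uintptr_t root_or_zero = root_mask & isolate_root;
      // Sign-extend the 32-bit payload before widening.
      return root_or_zero +
             static_cast<uintptr_t>(static_cast<intptr_t>(compressed));
    }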
@@ -10322,7 +10322,7 @@ AccessorSignature* AccessorSignature::Cast(Data* data) {
 }

 Local<Value> Object::GetInternalField(int index) {
-#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+#ifndef V8_ENABLE_CHECKS
   typedef internal::Address A;
   typedef internal::Internals I;
   A obj = *reinterpret_cast<A*>(this);
@@ -10333,7 +10333,12 @@ Local<Value> Object::GetInternalField(int index) {
       instance_type == I::kJSApiObjectType ||
       instance_type == I::kJSSpecialApiObjectType) {
     int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
-    A value = I::ReadTaggedAnyField(obj, offset);
+    A value = I::ReadRawField<A>(obj, offset);
+#ifdef V8_COMPRESS_POINTERS
+    // We read the full pointer value and then decompress it in order to avoid
+    // dealing with potential endianness issues.
+    value = I::DecompressTaggedAnyField(obj, static_cast<int32_t>(value));
+#endif
     internal::Isolate* isolate =
         internal::IsolateFromNeverReadOnlySpaceObject(obj);
     A* result = HandleScope::CreateHandle(isolate, value);
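The fast path now reads the full pointer-sized word and only then truncates to 32 bits. Reading an int32_t directly at the field offset would pick up the wrong half of the 8-byte slot on big-endian targets, whereas a full-width read followed by a narrowing cast always yields the numerically low 32 bits regardless of byte order. A tiny sketch of that property (illustrative, not V8 code):

    #include <cstdint>

    // The cast extracts the low-order 32 bits of the *value*, which is
    // independent of where those bytes sit in memory (LE vs. BE).
    int32_t LowHalf(uint64_t full_word) {
      return static_cast<int32_t>(full_word);
    }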
@@ -10345,7 +10350,7 @@ Local<Value> Object::GetInternalField(int index) {

 void* Object::GetAlignedPointerFromInternalField(int index) {
-#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+#ifndef V8_ENABLE_CHECKS
   typedef internal::Address A;
   typedef internal::Internals I;
   A obj = *reinterpret_cast<A*>(this);
@@ -10956,13 +10961,24 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
 }

 Local<Value> Context::GetEmbedderData(int index) {
-#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+#ifndef V8_ENABLE_CHECKS
   typedef internal::Address A;
   typedef internal::Internals I;
+  A ctx = *reinterpret_cast<const A*>(this);
+  A embedder_data =
+      I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
+  int value_offset =
+      I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+  A value = I::ReadRawField<A>(embedder_data, value_offset);
+#ifdef V8_COMPRESS_POINTERS
+  // We read the full pointer value and then decompress it in order to avoid
+  // dealing with potential endianness issues.
+  value =
+      I::DecompressTaggedAnyField(embedder_data, static_cast<int32_t>(value));
+#endif
   internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
       *reinterpret_cast<A*>(this));
-  A* result =
-      HandleScope::CreateHandle(isolate, I::ReadEmbedderData<A>(this, index));
+  A* result = HandleScope::CreateHandle(isolate, value);
   return Local<Value>(reinterpret_cast<Value*>(result));
 #else
   return SlowGetEmbedderData(index);
@@ -10971,9 +10987,15 @@ Local<Value> Context::GetEmbedderData(int index) {

 void* Context::GetAlignedPointerFromEmbedderData(int index) {
-#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+#ifndef V8_ENABLE_CHECKS
+  typedef internal::Address A;
   typedef internal::Internals I;
-  return I::ReadEmbedderData<void*>(this, index);
+  A ctx = *reinterpret_cast<const A*>(this);
+  A embedder_data =
+      I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
+  int value_offset =
+      I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+  return I::ReadRawField<void*>(embedder_data, value_offset);
 #else
   return SlowGetAlignedPointerFromEmbedderData(index);
 #endif
...
@@ -200,28 +200,47 @@ constexpr size_t kReservedCodeRangePages = 0;
 STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));

+#ifdef V8_COMPRESS_POINTERS
+static_assert(
+    kSystemPointerSize == kInt64Size,
+    "Pointer compression can be enabled only for 64-bit architectures");
+
+constexpr int kTaggedSize = kInt32Size;
+constexpr int kTaggedSizeLog2 = 2;
+
+// These types define raw and atomic storage types for tagged values stored
+// on V8 heap.
+using Tagged_t = int32_t;
+using AtomicTagged_t = base::Atomic32;
+
+#else
+
 constexpr int kTaggedSize = kSystemPointerSize;
 constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
-STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));

 // These types define raw and atomic storage types for tagged values stored
 // on V8 heap.
 using Tagged_t = Address;
 using AtomicTagged_t = base::AtomicWord;

+#endif  // V8_COMPRESS_POINTERS
+
+STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
+
 using AsAtomicTagged = base::AsAtomicPointerImpl<AtomicTagged_t>;
 STATIC_ASSERT(sizeof(Tagged_t) == kTaggedSize);
 STATIC_ASSERT(sizeof(AtomicTagged_t) == kTaggedSize);

+STATIC_ASSERT(kTaggedSize == kApiTaggedSize);
+
 // TODO(ishell): use kTaggedSize or kSystemPointerSize instead.
+#ifndef V8_COMPRESS_POINTERS
 constexpr int kPointerSize = kSystemPointerSize;
 constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;
 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
-
-constexpr int kEmbedderDataSlotSize =
-#ifdef V8_COMPRESS_POINTERS
-    kTaggedSize +
 #endif
-    kTaggedSize;

+constexpr int kEmbedderDataSlotSize = kSystemPointerSize;
+
 constexpr int kEmbedderDataSlotSizeInTaggedSlots =
     kEmbedderDataSlotSize / kTaggedSize;
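After this hunk a tagged slot is 4 bytes in compressed builds (Tagged_t = int32_t) and pointer-sized otherwise, while kSystemPointerSize stays 8 on x64. A compile-time sketch of the resulting invariants, in the spirit of the STATIC_ASSERTs above (standalone; the macro name is a stand-in for V8_COMPRESS_POINTERS):

    #include <cstdint>

    #ifdef COMPRESS_POINTERS
    using Tagged_t = int32_t;       // compressed tagged value
    constexpr int kTaggedSize = 4;  // == kInt32Size
    #else
    using Tagged_t = uintptr_t;     // full pointer-sized tagged value
    constexpr int kTaggedSize = sizeof(uintptr_t);
    #endif

    static_assert(sizeof(Tagged_t) == kTaggedSize,
                  "storage type must match kTaggedSize");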
@@ -870,24 +889,24 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;
                   ::i::kHeapObjectTag))

 // OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
 #define OBJECT_POINTER_ALIGN(value) \
-  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+  (((value) + ::i::kObjectAlignmentMask) & ~::i::kObjectAlignmentMask)

 // OBJECT_POINTER_PADDING returns the padding size required to align value
 // as a HeapObject pointer
 #define OBJECT_POINTER_PADDING(value) (OBJECT_POINTER_ALIGN(value) - (value))

 // POINTER_SIZE_ALIGN returns the value aligned as a system pointer.
 #define POINTER_SIZE_ALIGN(value) \
-  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
+  (((value) + ::i::kPointerAlignmentMask) & ~::i::kPointerAlignmentMask)

 // POINTER_SIZE_PADDING returns the padding size required to align value
 // as a system pointer.
 #define POINTER_SIZE_PADDING(value) (POINTER_SIZE_ALIGN(value) - (value))

 // CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
 #define CODE_POINTER_ALIGN(value) \
-  (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
+  (((value) + ::i::kCodeAlignmentMask) & ~::i::kCodeAlignmentMask)

 // CODE_POINTER_PADDING returns the padding size required to align value
 // as a generated code segment.
@@ -895,8 +914,7 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;
 // DOUBLE_POINTER_ALIGN returns the value aligned for double pointers.
 #define DOUBLE_POINTER_ALIGN(value) \
-  (((value) + kDoubleAlignmentMask) & ~kDoubleAlignmentMask)
+  (((value) + ::i::kDoubleAlignmentMask) & ~::i::kDoubleAlignmentMask)

 // Defines hints about receiver values based on structural knowledge.
 enum class ConvertReceiverMode : unsigned {
...
@@ -138,6 +138,32 @@ inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
   CopyImpl<kMinComplexMemCopy>(dst, src, num_bytes);
 }

+inline void MemsetInt32(int32_t* dest, int32_t value, size_t counter) {
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+#define STOS "stosl"
+#endif
+
+#if defined(MEMORY_SANITIZER)
+// MemorySanitizer does not understand inline assembly.
+#undef STOS
+#endif
+
+#if defined(__GNUC__) && defined(STOS)
+  asm volatile(
+      "cld;"
+      "rep ; " STOS
+      : "+&c"(counter), "+&D"(dest)
+      : "a"(value)
+      : "memory", "cc");
+#else
+  for (size_t i = 0; i < counter; i++) {
+    dest[i] = value;
+  }
+#endif
+
+#undef STOS
+}
+
 inline void MemsetPointer(Address* dest, Address value, size_t counter) {
 #if V8_HOST_ARCH_IA32
 #define STOS "stosl"
...
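The new MemsetInt32() mirrors MemsetPointer() just below it: on IA32/x64 with GCC-compatible compilers it uses rep stosl (ECX holds the remaining count, EDI the destination, EAX the value, hence the "+&c", "+&D" and "a" constraints), and everywhere else it falls back to the plain loop, which expresses the full semantics. A portable equivalent for reference (illustrative helper name):

    #include <cstddef>
    #include <cstdint>

    // Portable equivalent of MemsetInt32: fill `count` 32-bit slots
    // (e.g. compressed tagged values) with the same value.
    inline void FillInt32(int32_t* dest, int32_t value, size_t count) {
      for (size_t i = 0; i < count; i++) dest[i] = value;
    }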
@@ -614,33 +614,18 @@ HeapObject MapWord::ToForwardingAddress() {
 #ifdef VERIFY_HEAP
 void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
   VerifyPointer(isolate, READ_FIELD(*this, offset));
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  // Ensure upper 32-bits are zeros.
-  Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
-  CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
-#endif
+  STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
 }

 void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
   MaybeObject::VerifyMaybeObjectPointer(isolate,
                                         READ_WEAK_FIELD(*this, offset));
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  // Ensure upper 32-bits are zeros.
-  Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
-  CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
-#endif
+  STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
 }

 void HeapObject::VerifySmiField(int offset) {
   CHECK(READ_FIELD(*this, offset)->IsSmi());
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  // Ensure upper 32-bits are zeros.
-  Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
-  CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
-#endif
+  STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
 }
 #endif
...
@@ -402,11 +402,11 @@ class Code : public HeapObject {
 // This documents the amount of free space we have in each Code object header
 // due to padding for code alignment.
 #if V8_TARGET_ARCH_ARM64
-  static constexpr int kHeaderPaddingSize = 0;
+  static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
 #elif V8_TARGET_ARCH_MIPS64
   static constexpr int kHeaderPaddingSize = 0;
 #elif V8_TARGET_ARCH_X64
-  static constexpr int kHeaderPaddingSize = 0;
+  static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
 #elif V8_TARGET_ARCH_ARM
   static constexpr int kHeaderPaddingSize = 20;
 #elif V8_TARGET_ARCH_IA32
...
@@ -33,6 +33,7 @@ Object EmbedderDataSlot::load_tagged() const {
 void EmbedderDataSlot::store_smi(Smi value) {
   ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(value);
 #ifdef V8_COMPRESS_POINTERS
+  // See gc_safe_store() for the reasons behind two stores.
   ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Smi::kZero);
 #endif
 }
@@ -43,8 +44,9 @@ void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index,
   int slot_offset = EmbedderDataArray::OffsetOfElementAt(entry_index);
   ObjectSlot(FIELD_ADDR(array, slot_offset + kTaggedPayloadOffset))
       .Relaxed_Store(value);
-  WRITE_BARRIER(array, slot_offset, value);
+  WRITE_BARRIER(array, slot_offset + kTaggedPayloadOffset, value);
 #ifdef V8_COMPRESS_POINTERS
+  // See gc_safe_store() for the reasons behind two stores.
   ObjectSlot(FIELD_ADDR(array, slot_offset + kRawPayloadOffset))
       .Relaxed_Store(Smi::kZero);
 #endif
@@ -56,68 +58,64 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
   int slot_offset = object->GetEmbedderFieldOffset(embedder_field_index);
   ObjectSlot(FIELD_ADDR(object, slot_offset + kTaggedPayloadOffset))
       .Relaxed_Store(value);
-  WRITE_BARRIER(object, slot_offset, value);
+  WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
 #ifdef V8_COMPRESS_POINTERS
+  // See gc_safe_store() for the reasons behind two stores.
   ObjectSlot(FIELD_ADDR(object, slot_offset + kRawPayloadOffset))
       .Relaxed_Store(Smi::kZero);
 #endif
 }

 bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
-  Object tagged_value =
-      ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
-  if (!tagged_value->IsSmi()) return false;
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  STATIC_ASSERT(SmiValuesAre31Bits());
-  Address value_lo = static_cast<uint32_t>(tagged_value->ptr());
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  Address value_hi =
-      FullObjectSlot(address() + kRawPayloadOffset).Relaxed_Load()->ptr();
-  Address value = value_lo | (value_hi << 32);
-  *out_pointer = reinterpret_cast<void*>(value);
-#else
-  *out_pointer = reinterpret_cast<void*>(tagged_value->ptr());
-#endif
-  return true;
+  // We don't care about atomicity of access here because embedder slots
+  // are accessed this way only from the main thread via API during the
+  // "mutator" phase, which is properly synced with GC (the concurrent marker
+  // may still look at the tagged part of the embedder slot but read-only
+  // access is ok).
+  Address raw_value = *location();
+  *out_pointer = reinterpret_cast<void*>(raw_value);
+  return HAS_SMI_TAG(raw_value);
 }

 bool EmbedderDataSlot::store_aligned_pointer(void* ptr) {
   Address value = reinterpret_cast<Address>(ptr);
   if (!HAS_SMI_TAG(value)) return false;
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  STATIC_ASSERT(SmiValuesAre31Bits());
-  // Sign-extend lower 32-bits in order to form a proper Smi value.
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  Address lo = static_cast<intptr_t>(static_cast<int32_t>(value));
-  ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(lo));
-  Address hi = value >> 32;
-  ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Object(hi));
-#else
-  ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(value));
-#endif
+  gc_safe_store(value);
   return true;
 }

 EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
     const DisallowHeapAllocation& no_gc) const {
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  return RawData{
-      ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load()->ptr(),
-#ifdef V8_COMPRESS_POINTERS
-      FullObjectSlot(address() + kRawPayloadOffset).Relaxed_Load()->ptr()
-#endif
-  };
+  // We don't care about atomicity of access here because embedder slots
+  // are accessed this way only by the serializer from the main thread when
+  // GC is not active (the concurrent marker may still look at the tagged
+  // part of the embedder slot but read-only access is ok).
+  return *location();
 }

-void EmbedderDataSlot::store_raw(const EmbedderDataSlot::RawData& data,
+void EmbedderDataSlot::store_raw(EmbedderDataSlot::RawData data,
                                  const DisallowHeapAllocation& no_gc) {
-  ObjectSlot(address() + kTaggedPayloadOffset)
-      .Relaxed_Store(Object(data.data_[0]));
+  gc_safe_store(data);
+}
+
+void EmbedderDataSlot::gc_safe_store(Address value) {
 #ifdef V8_COMPRESS_POINTERS
-  ObjectSlot(address() + kRawPayloadOffset)
-      .Relaxed_Store(Object(data.data_[1]));
+  STATIC_ASSERT(kSmiShiftSize == 0);
+  STATIC_ASSERT(SmiValuesAre31Bits());
+  STATIC_ASSERT(kTaggedSize == kInt32Size);
+  // We have to do two 32-bit stores here because
+  // 1) tagged part modifications must be atomic to be properly synchronized
+  //    with the concurrent marker.
+  // 2) atomicity of a full pointer store is not guaranteed for embedder slots
+  //    since the address of the slot may not be kSystemPointerSize aligned
+  //    (only kTaggedSize alignment is guaranteed).
+  // TODO(ishell, v8:8875): revisit this once the allocation alignment
+  // inconsistency is fixed.
+  Address lo = static_cast<intptr_t>(static_cast<int32_t>(value));
+  ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(lo));
+  Address hi = value >> 32;
+  ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Object(hi));
+#else
+  ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(value));
 #endif
 }
...
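gc_safe_store() is the heart of this file's change: a full pointer is split into two 32-bit halves so that the half the GC inspects (the tagged payload) is always written atomically and always remains a valid smi (callers guarantee the low bit is clear). A standalone sketch of the split, with hypothetical names:

    #include <cstdint>

    struct SplitSlot {
      int32_t tagged_lo;  // stays a valid smi: low (tag) bit is clear
      int32_t raw_hi;     // upper 32 bits of the full pointer
    };

    // Precondition: (value & 1) == 0, i.e. HAS_SMI_TAG(value) holds.
    SplitSlot Split(uint64_t value) {
      return SplitSlot{static_cast<int32_t>(value),
                       static_cast<int32_t>(value >> 32)};
    }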
@@ -30,41 +30,42 @@ class Object;
 // Storing heap object through this slot may require triggering write barriers
 // so this operation must be done via static store_tagged() methods.
 class EmbedderDataSlot
-    : public SlotBase<EmbedderDataSlot, Address, kEmbedderDataSlotSize> {
+    : public SlotBase<EmbedderDataSlot, Address, kTaggedSize> {
  public:
   EmbedderDataSlot() : SlotBase(kNullAddress) {}
   V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index);
   V8_INLINE EmbedderDataSlot(JSObject object, int embedder_field_index);

-  // TODO(ishell): these offsets are currently little-endian specific.
-  // The less significant part contains tagged value and the other part
-  // contains the raw value.
+#ifdef V8_TARGET_LITTLE_ENDIAN
   static constexpr int kTaggedPayloadOffset = 0;
+#else
+  static constexpr int kTaggedPayloadOffset = kTaggedSize;
+#endif
 #ifdef V8_COMPRESS_POINTERS
-  static constexpr int kRawPayloadOffset = kTaggedSize;
+  // The raw payload is located in the other tagged part of the full pointer.
+  static constexpr int kRawPayloadOffset = kTaggedSize - kTaggedPayloadOffset;
 #endif
   static constexpr int kRequiredPtrAlignment = kSmiTagSize;

   // Opaque type used for storing raw embedder data.
-  struct RawData {
-    const Address data_[kEmbedderDataSlotSizeInTaggedSlots];
-  };
+  typedef Address RawData;

   V8_INLINE Object load_tagged() const;
   V8_INLINE void store_smi(Smi value);

   // Setting an arbitrary tagged value requires triggering a write barrier
   // which requires separate object and offset values, therefore these static
-  // functions a
+  // functions also have the target object parameter.
   static V8_INLINE void store_tagged(EmbedderDataArray array, int entry_index,
                                      Object value);
   static V8_INLINE void store_tagged(JSObject object, int embedder_field_index,
                                      Object value);

-  // Tries reinterpret the value as an aligned pointer and on success sets
-  // *out_result to the pointer-like value and returns true. Note, that some
-  // Smis could still look like an aligned pointers.
-  // Returns false otherwise.
+  // Tries to reinterpret the value as an aligned pointer and sets *out_result
+  // to the pointer-like value. Note that some Smis could still look like
+  // aligned pointers.
+  // Returns true on success.
   V8_INLINE bool ToAlignedPointer(void** out_result) const;

   // Returns true if the pointer was successfully stored or false if the
@@ -72,8 +73,12 @@ class EmbedderDataSlot
   V8_INLINE V8_WARN_UNUSED_RESULT bool store_aligned_pointer(void* ptr);

   V8_INLINE RawData load_raw(const DisallowHeapAllocation& no_gc) const;
-  V8_INLINE void store_raw(const RawData& data,
-                           const DisallowHeapAllocation& no_gc);
+  V8_INLINE void store_raw(RawData data, const DisallowHeapAllocation& no_gc);
+
+ private:
+  // Stores the given value to the embedder data slot in a concurrent-marker
+  // friendly manner (the tagged part of the slot is written atomically).
+  V8_INLINE void gc_safe_store(Address value);
 };

 }  // namespace internal
...
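With these offsets the tagged (smi-valid) half of the slot sits at the lower address on little-endian and at the higher address on big-endian, so a 32-bit access at kTaggedPayloadOffset always reads the numerically low half of the stored word; kRawPayloadOffset is simply whichever half remains. A sketch of the offset arithmetic under compression (illustrative; the endianness macro is a stand-in):

    constexpr int kTaggedSize = 4;  // compressed builds
    #ifdef TARGET_LITTLE_ENDIAN
    constexpr int kTaggedPayloadOffset = 0;
    #else
    constexpr int kTaggedPayloadOffset = kTaggedSize;
    #endif
    // The raw payload occupies the other half of the 8-byte slot.
    constexpr int kRawPayloadOffset = kTaggedSize - kTaggedPayloadOffset;
    static_assert(kTaggedPayloadOffset != kRawPayloadOffset, "disjoint halves");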
@@ -25,7 +25,7 @@ namespace internal {
 // Note how the comparator operates on Address values, representing the raw
 // data found at the given heap location, so you probably want to construct
 // an Object from it.
-class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
+class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t> {
  public:
   // This class is a stand-in for "Address&" that uses custom atomic
   // read/write operations for the actual memory accesses.
...
@@ -118,13 +118,14 @@ inline void CopyTagged(Address dst, const Address src, size_t num_tagged) {

 // Sets |counter| number of kTaggedSize-sized values starting at |start| slot.
 inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) {
-  // TODO(ishell): revisit this implementation, maybe use "rep stosl"
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  Address raw_value = value.ptr();
 #ifdef V8_COMPRESS_POINTERS
-  raw_value = CompressTagged(raw_value);
-#endif
+  Tagged_t raw_value = CompressTagged(value.ptr());
+  STATIC_ASSERT(kTaggedSize == kInt32Size);
+  MemsetInt32(start.location(), raw_value, counter);
+#else
+  Address raw_value = value.ptr();
   MemsetPointer(start.location(), raw_value, counter);
+#endif
 }

 // Sets |counter| number of kSystemPointerSize-sized values starting at |start|
...
@@ -12,14 +12,14 @@ namespace internal {
 class Object;

-template <typename Subclass, typename Data, size_t SlotDataSize>
+template <typename Subclass, typename Data,
+          size_t SlotDataAlignment = sizeof(Data)>
 class SlotBase {
  public:
   using TData = Data;

-  // TODO(ishell): This should eventually become just sizeof(TData) once
-  // pointer compression is implemented.
-  static constexpr size_t kSlotDataSize = SlotDataSize;
+  static constexpr size_t kSlotDataSize = sizeof(Data);
+  static constexpr size_t kSlotDataAlignment = SlotDataAlignment;

   Subclass& operator++() {  // Prefix increment.
     ptr_ += kSlotDataSize;
@@ -72,9 +72,8 @@ class SlotBase {
   TData* location() const { return reinterpret_cast<TData*>(ptr_); }

  protected:
-  STATIC_ASSERT(IsAligned(kSlotDataSize, kTaggedSize));
   explicit SlotBase(Address ptr) : ptr_(ptr) {
-    DCHECK(IsAligned(ptr, kTaggedSize));
+    DCHECK(IsAligned(ptr, kSlotDataAlignment));
   }

  private:
@@ -88,8 +87,7 @@ class SlotBase {
 // ("slot") holding a tagged pointer (smi or strong heap object).
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
-class FullObjectSlot
-    : public SlotBase<FullObjectSlot, Address, kSystemPointerSize> {
+class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
  public:
   using TObject = Object;
   using THeapObjectSlot = FullHeapObjectSlot;
@@ -103,7 +101,7 @@ class FullObjectSlot
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   inline explicit FullObjectSlot(Object* object);
   template <typename T>
-  explicit FullObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit FullObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   // Compares memory representation of a value stored in the slot with given
@@ -140,7 +138,7 @@ class FullMaybeObjectSlot
   explicit FullMaybeObjectSlot(MaybeObject* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit FullMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit FullMaybeObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline const MaybeObject operator*() const;
@@ -158,15 +156,14 @@ class FullMaybeObjectSlot
 // The slot's contents can be read and written using operator* and store().
 // In case it is known that the slot contains a strong heap object pointer,
 // ToHeapObject() can be used to retrieve that heap object.
-class FullHeapObjectSlot
-    : public SlotBase<FullHeapObjectSlot, Address, kSystemPointerSize> {
+class FullHeapObjectSlot : public SlotBase<FullHeapObjectSlot, Address> {
  public:
   FullHeapObjectSlot() : SlotBase(kNullAddress) {}
   explicit FullHeapObjectSlot(Address ptr) : SlotBase(ptr) {}
   explicit FullHeapObjectSlot(Object* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit FullHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit FullHeapObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline const HeapObjectReference operator*() const;
...
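The SlotBase template's third parameter now means "required alignment of the slot address" and defaults to sizeof(Data), so slot classes whose data is naturally aligned can drop the explicit argument, while EmbedderDataSlot passes kTaggedSize because its Address-sized payload is only guaranteed tagged-size alignment. A reduced sketch of the pattern (hypothetical names):

    #include <cassert>
    #include <cstdint>

    template <typename Subclass, typename Data,
              size_t SlotDataAlignment = sizeof(Data)>
    class SlotBase {
     public:
      static constexpr size_t kSlotDataSize = sizeof(Data);
      static constexpr size_t kSlotDataAlignment = SlotDataAlignment;
      explicit SlotBase(uintptr_t ptr) : ptr_(ptr) {
        assert(ptr % kSlotDataAlignment == 0);  // alignment, not size
      }
     private:
      uintptr_t ptr_;
    };

    // Naturally aligned pointer-sized slot: the default suffices.
    class FullSlot : public SlotBase<FullSlot, uintptr_t> {
     public:
      using SlotBase::SlotBase;
    };

    // Pointer-sized data that may only be 4-byte aligned.
    class LooseSlot : public SlotBase<LooseSlot, uintptr_t, 4> {
     public:
      using SlotBase::SlotBase;
    };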
@@ -29,8 +29,6 @@ V8_INLINE Address GetRootFromOnHeapAddress(Address addr) {
 // preserving both weak- and smi- tags.
 V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
                                           Tagged_t raw_value) {
-  static_assert(kTaggedSize == kSystemPointerSize, "has to be updated");
-  static_assert(!std::is_same<int32_t, Tagged_t>::value, "remove cast below");
   int32_t value = static_cast<int32_t>(raw_value);
   Address root = GetRootFromOnHeapAddress(on_heap_addr);
   // Current compression scheme requires value to be sign-extended to intptr_t
@@ -41,8 +39,6 @@ V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
 // Decompresses any tagged value, preserving both weak- and smi- tags.
 V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
                                       Tagged_t raw_value) {
-  static_assert(kTaggedSize == kSystemPointerSize, "has to be updated");
-  static_assert(!std::is_same<int32_t, Tagged_t>::value, "remove cast below");
   int32_t value = static_cast<int32_t>(raw_value);
   // |root_mask| is 0 if the |value| was a smi or -1 otherwise.
   Address root_mask = -static_cast<Address>(value & kSmiTagMask);
...
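The removed static_asserts were guards from the era when Tagged_t was still pointer-sized; now that it is genuinely 32-bit, the cast is load-bearing. Sign extension matters because the isolate root sits in the middle of the 4 GB reservation: compressed values with bit 31 set denote addresses below the root. A worked example (illustrative numbers):

    #include <cstdint>

    // root       = 0x0000'4001'0000'0000  (4 GB aligned)
    // address    = 0x0000'4000'8000'0000  (2 GB below the root)
    // compressed = low 32 bits of address = 0x8000'0000
    uint64_t DecompressPtr(uint64_t root, uint32_t compressed) {
      // Sign extension turns 0x8000'0000 into -2 GB; zero extension
      // would incorrectly add +2 GB instead.
      return root + static_cast<uint64_t>(static_cast<int64_t>(
                        static_cast<int32_t>(compressed)));
    }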
@@ -13,6 +13,7 @@
 namespace v8 {
 namespace internal {

+// See v8:7703 for details about how pointer compression works.
 constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB;
 constexpr size_t kPtrComprIsolateRootBias = kPtrComprHeapReservationSize / 2;
 constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
@@ -21,8 +22,7 @@ constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
 // holding a compressed tagged pointer (smi or heap object).
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
-class CompressedObjectSlot
-    : public SlotBase<CompressedObjectSlot, Tagged_t, kTaggedSize> {
+class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
  public:
   using TObject = Object;
   using THeapObjectSlot = CompressedHeapObjectSlot;
@@ -37,7 +37,7 @@ class CompressedObjectSlot
   explicit CompressedObjectSlot(Object const* const* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline Object operator*() const;
@@ -57,8 +57,7 @@ class CompressedObjectSlot
 // forwarding pointer is different.
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
-class CompressedMapWordSlot
-    : public SlotBase<CompressedMapWordSlot, Tagged_t, kTaggedSize> {
+class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
  public:
   using TObject = Object;
@@ -88,7 +87,7 @@ class CompressedMapWordSlot
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
 class CompressedMaybeObjectSlot
-    : public SlotBase<CompressedMaybeObjectSlot, Tagged_t, kTaggedSize> {
+    : public SlotBase<CompressedMaybeObjectSlot, Tagged_t> {
  public:
   using TObject = MaybeObject;
   using THeapObjectSlot = CompressedHeapObjectSlot;
@@ -102,7 +101,8 @@ class CompressedMaybeObjectSlot
   explicit CompressedMaybeObjectSlot(MaybeObject* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit CompressedMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit CompressedMaybeObjectSlot(
+      SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline MaybeObject operator*() const;
@@ -121,14 +121,14 @@ class CompressedMaybeObjectSlot
 // In case it is known that the slot contains a strong heap object pointer,
 // ToHeapObject() can be used to retrieve that heap object.
 class CompressedHeapObjectSlot
-    : public SlotBase<CompressedHeapObjectSlot, Tagged_t, kTaggedSize> {
+    : public SlotBase<CompressedHeapObjectSlot, Tagged_t> {
  public:
   CompressedHeapObjectSlot() : SlotBase(kNullAddress) {}
   explicit CompressedHeapObjectSlot(Address ptr) : SlotBase(ptr) {}
   explicit CompressedHeapObjectSlot(Object* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit CompressedHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit CompressedHeapObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline HeapObjectReference operator*() const;
...
@@ -182,7 +182,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
   // with embedder callbacks.
   for (int i = 0; i < embedder_fields_count; i++) {
     if (!DataIsEmpty(serialized_data[i])) {
-      EmbedderDataSlot(js_obj, i).store_raw({kNullAddress}, no_gc);
+      EmbedderDataSlot(js_obj, i).store_raw(kNullAddress, no_gc);
     }
   }
...
@@ -426,50 +426,45 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 // - Instructions on 64-bit (quadword) operands/registers use 'q'.
 // - Instructions on operands/registers with pointer size use 'p'.
 #define DECLARE_INSTRUCTION(instruction)                                       \
   template <class P1>                                                          \
   void instruction##_tagged(P1 p1) {                                           \
-    STATIC_ASSERT(kTaggedSize == kSystemPointerSize);                          \
-    /* TODO(ishell): change to kTaggedSize */                                  \
-    emit_##instruction(p1, COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
+    emit_##instruction(p1, kTaggedSize);                                       \
   }                                                                            \
                                                                                \
   template <class P1>                                                          \
   void instruction##l(P1 p1) {                                                 \
     emit_##instruction(p1, kInt32Size);                                        \
   }                                                                            \
                                                                                \
   template <class P1>                                                          \
   void instruction##q(P1 p1) {                                                 \
     emit_##instruction(p1, kInt64Size);                                        \
   }                                                                            \
                                                                                \
   template <class P1, class P2>                                                \
   void instruction##_tagged(P1 p1, P2 p2) {                                    \
-    STATIC_ASSERT(kTaggedSize == kSystemPointerSize);                          \
-    /* TODO(ishell): change to kTaggedSize */                                  \
-    emit_##instruction(p1, p2,                                                 \
-                       COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize);     \
+    emit_##instruction(p1, p2, kTaggedSize);                                   \
   }                                                                            \
                                                                                \
   template <class P1, class P2>                                                \
   void instruction##l(P1 p1, P2 p2) {                                          \
     emit_##instruction(p1, p2, kInt32Size);                                    \
   }                                                                            \
                                                                                \
   template <class P1, class P2>                                                \
   void instruction##q(P1 p1, P2 p2) {                                          \
     emit_##instruction(p1, p2, kInt64Size);                                    \
   }                                                                            \
                                                                                \
   template <class P1, class P2, class P3>                                      \
   void instruction##l(P1 p1, P2 p2, P3 p3) {                                   \
     emit_##instruction(p1, p2, p3, kInt32Size);                                \
   }                                                                            \
                                                                                \
   template <class P1, class P2, class P3>                                      \
   void instruction##q(P1 p1, P2 p2, P3 p3) {                                   \
     emit_##instruction(p1, p2, p3, kInt64Size);                                \
   }

 ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
 #undef DECLARE_INSTRUCTION
...
@@ -269,7 +269,6 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
 #ifdef V8_COMPRESS_POINTERS
   RecordComment("[ StoreTagged");
   movl(dst_field_operand, value);
-  movl(Operand(dst_field_operand, 4), Immediate(0));
   RecordComment("]");
 #else
   movq(dst_field_operand, value);
@@ -281,7 +280,6 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
 #ifdef V8_COMPRESS_POINTERS
   RecordComment("[ StoreTagged");
   movl(dst_field_operand, value);
-  movl(Operand(dst_field_operand, 4), Immediate(0));
   RecordComment("]");
 #else
   movq(dst_field_operand, value);
@@ -1124,7 +1122,11 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
     movsxlq(dst, dst);
   } else {
     DCHECK(SmiValuesAre31Bits());
+#ifdef V8_COMPRESS_POINTERS
+    movsxlq(dst, src);
+#else
     movq(dst, src);
+#endif
     sarq(dst, Immediate(kSmiShift));
   }
 }
@@ -1132,7 +1134,7 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {

 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
   AssertSmi(smi1);
   AssertSmi(smi2);
-  cmpq(smi1, smi2);
+  cmp_tagged(smi1, smi2);
 }

 void MacroAssembler::SmiCompare(Register dst, Smi src) {
...
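With pointer compression the slot holds only 32 valid bits, so the SmiUntag() load must be movsxlq (sign-extending 32-to-64) rather than movq before the 64-bit arithmetic shift; a plain 8-byte load could pick up garbage upper bits. In C terms the two instructions compute the following (kSmiShift == 1 for 31-bit smis, whose value lives in bits 1..31):

    #include <cstdint>

    int64_t SmiUntag31(int32_t tagged) {
      int64_t widened = tagged;  // movsxlq: sign-extending load
      return widened >> 1;       // sarq: arithmetic shift by kSmiShift
    }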
@@ -131,9 +131,18 @@ class BufferedRawMachineAssemblerTester
   // Store node is provided as a parameter. By storing the return value in
   // memory it is possible to return 64 bit values.
   void Return(Node* input) {
-    Store(MachineTypeForC<ReturnType>().representation(),
-          RawMachineAssembler::Parameter(return_parameter_index_), input,
-          kNoWriteBarrier);
+    if (COMPRESS_POINTERS_BOOL && MachineTypeForC<ReturnType>().IsTagged()) {
+      // Since we return values by storing them to an off-heap location,
+      // generate a full-word store here.
+      Store(MachineType::PointerRepresentation(),
+            RawMachineAssembler::Parameter(return_parameter_index_),
+            BitcastTaggedToWord(input), kNoWriteBarrier);
+    } else {
+      Store(MachineTypeForC<ReturnType>().representation(),
+            RawMachineAssembler::Parameter(return_parameter_index_), input,
+            kNoWriteBarrier);
+    }
     RawMachineAssembler::Return(Int32Constant(1234));
   }
...
@@ -199,13 +199,10 @@ void CheckEq(CType in_value, CType out_value) {
 // Specializations for checking the result of compressing store.
 template <>
 void CheckEq<Object>(Object in_value, Object out_value) {
-  Isolate* isolate = CcTest::InitIsolateOnce();
-  // |out_value| is compressed. Check that it's valid.
-  CHECK_EQ(CompressTagged(in_value->ptr()), out_value->ptr());
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  CHECK_EQ(in_value->ptr(),
-           DecompressTaggedAny(isolate->isolate_root(),
-                               static_cast<int32_t>(out_value->ptr())));
+  // Compare only the lower 32 bits of the value because tagged loads/stores
+  // are 32-bit operations anyway.
+  CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
+           static_cast<Tagged_t>(out_value.ptr()));
 }

 template <>
@@ -269,7 +266,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
     // When pointer compression is enabled, we need to access only the lower
    // 32 bits of the tagged value while the buffer contains full 64-bit
    // values.
-    base_pointer = LSB(base_pointer, kSystemPointerSize / 2);
+    base_pointer = LSB(base_pointer, kTaggedSize);
   }
 #endif
   Node* base = m.PointerConstant(base_pointer);
...