Commit 392316dd authored by Igor Sheludko, committed by Commit Bot

[ptr-compr][x64] Define kTaggedSize as kInt32Size

... when pointer compression is enabled, plus a number of cleanups.

Bug: v8:7703
Change-Id: If7344abf68a1c4d54e4a79d066dc185f25055d7d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1477737
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Auto-Submit: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60056}
parent 6eb397c4
......@@ -29,7 +29,6 @@ static const Address kNullAddress = 0;
* Configuration of tagging scheme.
*/
const int kApiSystemPointerSize = sizeof(void*);
const int kApiTaggedSize = kApiSystemPointerSize;
const int kApiDoubleSize = sizeof(double);
const int kApiInt32Size = sizeof(int32_t);
const int kApiInt64Size = sizeof(int64_t);
......@@ -92,6 +91,9 @@ struct SmiTagging<8> {
static_assert(
kApiSystemPointerSize == kApiInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
const int kApiTaggedSize = kApiInt32Size;
#else
const int kApiTaggedSize = kApiSystemPointerSize;
#endif
#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
......@@ -131,11 +133,7 @@ class Internals {
static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataSlotSize =
#ifdef V8_COMPRESS_POINTERS
2 *
#endif
kApiSystemPointerSize;
static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
static const int kNativeContextEmbedderDataOffset = 7 * kApiTaggedSize;
static const int kFullStringRepresentationMask = 0x0f;
static const int kStringEncodingMask = 0x8;
......@@ -301,22 +299,8 @@ class Internals {
#endif
}
V8_INLINE static internal::Address ReadTaggedAnyField(
internal::Address heap_object_ptr, int offset) {
#ifdef V8_COMPRESS_POINTERS
int32_t value = ReadRawField<int32_t>(heap_object_ptr, offset);
internal::Address root_mask = static_cast<internal::Address>(
-static_cast<intptr_t>(value & kSmiTagMask));
internal::Address root_or_zero =
root_mask & GetRootFromOnHeapAddress(heap_object_ptr);
return root_or_zero +
static_cast<internal::Address>(static_cast<intptr_t>(value));
#else
return ReadRawField<internal::Address>(heap_object_ptr, offset);
#endif
}
#ifdef V8_COMPRESS_POINTERS
// See v8:7703 or src/ptr-compr.* for details about pointer compression.
static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
static constexpr size_t kPtrComprIsolateRootBias =
kPtrComprHeapReservationSize / 2;
......@@ -328,18 +312,14 @@ class Internals {
-static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
}
#else
template <typename T>
V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
typedef internal::Address A;
typedef internal::Internals I;
A ctx = *reinterpret_cast<const A*>(context);
A embedder_data =
I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset =
I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
return I::ReadRawField<T>(embedder_data, value_offset);
V8_INLINE static internal::Address DecompressTaggedAnyField(
internal::Address heap_object_ptr, int32_t value) {
internal::Address root_mask = static_cast<internal::Address>(
-static_cast<intptr_t>(value & kSmiTagMask));
internal::Address root_or_zero =
root_mask & GetRootFromOnHeapAddress(heap_object_ptr);
return root_or_zero +
static_cast<internal::Address>(static_cast<intptr_t>(value));
}
#endif // V8_COMPRESS_POINTERS
};
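The branchless decompression above is the heart of the scheme: `value & kSmiTagMask` is 0 for Smis and 1 for heap object pointers, so negating it yields a mask that selects the isolate root only for heap objects. A minimal standalone sketch of that logic (the names here are illustrative stand-ins, not the actual V8 internals; assumes a 64-bit host):

#include <cassert>
#include <cstdint>

using Address = uintptr_t;
constexpr Address kSmiTagMask = 1;  // low bit: 0 for Smis, 1 for heap objects

// Model of DecompressTaggedAnyField: |root_mask| is all-zeros for Smis and
// all-ones for heap objects, so the root is added only where it is needed.
Address DecompressTaggedAny(Address isolate_root, int32_t value) {
  Address root_mask =
      static_cast<Address>(-static_cast<intptr_t>(value & kSmiTagMask));
  Address root_or_zero = root_mask & isolate_root;
  return root_or_zero + static_cast<Address>(static_cast<intptr_t>(value));
}

int main() {
  Address root = Address{7} << 32;  // isolate roots are 4GB-aligned
  assert(DecompressTaggedAny(root, 0x42 << 1) == 0x42 << 1);   // Smi: no root
  assert(DecompressTaggedAny(root, 0x1001) == root + 0x1001);  // object: +root
}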
......
......@@ -10322,7 +10322,7 @@ AccessorSignature* AccessorSignature::Cast(Data* data) {
}
Local<Value> Object::GetInternalField(int index) {
#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
#ifndef V8_ENABLE_CHECKS
typedef internal::Address A;
typedef internal::Internals I;
A obj = *reinterpret_cast<A*>(this);
......@@ -10333,7 +10333,12 @@ Local<Value> Object::GetInternalField(int index) {
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType) {
int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
A value = I::ReadTaggedAnyField(obj, offset);
A value = I::ReadRawField<A>(obj, offset);
#ifdef V8_COMPRESS_POINTERS
// We read the full pointer value and then decompress it in order to avoid
// dealing with potential endianness issues.
value = I::DecompressTaggedAnyField(obj, static_cast<int32_t>(value));
#endif
internal::Isolate* isolate =
internal::IsolateFromNeverReadOnlySpaceObject(obj);
A* result = HandleScope::CreateHandle(isolate, value);
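Why reading the full pointer first sidesteps endianness: a 32-bit load from the slot's base address returns the compressed half only on little-endian targets, whereas loading all 64 bits and truncating in a register always yields the low 32 bits. A small sketch of the difference (assuming a 64-bit host):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // An 8-byte tagged slot whose low 32 bits hold the compressed value.
  uint64_t slot = 0x00007f1200001234ull;
  // Endianness-sensitive: a 4-byte load at the slot's base address sees the
  // compressed half only on little-endian targets.
  int32_t narrow;
  std::memcpy(&narrow, &slot, sizeof(narrow));
  // Endianness-agnostic: load the whole word, then truncate in a register.
  int32_t wide = static_cast<int32_t>(slot);
  assert(wide == 0x1234);  // always the compressed value
  (void)narrow;            // equals |wide| only on little-endian hosts
}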
......@@ -10345,7 +10350,7 @@ Local<Value> Object::GetInternalField(int index) {
void* Object::GetAlignedPointerFromInternalField(int index) {
#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
#ifndef V8_ENABLE_CHECKS
typedef internal::Address A;
typedef internal::Internals I;
A obj = *reinterpret_cast<A*>(this);
......@@ -10956,13 +10961,24 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
}
Local<Value> Context::GetEmbedderData(int index) {
#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
#ifndef V8_ENABLE_CHECKS
typedef internal::Address A;
typedef internal::Internals I;
A ctx = *reinterpret_cast<const A*>(this);
A embedder_data =
I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset =
I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
A value = I::ReadRawField<A>(embedder_data, value_offset);
#ifdef V8_COMPRESS_POINTERS
// We read the full pointer value and then decompress it in order to avoid
// dealing with potential endianness issues.
value =
I::DecompressTaggedAnyField(embedder_data, static_cast<int32_t>(value));
#endif
internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
*reinterpret_cast<A*>(this));
A* result =
HandleScope::CreateHandle(isolate, I::ReadEmbedderData<A>(this, index));
A* result = HandleScope::CreateHandle(isolate, value);
return Local<Value>(reinterpret_cast<Value*>(result));
#else
return SlowGetEmbedderData(index);
......@@ -10971,9 +10987,15 @@ Local<Value> Context::GetEmbedderData(int index) {
void* Context::GetAlignedPointerFromEmbedderData(int index) {
#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
#ifndef V8_ENABLE_CHECKS
typedef internal::Address A;
typedef internal::Internals I;
return I::ReadEmbedderData<void*>(this, index);
A ctx = *reinterpret_cast<const A*>(this);
A embedder_data =
I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset =
I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
return I::ReadRawField<void*>(embedder_data, value_offset);
#else
return SlowGetAlignedPointerFromEmbedderData(index);
#endif
......
......@@ -200,28 +200,47 @@ constexpr size_t kReservedCodeRangePages = 0;
STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));
#ifdef V8_COMPRESS_POINTERS
static_assert(
kSystemPointerSize == kInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
constexpr int kTaggedSize = kInt32Size;
constexpr int kTaggedSizeLog2 = 2;
// These types define raw and atomic storage types for tagged values stored
// on the V8 heap.
using Tagged_t = int32_t;
using AtomicTagged_t = base::Atomic32;
#else
constexpr int kTaggedSize = kSystemPointerSize;
constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
// These types define raw and atomic storage types for tagged values stored
// on the V8 heap.
using Tagged_t = Address;
using AtomicTagged_t = base::AtomicWord;
#endif // V8_COMPRESS_POINTERS
STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
using AsAtomicTagged = base::AsAtomicPointerImpl<AtomicTagged_t>;
STATIC_ASSERT(sizeof(Tagged_t) == kTaggedSize);
STATIC_ASSERT(sizeof(AtomicTagged_t) == kTaggedSize);
STATIC_ASSERT(kTaggedSize == kApiTaggedSize);
// TODO(ishell): use kTaggedSize or kSystemPointerSize instead.
#ifndef V8_COMPRESS_POINTERS
constexpr int kPointerSize = kSystemPointerSize;
constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;
STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
constexpr int kEmbedderDataSlotSize =
#ifdef V8_COMPRESS_POINTERS
kTaggedSize +
#endif
kTaggedSize;
constexpr int kEmbedderDataSlotSize = kSystemPointerSize;
constexpr int kEmbedderDataSlotSizeInTaggedSlots =
kEmbedderDataSlotSize / kTaggedSize;
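Plugging in the x64 numbers: with pointer compression kTaggedSize drops to 4 while the embedder slot stays a full 8-byte system pointer, so one embedder slot now spans two tagged slots. A compile-time check of that arithmetic (standalone constants mirroring the definitions above):

#include <cstdint>

constexpr int kSystemPointerSize = 8;  // x64
#ifdef V8_COMPRESS_POINTERS
constexpr int kTaggedSize = 4;  // kInt32Size under compression
#else
constexpr int kTaggedSize = kSystemPointerSize;
#endif
constexpr int kEmbedderDataSlotSize = kSystemPointerSize;
constexpr int kEmbedderDataSlotSizeInTaggedSlots =
    kEmbedderDataSlotSize / kTaggedSize;
#ifdef V8_COMPRESS_POINTERS
static_assert(kEmbedderDataSlotSizeInTaggedSlots == 2,
              "two tagged halves per embedder slot");
#else
static_assert(kEmbedderDataSlotSizeInTaggedSlots == 1,
              "one full-word embedder slot");
#endif

int main() {}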
......@@ -870,24 +889,24 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;
::i::kHeapObjectTag))
// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
#define OBJECT_POINTER_ALIGN(value) \
(((value) + ::i::kObjectAlignmentMask) & ~::i::kObjectAlignmentMask)
// OBJECT_POINTER_PADDING returns the padding size required to align value
// as a HeapObject pointer
#define OBJECT_POINTER_PADDING(value) (OBJECT_POINTER_ALIGN(value) - (value))
// POINTER_SIZE_ALIGN returns the value aligned as a system pointer.
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
#define POINTER_SIZE_ALIGN(value) \
(((value) + ::i::kPointerAlignmentMask) & ~::i::kPointerAlignmentMask)
// POINTER_SIZE_PADDING returns the padding size required to align value
// as a system pointer.
#define POINTER_SIZE_PADDING(value) (POINTER_SIZE_ALIGN(value) - (value))
// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
#define CODE_POINTER_ALIGN(value) \
(((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
#define CODE_POINTER_ALIGN(value) \
(((value) + ::i::kCodeAlignmentMask) & ~::i::kCodeAlignmentMask)
// CODE_POINTER_PADDING returns the padding size required to align value
// as a generated code segment.
......@@ -895,8 +914,7 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;
// DOUBLE_POINTER_ALIGN returns the value aligned for double pointers.
#define DOUBLE_POINTER_ALIGN(value) \
(((value) + kDoubleAlignmentMask) & ~kDoubleAlignmentMask)
(((value) + ::i::kDoubleAlignmentMask) & ~::i::kDoubleAlignmentMask)
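All of these macros share the same power-of-two rounding trick: with mask = alignment - 1, the expression (value + mask) & ~mask rounds value up to the next multiple of the alignment. For example:

#include <cassert>
#include <cstdint>

constexpr uintptr_t AlignUp(uintptr_t value, uintptr_t alignment) {
  // Same arithmetic as OBJECT_POINTER_ALIGN with mask = alignment - 1.
  const uintptr_t mask = alignment - 1;
  return (value + mask) & ~mask;
}

int main() {
  assert(AlignUp(13, 8) == 16);  // rounded up to the next boundary
  assert(AlignUp(16, 8) == 16);  // already-aligned values are unchanged
  assert(AlignUp(17, 4) == 20);
}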
// Defines hints about receiver values based on structural knowledge.
enum class ConvertReceiverMode : unsigned {
......
......@@ -138,6 +138,32 @@ inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
CopyImpl<kMinComplexMemCopy>(dst, src, num_bytes);
}
inline void MemsetInt32(int32_t* dest, int32_t value, size_t counter) {
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
#define STOS "stosl"
#endif
#if defined(MEMORY_SANITIZER)
// MemorySanitizer does not understand inline assembly.
#undef STOS
#endif
#if defined(__GNUC__) && defined(STOS)
asm volatile(
"cld;"
"rep ; " STOS
: "+&c"(counter), "+&D"(dest)
: "a"(value)
: "memory", "cc");
#else
for (size_t i = 0; i < counter; i++) {
dest[i] = value;
}
#endif
#undef STOS
}
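The new MemsetInt32 mirrors the existing MemsetPointer: on x86 it fills 32-bit cells with a single `rep stosl` (EAX holds the value, the count register the repetitions, the destination register the address); elsewhere it falls back to the plain loop. A usage sketch of the fallback path (hypothetical buffer, same semantics):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Portable equivalent of the "rep stosl" fast path: store |value| into
// |counter| consecutive 32-bit cells starting at |dest|.
void MemsetInt32Portable(int32_t* dest, int32_t value, std::size_t counter) {
  for (std::size_t i = 0; i < counter; i++) dest[i] = value;
}

int main() {
  // E.g. filling a run of compressed tagged slots with one raw value.
  std::vector<int32_t> slots(8, 0);
  MemsetInt32Portable(slots.data(), 0x1234, slots.size());
  for (int32_t v : slots) assert(v == 0x1234);
}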
inline void MemsetPointer(Address* dest, Address value, size_t counter) {
#if V8_HOST_ARCH_IA32
#define STOS "stosl"
......
......@@ -614,33 +614,18 @@ HeapObject MapWord::ToForwardingAddress() {
#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
VerifyPointer(isolate, READ_FIELD(*this, offset));
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
// Ensure upper 32-bits are zeros.
Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
#endif
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
MaybeObject::VerifyMaybeObjectPointer(isolate,
READ_WEAK_FIELD(*this, offset));
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
// Ensure upper 32-bits are zeros.
Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
#endif
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
void HeapObject::VerifySmiField(int offset) {
CHECK(READ_FIELD(*this, offset)->IsSmi());
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
// Ensure upper 32-bits are zeros.
Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
#endif
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
#endif
......
......@@ -402,11 +402,11 @@ class Code : public HeapObject {
// This documents the amount of free space we have in each Code object header
// due to padding for code alignment.
#if V8_TARGET_ARCH_ARM64
static constexpr int kHeaderPaddingSize = 0;
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kHeaderPaddingSize = 0;
#elif V8_TARGET_ARCH_X64
static constexpr int kHeaderPaddingSize = 0;
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
#elif V8_TARGET_ARCH_ARM
static constexpr int kHeaderPaddingSize = 20;
#elif V8_TARGET_ARCH_IA32
......
......@@ -33,6 +33,7 @@ Object EmbedderDataSlot::load_tagged() const {
void EmbedderDataSlot::store_smi(Smi value) {
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(value);
#ifdef V8_COMPRESS_POINTERS
// See gc_safe_store() for the reasons behind two stores.
ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Smi::kZero);
#endif
}
......@@ -43,8 +44,9 @@ void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index,
int slot_offset = EmbedderDataArray::OffsetOfElementAt(entry_index);
ObjectSlot(FIELD_ADDR(array, slot_offset + kTaggedPayloadOffset))
.Relaxed_Store(value);
WRITE_BARRIER(array, slot_offset, value);
WRITE_BARRIER(array, slot_offset + kTaggedPayloadOffset, value);
#ifdef V8_COMPRESS_POINTERS
// See gc_safe_store() for the reasons behind two stores.
ObjectSlot(FIELD_ADDR(array, slot_offset + kRawPayloadOffset))
.Relaxed_Store(Smi::kZero);
#endif
......@@ -56,68 +58,64 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
int slot_offset = object->GetEmbedderFieldOffset(embedder_field_index);
ObjectSlot(FIELD_ADDR(object, slot_offset + kTaggedPayloadOffset))
.Relaxed_Store(value);
WRITE_BARRIER(object, slot_offset, value);
WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
#ifdef V8_COMPRESS_POINTERS
// See gc_safe_store() for the reasons behind two stores.
ObjectSlot(FIELD_ADDR(object, slot_offset + kRawPayloadOffset))
.Relaxed_Store(Smi::kZero);
#endif
}
bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
Object tagged_value =
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
if (!tagged_value->IsSmi()) return false;
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(SmiValuesAre31Bits());
Address value_lo = static_cast<uint32_t>(tagged_value->ptr());
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
Address value_hi =
FullObjectSlot(address() + kRawPayloadOffset).Relaxed_Load()->ptr();
Address value = value_lo | (value_hi << 32);
*out_pointer = reinterpret_cast<void*>(value);
#else
*out_pointer = reinterpret_cast<void*>(tagged_value->ptr());
#endif
return true;
// We don't care about atomicity of access here because embedder slots
// are accessed this way only from the main thread via the API during the
// "mutator" phase, which is properly synchronized with GC (the concurrent
// marker may still look at the tagged part of the embedder slot, but
// read-only access is ok).
Address raw_value = *location();
*out_pointer = reinterpret_cast<void*>(raw_value);
return HAS_SMI_TAG(raw_value);
}
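ToAlignedPointer now reduces to a single full-word load plus a tag check: a value qualifies as an "aligned pointer" exactly when its low bit (the Smi tag bit, per kRequiredPtrAlignment) is clear. A tiny illustration:

#include <cassert>
#include <cstdint>

int main() {
  auto has_smi_tag = [](uintptr_t v) { return (v & 1) == 0; };
  uintptr_t aligned = 0x5008;    // low bit clear: looks like a Smi, accepted
  uintptr_t unaligned = 0x5009;  // low bit set: tagged heap pointer, rejected
  assert(has_smi_tag(aligned));
  assert(!has_smi_tag(unaligned));
}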
bool EmbedderDataSlot::store_aligned_pointer(void* ptr) {
Address value = reinterpret_cast<Address>(ptr);
if (!HAS_SMI_TAG(value)) return false;
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(SmiValuesAre31Bits());
// Sign-extend lower 32-bits in order to form a proper Smi value.
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
Address lo = static_cast<intptr_t>(static_cast<int32_t>(value));
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(lo));
Address hi = value >> 32;
ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Object(hi));
#else
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(value));
#endif
gc_safe_store(value);
return true;
}
EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
const DisallowHeapAllocation& no_gc) const {
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
return RawData{
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load()->ptr(),
#ifdef V8_COMPRESS_POINTERS
FullObjectSlot(address() + kRawPayloadOffset).Relaxed_Load()->ptr()
#endif
};
// We don't care about atomicity of access here because embedder slots
// are accessed this way only by the serializer from the main thread when
// GC is not active (the concurrent marker may still look at the tagged part
// of the embedder slot, but read-only access is ok).
return *location();
}
void EmbedderDataSlot::store_raw(const EmbedderDataSlot::RawData& data,
void EmbedderDataSlot::store_raw(EmbedderDataSlot::RawData data,
const DisallowHeapAllocation& no_gc) {
ObjectSlot(address() + kTaggedPayloadOffset)
.Relaxed_Store(Object(data.data_[0]));
gc_safe_store(data);
}
void EmbedderDataSlot::gc_safe_store(Address value) {
#ifdef V8_COMPRESS_POINTERS
ObjectSlot(address() + kRawPayloadOffset)
.Relaxed_Store(Object(data.data_[1]));
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(SmiValuesAre31Bits());
STATIC_ASSERT(kTaggedSize == kInt32Size);
// We have to do two 32-bit stores here because
// 1) tagged part modifications must be atomic to be properly synchronized
// with the concurrent marker.
// 2) atomicity of full pointer store is not guaranteed for embedder slots
// since the address of the slot may not be kSystemPointerSize aligned
// (only kTaggedSize alignment is guaranteed).
// TODO(ishell, v8:8875): revisit this once the allocation alignment
// inconsistency is fixed.
Address lo = static_cast<intptr_t>(static_cast<int32_t>(value));
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(lo));
Address hi = value >> 32;
ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Object(hi));
#else
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(value));
#endif
}
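The lo/hi split is lossless: the low 32 bits go out sign-extended as a Smi-tagged word, the high 32 bits as the raw word, and a reader that stitches the halves back together recovers the original pointer. A round-trip sketch (assuming a 64-bit host):

#include <cassert>
#include <cstdint>

using Address = uint64_t;

int main() {
  Address value = 0x00007f12845678f0ull;  // Smi-tagged: low bit is clear
  // The two 32-bit stores performed by gc_safe_store:
  Address lo = static_cast<Address>(
      static_cast<intptr_t>(static_cast<int32_t>(value)));  // sign-extended
  Address hi = value >> 32;
  // A reader recombines the halves; the sign-extension bits in |lo|'s upper
  // half are simply overwritten by |hi|.
  Address recombined = (lo & 0xffffffffull) | (hi << 32);
  assert(recombined == value);
}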
......
......@@ -30,41 +30,42 @@ class Object;
// Storing heap object through this slot may require triggering write barriers
// so this operation must be done via static store_tagged() methods.
class EmbedderDataSlot
: public SlotBase<EmbedderDataSlot, Address, kEmbedderDataSlotSize> {
: public SlotBase<EmbedderDataSlot, Address, kTaggedSize> {
public:
EmbedderDataSlot() : SlotBase(kNullAddress) {}
V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index);
V8_INLINE EmbedderDataSlot(JSObject object, int embedder_field_index);
// TODO(ishell): these offsets are currently little-endian specific.
// The less significant part contains the tagged value and the other part
// contains the raw value.
#ifdef V8_TARGET_LITTLE_ENDIAN
static constexpr int kTaggedPayloadOffset = 0;
#else
static constexpr int kTaggedPayloadOffset = kTaggedSize;
#endif
#ifdef V8_COMPRESS_POINTERS
static constexpr int kRawPayloadOffset = kTaggedSize;
// The raw payload is located in the other half of the full pointer.
static constexpr int kRawPayloadOffset = kTaggedSize - kTaggedPayloadOffset;
#endif
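The offset formula keeps the tagged half wherever a 32-bit tagged load would look: offset 0 on little-endian, the upper half on big-endian, with the raw payload taking the opposite half. Spelled out with standalone constants:

constexpr int kTaggedSize = 4;  // under pointer compression
#ifdef V8_TARGET_LITTLE_ENDIAN
constexpr int kTaggedPayloadOffset = 0;
#else
constexpr int kTaggedPayloadOffset = kTaggedSize;
#endif
// The raw payload occupies whichever half the tagged payload does not.
constexpr int kRawPayloadOffset = kTaggedSize - kTaggedPayloadOffset;
static_assert((kTaggedPayloadOffset == 0) != (kRawPayloadOffset == 0),
              "the two halves never alias");

int main() {}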
static constexpr int kRequiredPtrAlignment = kSmiTagSize;
// Opaque type used for storing raw embedder data.
struct RawData {
const Address data_[kEmbedderDataSlotSizeInTaggedSlots];
};
typedef Address RawData;
V8_INLINE Object load_tagged() const;
V8_INLINE void store_smi(Smi value);
// Setting an arbitrary tagged value requires triggering a write barrier
// which requires separate object and offset values, therefore these static
// functions a
// functions also have the target object parameter.
static V8_INLINE void store_tagged(EmbedderDataArray array, int entry_index,
Object value);
static V8_INLINE void store_tagged(JSObject object, int embedder_field_index,
Object value);
// Tries reinterpret the value as an aligned pointer and on success sets
// *out_result to the pointer-like value and returns true. Note, that some
// Smis could still look like an aligned pointers.
// Returns false otherwise.
// Tries to reinterpret the value as an aligned pointer and sets *out_result
// to the pointer-like value. Note that some Smis could still look like
// aligned pointers.
// Returns true on success.
V8_INLINE bool ToAlignedPointer(void** out_result) const;
// Returns true if the pointer was successfully stored or false if the pointer
......@@ -72,8 +73,12 @@ class EmbedderDataSlot
V8_INLINE V8_WARN_UNUSED_RESULT bool store_aligned_pointer(void* ptr);
V8_INLINE RawData load_raw(const DisallowHeapAllocation& no_gc) const;
V8_INLINE void store_raw(const RawData& data,
const DisallowHeapAllocation& no_gc);
V8_INLINE void store_raw(RawData data, const DisallowHeapAllocation& no_gc);
private:
// Stores the given value in the embedder data slot in a concurrent-marker
// friendly manner (the tagged part of the slot is written atomically).
V8_INLINE void gc_safe_store(Address value);
};
} // namespace internal
......
......@@ -25,7 +25,7 @@ namespace internal {
// Note how the comparator operates on Address values, representing the raw
// data found at the given heap location, so you probably want to construct
// an Object from it.
class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t> {
public:
// This class is a stand-in for "Address&" that uses custom atomic
// read/write operations for the actual memory accesses.
......
......@@ -118,13 +118,14 @@ inline void CopyTagged(Address dst, const Address src, size_t num_tagged) {
// Sets |counter| number of kTaggedSize-sized values starting at |start| slot.
inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) {
// TODO(ishell): revisit this implementation, maybe use "rep stosl"
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
Address raw_value = value.ptr();
#ifdef V8_COMPRESS_POINTERS
raw_value = CompressTagged(raw_value);
#endif
Tagged_t raw_value = CompressTagged(value.ptr());
STATIC_ASSERT(kTaggedSize == kInt32Size);
MemsetInt32(start.location(), raw_value, counter);
#else
Address raw_value = value.ptr();
MemsetPointer(start.location(), raw_value, counter);
#endif
}
// Sets |counter| number of kSystemPointerSize-sized values starting at |start|
......
......@@ -12,14 +12,14 @@ namespace internal {
class Object;
template <typename Subclass, typename Data, size_t SlotDataSize>
template <typename Subclass, typename Data,
size_t SlotDataAlignment = sizeof(Data)>
class SlotBase {
public:
using TData = Data;
// TODO(ishell): This should eventually become just sizeof(TData) once
// pointer compression is implemented.
static constexpr size_t kSlotDataSize = SlotDataSize;
static constexpr size_t kSlotDataSize = sizeof(Data);
static constexpr size_t kSlotDataAlignment = SlotDataAlignment;
Subclass& operator++() { // Prefix increment.
ptr_ += kSlotDataSize;
......@@ -72,9 +72,8 @@ class SlotBase {
TData* location() const { return reinterpret_cast<TData*>(ptr_); }
protected:
STATIC_ASSERT(IsAligned(kSlotDataSize, kTaggedSize));
explicit SlotBase(Address ptr) : ptr_(ptr) {
DCHECK(IsAligned(ptr, kTaggedSize));
DCHECK(IsAligned(ptr, kSlotDataAlignment));
}
private:
......@@ -88,8 +87,7 @@ class SlotBase {
// ("slot") holding a tagged pointer (smi or strong heap object).
// Its address() is the address of the slot.
// The slot's contents can be read and written using operator* and store().
class FullObjectSlot
: public SlotBase<FullObjectSlot, Address, kSystemPointerSize> {
class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
public:
using TObject = Object;
using THeapObjectSlot = FullHeapObjectSlot;
......@@ -103,7 +101,7 @@ class FullObjectSlot
: SlotBase(reinterpret_cast<Address>(ptr)) {}
inline explicit FullObjectSlot(Object* object);
template <typename T>
explicit FullObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
explicit FullObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
: SlotBase(slot.address()) {}
// Compares memory representation of a value stored in the slot with given
......@@ -140,7 +138,7 @@ class FullMaybeObjectSlot
explicit FullMaybeObjectSlot(MaybeObject* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T>
explicit FullMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
explicit FullMaybeObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
: SlotBase(slot.address()) {}
inline const MaybeObject operator*() const;
......@@ -158,15 +156,14 @@ class FullMaybeObjectSlot
// The slot's contents can be read and written using operator* and store().
// In case it is known that the slot contains a strong heap object pointer,
// ToHeapObject() can be used to retrieve that heap object.
class FullHeapObjectSlot
: public SlotBase<FullHeapObjectSlot, Address, kSystemPointerSize> {
class FullHeapObjectSlot : public SlotBase<FullHeapObjectSlot, Address> {
public:
FullHeapObjectSlot() : SlotBase(kNullAddress) {}
explicit FullHeapObjectSlot(Address ptr) : SlotBase(ptr) {}
explicit FullHeapObjectSlot(Object* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T>
explicit FullHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
explicit FullHeapObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
: SlotBase(slot.address()) {}
inline const HeapObjectReference operator*() const;
......
......@@ -29,8 +29,6 @@ V8_INLINE Address GetRootFromOnHeapAddress(Address addr) {
// preserving both weak- and smi- tags.
V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
Tagged_t raw_value) {
static_assert(kTaggedSize == kSystemPointerSize, "has to be updated");
static_assert(!std::is_same<int32_t, Tagged_t>::value, "remove cast below");
int32_t value = static_cast<int32_t>(raw_value);
Address root = GetRootFromOnHeapAddress(on_heap_addr);
// Current compression scheme requires value to be sign-extended to intptr_t
......@@ -41,8 +39,6 @@ V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
// Decompresses any tagged value, preserving both weak- and smi- tags.
V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
Tagged_t raw_value) {
static_assert(kTaggedSize == kSystemPointerSize, "has to be updated");
static_assert(!std::is_same<int32_t, Tagged_t>::value, "remove cast below");
int32_t value = static_cast<int32_t>(raw_value);
// |root_mask| is 0 if the |value| was a smi or -1 otherwise.
Address root_mask = -static_cast<Address>(value & kSmiTagMask);
......
......@@ -13,6 +13,7 @@
namespace v8 {
namespace internal {
// See v8:7703 for details about how pointer compression works.
constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB;
constexpr size_t kPtrComprIsolateRootBias = kPtrComprHeapReservationSize / 2;
constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
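These constants encode the layout: a 4GB reservation with the isolate root at its midpoint, roots aligned to 4GB. Any on-heap address can then recover its root by adding the 2GB bias and rounding down to the alignment; a sketch of that computation (assumed here to match what GetRootFromOnHeapAddress does):

#include <cassert>
#include <cstdint>

constexpr uint64_t GB = uint64_t{1} << 30;
constexpr uint64_t kHeapReservationSize = uint64_t{4} * GB;
constexpr uint64_t kIsolateRootBias = kHeapReservationSize / 2;
constexpr uint64_t kIsolateRootAlignment = uint64_t{4} * GB;

uint64_t RootFromOnHeapAddress(uint64_t addr) {
  // Bias into the root's 4GB window, then round down to the alignment.
  return (addr + kIsolateRootBias) & ~(kIsolateRootAlignment - 1);
}

int main() {
  const uint64_t root = 17 * kIsolateRootAlignment;
  // Every address in [root - 2GB, root + 2GB) maps back to |root|.
  assert(RootFromOnHeapAddress(root - kIsolateRootBias) == root);
  assert(RootFromOnHeapAddress(root) == root);
  assert(RootFromOnHeapAddress(root + kIsolateRootBias - 1) == root);
}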
......@@ -21,8 +22,7 @@ constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
// holding a compressed tagged pointer (smi or heap object).
// Its address() is the address of the slot.
// The slot's contents can be read and written using operator* and store().
class CompressedObjectSlot
: public SlotBase<CompressedObjectSlot, Tagged_t, kTaggedSize> {
class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
public:
using TObject = Object;
using THeapObjectSlot = CompressedHeapObjectSlot;
......@@ -37,7 +37,7 @@ class CompressedObjectSlot
explicit CompressedObjectSlot(Object const* const* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T>
explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
: SlotBase(slot.address()) {}
inline Object operator*() const;
......@@ -57,8 +57,7 @@ class CompressedObjectSlot
// forwarding pointer is different.
// Its address() is the address of the slot.
// The slot's contents can be read and written using operator* and store().
class CompressedMapWordSlot
: public SlotBase<CompressedMapWordSlot, Tagged_t, kTaggedSize> {
class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
public:
using TObject = Object;
......@@ -88,7 +87,7 @@ class CompressedMapWordSlot
// Its address() is the address of the slot.
// The slot's contents can be read and written using operator* and store().
class CompressedMaybeObjectSlot
: public SlotBase<CompressedMaybeObjectSlot, Tagged_t, kTaggedSize> {
: public SlotBase<CompressedMaybeObjectSlot, Tagged_t> {
public:
using TObject = MaybeObject;
using THeapObjectSlot = CompressedHeapObjectSlot;
......@@ -102,7 +101,8 @@ class CompressedMaybeObjectSlot
explicit CompressedMaybeObjectSlot(MaybeObject* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T>
explicit CompressedMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
explicit CompressedMaybeObjectSlot(
SlotBase<T, TData, kSlotDataAlignment> slot)
: SlotBase(slot.address()) {}
inline MaybeObject operator*() const;
......@@ -121,14 +121,14 @@ class CompressedMaybeObjectSlot
// In case it is known that the slot contains a strong heap object pointer,
// ToHeapObject() can be used to retrieve that heap object.
class CompressedHeapObjectSlot
: public SlotBase<CompressedHeapObjectSlot, Tagged_t, kTaggedSize> {
: public SlotBase<CompressedHeapObjectSlot, Tagged_t> {
public:
CompressedHeapObjectSlot() : SlotBase(kNullAddress) {}
explicit CompressedHeapObjectSlot(Address ptr) : SlotBase(ptr) {}
explicit CompressedHeapObjectSlot(Object* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T>
explicit CompressedHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
explicit CompressedHeapObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
: SlotBase(slot.address()) {}
inline HeapObjectReference operator*() const;
......
......@@ -182,7 +182,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// with embedder callbacks.
for (int i = 0; i < embedder_fields_count; i++) {
if (!DataIsEmpty(serialized_data[i])) {
EmbedderDataSlot(js_obj, i).store_raw({kNullAddress}, no_gc);
EmbedderDataSlot(js_obj, i).store_raw(kNullAddress, no_gc);
}
}
......
......@@ -426,50 +426,45 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
// - Instructions on operands/registers with pointer size use 'p'.
#define DECLARE_INSTRUCTION(instruction) \
template <class P1> \
void instruction##_tagged(P1 p1) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
emit_##instruction(p1, COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
} \
\
template <class P1> \
void instruction##l(P1 p1) { \
emit_##instruction(p1, kInt32Size); \
} \
\
template <class P1> \
void instruction##q(P1 p1) { \
emit_##instruction(p1, kInt64Size); \
} \
\
template <class P1, class P2> \
void instruction##_tagged(P1 p1, P2 p2) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
emit_##instruction(p1, p2, \
COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
} \
\
template <class P1, class P2> \
void instruction##l(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt32Size); \
} \
\
template <class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
} \
\
template <class P1, class P2, class P3> \
void instruction##l(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt32Size); \
} \
\
template <class P1, class P2, class P3> \
void instruction##q(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt64Size); \
#define DECLARE_INSTRUCTION(instruction) \
template <class P1> \
void instruction##_tagged(P1 p1) { \
emit_##instruction(p1, kTaggedSize); \
} \
\
template <class P1> \
void instruction##l(P1 p1) { \
emit_##instruction(p1, kInt32Size); \
} \
\
template <class P1> \
void instruction##q(P1 p1) { \
emit_##instruction(p1, kInt64Size); \
} \
\
template <class P1, class P2> \
void instruction##_tagged(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kTaggedSize); \
} \
\
template <class P1, class P2> \
void instruction##l(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt32Size); \
} \
\
template <class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
} \
\
template <class P1, class P2, class P3> \
void instruction##l(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt32Size); \
} \
\
template <class P1, class P2, class P3> \
void instruction##q(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
......
......@@ -269,7 +269,6 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
#ifdef V8_COMPRESS_POINTERS
RecordComment("[ StoreTagged");
movl(dst_field_operand, value);
movl(Operand(dst_field_operand, 4), Immediate(0));
RecordComment("]");
#else
movq(dst_field_operand, value);
......@@ -281,7 +280,6 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
#ifdef V8_COMPRESS_POINTERS
RecordComment("[ StoreTagged");
movl(dst_field_operand, value);
movl(Operand(dst_field_operand, 4), Immediate(0));
RecordComment("]");
#else
movq(dst_field_operand, value);
......@@ -1124,7 +1122,11 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
movsxlq(dst, dst);
} else {
DCHECK(SmiValuesAre31Bits());
#ifdef V8_COMPRESS_POINTERS
movsxlq(dst, src);
#else
movq(dst, src);
#endif
sarq(dst, Immediate(kSmiShift));
}
}
......@@ -1132,7 +1134,7 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
cmpq(smi1, smi2);
cmp_tagged(smi1, smi2);
}
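Switching SmiCompare from cmpq to cmp_tagged works because, with compression enabled, 31-bit Smis carry both payload and tag entirely in the low 32 bits, so a 32-bit compare orders two Smis exactly as the full 64-bit compare did. A quick check of that equivalence:

#include <cassert>
#include <cstdint>

int64_t ToSmi(int32_t value) {
  // 31-bit Smi encoding: payload shifted left by one, then sign-extended.
  return static_cast<int64_t>(
      static_cast<int32_t>(static_cast<uint32_t>(value) << 1));
}

int main() {
  const int64_t a = ToSmi(100), b = ToSmi(-7);
  // Comparing the low halves (what cmp_tagged emits) agrees with the full
  // 64-bit comparison for any pair of Smis.
  assert((static_cast<int32_t>(a) < static_cast<int32_t>(b)) == (a < b));
  assert((static_cast<int32_t>(a) == static_cast<int32_t>(b)) == (a == b));
}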
void MacroAssembler::SmiCompare(Register dst, Smi src) {
......
......@@ -131,9 +131,18 @@ class BufferedRawMachineAssemblerTester
// Store node is provided as a parameter. By storing the return value in
// memory it is possible to return 64 bit values.
void Return(Node* input) {
Store(MachineTypeForC<ReturnType>().representation(),
RawMachineAssembler::Parameter(return_parameter_index_), input,
kNoWriteBarrier);
if (COMPRESS_POINTERS_BOOL && MachineTypeForC<ReturnType>().IsTagged()) {
// Since we return values by storing them to an off-heap location,
// generate a full-word store here.
Store(MachineType::PointerRepresentation(),
RawMachineAssembler::Parameter(return_parameter_index_),
BitcastTaggedToWord(input), kNoWriteBarrier);
} else {
Store(MachineTypeForC<ReturnType>().representation(),
RawMachineAssembler::Parameter(return_parameter_index_), input,
kNoWriteBarrier);
}
RawMachineAssembler::Return(Int32Constant(1234));
}
......
......@@ -199,13 +199,10 @@ void CheckEq(CType in_value, CType out_value) {
// Specializations for checking the result of compressing store.
template <>
void CheckEq<Object>(Object in_value, Object out_value) {
Isolate* isolate = CcTest::InitIsolateOnce();
// |out_value| is compressed. Check that it's valid.
CHECK_EQ(CompressTagged(in_value->ptr()), out_value->ptr());
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
CHECK_EQ(in_value->ptr(),
DecompressTaggedAny(isolate->isolate_root(),
static_cast<int32_t>(out_value->ptr())));
// Compare only the lower 32 bits of the value because tagged loads/stores
// are 32-bit operations anyway.
CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
static_cast<Tagged_t>(out_value.ptr()));
}
template <>
......@@ -269,7 +266,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
// When pointer compression is enabled, we need to access only
// the lower 32 bits of the tagged value while the buffer contains
// full 64-bit values.
base_pointer = LSB(base_pointer, kSystemPointerSize / 2);
base_pointer = LSB(base_pointer, kTaggedSize);
}
#endif
Node* base = m.PointerConstant(base_pointer);
......