Commit 3649dc18 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Use [Maybe]ObjectSlots as bottlenecks for accessing tagged fields

This CL introduces Tagged_t and AtomicTagged_t typedefs which represent
the storage type of tagged values in V8 heap.

Bug: v8:7703
Change-Id: Ib57e85ea073eaf896b6406cf0f62adcef9a114ce
Reviewed-on: https://chromium-review.googlesource.com/c/1352294
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57878}
parent 43a532fc
...@@ -457,9 +457,11 @@ static void SortIndices( ...@@ -457,9 +457,11 @@ static void SortIndices(
// store operations that are safe for concurrent marking. // store operations that are safe for concurrent marking.
AtomicSlot start(indices->GetFirstElementAddress()); AtomicSlot start(indices->GetFirstElementAddress());
std::sort(start, start + sort_size, std::sort(start, start + sort_size,
[isolate](Address elementA, Address elementB) { [isolate](Tagged_t elementA, Tagged_t elementB) {
const Object* a = reinterpret_cast<Object*>(elementA); // TODO(ishell): revisit the code below
const Object* b = reinterpret_cast<Object*>(elementB); STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
ObjectPtr a(elementA);
ObjectPtr b(elementB);
if (a->IsSmi() || !a->IsUndefined(isolate)) { if (a->IsSmi() || !a->IsUndefined(isolate)) {
if (!b->IsSmi() && b->IsUndefined(isolate)) { if (!b->IsSmi() && b->IsUndefined(isolate)) {
return true; return true;
......
...@@ -166,7 +166,7 @@ void FeedbackVector::set(int index, MaybeObject value, WriteBarrierMode mode) { ...@@ -166,7 +166,7 @@ void FeedbackVector::set(int index, MaybeObject value, WriteBarrierMode mode) {
DCHECK_GE(index, 0); DCHECK_GE(index, 0);
DCHECK_LT(index, this->length()); DCHECK_LT(index, this->length());
int offset = kFeedbackSlotsOffset + index * kPointerSize; int offset = kFeedbackSlotsOffset + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value); RELAXED_WRITE_WEAK_FIELD(this, offset, value);
CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode); CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
} }
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <ostream> #include <ostream>
#include "include/v8-internal.h" #include "include/v8-internal.h"
#include "src/base/atomic-utils.h"
#include "src/base/build_config.h" #include "src/base/build_config.h"
#include "src/base/flags.h" #include "src/base/flags.h"
#include "src/base/logging.h" #include "src/base/logging.h"
...@@ -207,6 +208,14 @@ constexpr int kTaggedSize = kSystemPointerSize; ...@@ -207,6 +208,14 @@ constexpr int kTaggedSize = kSystemPointerSize;
constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2; constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2)); STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
// These types define raw and atomic storage types for tagged values stored
// on V8 heap.
using Tagged_t = Address;
using AtomicTagged_t = base::AtomicWord;
using AsAtomicTagged = base::AsAtomicPointerImpl<AtomicTagged_t>;
STATIC_ASSERT(sizeof(Tagged_t) == kTaggedSize);
STATIC_ASSERT(sizeof(AtomicTagged_t) == kTaggedSize);
// TODO(ishell): use kTaggedSize or kSystemPointerSize instead. // TODO(ishell): use kTaggedSize or kSystemPointerSize instead.
constexpr int kPointerSize = kSystemPointerSize; constexpr int kPointerSize = kSystemPointerSize;
constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2; constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#include "src/objects.h" #include "src/objects.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h" #include "src/base/bits.h"
#include "src/base/tsan.h" #include "src/base/tsan.h"
#include "src/builtins/builtins.h" #include "src/builtins/builtins.h"
...@@ -889,11 +888,9 @@ ObjectSlot HeapObject::map_slot() { ...@@ -889,11 +888,9 @@ ObjectSlot HeapObject::map_slot() {
} }
MapWord HeapObject::map_word() const { MapWord HeapObject::map_word() const {
return MapWord( return MapWord(RELAXED_READ_FIELD(this, kMapOffset).ptr());
reinterpret_cast<uintptr_t>(RELAXED_READ_FIELD(this, kMapOffset)));
} }
void HeapObject::set_map_word(MapWord map_word) { void HeapObject::set_map_word(MapWord map_word) {
RELAXED_WRITE_FIELD(this, kMapOffset, RELAXED_WRITE_FIELD(this, kMapOffset,
reinterpret_cast<Object*>(map_word.value_)); reinterpret_cast<Object*>(map_word.value_));
...@@ -1408,7 +1405,7 @@ MaybeObject DescriptorArray::get(int index) const { ...@@ -1408,7 +1405,7 @@ MaybeObject DescriptorArray::get(int index) const {
void DescriptorArray::set(int index, MaybeObject value) { void DescriptorArray::set(int index, MaybeObject value) {
DCHECK(index >= 0 && index < this->length()); DCHECK(index >= 0 && index < this->length());
RELAXED_WRITE_FIELD(this, offset(index), value); RELAXED_WRITE_WEAK_FIELD(this, offset(index), value);
WEAK_WRITE_BARRIER(this, offset(index), value); WEAK_WRITE_BARRIER(this, offset(index), value);
} }
......
...@@ -18041,7 +18041,9 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() { ...@@ -18041,7 +18041,9 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
template <typename Dictionary> template <typename Dictionary>
struct EnumIndexComparator { struct EnumIndexComparator {
explicit EnumIndexComparator(Dictionary dict) : dict(dict) {} explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
bool operator()(Address a, Address b) { bool operator()(Tagged_t a, Tagged_t b) {
// TODO(ishell): revisit the code below
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
PropertyDetails da(dict->DetailsAt(Smi(a).value())); PropertyDetails da(dict->DetailsAt(Smi(a).value()));
PropertyDetails db(dict->DetailsAt(Smi(b).value())); PropertyDetails db(dict->DetailsAt(Smi(b).value()));
return da.dictionary_index() < db.dictionary_index(); return da.dictionary_index() < db.dictionary_index();
......
...@@ -30,7 +30,7 @@ class Object; ...@@ -30,7 +30,7 @@ class Object;
// Storing heap object through this slot may require triggering write barriers // Storing heap object through this slot may require triggering write barriers
// so this operation must be done via static store_tagged() methods. // so this operation must be done via static store_tagged() methods.
class EmbedderDataSlot class EmbedderDataSlot
: public SlotBase<EmbedderDataSlot, kEmbedderDataSlotSize> { : public SlotBase<EmbedderDataSlot, Address, kEmbedderDataSlotSize> {
public: public:
EmbedderDataSlot() : SlotBase(kNullAddress) {} EmbedderDataSlot() : SlotBase(kNullAddress) {}
V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index); V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index);
......
...@@ -277,7 +277,7 @@ void WeakFixedArray::Set(int index, MaybeObject value) { ...@@ -277,7 +277,7 @@ void WeakFixedArray::Set(int index, MaybeObject value) {
DCHECK_GE(index, 0); DCHECK_GE(index, 0);
DCHECK_LT(index, length()); DCHECK_LT(index, length());
int offset = OffsetOfElementAt(index); int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value); RELAXED_WRITE_WEAK_FIELD(this, offset, value);
WEAK_WRITE_BARRIER(this, offset, value); WEAK_WRITE_BARRIER(this, offset, value);
} }
...@@ -285,7 +285,7 @@ void WeakFixedArray::Set(int index, MaybeObject value, WriteBarrierMode mode) { ...@@ -285,7 +285,7 @@ void WeakFixedArray::Set(int index, MaybeObject value, WriteBarrierMode mode) {
DCHECK_GE(index, 0); DCHECK_GE(index, 0);
DCHECK_LT(index, length()); DCHECK_LT(index, length());
int offset = OffsetOfElementAt(index); int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value); RELAXED_WRITE_WEAK_FIELD(this, offset, value);
CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode); CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
} }
...@@ -306,7 +306,7 @@ void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) { ...@@ -306,7 +306,7 @@ void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
DCHECK_GE(index, 0); DCHECK_GE(index, 0);
DCHECK_LT(index, this->capacity()); DCHECK_LT(index, this->capacity());
int offset = OffsetOfElementAt(index); int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value); RELAXED_WRITE_WEAK_FIELD(this, offset, value);
CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode); CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
} }
......
...@@ -118,8 +118,7 @@ ObjectSlot HeapObjectPtr::map_slot() { ...@@ -118,8 +118,7 @@ ObjectSlot HeapObjectPtr::map_slot() {
} }
MapWord HeapObjectPtr::map_word() const { MapWord HeapObjectPtr::map_word() const {
return MapWord( return MapWord(RELAXED_READ_FIELD(this, kMapOffset).ptr());
reinterpret_cast<Address>(RELAXED_READ_FIELD(this, kMapOffset)));
} }
void HeapObjectPtr::set_map_word(MapWord map_word) { void HeapObjectPtr::set_map_word(MapWord map_word) {
......
...@@ -296,49 +296,39 @@ ...@@ -296,49 +296,39 @@
#define FIELD_ADDR(p, offset) ((p)->ptr() + offset - kHeapObjectTag) #define FIELD_ADDR(p, offset) ((p)->ptr() + offset - kHeapObjectTag)
#define READ_FIELD(p, offset) \ #define READ_FIELD(p, offset) (*ObjectSlot(FIELD_ADDR(p, offset)))
(*reinterpret_cast<Object* const*>(FIELD_ADDR(p, offset)))
#define READ_WEAK_FIELD(p, offset) \ #define READ_WEAK_FIELD(p, offset) (*MaybeObjectSlot(FIELD_ADDR(p, offset)))
MaybeObject(*reinterpret_cast<Address*>(FIELD_ADDR(p, offset)))
#define ACQUIRE_READ_FIELD(p, offset) \ #define ACQUIRE_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::Acquire_Load( \ ObjectSlot(FIELD_ADDR(p, offset)).Acquire_Load1()
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
#define RELAXED_READ_FIELD(p, offset) \ #define RELAXED_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::Relaxed_Load( \ ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
#define RELAXED_READ_WEAK_FIELD(p, offset) \ #define RELAXED_READ_WEAK_FIELD(p, offset) \
MaybeObject(base::Relaxed_Load( \ MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
#ifdef V8_CONCURRENT_MARKING #ifdef V8_CONCURRENT_MARKING
#define WRITE_FIELD(p, offset, value) \ #define WRITE_FIELD(p, offset, value) \
base::Relaxed_Store( \ ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store1(value)
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ #define WRITE_WEAK_FIELD(p, offset, value) \
static_cast<base::AtomicWord>((value)->ptr())); MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
#define WRITE_WEAK_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
static_cast<base::AtomicWord>(value.ptr()));
#else #else
#define WRITE_FIELD(p, offset, value) \ #define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) ObjectSlot(FIELD_ADDR(p, offset)).store(value)
#define WRITE_WEAK_FIELD(p, offset, value) \ #define WRITE_WEAK_FIELD(p, offset, value) \
(*reinterpret_cast<Address*>(FIELD_ADDR(p, offset)) = value.ptr()) MaybeObjectSlot(FIELD_ADDR(p, offset)).store(value)
#endif #endif
#define RELEASE_WRITE_FIELD(p, offset, value) \ #define RELEASE_WRITE_FIELD(p, offset, value) \
base::Release_Store( \ ObjectSlot(FIELD_ADDR(p, offset)).Release_Store1(value)
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
static_cast<base::AtomicWord>((value)->ptr()));
#define RELAXED_WRITE_FIELD(p, offset, value) \ #define RELAXED_WRITE_FIELD(p, offset, value) \
base::Relaxed_Store( \ ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store1(value)
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
static_cast<base::AtomicWord>((value)->ptr())); #define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
#define WRITE_BARRIER(object, offset, value) \ #define WRITE_BARRIER(object, offset, value) \
do { \ do { \
......
...@@ -18,41 +18,40 @@ namespace internal { ...@@ -18,41 +18,40 @@ namespace internal {
// FixedArray array; // FixedArray array;
// AtomicSlot start(array->GetFirstElementAddress()); // AtomicSlot start(array->GetFirstElementAddress());
// std::sort(start, start + given_length, // std::sort(start, start + given_length,
// [](Address a, Address b) { // [](Tagged_t a, Tagged_t b) {
// // Decompress a and b if necessary.
// return my_comparison(a, b); // return my_comparison(a, b);
// }); // });
// Note how the comparator operates on Address values, representing the raw // Note how the comparator operates on Address values, representing the raw
// data found at the given heap location, so you probably want to construct // data found at the given heap location, so you probably want to construct
// an Object from it. // an Object from it.
class AtomicSlot : public SlotBase<AtomicSlot, kTaggedSize> { class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
public: public:
// This class is a stand-in for "Address&" that uses custom atomic // This class is a stand-in for "Address&" that uses custom atomic
// read/write operations for the actual memory accesses. // read/write operations for the actual memory accesses.
class Reference { class Reference {
public: public:
explicit Reference(Address* address) : address_(address) {} explicit Reference(Tagged_t* address) : address_(address) {}
Reference(const Reference& other) : address_(other.address_) {} Reference(const Reference& other) : address_(other.address_) {}
Reference& operator=(const Reference& other) { Reference& operator=(const Reference& other) {
base::AsAtomicWord::Relaxed_Store( AsAtomicTagged::Relaxed_Store(
address_, base::AsAtomicWord::Relaxed_Load(other.address_)); address_, AsAtomicTagged::Relaxed_Load(other.address_));
return *this; return *this;
} }
Reference& operator=(Address value) { Reference& operator=(Tagged_t value) {
base::AsAtomicWord::Relaxed_Store(address_, value); AsAtomicTagged::Relaxed_Store(address_, value);
return *this; return *this;
} }
// Values of type AtomicSlot::reference must be implicitly convertible // Values of type AtomicSlot::reference must be implicitly convertible
// to AtomicSlot::value_type. // to AtomicSlot::value_type.
operator Address() const { operator Tagged_t() const { return AsAtomicTagged::Relaxed_Load(address_); }
return base::AsAtomicWord::Relaxed_Load(address_);
}
void swap(Reference& other) { void swap(Reference& other) {
Address tmp = value(); Address tmp = value();
base::AsAtomicWord::Relaxed_Store(address_, other.value()); AsAtomicTagged::Relaxed_Store(address_, other.value());
base::AsAtomicWord::Relaxed_Store(other.address_, tmp); AsAtomicTagged::Relaxed_Store(other.address_, tmp);
} }
bool operator<(const Reference& other) const { bool operator<(const Reference& other) const {
...@@ -64,15 +63,15 @@ class AtomicSlot : public SlotBase<AtomicSlot, kTaggedSize> { ...@@ -64,15 +63,15 @@ class AtomicSlot : public SlotBase<AtomicSlot, kTaggedSize> {
} }
private: private:
Address value() const { return base::AsAtomicWord::Relaxed_Load(address_); } Address value() const { return AsAtomicTagged::Relaxed_Load(address_); }
Address* address_; Tagged_t* address_;
}; };
// The rest of this class follows C++'s "RandomAccessIterator" requirements. // The rest of this class follows C++'s "RandomAccessIterator" requirements.
// Most of the heavy lifting is inherited from SlotBase. // Most of the heavy lifting is inherited from SlotBase.
typedef int difference_type; typedef int difference_type;
typedef Address value_type; typedef Tagged_t value_type;
typedef Reference reference; typedef Reference reference;
typedef void* pointer; // Must be present, but should not be used. typedef void* pointer; // Must be present, but should not be used.
typedef std::random_access_iterator_tag iterator_category; typedef std::random_access_iterator_tag iterator_category;
...@@ -82,16 +81,16 @@ class AtomicSlot : public SlotBase<AtomicSlot, kTaggedSize> { ...@@ -82,16 +81,16 @@ class AtomicSlot : public SlotBase<AtomicSlot, kTaggedSize> {
explicit AtomicSlot(ObjectSlot slot) : SlotBase(slot.address()) {} explicit AtomicSlot(ObjectSlot slot) : SlotBase(slot.address()) {}
Reference operator*() const { Reference operator*() const {
return Reference(reinterpret_cast<Address*>(address())); return Reference(reinterpret_cast<Tagged_t*>(address()));
} }
Reference operator[](difference_type i) const { Reference operator[](difference_type i) const {
return Reference(reinterpret_cast<Address*>(address() + i * kPointerSize)); return Reference(reinterpret_cast<Tagged_t*>(address() + i * kTaggedSize));
} }
friend void swap(Reference lhs, Reference rhs) { lhs.swap(rhs); } friend void swap(Reference lhs, Reference rhs) { lhs.swap(rhs); }
friend difference_type operator-(AtomicSlot a, AtomicSlot b) { friend difference_type operator-(AtomicSlot a, AtomicSlot b) {
return static_cast<int>(a.address() - b.address()) / kPointerSize; return static_cast<int>(a.address() - b.address()) / kTaggedSize;
} }
}; };
......
...@@ -18,49 +18,69 @@ namespace internal { ...@@ -18,49 +18,69 @@ namespace internal {
ObjectSlot::ObjectSlot(ObjectPtr* object) ObjectSlot::ObjectSlot(ObjectPtr* object)
: SlotBase(reinterpret_cast<Address>(&object->ptr_)) {} : SlotBase(reinterpret_cast<Address>(&object->ptr_)) {}
void ObjectSlot::store(Object* value) { *location() = value->ptr(); } void ObjectSlot::store(Object* value) const { *location() = value->ptr(); }
ObjectPtr ObjectSlot::Acquire_Load() const { ObjectPtr ObjectSlot::Acquire_Load() const {
return ObjectPtr(base::AsAtomicWord::Acquire_Load(location())); return ObjectPtr(AsAtomicTagged::Acquire_Load(location()));
}
Object* ObjectSlot::Acquire_Load1() const {
return reinterpret_cast<Object*>(AsAtomicTagged::Acquire_Load(location()));
} }
ObjectPtr ObjectSlot::Relaxed_Load() const { ObjectPtr ObjectSlot::Relaxed_Load() const {
return ObjectPtr(base::AsAtomicWord::Relaxed_Load(location())); return ObjectPtr(AsAtomicTagged::Relaxed_Load(location()));
} }
void ObjectSlot::Relaxed_Store(ObjectPtr value) const { void ObjectSlot::Relaxed_Store(ObjectPtr value) const {
base::AsAtomicWord::Relaxed_Store(location(), value->ptr()); AsAtomicTagged::Relaxed_Store(location(), value->ptr());
}
void ObjectSlot::Relaxed_Store1(Object* value) const {
AsAtomicTagged::Relaxed_Store(location(), value->ptr());
}
void ObjectSlot::Release_Store1(Object* value) const {
AsAtomicTagged::Release_Store(location(), value->ptr());
} }
void ObjectSlot::Release_Store(ObjectPtr value) const { void ObjectSlot::Release_Store(ObjectPtr value) const {
base::AsAtomicWord::Release_Store(location(), value->ptr()); AsAtomicTagged::Release_Store(location(), value->ptr());
} }
ObjectPtr ObjectSlot::Release_CompareAndSwap(ObjectPtr old, ObjectPtr ObjectSlot::Release_CompareAndSwap(ObjectPtr old,
ObjectPtr target) const { ObjectPtr target) const {
Address result = base::AsAtomicWord::Release_CompareAndSwap( Address result = AsAtomicTagged::Release_CompareAndSwap(
location(), old->ptr(), target->ptr()); location(), old->ptr(), target->ptr());
return ObjectPtr(result); return ObjectPtr(result);
} }
MaybeObject MaybeObjectSlot::operator*() { return MaybeObject(*location()); } MaybeObject MaybeObjectSlot::operator*() const {
return MaybeObject(*location());
}
void MaybeObjectSlot::store(MaybeObject value) { *location() = value.ptr(); } void MaybeObjectSlot::store(MaybeObject value) const {
*location() = value.ptr();
}
MaybeObject MaybeObjectSlot::Relaxed_Load() const { MaybeObject MaybeObjectSlot::Relaxed_Load() const {
return MaybeObject(base::AsAtomicWord::Relaxed_Load(location())); return MaybeObject(AsAtomicTagged::Relaxed_Load(location()));
}
void MaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
AsAtomicTagged::Relaxed_Store(location(), value->ptr());
} }
void MaybeObjectSlot::Release_CompareAndSwap(MaybeObject old, void MaybeObjectSlot::Release_CompareAndSwap(MaybeObject old,
MaybeObject target) const { MaybeObject target) const {
base::AsAtomicWord::Release_CompareAndSwap(location(), old.ptr(), AsAtomicTagged::Release_CompareAndSwap(location(), old.ptr(), target.ptr());
target.ptr());
} }
HeapObjectReference HeapObjectSlot::operator*() { HeapObjectReference HeapObjectSlot::operator*() const {
return HeapObjectReference(*location()); return HeapObjectReference(*location());
} }
void HeapObjectSlot::store(HeapObjectReference value) {
void HeapObjectSlot::store(HeapObjectReference value) const {
*location() = value.ptr(); *location() = value.ptr();
} }
......
...@@ -12,9 +12,12 @@ namespace internal { ...@@ -12,9 +12,12 @@ namespace internal {
class ObjectPtr; class ObjectPtr;
template <typename Subclass, size_t SlotDataSize> template <typename Subclass, typename Data, size_t SlotDataSize>
class SlotBase { class SlotBase {
public: public:
using TData = Data;
// TODO(ishell): This should eventually become just sizeof(TData) once
// pointer compression is implemented.
static constexpr size_t kSlotDataSize = SlotDataSize; static constexpr size_t kSlotDataSize = SlotDataSize;
Subclass& operator++() { // Prefix increment. Subclass& operator++() { // Prefix increment.
...@@ -65,7 +68,7 @@ class SlotBase { ...@@ -65,7 +68,7 @@ class SlotBase {
Address address() const { return ptr_; } Address address() const { return ptr_; }
// For symmetry with Handle. // For symmetry with Handle.
Address* location() const { return reinterpret_cast<Address*>(ptr_); } TData* location() const { return reinterpret_cast<TData*>(ptr_); }
protected: protected:
STATIC_ASSERT(IsAligned(kSlotDataSize, kTaggedSize)); STATIC_ASSERT(IsAligned(kSlotDataSize, kTaggedSize));
...@@ -80,11 +83,11 @@ class SlotBase { ...@@ -80,11 +83,11 @@ class SlotBase {
Address ptr_; Address ptr_;
}; };
// An ObjectSlot instance describes a pointer-sized field ("slot") holding // An ObjectSlot instance describes a kTaggedSize-sized field ("slot") holding
// a tagged pointer (smi or heap object). // a tagged pointer (smi or heap object).
// Its address() is the address of the slot. // Its address() is the address of the slot.
// The slot's contents can be read and written using operator* and store(). // The slot's contents can be read and written using operator* and store().
class ObjectSlot : public SlotBase<ObjectSlot, kTaggedSize> { class ObjectSlot : public SlotBase<ObjectSlot, Tagged_t, kTaggedSize> {
public: public:
ObjectSlot() : SlotBase(kNullAddress) {} ObjectSlot() : SlotBase(kNullAddress) {}
explicit ObjectSlot(Address ptr) : SlotBase(ptr) {} explicit ObjectSlot(Address ptr) : SlotBase(ptr) {}
...@@ -94,11 +97,11 @@ class ObjectSlot : public SlotBase<ObjectSlot, kTaggedSize> { ...@@ -94,11 +97,11 @@ class ObjectSlot : public SlotBase<ObjectSlot, kTaggedSize> {
explicit ObjectSlot(Object const* const* ptr) explicit ObjectSlot(Object const* const* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {} : SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T> template <typename T>
explicit ObjectSlot(SlotBase<T, kSlotDataSize> slot) explicit ObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
: SlotBase(slot.address()) {} : SlotBase(slot.address()) {}
Object* operator*() const { return *reinterpret_cast<Object**>(address()); } Object* operator*() const { return *reinterpret_cast<Object**>(address()); }
inline void store(Object* value); inline void store(Object* value) const;
inline ObjectPtr Acquire_Load() const; inline ObjectPtr Acquire_Load() const;
inline ObjectPtr Relaxed_Load() const; inline ObjectPtr Relaxed_Load() const;
...@@ -106,51 +109,60 @@ class ObjectSlot : public SlotBase<ObjectSlot, kTaggedSize> { ...@@ -106,51 +109,60 @@ class ObjectSlot : public SlotBase<ObjectSlot, kTaggedSize> {
inline void Release_Store(ObjectPtr value) const; inline void Release_Store(ObjectPtr value) const;
inline ObjectPtr Release_CompareAndSwap(ObjectPtr old, inline ObjectPtr Release_CompareAndSwap(ObjectPtr old,
ObjectPtr target) const; ObjectPtr target) const;
// Old-style alternative for the above, temporarily separate to allow
// incremental transition.
// TODO(3770): Get rid of the duplication when the migration is complete.
inline Object* Acquire_Load1() const;
inline void Relaxed_Store1(Object* value) const;
inline void Release_Store1(Object* value) const;
}; };
// A MaybeObjectSlot instance describes a pointer-sized field ("slot") holding // A MaybeObjectSlot instance describes a kTaggedSize-sized field ("slot")
// a possibly-weak tagged pointer (think: MaybeObject). // holding a possibly-weak tagged pointer (think: MaybeObject).
// Its address() is the address of the slot. // Its address() is the address of the slot.
// The slot's contents can be read and written using operator* and store(). // The slot's contents can be read and written using operator* and store().
class MaybeObjectSlot : public SlotBase<MaybeObjectSlot, kTaggedSize> { class MaybeObjectSlot
: public SlotBase<MaybeObjectSlot, Tagged_t, kTaggedSize> {
public: public:
explicit MaybeObjectSlot(Address ptr) : SlotBase(ptr) {} explicit MaybeObjectSlot(Address ptr) : SlotBase(ptr) {}
explicit MaybeObjectSlot(Object** ptr) explicit MaybeObjectSlot(Object** ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {} : SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T> template <typename T>
explicit MaybeObjectSlot(SlotBase<T, kSlotDataSize> slot) explicit MaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
: SlotBase(slot.address()) {} : SlotBase(slot.address()) {}
inline MaybeObject operator*(); inline MaybeObject operator*() const;
inline void store(MaybeObject value); inline void store(MaybeObject value) const;
inline MaybeObject Relaxed_Load() const; inline MaybeObject Relaxed_Load() const;
inline void Relaxed_Store(MaybeObject value) const;
inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const; inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
}; };
// A HeapObjectSlot instance describes a pointer-sized field ("slot") holding // A HeapObjectSlot instance describes a kTaggedSize-sized field ("slot")
// a weak or strong pointer to a heap object (think: HeapObjectReference). // holding a weak or strong pointer to a heap object (think:
// HeapObjectReference).
// Its address() is the address of the slot. // Its address() is the address of the slot.
// The slot's contents can be read and written using operator* and store(). // The slot's contents can be read and written using operator* and store().
// In case it is known that that slot contains a strong heap object pointer, // In case it is known that that slot contains a strong heap object pointer,
// ToHeapObject() can be used to retrieve that heap object. // ToHeapObject() can be used to retrieve that heap object.
class HeapObjectSlot : public SlotBase<HeapObjectSlot, kTaggedSize> { class HeapObjectSlot : public SlotBase<HeapObjectSlot, Tagged_t, kTaggedSize> {
public: public:
HeapObjectSlot() : SlotBase(kNullAddress) {} HeapObjectSlot() : SlotBase(kNullAddress) {}
explicit HeapObjectSlot(Address ptr) : SlotBase(ptr) {} explicit HeapObjectSlot(Address ptr) : SlotBase(ptr) {}
template <typename T> template <typename T>
explicit HeapObjectSlot(SlotBase<T, kSlotDataSize> slot) explicit HeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
: SlotBase(slot.address()) {} : SlotBase(slot.address()) {}
inline HeapObjectReference operator*(); inline HeapObjectReference operator*() const;
inline void store(HeapObjectReference value); inline void store(HeapObjectReference value) const;
HeapObject* ToHeapObject() { HeapObject* ToHeapObject() const {
DCHECK((*location() & kHeapObjectTagMask) == kHeapObjectTag); DCHECK((*location() & kHeapObjectTagMask) == kHeapObjectTag);
return reinterpret_cast<HeapObject*>(*location()); return reinterpret_cast<HeapObject*>(*location());
} }
void StoreHeapObject(HeapObject* value) { void StoreHeapObject(HeapObject* value) const {
*reinterpret_cast<HeapObject**>(address()) = value; *reinterpret_cast<HeapObject**>(address()) = value;
} }
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment