Commit 63a9919a authored by Igor Sheludko, committed by Commit Bot

[ptr-compr][cleanup] Remove [Compressed]MapWordSlot

... since decompression customization for MapWord is already
handled by HeapObject::MapField.

Bug: v8:9353, v8:9183
Change-Id: I009cdbbf8fc7e72029e1b9be54a0f0b73050e738
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1660475
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62188}
parent f186c66f
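
The dedicated map-word slot type existed because map words decompress differently from ordinary tagged fields: CompressedMapWordSlot (deleted below) used DecompressTaggedPointer, while CompressedObjectSlot uses DecompressTaggedAny. With that customization handled by HeapObject::MapField, a plain ObjectSlot suffices. A standalone sketch of the two decompression flavors, assuming a simplified scheme where the base comes from the slot's upper address bits and Smis have a zero low bit (V8's real rules differ in detail):

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;  // compressed (lower 32 bits of a) tagged value

// Assumed scheme: the decompression base comes from the slot's own address.
static Address BaseOf(Address on_heap_addr) {
  return on_heap_addr & ~Address{0xFFFFFFFFu};
}

// For values known to be heap pointers (e.g. a map word): rebase the offset.
Address DecompressTaggedPointer(Address on_heap_addr, Tagged_t value) {
  return BaseOf(on_heap_addr) + value;
}

// For values that may also be Smis: a Smi (low bit 0) is sign-extended as-is
// rather than rebased, which is why "any" and "pointer" loads differ.
Address DecompressTaggedAny(Address on_heap_addr, Tagged_t value) {
  if ((value & 1) == 0) {
    return static_cast<Address>(
        static_cast<intptr_t>(static_cast<int32_t>(value)));
  }
  return DecompressTaggedPointer(on_heap_addr, value);
}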
@@ -648,7 +648,6 @@ struct SlotTraits;
 template <>
 struct SlotTraits<SlotLocation::kOffHeap> {
   using TObjectSlot = FullObjectSlot;
-  using TMapWordSlot = FullObjectSlot;
   using TMaybeObjectSlot = FullMaybeObjectSlot;
   using THeapObjectSlot = FullHeapObjectSlot;
 };
@@ -659,12 +658,10 @@ template <>
 struct SlotTraits<SlotLocation::kOnHeap> {
 #ifdef V8_COMPRESS_POINTERS
   using TObjectSlot = CompressedObjectSlot;
-  using TMapWordSlot = CompressedMapWordSlot;
   using TMaybeObjectSlot = CompressedMaybeObjectSlot;
   using THeapObjectSlot = CompressedHeapObjectSlot;
 #else
   using TObjectSlot = FullObjectSlot;
-  using TMapWordSlot = FullObjectSlot;
   using TMaybeObjectSlot = FullMaybeObjectSlot;
   using THeapObjectSlot = FullHeapObjectSlot;
 #endif
@@ -674,10 +671,6 @@ struct SlotTraits<SlotLocation::kOnHeap> {
 // holding Object value (smi or strong heap object).
 using ObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TObjectSlot;
-// An MapWordSlot instance describes a kTaggedSize-sized on-heap field ("slot")
-// holding HeapObject (strong heap object) value or a forwarding pointer.
-using MapWordSlot = SlotTraits<SlotLocation::kOnHeap>::TMapWordSlot;
 // A MaybeObjectSlot instance describes a kTaggedSize-sized on-heap field
 // ("slot") holding MaybeObject (smi or weak heap object or strong heap object).
 using MaybeObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TMaybeObjectSlot;
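The SlotTraits machinery trimmed above is ordinary template specialization; a compilable toy version, with stub slot types standing in for V8's real classes:

// Toy SlotTraits (stub types; V8's actual slot classes carry real behavior).
enum class SlotLocation { kOffHeap, kOnHeap };

struct FullObjectSlot {};
struct CompressedObjectSlot {};

template <SlotLocation kLoc>
struct SlotTraits;

template <>
struct SlotTraits<SlotLocation::kOffHeap> {
  using TObjectSlot = FullObjectSlot;
};

template <>
struct SlotTraits<SlotLocation::kOnHeap> {
#ifdef V8_COMPRESS_POINTERS
  using TObjectSlot = CompressedObjectSlot;
#else
  using TObjectSlot = FullObjectSlot;
#endif
};

// After this commit, map slots are ordinary ObjectSlots:
using ObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TObjectSlot;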
@@ -243,7 +243,7 @@ class ConcurrentMarkingVisitor final
     if (!ShouldVisit(weak_cell)) return 0;
     int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
-    VisitMapPointer(weak_cell, weak_cell.map_slot());
+    VisitMapPointer(weak_cell);
     WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
     if (weak_cell.target().IsHeapObject()) {
       HeapObject target = HeapObject::cast(weak_cell.target());
@@ -302,13 +302,13 @@ class ConcurrentMarkingVisitor final
   int VisitSeqOneByteString(Map map, SeqOneByteString object) {
     if (!ShouldVisit(object)) return 0;
-    VisitMapPointer(object, object.map_slot());
+    VisitMapPointer(object);
     return SeqOneByteString::SizeFor(object.synchronized_length());
   }
   int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
     if (!ShouldVisit(object)) return 0;
-    VisitMapPointer(object, object.map_slot());
+    VisitMapPointer(object);
     return SeqTwoByteString::SizeFor(object.synchronized_length());
   }
@@ -363,7 +363,7 @@ class ConcurrentMarkingVisitor final
     if (!ShouldVisit(shared_info)) return 0;
     int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
-    VisitMapPointer(shared_info, shared_info.map_slot());
+    VisitMapPointer(shared_info);
     SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
                                                     this);
@@ -381,7 +381,7 @@ class ConcurrentMarkingVisitor final
   int VisitBytecodeArray(Map map, BytecodeArray object) {
     if (!ShouldVisit(object)) return 0;
     int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
-    VisitMapPointer(object, object.map_slot());
+    VisitMapPointer(object);
     BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
     if (!is_forced_gc_) {
       object.MakeOlder();
@@ -449,7 +449,7 @@ class ConcurrentMarkingVisitor final
   int VisitDescriptorArray(Map map, DescriptorArray array) {
     if (!ShouldVisit(array)) return 0;
-    VisitMapPointer(array, array.map_slot());
+    VisitMapPointer(array);
     int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
     VisitPointers(array, array.GetFirstPointerSlot(),
                   array.GetDescriptorSlot(0));
@@ -459,7 +459,7 @@ class ConcurrentMarkingVisitor final
   int VisitTransitionArray(Map map, TransitionArray array) {
     if (!ShouldVisit(array)) return 0;
-    VisitMapPointer(array, array.map_slot());
+    VisitMapPointer(array);
     int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
     TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
     weak_objects_->transition_arrays.Push(task_id_, array);
@@ -623,7 +623,7 @@ class ConcurrentMarkingVisitor final
     // Left trimming marks the array black before over-writing the length.
     DCHECK(length.IsSmi());
     int size = T::SizeFor(Smi::ToInt(length));
-    VisitMapPointer(object, object.map_slot());
+    VisitMapPointer(object);
     T::BodyDescriptor::IterateBody(map, object, size, this);
     return size;
   }
@@ -648,7 +648,7 @@ class ConcurrentMarkingVisitor final
   template <typename T, typename TBodyDescriptor>
   const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
     SlotSnapshottingVisitor visitor(&slot_snapshot_);
-    visitor.VisitPointer(object, ObjectSlot(object.map_slot().address()));
+    visitor.VisitPointer(object, object.map_slot());
     TBodyDescriptor::IterateBody(map, object, size, &visitor);
     return slot_snapshot_;
   }
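All of the call sites above shed the second argument; VisitMapPointer now derives the slot from the host itself (see the objects-visiting-inl.h hunk further down). A self-contained sketch of the CRTP dispatch involved, with hypothetical stand-in types:

#include <iostream>

struct ObjectSlotStub {};

struct HeapObjectStub {
  // Stand-in for HeapObject::map_slot(), which the host now supplies itself.
  ObjectSlotStub map_slot() const { return ObjectSlotStub{}; }
};

template <typename ConcreteVisitor>
struct HeapVisitorSketch {
  void VisitMapPointer(HeapObjectStub host) {
    // CRTP: statically dispatch to the concrete visitor, no virtual call.
    static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
  }
};

struct MarkingVisitorSketch : HeapVisitorSketch<MarkingVisitorSketch> {
  void VisitPointer(HeapObjectStub, ObjectSlotStub) {
    std::cout << "visited map pointer\n";
  }
};

int main() {
  MarkingVisitorSketch v;
  v.VisitMapPointer(HeapObjectStub{});
}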
@@ -405,7 +405,7 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
     return AllocationMemento();
   }
   HeapObject candidate = HeapObject::FromAddress(memento_address);
-  MapWordSlot candidate_map_slot = candidate.map_slot();
+  ObjectSlot candidate_map_slot = candidate.map_slot();
   // This fast check may peek at an uninitialized word. However, the slow check
   // below (memento_address == top) ensures that this is safe. Mark the word as
   // initialized to silence MemorySanitizer warnings.
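The retained comment above records a subtle point: the fast check deliberately reads a possibly uninitialized word, so the code first marks it initialized for MemorySanitizer. A standalone sketch of that idiom (the MSAN_MEMORY_IS_INITIALIZED name matches V8's macro; the no-op fallback here is illustrative):

#include <cstdint>

// Under MSan, V8 routes this to the sanitizer's unpoison call; elsewhere it
// expands to nothing. A no-op stand-in keeps this sketch self-contained.
#ifndef MSAN_MEMORY_IS_INITIALIZED
#define MSAN_MEMORY_IS_INITIALIZED(start, size) ((void)(start), (void)(size))
#endif

using Tagged_t = uint32_t;

// Fast-path probe: may read a word that was never written; the caller's
// slow check must make acting on a false positive harmless.
bool ProbeWord(const Tagged_t* slot, Tagged_t expected) {
  MSAN_MEMORY_IS_INITIALIZED(slot, sizeof(Tagged_t));
  return *slot == expected;
}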
@@ -71,9 +71,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
 template <typename ResultType, typename ConcreteVisitor>
 void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
-    HeapObject host, MapWordSlot map_slot) {
+    HeapObject host) {
   DCHECK(!host.map_word().IsForwardingAddress());
-  static_cast<ConcreteVisitor*>(this)->VisitPointer(host, ObjectSlot(map_slot));
+  static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
 }
 
 #define VISIT(TypeName, Type)                                             \
@@ -88,8 +88,9 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
                  "concurrent marker");                                    \
     }                                                                     \
     int size = TypeName::BodyDescriptor::SizeOf(map, object);             \
-    if (visitor->ShouldVisitMapPointer())                                 \
-      visitor->VisitMapPointer(object, object.map_slot());                \
+    if (visitor->ShouldVisitMapPointer()) {                               \
+      visitor->VisitMapPointer(object);                                   \
+    }                                                                     \
     TypeName::BodyDescriptor::IterateBody(map, object, size, visitor);    \
     return static_cast<ResultType>(size);                                 \
   }
@@ -109,7 +110,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
   if (!visitor->ShouldVisit(object)) return ResultType();
   int size = map.instance_size();
   if (visitor->ShouldVisitMapPointer()) {
-    visitor->VisitMapPointer(object, object.map_slot());
+    visitor->VisitMapPointer(object);
   }
   return static_cast<ResultType>(size);
 }
@@ -120,8 +121,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
   ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
   if (!visitor->ShouldVisit(object)) return ResultType();
   int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
-  if (visitor->ShouldVisitMapPointer())
-    visitor->VisitMapPointer(object, object.map_slot());
+  if (visitor->ShouldVisitMapPointer()) {
+    visitor->VisitMapPointer(object);
+  }
   JSObject::FastBodyDescriptor::IterateBody(map, object, size, visitor);
   return static_cast<ResultType>(size);
 }
@@ -132,8 +134,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
   ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
   if (!visitor->ShouldVisit(object)) return ResultType();
   int size = JSObject::BodyDescriptor::SizeOf(map, object);
-  if (visitor->ShouldVisitMapPointer())
-    visitor->VisitMapPointer(object, object.map_slot());
+  if (visitor->ShouldVisitMapPointer()) {
+    visitor->VisitMapPointer(object);
+  }
   JSObject::BodyDescriptor::IterateBody(map, object, size, visitor);
   return static_cast<ResultType>(size);
 }
@@ -145,7 +148,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
   if (!visitor->ShouldVisit(object)) return ResultType();
   int size = map.instance_size();
   if (visitor->ShouldVisitMapPointer()) {
-    visitor->VisitMapPointer(object, object.map_slot());
+    visitor->VisitMapPointer(object);
   }
   StructBodyDescriptor::IterateBody(map, object, size, visitor);
   return static_cast<ResultType>(size);
@@ -157,7 +160,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
   ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
   if (!visitor->ShouldVisit(object)) return ResultType();
   if (visitor->ShouldVisitMapPointer()) {
-    visitor->VisitMapPointer(object, object.map_slot());
+    visitor->VisitMapPointer(object);
   }
   return static_cast<ResultType>(object.size());
 }
@@ -169,7 +172,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
   if (!visitor->ShouldVisit(object)) return ResultType();
   int size = WeakArrayBodyDescriptor::SizeOf(map, object);
   if (visitor->ShouldVisitMapPointer()) {
-    visitor->VisitMapPointer(object, object.map_slot());
+    visitor->VisitMapPointer(object);
   }
   WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
   return size;
@@ -91,7 +91,7 @@ class HeapVisitor : public ObjectVisitor {
   // Guard predicate for visiting the objects map pointer separately.
   V8_INLINE bool ShouldVisitMapPointer() { return true; }
   // A callback for visiting the map pointer in the object header.
-  V8_INLINE void VisitMapPointer(HeapObject host, MapWordSlot map_slot);
+  V8_INLINE void VisitMapPointer(HeapObject host);
   // If this predicate returns false, then the heap visitor will fail
   // in default Visit implemention for subclasses of JSObject.
   V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
@@ -2987,17 +2987,14 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
 void FreeListCategory::RepairFreeList(Heap* heap) {
+  Map free_space_map = ReadOnlyRoots(heap).free_space_map();
   FreeSpace n = top();
   while (!n.is_null()) {
-    MapWordSlot map_location = n.map_slot();
-    // We can't use .is_null() here because *map_location returns an
-    // Object (for which "is null" is not defined, as it would be
-    // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
-    if (map_location.contains_value(kNullAddress)) {
-      map_location.store(ReadOnlyRoots(heap).free_space_map());
+    ObjectSlot map_slot = n.map_slot();
+    if (map_slot.contains_value(kNullAddress)) {
+      map_slot.store(free_space_map);
     } else {
-      DCHECK(map_location.contains_value(
-          ReadOnlyRoots(heap).free_space_map().ptr()));
+      DCHECK(map_slot.contains_value(free_space_map.ptr()));
     }
     n = n.next();
   }
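The comment deleted in this hunk records why the null test uses contains_value(kNullAddress) rather than is_null(): dereferencing the slot yields an Object, and an all-zero map word decodes to Smi(0), which is indistinguishable from "null". A standalone sketch of the ambiguity, assuming simplified V8-style tagging (low bit 0 = Smi):

#include <cassert>
#include <cstdint>

using Tagged_t = uint32_t;

// Simplified tagging for illustration: Smis have a 0 low bit and carry their
// value in the upper 31 bits; heap pointers have a 1 low bit.
Tagged_t EncodeSmi(int32_t value) { return static_cast<Tagged_t>(value) << 1; }

int main() {
  Tagged_t unwritten_slot = 0;       // never-initialized map word
  Tagged_t smi_zero = EncodeSmi(0);  // a legitimate Smi(0) payload
  // Decoded as tagged values the two are identical bit patterns, so only a
  // raw comparison against kNullAddress can identify the unwritten slot.
  assert(unwritten_slot == smi_zero);
  return 0;
}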
@@ -22,6 +22,12 @@ namespace internal {
 CompressedObjectSlot::CompressedObjectSlot(Object* object)
     : SlotBase(reinterpret_cast<Address>(&object->ptr_)) {}
 
+bool CompressedObjectSlot::contains_value(Address raw_value) const {
+  AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
+  return static_cast<uint32_t>(value) ==
+         static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
+}
+
 Object CompressedObjectSlot::operator*() const {
   Tagged_t value = *location();
   return Object(DecompressTaggedAny(address(), value));
@@ -60,54 +66,6 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
   return Object(DecompressTaggedAny(address(), result));
 }
 
-//
-// CompressedMapWordSlot implementation.
-//
-bool CompressedMapWordSlot::contains_value(Address raw_value) const {
-  AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return static_cast<uint32_t>(value) ==
-         static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
-}
-
-Object CompressedMapWordSlot::operator*() const {
-  Tagged_t value = *location();
-  return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::store(Object value) const {
-  *location() = CompressTagged(value.ptr());
-}
-
-Object CompressedMapWordSlot::Relaxed_Load() const {
-  AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::Relaxed_Store(Object value) const {
-  Tagged_t ptr = CompressTagged(value.ptr());
-  AsAtomicTagged::Relaxed_Store(location(), ptr);
-}
-
-Object CompressedMapWordSlot::Acquire_Load() const {
-  AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
-  return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::Release_Store(Object value) const {
-  Tagged_t ptr = CompressTagged(value.ptr());
-  AsAtomicTagged::Release_Store(location(), ptr);
-}
-
-Object CompressedMapWordSlot::Release_CompareAndSwap(Object old,
-                                                     Object target) const {
-  Tagged_t old_ptr = CompressTagged(old.ptr());
-  Tagged_t target_ptr = CompressTagged(target.ptr());
-  Tagged_t result =
-      AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
-  return Object(DecompressTaggedPointer(address(), result));
-}
-
 //
 // CompressedMaybeObjectSlot implementation.
 //
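The contains_value implementation that moved onto CompressedObjectSlot compares only the low 32 bits: the caller passes a full Address, and truncating it to Tagged_t matches how the compressed slot stores it. A standalone restatement under that assumption, with simplified types:

#include <atomic>
#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

// Mirrors the diff: relaxed atomic read of the 32-bit slot, then compare
// against the truncated (compressed) form of the expected full value.
bool ContainsValue(const std::atomic<Tagged_t>* location, Address raw_value) {
  Tagged_t stored = location->load(std::memory_order_relaxed);
  return stored == static_cast<Tagged_t>(raw_value);
}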
@@ -34,32 +34,6 @@ class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
   explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}
 
-  inline Object operator*() const;
-  inline void store(Object value) const;
-
-  inline Object Acquire_Load() const;
-  inline Object Relaxed_Load() const;
-  inline void Relaxed_Store(Object value) const;
-  inline void Release_Store(Object value) const;
-  inline Object Release_CompareAndSwap(Object old, Object target) const;
-};
-
-// A CompressedMapWordSlot instance describes a kTaggedSize-sized map-word field
-// ("slot") of heap objects holding a compressed tagged pointer or a Smi
-// representing forwaring pointer value.
-// This slot kind is similar to CompressedObjectSlot but decompression of
-// forwarding pointer is different.
-// Its address() is the address of the slot.
-// The slot's contents can be read and written using operator* and store().
-class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
- public:
-  using TObject = Object;
-  static constexpr bool kCanBeWeak = false;
-
-  CompressedMapWordSlot() : SlotBase(kNullAddress) {}
-  explicit CompressedMapWordSlot(Address ptr) : SlotBase(ptr) {}
+  // Compares memory representation of a value stored in the slot with given
+  // raw value without decompression.
+  inline bool contains_value(Address raw_value) const;
@@ -67,10 +41,9 @@ class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
   inline Object operator*() const;
   inline void store(Object value) const;
 
-  inline Object Acquire_Load() const;
   inline Object Relaxed_Load() const;
   inline void Relaxed_Store(Object value) const;
+  inline Object Acquire_Load() const;
   inline void Release_Store(Object value) const;
   inline Object Release_CompareAndSwap(Object old, Object target) const;
 };
@@ -26,29 +26,13 @@ RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
 int FreeSpace::Size() { return size(); }
 
 FreeSpace FreeSpace::next() {
-#ifdef DEBUG
-  Heap* heap = GetHeapFromWritableObject(*this);
-  Object free_space_map =
-      Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
-  DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
-                 !heap->deserialization_complete() &&
-                     map_slot().contains_value(kNullAddress));
-#endif
-  DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
-  return FreeSpace::unchecked_cast(*ObjectSlot(address() + kNextOffset));
+  DCHECK(IsValid());
+  return FreeSpace::unchecked_cast(READ_FIELD(*this, kNextOffset));
 }
 
 void FreeSpace::set_next(FreeSpace next) {
-#ifdef DEBUG
-  Heap* heap = GetHeapFromWritableObject(*this);
-  Object free_space_map =
-      Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
-  DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
-                 !heap->deserialization_complete() &&
-                     map_slot().contains_value(kNullAddress));
-#endif
-  DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
-  ObjectSlot(address() + kNextOffset).Relaxed_Store(next);
+  DCHECK(IsValid());
+  RELAXED_WRITE_FIELD(*this, kNextOffset, next);
 }
 
 FreeSpace FreeSpace::cast(HeapObject o) {
@@ -61,6 +45,17 @@ FreeSpace FreeSpace::unchecked_cast(const Object o) {
   return bit_cast<FreeSpace>(o);
 }
 
+bool FreeSpace::IsValid() {
+  Heap* heap = GetHeapFromWritableObject(*this);
+  Object free_space_map =
+      Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
+  CHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
+                !heap->deserialization_complete() &&
+                    map_slot().contains_value(kNullAddress));
+  CHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
+  return true;
+}
+
 }  // namespace internal
 }  // namespace v8
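The new FreeSpace::IsValid() replaces two copies of an #ifdef DEBUG block with a single bool-returning validator, so callers can write DCHECK(IsValid()) and the whole expression compiles away in release builds. A minimal standalone sketch of the idiom (assert stands in for V8's CHECK/DCHECK; the invariant is a placeholder):

#include <cassert>

// Stand-in for V8's macro: DCHECK vanishes in release (NDEBUG) builds.
#ifdef NDEBUG
#define DCHECK(condition) ((void)0)
#else
#define DCHECK(condition) assert(condition)
#endif

class FreeSpaceSketch {
 public:
  void set_next(int next) {
    DCHECK(IsValid());  // validator runs only in debug builds
    next_ = next;
  }

 private:
  // Always returns true; failing invariants abort inside (CHECK in V8).
  bool IsValid() const {
    assert(size_ >= 0);  // placeholder invariant for the sketch
    return true;
  }
  int next_ = 0;
  int size_ = 0;
};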
@@ -44,6 +44,9 @@ class FreeSpace : public HeapObject {
   DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
                                 TORQUE_GENERATED_FREE_SPACE_FIELDS)
 
  private:
+  inline bool IsValid();
+
   OBJECT_CONSTRUCTORS(FreeSpace, HeapObject);
 };
@@ -32,7 +32,7 @@ class HeapObject : public Object {
   inline Map map() const;
   inline void set_map(Map value);
 
-  inline MapWordSlot map_slot() const;
+  inline ObjectSlot map_slot() const;
 
   // The no-write-barrier version. This is OK if the object is white and in
   // new space, or if the value is an immortal immutable object, like the maps
@@ -709,8 +709,8 @@ void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
   }
 }
 
-MapWordSlot HeapObject::map_slot() const {
-  return MapWordSlot(MapField::address(*this));
+ObjectSlot HeapObject::map_slot() const {
+  return ObjectSlot(MapField::address(*this));
 }
 
 MapWord HeapObject::map_word() const { return MapField::Relaxed_Load(*this); }