Commit bc8106dc authored by Igor Sheludko, committed by Commit Bot

[ptr-compr][cleanup] Introduce TaggedField<T, kOffset> template

It will allow us to use knowledge about the type of the field during
value decompression when the field is read.

Use the new class for HeapObject::MapField.

Bug: v8:9353
Change-Id: I1368426ec2e25fcec3af8d5cccd7a78d80423e72
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1658150
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62184}
parent 490c4199
......@@ -2601,6 +2601,8 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/string.h",
"src/objects/struct-inl.h",
"src/objects/struct.h",
"src/objects/tagged-field-inl.h",
"src/objects/tagged-field.h",
"src/objects/tagged-impl-inl.h",
"src/objects/tagged-impl.cc",
"src/objects/tagged-impl.h",
......
......@@ -25,8 +25,12 @@ V8_INLINE Address GetIsolateRoot(TOnHeapAddress on_heap_addr);
template <>
V8_INLINE Address GetIsolateRoot<Address>(Address on_heap_addr) {
// We subtract 1 here in order to let the compiler generate addition of 32-bit
// signed constant instead of 64-bit constant (the problem is that 2Gb looks
// like a negative 32-bit value). It's correct because we will never use
// leftmost address of V8 heap as |on_heap_addr|.
return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr +
kPtrComprIsolateRootBias);
kPtrComprIsolateRootBias - 1);
}
template <>
......
......@@ -1366,8 +1366,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
object.map_slot().Relaxed_Store(
MapWord::FromForwardingAddress(actual).ToMap());
object.set_map_word(MapWord::FromForwardingAddress(actual));
return true;
}
// TODO(mlippautz): Handle ConsString.
......
......@@ -109,9 +109,8 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
Object old = source.map_slot().Release_CompareAndSwap(
map, MapWord::FromForwardingAddress(target).ToMap());
if (old != map) {
if (!source.synchronized_compare_and_swap_map_word(
MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
// Other task migrated the object.
return false;
}
......@@ -215,8 +214,8 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());
if (object.map_slot().Release_CompareAndSwap(
map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
if (object.synchronized_compare_and_swap_map_word(
MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
......@@ -313,8 +312,7 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObjectReference::Update(slot, first);
if (!Heap::InYoungGeneration(first)) {
object.map_slot().Release_Store(
MapWord::FromForwardingAddress(first).ToMap());
object.synchronized_set_map_word(MapWord::FromForwardingAddress(first));
return REMOVE_SLOT;
}
......@@ -323,16 +321,15 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObject target = first_word.ToForwardingAddress();
HeapObjectReference::Update(slot, target);
object.map_slot().Release_Store(
MapWord::FromForwardingAddress(target).ToMap());
object.synchronized_set_map_word(MapWord::FromForwardingAddress(target));
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
Map::ObjectFieldsFrom(map.visitor_id()));
object.map_slot().Release_Store(
MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
object.synchronized_set_map_word(
MapWord::FromForwardingAddress(slot.ToHeapObject()));
return result;
}
DCHECK_EQ(ObjectFields::kMaybePointers,
......
......@@ -9,6 +9,7 @@
#include "src/roots/roots.h"
#include "src/objects/objects.h"
#include "src/objects/tagged-field.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
......@@ -45,6 +46,10 @@ class HeapObject : public Object {
// Set the map using release store
inline void synchronized_set_map(Map value);
inline void synchronized_set_map_word(MapWord map_word);
// Compare-and-swaps map word using release store, returns true if the map
// word was actually swapped.
inline bool synchronized_compare_and_swap_map_word(MapWord old_map_word,
MapWord new_map_word);
// Initialize the map immediately after the object is allocated.
// Do not use this outside Heap.
......@@ -191,6 +196,8 @@ class HeapObject : public Object {
STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
using MapField = TaggedField<MapWord, HeapObject::kMapOffset>;
inline Address GetFieldAddress(int field_offset) const;
protected:
......
......@@ -30,6 +30,10 @@ class MaybeObject : public TaggedImpl<HeapObjectReferenceType::WEAK, Address> {
#ifdef VERIFY_HEAP
static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p);
#endif
private:
template <typename TFieldType, int kFieldOffset>
friend class TaggedField;
};
// A HeapObjectReference is either a strong reference to a HeapObject, a weak
......
......@@ -18,11 +18,14 @@
// Since this changes visibility, it should always be last in a class
// definition.
#define OBJECT_CONSTRUCTORS(Type, ...) \
public: \
constexpr Type() : __VA_ARGS__() {} \
\
protected: \
#define OBJECT_CONSTRUCTORS(Type, ...) \
public: \
constexpr Type() : __VA_ARGS__() {} \
\
protected: \
template <typename TFieldType, int kFieldOffset> \
friend class TaggedField; \
\
explicit inline Type(Address ptr)
#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
......@@ -412,12 +415,15 @@
set(IndexForEntry(i) + k##name##Offset, value); \
}
#define TQ_OBJECT_CONSTRUCTORS(Type) \
public: \
constexpr Type() = default; \
\
protected: \
inline explicit Type(Address ptr); \
#define TQ_OBJECT_CONSTRUCTORS(Type) \
public: \
constexpr Type() = default; \
\
protected: \
template <typename TFieldType, int kFieldOffset> \
friend class TaggedField; \
\
inline explicit Type(Address ptr); \
friend class TorqueGenerated##Type<Type, Super>;
#define TQ_OBJECT_CONSTRUCTORS_IMPL(Type) \
......
......@@ -38,6 +38,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
#include "src/objects/tagged-field-inl.h"
#include "src/objects/tagged-impl-inl.h"
#include "src/objects/templates.h"
#include "src/sanitizer/tsan.h"
......@@ -709,23 +710,28 @@ void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
}
MapWordSlot HeapObject::map_slot() const {
return MapWordSlot(FIELD_ADDR(*this, kMapOffset));
return MapWordSlot(MapField::address(*this));
}
MapWord HeapObject::map_word() const {
return MapWord(map_slot().Relaxed_Load().ptr());
}
MapWord HeapObject::map_word() const { return MapField::Relaxed_Load(*this); }
void HeapObject::set_map_word(MapWord map_word) {
map_slot().Relaxed_Store(Object(map_word.value_));
MapField::Relaxed_Store(*this, map_word);
}
MapWord HeapObject::synchronized_map_word() const {
return MapWord(map_slot().Acquire_Load().ptr());
return MapField::Acquire_Load(*this);
}
void HeapObject::synchronized_set_map_word(MapWord map_word) {
map_slot().Release_Store(Object(map_word.value_));
MapField::Release_Store(*this, map_word);
}
// Atomically compare-and-swaps the map word with release semantics.
// Returns true iff |old_map_word| was the current map word and the swap
// happened; false means another thread updated the map word first.
bool HeapObject::synchronized_compare_and_swap_map_word(MapWord old_map_word,
                                                        MapWord new_map_word) {
  Tagged_t result =
      MapField::Release_CompareAndSwap(*this, old_map_word, new_map_word);
  // The CAS returns the previously stored raw (Tagged_t) value; the swap
  // succeeded iff it matches the expected old map word.
  return result == static_cast<Tagged_t>(old_map_word.ptr());
}
int HeapObject::Size() const { return SizeFromMap(map()); }
......
......@@ -743,13 +743,13 @@ class MapWord {
// View this map word as a forwarding address.
inline HeapObject ToForwardingAddress();
static inline MapWord FromRawValue(uintptr_t value) { return MapWord(value); }
inline uintptr_t ToRawValue() { return value_; }
inline Address ptr() { return value_; }
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
template <typename TFieldType, int kFieldOffset>
friend class TaggedField;
explicit MapWord(Address value) : value_(value) {}
......
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_TAGGED_FIELD_INL_H_
#define V8_OBJECTS_TAGGED_FIELD_INL_H_
#include "src/objects/tagged-field.h"
#include "src/common/ptr-compr-inl.h"
namespace v8 {
namespace internal {
// static
// Returns the address of the field inside |host|.
template <typename T, int kFieldOffset>
Address TaggedField<T, kFieldOffset>::address(HeapObject host) {
  // The field lives at a fixed offset from the host object's address.
  const Address host_address = host.address();
  return host_address + kFieldOffset;
}
// static
// Returns the field as a pointer to a raw tagged slot.
template <typename T, int kFieldOffset>
Tagged_t* TaggedField<T, kFieldOffset>::location(HeapObject host) {
  const Address field_address = address(host);
  return reinterpret_cast<Tagged_t*>(field_address);
}
// static
// Converts a raw (possibly compressed) field value into a full Address.
// When pointer compression is enabled, the decompression routine is chosen
// statically from the field type T; kIsSmi and kIsHeapObject are constexpr,
// so the branches below fold away at compile time.
template <typename T, int kFieldOffset>
template <typename TOnHeapAddress>
Address TaggedField<T, kFieldOffset>::tagged_to_full(
    TOnHeapAddress on_heap_addr, Tagged_t tagged_value) {
#ifdef V8_COMPRESS_POINTERS
  if (kIsSmi) {
    // Smi fields need no on-heap base address for decompression.
    return DecompressTaggedSigned(tagged_value);
  } else if (kIsHeapObject) {
    // HeapObject and MapWord fields: |on_heap_addr| supplies the base used
    // to reconstruct the full pointer.
    return DecompressTaggedPointer(on_heap_addr, tagged_value);
  } else {
    // Field may hold either a Smi or a heap object pointer.
    return DecompressTaggedAny(on_heap_addr, tagged_value);
  }
#else
  // Without pointer compression the stored value is already the full value.
  return tagged_value;
#endif
}
// static
// Converts a full field value to its on-heap representation: compressed
// when pointer compression is enabled, unchanged otherwise.
template <typename T, int kFieldOffset>
Tagged_t TaggedField<T, kFieldOffset>::full_to_tagged(Address value) {
#ifdef V8_COMPRESS_POINTERS
  return CompressTagged(value);
#else
  return value;
#endif
}
// static
// Plain (non-atomic) field read; the host address doubles as the base for
// potential pointer decompression.
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::load(HeapObject host) {
  const Tagged_t raw = *location(host);
  return T(tagged_to_full(host.ptr(), raw));
}
// static
// Plain (non-atomic) field read using |isolate| as the decompression base.
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::load(Isolate* isolate, HeapObject host) {
  const Tagged_t raw = *location(host);
  return T(tagged_to_full(isolate, raw));
}
// static
// Plain (non-atomic) write of the possibly-compressed representation.
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
  Tagged_t* slot = location(host);
  *slot = full_to_tagged(value.ptr());
}
// static
// Atomic field read with relaxed memory ordering.
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Relaxed_Load(HeapObject host) {
  const AtomicTagged_t raw = AsAtomicTagged::Relaxed_Load(location(host));
  return T(tagged_to_full(host.ptr(), raw));
}
// static
// Atomic field read with relaxed memory ordering, isolate-based
// decompression.
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Relaxed_Load(Isolate* isolate,
                                             HeapObject host) {
  const AtomicTagged_t raw = AsAtomicTagged::Relaxed_Load(location(host));
  return T(tagged_to_full(isolate, raw));
}
// static
// Atomic field write with relaxed memory ordering.
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, T value) {
  const Tagged_t raw = full_to_tagged(value.ptr());
  AsAtomicTagged::Relaxed_Store(location(host), raw);
}
// static
// Atomic field read with acquire semantics.
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Acquire_Load(HeapObject host) {
  const AtomicTagged_t raw = AsAtomicTagged::Acquire_Load(location(host));
  return T(tagged_to_full(host.ptr(), raw));
}
// static
// Atomic field read with acquire semantics, isolate-based decompression.
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Acquire_Load(Isolate* isolate,
                                             HeapObject host) {
  const AtomicTagged_t raw = AsAtomicTagged::Acquire_Load(location(host));
  return T(tagged_to_full(isolate, raw));
}
// static
// Atomic field write with release semantics.
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, T value) {
  const Tagged_t raw = full_to_tagged(value.ptr());
  AsAtomicTagged::Release_Store(location(host), raw);
}
// static
// Compare-and-swap on the field with release semantics. Returns the
// previously stored raw value so the caller can check whether the swap
// actually happened.
template <typename T, int kFieldOffset>
Tagged_t TaggedField<T, kFieldOffset>::Release_CompareAndSwap(HeapObject host,
                                                              T old, T value) {
  return AsAtomicTagged::Release_CompareAndSwap(
      location(host), full_to_tagged(old.ptr()), full_to_tagged(value.ptr()));
}
} // namespace internal
} // namespace v8
#endif // V8_OBJECTS_TAGGED_FIELD_INL_H_
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_TAGGED_FIELD_H_
#define V8_OBJECTS_TAGGED_FIELD_H_
#include "src/common/globals.h"
#include "src/objects/objects.h"
#include "src/objects/tagged-value.h"
namespace v8 {
namespace internal {
// This helper static class represents a tagged field of type T at offset
// kFieldOffset inside some host HeapObject.
// For full-pointer mode this type adds no overhead but when pointer
// compression is enabled such class allows us to use proper decompression
// function depending on the field type.
template <typename T, int kFieldOffset>
class TaggedField : public AllStatic {
 public:
  // Only tagged values are supported: strong references (Object and
  // subclasses), weak references (MaybeObject) and map words (MapWord).
  static_assert(std::is_base_of<Object, T>::value ||
                    std::is_same<MapWord, T>::value ||
                    std::is_same<MaybeObject, T>::value,
                "T must be strong or weak tagged type or MapWord");

  // True for Smi fields.
  static constexpr bool kIsSmi = std::is_base_of<Smi, T>::value;

  // True for HeapObject and MapWord fields. The latter may look like a Smi
  // if it contains forwarding pointer but still requires tagged pointer
  // decompression.
  static constexpr bool kIsHeapObject =
      std::is_base_of<HeapObject, T>::value || std::is_same<MapWord, T>::value;

  // Address of the field inside |host|.
  static inline Address address(HeapObject host);

  // Plain (non-atomic) accessors. The overloads taking an Isolate* use it
  // for decompression instead of the host address.
  static inline T load(HeapObject host);
  static inline T load(Isolate* isolate, HeapObject host);
  static inline void store(HeapObject host, T value);

  // Atomic accessors with relaxed memory ordering.
  static inline T Relaxed_Load(HeapObject host);
  static inline T Relaxed_Load(Isolate* isolate, HeapObject host);
  static inline void Relaxed_Store(HeapObject host, T value);

  // Atomic accessors with acquire/release semantics. The CAS returns the
  // previously stored raw value.
  static inline T Acquire_Load(HeapObject host);
  static inline T Acquire_Load(Isolate* isolate, HeapObject host);
  static inline void Release_Store(HeapObject host, T value);
  static inline Tagged_t Release_CompareAndSwap(HeapObject host, T old,
                                                T value);

 private:
  // The field as a pointer to a raw tagged slot.
  static inline Tagged_t* location(HeapObject host);

  // Converts a raw (possibly compressed) field value to a full Address,
  // selecting the decompression scheme from T at compile time.
  template <typename TOnHeapAddress>
  static inline Address tagged_to_full(TOnHeapAddress on_heap_addr,
                                       Tagged_t tagged_value);

  // Converts a full value to its (possibly compressed) stored form.
  static inline Tagged_t full_to_tagged(Address value);
};
} // namespace internal
} // namespace v8
#endif // V8_OBJECTS_TAGGED_FIELD_H_
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment