Commit 6a5cd598 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Prepare for changing kTaggedSize, pt.2

This CL also unifies CopyWords() and CopyBytes() implementations.

Bug: v8:7703
Change-Id: I0b2e2f35c0c651e46231c4e4286c705634dce02b
Reviewed-on: https://chromium-review.googlesource.com/c/1491602
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59912}
parent b3139bdd
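Editorial note: the unification mentioned in the commit message replaces the hand-rolled loops in CopyWords() and CopyBytes() with one CopyImpl<kBlockCopyLimit, T> template, which the new CopyTagged() also reuses. Below is a minimal standalone sketch of that pattern, not the actual V8 code: `uintptr_t`/`uint32_t` stand in for V8's `Address` and `Tagged_t` (a 32-bit `Tagged_t` is an assumption about a pointer-compressed build), and the real helpers take raw `Address` values rather than typed pointers.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

using Address = uintptr_t;  // system-pointer-sized word (stand-in)
using Tagged_t = uint32_t;  // tagged word; 32-bit assumes a compressed build

// Shared copy loop: element-wise below kBlockCopyLimit, memcpy above it.
template <size_t kBlockCopyLimit, typename T>
inline void CopyImpl(T* dst, const T* src, size_t count) {
  if (count == 0) return;
  if (count < kBlockCopyLimit) {
    for (size_t i = 0; i < count; ++i) dst[i] = src[i];
  } else {
    std::memcpy(dst, src, count * sizeof(T));
  }
}

// Thin typed wrappers in the spirit of CopyWords/CopyTagged/CopyBytes below.
inline void CopyWords(Address* dst, const Address* src, size_t n) {
  CopyImpl<16>(dst, src, n);
}
inline void CopyTagged(Tagged_t* dst, const Tagged_t* src, size_t n) {
  CopyImpl<16>(dst, src, n);
}
inline void CopyBytes(uint8_t* dst, const uint8_t* src, size_t n) {
  CopyImpl<16>(dst, src, n);
}

int main() {
  Tagged_t src[4] = {1, 2, 3, 4};
  Tagged_t dst[4] = {};
  CopyTagged(dst, src, 4);  // takes the element-wise path (4 < 16)
  assert(dst[0] == 1 && dst[3] == 4);
  return 0;
}
```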
@@ -34,6 +34,7 @@
 #include "src/objects/property-cell.h"
 #include "src/objects/scope-info.h"
 #include "src/objects/script-inl.h"
+#include "src/objects/slots-inl.h"
 #include "src/objects/struct-inl.h"
 #include "src/profiler/heap-profiler.h"
 #include "src/string-hasher.h"
@@ -405,8 +406,7 @@ bool Heap::ShouldBePromoted(Address old_address) {
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
   DCHECK(IsAligned(byte_size, kTaggedSize));
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  CopyWords(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
+  CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
 }
 template <Heap::FindMementoMode mode>
......
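Note on the CopyBlock hunk: dropping the STATIC_ASSERT means the byte count is now converted using kTaggedSize, which will no longer equal kSystemPointerSize once pointer compression lands. A tiny worked check, with both sizes assumed for a 64-bit compressed build:

```cpp
#include <cstddef>

// Assumed sizes for a 64-bit pointer-compressed build.
constexpr size_t kSystemPointerSize = 8;
constexpr size_t kTaggedSize = 4;

int main() {
  // A 32-byte object body holds 8 tagged words but only 4 system words, so
  // CopyBlock must count in kTaggedSize units once the two sizes diverge.
  constexpr size_t byte_size = 32;
  static_assert(byte_size / kTaggedSize == 8, "tagged word count");
  static_assert(byte_size / kSystemPointerSize == 4, "system word count");
  return 0;
}
```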
@@ -266,8 +266,10 @@ class RememberedSet : public AllStatic {
 class UpdateTypedSlotHelper {
  public:
-  // Updates a typed slot using an untyped slot callback.
-  // The callback accepts MaybeObjectSlot and returns SlotCallbackResult.
+  // Updates a typed slot using an untyped slot callback where |addr| depending
+  // on slot type represents either address for respective RelocInfo or address
+  // of the uncompressed constant pool entry.
+  // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
   template <typename Callback>
   static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
                                             Address addr, Callback callback) {
@@ -284,8 +286,6 @@ class UpdateTypedSlotHelper {
         return UpdateEmbeddedPointer(heap, &rinfo, callback);
       }
       case OBJECT_SLOT: {
-        // TODO(ishell): the incoming addr represents MaybeObjectSlot(addr).
-        STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
         return callback(FullMaybeObjectSlot(addr));
       }
       case CLEARED_SLOT:
......
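The reworded comment pins down the callback contract: whatever the typed slot kind is, the callback always sees a full (uncompressed) slot. A self-contained sketch of that dispatch shape, using stand-in types rather than V8's real FullMaybeObjectSlot and SlotCallbackResult:

```cpp
#include <cstdint>
#include <iostream>

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };  // stand-in
enum SlotType { OBJECT_SLOT, CLEARED_SLOT };         // stand-in

struct FullMaybeObjectSlot {  // stand-in: wraps a full-width slot address
  uintptr_t address;
};

// Every typed slot kind is decoded to a full slot before the callback runs.
template <typename Callback>
SlotCallbackResult UpdateTypedSlot(SlotType type, uintptr_t addr,
                                   Callback callback) {
  switch (type) {
    case OBJECT_SLOT:
      // |addr| already points at an uncompressed (full) slot.
      return callback(FullMaybeObjectSlot{addr});
    case CLEARED_SLOT:
      return REMOVE_SLOT;
  }
  return KEEP_SLOT;
}

int main() {
  uintptr_t slot_storage = 0x1234;
  SlotCallbackResult result = UpdateTypedSlot(
      OBJECT_SLOT, reinterpret_cast<uintptr_t>(&slot_storage),
      [](FullMaybeObjectSlot slot) {
        std::cout << "visiting full slot at " << std::hex << slot.address
                  << "\n";
        return KEEP_SLOT;
      });
  return result == KEEP_SLOT ? 0 : 1;
}
```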
@@ -462,7 +462,8 @@ void ScavengeVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
 template <typename TSlot>
 void ScavengeVisitor::VisitHeapObjectImpl(TSlot slot, HeapObject heap_object) {
   if (Heap::InYoungGeneration(heap_object)) {
-    scavenger_->ScavengeObject(HeapObjectSlot(slot), heap_object);
+    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
+    scavenger_->ScavengeObject(THeapObjectSlot(slot), heap_object);
   }
 }
......
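The `using THeapObjectSlot = typename TSlot::THeapObjectSlot;` line is a nested-typedef dispatch: each slot type names the heap-object slot flavour that matches its width, so a templated visitor never hard-codes HeapObjectSlot. A minimal sketch of the idea with made-up slot types:

```cpp
#include <cstdint>
#include <iostream>

struct CompressedHeapObjectSlot { uint32_t* location; };   // stand-in
struct FullHeapObjectSlot { uintptr_t* location; };        // stand-in

struct CompressedMaybeObjectSlot {
  using THeapObjectSlot = CompressedHeapObjectSlot;  // 32-bit flavour
  uint32_t* location;
};
struct FullMaybeObjectSlot {
  using THeapObjectSlot = FullHeapObjectSlot;        // pointer-sized flavour
  uintptr_t* location;
};

// The visitor picks the matching heap-object slot type from the slot it got.
template <typename TSlot>
void VisitSlot(TSlot slot) {
  using THeapObjectSlot = typename TSlot::THeapObjectSlot;
  THeapObjectSlot heap_slot{slot.location};
  std::cout << "slot word width: " << sizeof(*heap_slot.location) << " bytes\n";
}

int main() {
  uint32_t compressed = 0;
  uintptr_t full = 0;
  VisitSlot(CompressedMaybeObjectSlot{&compressed});  // prints 4
  VisitSlot(FullMaybeObjectSlot{&full});              // prints 8 on 64-bit
  return 0;
}
```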
@@ -137,6 +137,8 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
                              HeapObject::cast(target)));
     } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
                                     HeapObject::cast(target))) {
+      // We should never try to record off-heap slots.
+      DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
       // We cannot call MarkCompactCollector::RecordSlot because that checks
       // that the host page is not in young generation, which does not hold
       // for pending large pages.
......
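The new DCHECK guards the slot-recording branch on the slot's static type. It is a runtime (debug-only) check rather than a static_assert because the enclosing visitor is instantiated for both slot flavours and only this branch must never see a full slot. A small sketch of the same check with stand-in types:

```cpp
#include <cassert>
#include <type_traits>

struct HeapObjectSlot {};      // on-heap, possibly compressed (stand-in)
struct FullHeapObjectSlot {};  // full-width, also used for off-heap roots

template <typename THeapObjectSlot>
void MaybeRecordSlot(bool on_evacuation_candidate) {
  if (on_evacuation_candidate) {
    // Only the on-heap slot flavour may reach this branch; a runtime assert
    // (like V8's DCHECK) works because the FullHeapObjectSlot instantiation
    // simply never takes it.
    assert((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
    // ... record the slot ...
  }
}

int main() {
  MaybeRecordSlot<HeapObjectSlot>(true);
  MaybeRecordSlot<FullHeapObjectSlot>(false);  // branch not taken, no assert
  return 0;
}
```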
@@ -570,12 +570,11 @@ class MemoryChunk {
   // Some callers rely on the fact that this can operate on both
   // tagged and aligned object addresses.
   inline uint32_t AddressToMarkbitIndex(Address addr) const {
-    return static_cast<uint32_t>(addr - this->address()) >>
-           kSystemPointerSizeLog2;
+    return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
   }

   inline Address MarkbitIndexToAddress(uint32_t index) const {
-    return this->address() + (index << kSystemPointerSizeLog2);
+    return this->address() + (index << kTaggedSizeLog2);
   }

   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
......
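Switching both shifts to kTaggedSizeLog2 gives one mark bit per tagged word instead of one per system word; the forward and inverse mappings must use the same shift so the round trip stays exact. A sketch under the assumption kTaggedSize == 4 (kTaggedSizeLog2 == 2):

```cpp
#include <cassert>
#include <cstdint>

constexpr uint32_t kTaggedSizeLog2 = 2;  // assumption: 4-byte tagged words
using Address = uintptr_t;

struct Chunk {  // stand-in for the relevant bits of MemoryChunk
  Address start;
  uint32_t AddressToMarkbitIndex(Address addr) const {
    return static_cast<uint32_t>(addr - start) >> kTaggedSizeLog2;
  }
  Address MarkbitIndexToAddress(uint32_t index) const {
    return start + (static_cast<Address>(index) << kTaggedSizeLog2);
  }
};

int main() {
  Chunk chunk{0x10000};
  Address addr = chunk.start + 24;  // 6 tagged words into the chunk
  uint32_t index = chunk.AddressToMarkbitIndex(addr);
  assert(index == 6);
  assert(chunk.MarkbitIndexToAddress(index) == addr);  // exact round trip
  return 0;
}
```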
@@ -25,7 +25,7 @@ void init_memcopy_functions();
 #if defined(V8_TARGET_ARCH_IA32)
 // Limit below which the extra overhead of the MemCopy function is likely
 // to outweigh the benefits of faster copying.
-const int kMinComplexMemCopy = 64;
+const size_t kMinComplexMemCopy = 64;
 // Copy memory area. No restrictions.
 V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
@@ -45,7 +45,7 @@ V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
   memcpy(dest, src, chars);
 }
 // For values < 16, the assembler function is slower than the inlined C code.
-const int kMinComplexMemCopy = 16;
+const size_t kMinComplexMemCopy = 16;
 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
   (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                             reinterpret_cast<const uint8_t*>(src), size);
@@ -75,7 +75,7 @@ V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
   memcpy(dest, src, chars);
 }
 // For values < 16, the assembler function is slower than the inlined C code.
-const int kMinComplexMemCopy = 16;
+const size_t kMinComplexMemCopy = 16;
 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
   (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                             reinterpret_cast<const uint8_t*>(src), size);
@@ -93,54 +93,49 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
                                          size_t size) {
   memmove(dest, src, size);
 }
-const int kMinComplexMemCopy = 8;
+const size_t kMinComplexMemCopy = 8;
 #endif  // V8_TARGET_ARCH_IA32

 // Copies words from |src| to |dst|. The data spans must not overlap.
-// |src| and |dst| must be kSystemPointerSize-aligned.
-inline void CopyWords(Address dst, const Address src, size_t num_words) {
-  constexpr int kSystemPointerSize = sizeof(void*);  // to avoid src/globals.h
-  DCHECK(IsAligned(dst, kSystemPointerSize));
-  DCHECK(IsAligned(src, kSystemPointerSize));
-  DCHECK(((src <= dst) && ((src + num_words * kSystemPointerSize) <= dst)) ||
-         ((dst <= src) && ((dst + num_words * kSystemPointerSize) <= src)));
+// |src| and |dst| must be TWord-size aligned.
+template <size_t kBlockCopyLimit, typename T>
+inline void CopyImpl(T* dst_ptr, const T* src_ptr, size_t count) {
+  constexpr int kTWordSize = sizeof(T);
+#ifdef DEBUG
+  Address dst = reinterpret_cast<Address>(dst_ptr);
+  Address src = reinterpret_cast<Address>(src_ptr);
+  DCHECK(IsAligned(dst, kTWordSize));
+  DCHECK(IsAligned(src, kTWordSize));
+  DCHECK(((src <= dst) && ((src + count * kTWordSize) <= dst)) ||
+         ((dst <= src) && ((dst + count * kTWordSize) <= src)));
+#endif
   // Use block copying MemCopy if the segment we're copying is
   // enough to justify the extra call/setup overhead.
-  static const size_t kBlockCopyLimit = 16;
-  Address* dst_ptr = reinterpret_cast<Address*>(dst);
-  Address* src_ptr = reinterpret_cast<Address*>(src);
-  if (num_words < kBlockCopyLimit) {
+  if (count < kBlockCopyLimit) {
     do {
-      num_words--;
+      count--;
       *dst_ptr++ = *src_ptr++;
-    } while (num_words > 0);
+    } while (count > 0);
   } else {
-    MemCopy(dst_ptr, src_ptr, num_words * kSystemPointerSize);
+    MemCopy(dst_ptr, src_ptr, count * kTWordSize);
  }
 }

+// Copies kSystemPointerSize-sized words from |src| to |dst|. The data spans
+// must not overlap. |src| and |dst| must be kSystemPointerSize-aligned.
+inline void CopyWords(Address dst, const Address src, size_t num_words) {
+  static const size_t kBlockCopyLimit = 16;
+  CopyImpl<kBlockCopyLimit>(reinterpret_cast<Address*>(dst),
+                            reinterpret_cast<const Address*>(src), num_words);
+}
+
 // Copies data from |src| to |dst|. The data spans must not overlap.
 template <typename T>
 inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
   STATIC_ASSERT(sizeof(T) == 1);
-  DCHECK(((src <= dst) && ((src + num_bytes) <= dst)) ||
-         ((dst <= src) && ((dst + num_bytes) <= src)));
   if (num_bytes == 0) return;
-
-  // Use block copying MemCopy if the segment we're copying is
-  // enough to justify the extra call/setup overhead.
-  static const int kBlockCopyLimit = kMinComplexMemCopy;
-  if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
-    do {
-      num_bytes--;
-      *dst++ = *src++;
-    } while (num_bytes > 0);
-  } else {
-    MemCopy(dst, src, num_bytes);
-  }
+  CopyImpl<kMinComplexMemCopy>(dst, src, num_bytes);
 }

 inline void MemsetPointer(Address* dest, Address value, size_t counter) {
@@ -236,7 +231,7 @@ template <typename sourcechar, typename sinkchar>
 void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) {
   sinkchar* limit = dest + chars;
   if ((sizeof(*dest) == sizeof(*src)) &&
-      (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
+      (chars >= kMinComplexMemCopy / sizeof(*dest))) {
     MemCopy(dest, src, chars * sizeof(*dest));
   } else {
     while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
......
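Changing kMinComplexMemCopy from int to size_t lets the threshold be compared directly against unsigned element counts, which is why the static_cast in CopyCharsUnsigned goes away. A self-contained sketch of that comparison (values and the simplified copy body are illustrative, not the V8 implementation):

```cpp
#include <cstddef>
#include <cstring>

const size_t kMinComplexMemCopy = 16;  // illustrative threshold

template <typename SinkChar, typename SourceChar>
void CopyCharsUnsigned(SinkChar* dest, const SourceChar* src, size_t chars) {
  if (sizeof(SinkChar) == sizeof(SourceChar) &&
      chars >= kMinComplexMemCopy / sizeof(SinkChar)) {  // size_t vs size_t
    std::memcpy(dest, src, chars * sizeof(SinkChar));
  } else {
    for (size_t i = 0; i < chars; ++i) dest[i] = static_cast<SinkChar>(src[i]);
  }
}

int main() {
  const char src[] = "hello world, long enough for the MemCopy path";
  char dst[sizeof(src)] = {};
  CopyCharsUnsigned(dst, src, sizeof(src));  // large count: memcpy branch
  return dst[0] == 'h' ? 0 : 1;
}
```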
@@ -49,7 +49,7 @@ class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
     operator Tagged_t() const { return AsAtomicTagged::Relaxed_Load(address_); }
     void swap(Reference& other) {
-      Address tmp = value();
+      Tagged_t tmp = value();
       AsAtomicTagged::Relaxed_Store(address_, other.value());
       AsAtomicTagged::Relaxed_Store(other.address_, tmp);
     }
@@ -63,7 +63,7 @@ class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
     }

   private:
-    Address value() const { return AsAtomicTagged::Relaxed_Load(address_); }
+    Tagged_t value() const { return AsAtomicTagged::Relaxed_Load(address_); }

     Tagged_t* address_;
   };
......
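The AtomicSlot fix makes the swap temporary use the slot's storage type: once Tagged_t can be narrower than Address, an Address-typed temporary no longer matches the word actually stored in the slot. A sketch of the same swap with std::atomic stand-ins (32-bit Tagged_t is assumed):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

using Tagged_t = uint32_t;  // assumption: compressed tagged word
using Address = uintptr_t;  // full system pointer, shown only for contrast

struct Reference {  // stand-in for AtomicSlot::Reference
  std::atomic<Tagged_t>* cell;
  Tagged_t value() const { return cell->load(std::memory_order_relaxed); }
  void swap(Reference& other) {
    Tagged_t tmp = value();  // Tagged_t, not Address: matches storage width
    cell->store(other.value(), std::memory_order_relaxed);
    other.cell->store(tmp, std::memory_order_relaxed);
  }
};

int main() {
  static_assert(sizeof(Tagged_t) <= sizeof(Address), "layout assumption");
  std::atomic<Tagged_t> a{1}, b{2};
  Reference ra{&a}, rb{&b};
  ra.swap(rb);
  assert(a.load() == 2 && b.load() == 1);
  return 0;
}
```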
@@ -70,16 +70,17 @@ void FullMaybeObjectSlot::store(MaybeObject value) const {
 }

 MaybeObject FullMaybeObjectSlot::Relaxed_Load() const {
-  return MaybeObject(AsAtomicTagged::Relaxed_Load(location()));
+  return MaybeObject(base::AsAtomicPointer::Relaxed_Load(location()));
 }

 void FullMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
-  AsAtomicTagged::Relaxed_Store(location(), value->ptr());
+  base::AsAtomicPointer::Relaxed_Store(location(), value->ptr());
 }

 void FullMaybeObjectSlot::Release_CompareAndSwap(MaybeObject old,
                                                  MaybeObject target) const {
-  AsAtomicTagged::Release_CompareAndSwap(location(), old.ptr(), target.ptr());
+  base::AsAtomicPointer::Release_CompareAndSwap(location(), old.ptr(),
+                                                target.ptr());
 }

 //
@@ -107,6 +108,14 @@ void FullHeapObjectSlot::StoreHeapObject(HeapObject value) const {
 // Utils.
 //

+// Copies tagged words from |src| to |dst|. The data spans must not overlap.
+// |src| and |dst| must be kTaggedSize-aligned.
+inline void CopyTagged(Address dst, const Address src, size_t num_tagged) {
+  static const size_t kBlockCopyLimit = 16;
+  CopyImpl<kBlockCopyLimit>(reinterpret_cast<Tagged_t*>(dst),
+                            reinterpret_cast<const Tagged_t*>(src), num_tagged);
+}
+
 // Sets |counter| number of kTaggedSize-sized values starting at |start| slot.
 inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) {
   // TODO(ishell): revisit this implementation, maybe use "rep stosl"
......
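The AsAtomicTagged to base::AsAtomicPointer switch in the "Full" slot methods reflects the same width split: full slots always hold a pointer-sized word, while on-heap tagged slots shrink to Tagged_t under compression, so each slot flavour needs an atomic helper of matching width. A sketch of the distinction with std::atomic stand-ins (the 4-vs-8-byte split is an assumption about a 64-bit compressed build):

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

using Tagged_t = uint32_t;  // on-heap slot word (assumed compressed build)
using Address = uintptr_t;  // full slot word (always pointer-sized)

// Relaxed stores through helpers whose width matches the slot's storage,
// mirroring AsAtomicTagged (Tagged_t-wide) vs base::AsAtomicPointer
// (Address-wide) in the diff above.
void RelaxedStoreTagged(std::atomic<Tagged_t>* slot, Tagged_t value) {
  slot->store(value, std::memory_order_relaxed);
}
void RelaxedStoreFull(std::atomic<Address>* slot, Address value) {
  slot->store(value, std::memory_order_relaxed);
}

int main() {
  std::atomic<Tagged_t> tagged_slot{0};
  std::atomic<Address> full_slot{0};
  RelaxedStoreTagged(&tagged_slot, 0x1234u);
  RelaxedStoreFull(&full_slot, Address{0xdeadbeef});
  // On a 64-bit compressed-pointer build this prints "4 vs 8".
  std::cout << sizeof(Tagged_t) << " vs " << sizeof(Address) << "\n";
  return 0;
}
```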