Commit 9c9682d0 authored by Michael Achenbach, committed by Commit Bot

Revert "[heap] Optimize MemoryChunk::FromAnyPointerAddress"

This reverts commit fd49c8bb.

Reason for revert: Speculative revert for:
https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Linux%20-%20arm64%20-%20sim%20-%20MSAN/24672

Original change's description:
> [heap] Optimize MemoryChunk::FromAnyPointerAddress
> 
> Currently this function requires the caller to hold a mutex for the
> large page chunk hashtable and performs a hashtable lookup.
> 
> This patch adds a header sentinel field in each MemoryChunk. The field
> is then used to distinguish large object slots from ordinary slots.
> 
> Bug: chromium:915233
> Change-Id: I9fbeeb4f07f49573d0a21f9a2cc934370e417d68
> Reviewed-on: https://chromium-review.googlesource.com/c/1391752
> Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#58732}

TBR=ulan@chromium.org,mlippautz@chromium.org

Change-Id: I232729fdfd55baef7de99ea2fd14fbc0a2f71d27
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:915233
Reviewed-on: https://chromium-review.googlesource.com/c/1406671
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58738}
parent 24f8f96b
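
For context, the two lookup strategies that this revert switches between can be modeled as follows. This is a simplified, self-contained sketch, not V8 code: every name in it (ChunkHeader, kChunkSize, large_page_ends, and so on) is an illustrative assumption. The reverted CL resolved an arbitrary slot address to its MemoryChunk by scanning backwards in chunk-sized steps until it found a header carrying a sentinel value, avoiding both the large-page hashtable and its mutex; the code restored below instead consults the large-object page map under a mutex and falls back to alignment masking for ordinary pages.

// Simplified model of the two MemoryChunk::FromAnyPointerAddress strategies.
// All declarations below are illustrative assumptions, not V8's real types.
#include <cstdint>
#include <map>
#include <mutex>

constexpr uintptr_t kChunkSize = 256 * 1024;  // assumed regular chunk size
constexpr uintptr_t kAlignmentMask = kChunkSize - 1;

struct ChunkHeader {
  // In the reverted CL this held an "impossible" tagged pointer derived from
  // the chunk's own base address; here it is modeled as the base address.
  uintptr_t header_sentinel;
};

inline uintptr_t BaseAddress(uintptr_t a) { return a & ~kAlignmentMask; }

// Strategy A (the reverted CL): read one word from the candidate header.
// Only a real chunk header stores the sentinel, so a mismatch means addr
// points into the body of a large page; step back one alignment unit and
// retry. No lock and no map lookup are required. (Assumes addr lies inside
// memory laid out as aligned chunks with initialized headers.)
bool HasHeaderSentinel(uintptr_t addr) {
  const ChunkHeader* header =
      reinterpret_cast<const ChunkHeader*>(BaseAddress(addr));
  return header->header_sentinel == BaseAddress(addr);
}

uintptr_t FromAnyPointerAddress_Sentinel(uintptr_t addr) {
  while (!HasHeaderSentinel(addr)) addr = BaseAddress(addr) - 1;
  return BaseAddress(addr);
}

// Strategy B (restored by this revert): consult a map of large pages, which
// requires holding the chunk-map mutex, then fall back to alignment masking
// for ordinary pages. large_page_ends maps a large page's base address to
// its end address.
uintptr_t FromAnyPointerAddress_Lookup(
    uintptr_t addr, const std::map<uintptr_t, uintptr_t>& large_page_ends,
    std::mutex& chunk_map_mutex) {
  std::lock_guard<std::mutex> guard(chunk_map_mutex);
  auto it = large_page_ends.upper_bound(addr);
  if (it != large_page_ends.begin()) {
    --it;
    if (addr >= it->first && addr < it->second) return it->first;
  }
  return BaseAddress(addr);
}

The trade-off the revert undoes: the sentinel strategy needs no synchronization but spends an extra word in every chunk header, while the lookup strategy keeps the header smaller at the cost of a mutex-guarded search on the large-object path.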
@@ -617,6 +617,13 @@ void Heap::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
   // trigger garbage collections.
 }
 
+bool Heap::IsWithinLargeObject(Address address) {
+  if (new_lo_space()->FindPage(address) || lo_space()->FindPage(address) ||
+      code_lo_space()->FindPage(address))
+    return true;
+  return false;
+}
+
 void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                               size_t amount) {
   base::CheckedDecrement(&backing_store_bytes_, amount);
@@ -377,6 +377,7 @@ class Heap {
   bool IsLargeObject(HeapObject object);
   bool IsLargeMemoryChunk(MemoryChunk* chunk);
+  inline bool IsWithinLargeObject(Address address);
 
   bool IsInYoungGeneration(HeapObject object);
@@ -158,7 +158,8 @@ bool NewSpace::ToSpaceContains(Object o) { return to_space_.Contains(o); }
 bool NewSpace::FromSpaceContains(Object o) { return from_space_.Contains(o); }
 
 bool PagedSpace::Contains(Address addr) {
-  return MemoryChunk::FromAnyPointerAddress(addr)->owner() == this;
+  if (heap()->IsWithinLargeObject(addr)) return false;
+  return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
 }
 
 bool PagedSpace::Contains(Object o) {
@@ -199,18 +200,12 @@ bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
   return false;
 }
 
-bool MemoryChunk::HasHeaderSentinel(Address slot_addr) {
-  Address base = BaseAddress(slot_addr);
-  if (slot_addr < base + kHeaderSize) return false;
-  return HeapObject::FromAddress(base) ==
-         ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
-}
-
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
-  while (!HasHeaderSentinel(addr)) {
-    addr = BaseAddress(addr) - 1;
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
+  MemoryChunk* chunk = heap->lo_space()->FindPage(addr);
+  if (chunk == nullptr) {
+    chunk = MemoryChunk::FromAddress(addr);
   }
-  return FromAddress(addr);
+  return chunk;
 }
 
 void MemoryChunk::IncrementExternalBackingStoreBytes(
@@ -617,12 +617,10 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      VirtualMemory reservation) {
   MemoryChunk* chunk = FromAddress(base);
 
-  DCHECK_EQ(base, chunk->address());
+  DCHECK(base == chunk->address());
 
   chunk->heap_ = heap;
   chunk->size_ = size;
-  chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
-  DCHECK(HasHeaderSentinel(area_start));
   chunk->area_start_ = area_start;
   chunk->area_end_ = area_end;
   chunk->flags_ = Flags(NO_FLAGS);
@@ -363,8 +363,6 @@ class MemoryChunk {
       kMarkBitmapOffset + kSystemPointerSize;
   static const intptr_t kHeapOffset =
       kReservationOffset + 3 * kSystemPointerSize;
-  static const intptr_t kHeaderSentinelOffset =
-      kHeapOffset + kSystemPointerSize;
 
   static const size_t kHeaderSize =
       kSizeOffset  // NOLINT
@@ -373,7 +371,6 @@ class MemoryChunk {
       + kSystemPointerSize      // Bitmap* marking_bitmap_
       + 3 * kSystemPointerSize  // VirtualMemory reservation_
       + kSystemPointerSize      // Heap* heap_
-      + kSystemPointerSize      // Address header_sentinel_
       + kSystemPointerSize      // Address area_start_
       + kSystemPointerSize      // Address area_end_
       + kSystemPointerSize      // Address owner_
@@ -408,21 +405,19 @@ class MemoryChunk {
   // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
   static const int kMaxWriteUnprotectCounter = 4;
 
-  static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
-
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
-    return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
+    return reinterpret_cast<MemoryChunk*>(a & ~kAlignmentMask);
   }
   // Only works if the object is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromHeapObject(const HeapObject o) {
-    return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
+    return reinterpret_cast<MemoryChunk*>(o.ptr() & ~kAlignmentMask);
   }
 
   void SetOldGenerationPageFlags(bool is_marking);
   void SetYoungGenerationPageFlags(bool is_marking);
 
-  static inline MemoryChunk* FromAnyPointerAddress(Address addr);
+  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
 
   static inline void UpdateHighWaterMark(Address mark) {
     if (mark == kNullAddress) return;
@@ -647,8 +642,6 @@ class MemoryChunk {
 
   void set_owner(Space* space) { owner_ = space; }
 
-  static inline bool HasHeaderSentinel(Address slot_addr);
-
   // Emits a memory barrier. For TSAN builds the other thread needs to perform
   // MemoryChunk::synchronized_heap() to simulate the barrier.
   void InitializationMemoryFence();
@@ -693,12 +686,6 @@ class MemoryChunk {
 
   Heap* heap_;
 
-  // This is used to distinguish the memory chunk header from the interior of
-  // a large page. The memory chunk header stores here an impossible tagged
-  // pointer: the tagged pointer of the page start. A field in a large object
-  // is guaranteed to not contain such a pointer.
-  Address header_sentinel_;
-
   // Start and end of allocatable memory on this chunk.
   Address area_start_;
   Address area_end_;
@@ -160,14 +160,14 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
   DCHECK_GE(index, 0);
   DCHECK_LT(index, kStoreBuffers);
   Address last_inserted_addr = kNullAddress;
-  MemoryChunk* chunk = nullptr;
 
+  // We are taking the chunk map mutex here because the page lookup of addr
+  // below may require us to check if addr is part of a large page.
+  base::MutexGuard guard(heap_->lo_space()->chunk_map_mutex());
   for (Address* current = start_[index]; current < lazy_top_[index];
        current++) {
     Address addr = *current;
-    if (MemoryChunk::BaseAddress(addr) != chunk->address()) {
-      chunk = MemoryChunk::FromAnyPointerAddress(addr);
-    }
+    MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
     if (IsDeletionAddress(addr)) {
       last_inserted_addr = kNullAddress;
       current++;
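
Usage note (a sketch restating the StoreBuffer hunk above, not additional code from this CL): with the revert in place, callers that may see addresses inside large pages pass the Heap* explicitly and, when processing many slots, hold the large-object chunk-map mutex around the loop, because FromAnyPointerAddress again performs a page lookup.

  // Sketch of the restored calling pattern, using identifiers from the hunk above.
  base::MutexGuard guard(heap_->lo_space()->chunk_map_mutex());
  for (Address* current = start_[index]; current < lazy_top_[index]; current++) {
    MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap_, *current);
    // ... update the remembered set of |chunk| for the slot *current ...
  }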