Commit 3647f758 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Move marking bitmap into the memory chunk header

Instead of allocating the bitmap with malloc, we now reserve a block
at the start of the memory chunk. This CL is a partial revert of
https://chromium-review.googlesource.com/c/v8/v8/+/1254125
Additionally, it refactors the field offset computations and moves them
to MemoryChunkLayout.

Having the bitmap in the memory chunk simplifies sharing of RO pages
and also solves the malloc fragmentation issues.
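To illustrate the lifecycle change (a sketch, not part of the CL; the
chunk_address variable below is assumed for illustration):

    // Before: the bitmap was a separately malloc'd block, created in the
    // BasicMemoryChunk constructor and freed in ReleaseMarkingBitmap():
    //   marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    //   ...
    //   free(marking_bitmap_);
    //
    // After: the bitmap lives at a fixed offset inside the chunk, so it is
    // reserved, shared (e.g. for read-only pages), and released together
    // with the chunk's own memory:
    //   Bitmap* bitmap =
    //       Bitmap::FromAddress(chunk_address + kMarkingBitmapOffset);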

Bug: chromium:1073140
Change-Id: Ibc04f48921fc9496370858ce4c25c56b31c93c89
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2289979
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68783}
parent db36a80b
......@@ -2542,6 +2542,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/memory-allocator.cc",
"src/heap/memory-allocator.h",
"src/heap/memory-chunk-inl.h",
"src/heap/memory-chunk-layout.cc",
"src/heap/memory-chunk-layout.h",
"src/heap/memory-chunk.cc",
"src/heap/memory-chunk.h",
"src/heap/memory-measurement-inl.h",
......
......@@ -238,8 +238,8 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
void GetMarkBit(TNode<IntPtrT> object, TNode<IntPtrT>* cell,
TNode<IntPtrT>* mask) {
TNode<IntPtrT> page = PageFromAddress(object);
-    TNode<IntPtrT> bitmap = Load<IntPtrT>(
-        page, IntPtrConstant(BasicMemoryChunk::kMarkBitmapOffset));
+    TNode<IntPtrT> bitmap =
+        IntPtrAdd(page, IntPtrConstant(MemoryChunk::kMarkingBitmapOffset));
{
// Temp variable to calculate cell offset in bitmap.
......
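The stub change above swaps a dependent load for plain address arithmetic:
the bitmap address is now the page base plus a compile-time constant rather
than a pointer read out of the header. A rough scalar analogue (illustrative
only; the offset value below is assumed, the real one comes from
MemoryChunkLayout):

    #include <cstdint>

    constexpr uintptr_t kMarkingBitmapOffset = 72;  // assumed, not the real value

    // Before: bitmap = *(page + kMarkBitmapOffset);  // extra memory load
    // After: a single add, no load.
    inline uintptr_t BitmapAddress(uintptr_t page) {
      return page + kMarkingBitmapOffset;
    }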
......@@ -28,17 +28,10 @@ STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
Address area_end) {
size_ = size;
-  marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
area_start_ = area_start;
area_end_ = area_end;
}
- void BasicMemoryChunk::ReleaseMarkingBitmap() {
-   DCHECK_NOT_NULL(marking_bitmap_);
-   free(marking_bitmap_);
-   marking_bitmap_ = nullptr;
- }
// static
BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
size_t size, Address area_start,
......@@ -55,6 +48,7 @@ BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
+  chunk->marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
return chunk;
}
......@@ -75,5 +69,35 @@ void BasicMemoryChunk::SynchronizedHeapLoad() {
}
#endif
class BasicMemoryChunkValidator {
// Computed offsets should match the compiler generated ones.
STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
offsetof(BasicMemoryChunk, size_));
STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
offsetof(BasicMemoryChunk, flags_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(offsetof(BasicMemoryChunk, size_) ==
MemoryChunkLayout::kSizeOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, flags_) ==
MemoryChunkLayout::kFlagsOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, heap_) ==
MemoryChunkLayout::kHeapOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, area_start_) ==
MemoryChunkLayout::kAreaStartOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, area_end_) ==
MemoryChunkLayout::kAreaEndOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, allocated_bytes_) ==
MemoryChunkLayout::kAllocatedBytesOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, wasted_memory_) ==
MemoryChunkLayout::kWastedMemoryOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, high_water_mark_) ==
MemoryChunkLayout::kHighWaterMarkOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, owner_) ==
MemoryChunkLayout::kOwnerOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, reservation_) ==
MemoryChunkLayout::kReservationOffset);
};
} // namespace internal
} // namespace v8
......@@ -12,6 +12,7 @@
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/objects/heap-object.h"
#include "src/utils/allocation.h"
......@@ -254,8 +255,6 @@ class BasicMemoryChunk {
return addr >= area_start() && addr <= area_end();
}
-  void ReleaseMarkingBitmap();
static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
BaseSpace* owner,
......@@ -265,25 +264,15 @@ class BasicMemoryChunk {
void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
size_t allocated_bytes() const { return allocated_bytes_; }
-  static const intptr_t kSizeOffset = 0;
-  static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
-  static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
-  static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
-  static const intptr_t kAreaStartOffset = kHeapOffset + kSystemPointerSize;
-  static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
+  static const intptr_t kSizeOffset = MemoryChunkLayout::kSizeOffset;
+  static const intptr_t kFlagsOffset = MemoryChunkLayout::kFlagsOffset;
+  static const intptr_t kHeapOffset = MemoryChunkLayout::kHeapOffset;
+  static const intptr_t kAreaStartOffset = MemoryChunkLayout::kAreaStartOffset;
+  static const intptr_t kAreaEndOffset = MemoryChunkLayout::kAreaEndOffset;
+  static const intptr_t kMarkingBitmapOffset =
+      MemoryChunkLayout::kMarkingBitmapOffset;
   static const size_t kHeaderSize =
-      kSizeOffset + kSizetSize       // size_t size
-      + kUIntptrSize                 // uintptr_t flags_
-      + kSystemPointerSize           // Bitmap* marking_bitmap_
-      + kSystemPointerSize           // Heap* heap_
-      + kSystemPointerSize           // Address area_start_
-      + kSystemPointerSize           // Address area_end_
-      + kSizetSize                   // size_t allocated_bytes_
-      + kSizetSize                   // size_t wasted_memory_
-      + kSystemPointerSize           // std::atomic<intptr_t> high_water_mark_
-      + kSystemPointerSize           // Address owner_
-      + 3 * kSystemPointerSize;      // VirtualMemory reservation_
+      MemoryChunkLayout::kBasicMemoryChunkHeaderSize;
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromAddress(Address a) {
......@@ -299,7 +288,8 @@ class BasicMemoryChunk {
template <AccessMode mode>
ConcurrentBitmap<mode>* marking_bitmap() const {
-    return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
+    return static_cast<ConcurrentBitmap<mode>*>(
+        Bitmap::FromAddress(address() + kMarkingBitmapOffset));
}
Address HighWaterMark() { return address() + high_water_mark_; }
......@@ -349,8 +339,6 @@ class BasicMemoryChunk {
uintptr_t flags_ = NO_FLAGS;
-  Bitmap* marking_bitmap_ = nullptr;
// TODO(v8:7464): Find a way to remove this.
// This goes against the spirit for the BasicMemoryChunk, but until C++14/17
// is the default it needs to live here because MemoryChunk is not standard
......@@ -390,18 +378,6 @@ class BasicMemoryChunk {
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
- class BasicMemoryChunkValidator {
-   // Computed offsets should match the compiler generated ones.
-   STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
-                 offsetof(BasicMemoryChunk, size_));
-   STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
-                 offsetof(BasicMemoryChunk, flags_));
-   STATIC_ASSERT(BasicMemoryChunk::kMarkBitmapOffset ==
-                 offsetof(BasicMemoryChunk, marking_bitmap_));
-   STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
-                 offsetof(BasicMemoryChunk, heap_));
- };
} // namespace internal
} // namespace v8
......
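The header changes above implement a single source of truth: MemoryChunkLayout
computes every offset, BasicMemoryChunk merely re-exports the constants, and
static asserts pin the compiler's actual layout to the computed one. A
self-contained miniature of the pattern (hypothetical names, not V8 code):

    #include <cstddef>

    struct Layout {
      enum : size_t { kAOffset = 0, kBOffset = kAOffset + sizeof(size_t) };
    };

    struct Chunk {
      size_t a_;
      void* b_;
      static const size_t kBOffset = Layout::kBOffset;  // re-exported alias
    };

    // The computed offset must match what the compiler actually produces.
    static_assert(offsetof(Chunk, b_) == Layout::kBOffset, "layout drift");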
......@@ -42,9 +42,6 @@ class ConcurrentMarkingState final
: memory_chunk_data_(memory_chunk_data) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
-    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
-                  reinterpret_cast<intptr_t>(chunk),
-              BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
......
......@@ -41,8 +41,7 @@ namespace heap_internals {
struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = kSizetSize;
-  static constexpr uintptr_t kHeapOffset =
-      kSizetSize + kUIntptrSize + kSystemPointerSize;
+  static constexpr uintptr_t kHeapOffset = kSizetSize + kUIntptrSize;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
......
......@@ -298,9 +298,6 @@ class MajorMarkingState final
public:
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
-    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
-                  reinterpret_cast<intptr_t>(chunk),
-              BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
......@@ -326,9 +323,6 @@ class MajorAtomicMarkingState final
public:
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
-    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
-                  reinterpret_cast<intptr_t>(chunk),
-              BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
......@@ -343,9 +337,6 @@ class MajorNonAtomicMarkingState final
public:
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
-    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
-                  reinterpret_cast<intptr_t>(chunk),
-              BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
}
......
......@@ -561,7 +561,6 @@ void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterMemory(chunk);
chunk->SetFlag(MemoryChunk::PRE_FREED);
-  chunk->ReleaseMarkingBitmap();
VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
namespace v8 {
namespace internal {
size_t MemoryChunkLayout::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return ::RoundUp(MemoryChunk::kHeaderSize + Bitmap::kSize,
MemoryAllocator::GetCommitPageSize());
}
size_t MemoryChunkLayout::CodePageGuardSize() {
return MemoryAllocator::GetCommitPageSize();
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return CodePageGuardStartOffset() + CodePageGuardSize();
}
intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
// We are guarding code pages: the last OS page will be protected as
// non-writable.
return MemoryChunk::kPageSize -
static_cast<int>(MemoryAllocator::GetCommitPageSize());
}
size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
DCHECK_LE(kMaxRegularHeapObjectSize, memory);
return memory;
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
return RoundUp(MemoryChunk::kHeaderSize + Bitmap::kSize, kTaggedSize);
}
size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE) {
return ObjectStartOffsetInCodePage();
}
return ObjectStartOffsetInDataPage();
}
size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
DCHECK_LE(kMaxRegularHeapObjectSize, memory);
return memory;
}
size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE) {
return AllocatableMemoryInCodePage();
}
return AllocatableMemoryInDataPage();
}
} // namespace internal
} // namespace v8
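As a worked example of the guard-page arithmetic above (all numbers assumed
for illustration: a 320-byte header, a 16 KiB bitmap, a 4 KiB commit page;
the real values vary by platform and build):

    #include <cstddef>

    constexpr size_t RoundUpTo(size_t x, size_t multiple) {
      return (x + multiple - 1) / multiple * multiple;
    }

    // CodePageGuardStartOffset: the first whole OS page past header + bitmap.
    static_assert(RoundUpTo(320 + 16 * 1024, 4096) == 20480, "5 OS pages");
    // ObjectStartOffsetInCodePage: skip one more (guard) page past that.
    static_assert(20480 + 4096 == 24576, "code objects start at 24576");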
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#define V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#include "src/heap/heap.h"
#include "src/heap/list.h"
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
class Bitmap;
class CodeObjectRegistry;
class FreeListCategory;
class Heap;
class LocalArrayBuferTracker;
class TypedSlotsSet;
class SlotSet;
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
NUMBER_OF_REMEMBERED_SET_TYPES
};
class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static const int kNumSets = NUMBER_OF_REMEMBERED_SET_TYPES;
static const int kNumTypes = ExternalBackingStoreType::kNumTypes;
#define FIELD(Type, Name) \
k##Name##Offset, k##Name##End = k##Name##Offset + sizeof(Type) - 1
enum Header {
// BasicMemoryChunk fields:
FIELD(size_t, Size),
FIELD(uintptr_t, Flags),
FIELD(Heap*, Heap),
FIELD(Address, AreaStart),
FIELD(Address, AreaEnd),
FIELD(size_t, AllocatedBytes),
FIELD(size_t, WastedMemory),
FIELD(std::atomic<intptr_t>, HighWaterMark),
FIELD(Address, Owner),
FIELD(VirtualMemory, Reservation),
// MemoryChunk fields:
FIELD(SlotSet* [kNumSets], SlotSet),
FIELD(std::atomic<size_t>, ProgressBar),
FIELD(std::atomic<intptr_t>, LiveByteCount),
FIELD(SlotSet*, SweepingSlotSet),
FIELD(TypedSlotsSet* [kNumSets], TypedSlotSet),
FIELD(void* [kNumSets], InvalidatedSlots),
FIELD(base::Mutex*, Mutex),
FIELD(std::atomic<intptr_t>, ConcurrentSweeping),
FIELD(base::Mutex*, PageProtectionChangeMutex),
FIELD(uintptr_t, WriteUnprotectCounter),
FIELD(std::atomic<size_t>[kNumTypes], ExternalBackingStoreBytes),
FIELD(heap::ListNode<MemoryChunk>, ListNode),
FIELD(FreeListCategory**, Categories),
FIELD(LocalArrayBuferTracker*, LocalTracker),
FIELD(std::atomic<intptr_t>, YoungGenerationLiveByteCount),
FIELD(Bitmap*, YoungGenerationBitmap),
FIELD(CodeObjectRegistry*, CodeObjectRegistry),
FIELD(PossiblyEmptyBuckets, PossiblyEmptyBuckets),
kMarkingBitmapOffset,
kMemoryChunkHeaderSize = kMarkingBitmapOffset,
kMemoryChunkHeaderStart = kSlotSetOffset,
kBasicMemoryChunkHeaderSize = kMemoryChunkHeaderStart,
kBasicMemoryChunkHeaderStart = 0,
};
static size_t CodePageGuardStartOffset();
static size_t CodePageGuardSize();
static intptr_t ObjectStartOffsetInCodePage();
static intptr_t ObjectEndOffsetInCodePage();
static size_t AllocatableMemoryInCodePage();
static intptr_t ObjectStartOffsetInDataPage();
static size_t AllocatableMemoryInDataPage();
static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
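The FIELD macro relies on enum auto-increment: each entry defines kNameOffset
and kNameEnd = kNameOffset + sizeof(Type) - 1, so the next field's Offset
enumerator lands exactly one byte past the previous End. A self-contained
miniature (illustrative; 64-bit sizes assumed):

    #include <cstddef>
    #include <cstdint>

    #define FIELD(Type, Name) \
      k##Name##Offset, k##Name##End = k##Name##Offset + sizeof(Type) - 1

    enum MiniHeader : size_t {
      FIELD(size_t, Size),      // kSizeOffset = 0, kSizeEnd = 7
      FIELD(uintptr_t, Flags),  // kFlagsOffset = 8, kFlagsEnd = 15
      kMiniHeaderSize,          // trailing enumerator = total bytes so far
    };
    #undef FIELD

    static_assert(kFlagsOffset == sizeof(size_t), "fields pack back to back");
    static_assert(kMiniHeaderSize == sizeof(size_t) + sizeof(uintptr_t), "");

Note that the enum ignores alignment padding; the STATIC_ASSERTs and the
debug-mode ValidateOffsets in this CL exist precisely to catch any divergence
from the compiler's real layout.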
......@@ -27,61 +27,6 @@ void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
}
}
- size_t MemoryChunkLayout::CodePageGuardStartOffset() {
-   // We are guarding code pages: the first OS page after the header
-   // will be protected as non-writable.
-   return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
- }
- size_t MemoryChunkLayout::CodePageGuardSize() {
-   return MemoryAllocator::GetCommitPageSize();
- }
- intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
-   // We are guarding code pages: the first OS page after the header
-   // will be protected as non-writable.
-   return CodePageGuardStartOffset() + CodePageGuardSize();
- }
- intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
-   // We are guarding code pages: the last OS page will be protected as
-   // non-writable.
-   return Page::kPageSize -
-          static_cast<int>(MemoryAllocator::GetCommitPageSize());
- }
- size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
-   size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
-   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
-   return memory;
- }
- intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
-   return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
- }
- size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
-     AllocationSpace space) {
-   if (space == CODE_SPACE) {
-     return ObjectStartOffsetInCodePage();
-   }
-   return ObjectStartOffsetInDataPage();
- }
- size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
-   size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
-   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
-   return memory;
- }
- size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
-     AllocationSpace space) {
-   if (space == CODE_SPACE) {
-     return AllocatableMemoryInCodePage();
-   }
-   return AllocatableMemoryInDataPage();
- }
void MemoryChunk::InitializationMemoryFence() {
base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
......@@ -212,6 +157,10 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
chunk->possibly_empty_buckets_.Initialize();
+ #ifdef DEBUG
+   ValidateOffsets(chunk);
+ #endif
return chunk;
}
......@@ -280,7 +229,6 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
void MemoryChunk::ReleaseAllAllocatedMemory() {
ReleaseAllocatedMemoryNeededForWritableChunk();
-  if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
}
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
......@@ -444,5 +392,61 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
#ifdef DEBUG
void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
// Note that we cannot use offsetof because MemoryChunk is not a POD.
DCHECK_EQ(reinterpret_cast<Address>(&chunk->slot_set_) - chunk->address(),
MemoryChunkLayout::kSlotSetOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->progress_bar_) - chunk->address(),
MemoryChunkLayout::kProgressBarOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->live_byte_count_) - chunk->address(),
MemoryChunkLayout::kLiveByteCountOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->sweeping_slot_set_) - chunk->address(),
MemoryChunkLayout::kSweepingSlotSetOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->typed_slot_set_) - chunk->address(),
MemoryChunkLayout::kTypedSlotSetOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->invalidated_slots_) - chunk->address(),
MemoryChunkLayout::kInvalidatedSlotsOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->mutex_) - chunk->address(),
MemoryChunkLayout::kMutexOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->concurrent_sweeping_) -
chunk->address(),
MemoryChunkLayout::kConcurrentSweepingOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->page_protection_change_mutex_) -
chunk->address(),
MemoryChunkLayout::kPageProtectionChangeMutexOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->write_unprotect_counter_) -
chunk->address(),
MemoryChunkLayout::kWriteUnprotectCounterOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->external_backing_store_bytes_) -
chunk->address(),
MemoryChunkLayout::kExternalBackingStoreBytesOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->list_node_) - chunk->address(),
MemoryChunkLayout::kListNodeOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->categories_) - chunk->address(),
MemoryChunkLayout::kCategoriesOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->local_tracker_) - chunk->address(),
MemoryChunkLayout::kLocalTrackerOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->young_generation_live_byte_count_) -
chunk->address(),
MemoryChunkLayout::kYoungGenerationLiveByteCountOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->young_generation_bitmap_) -
chunk->address(),
MemoryChunkLayout::kYoungGenerationBitmapOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->code_object_registry_) -
chunk->address(),
MemoryChunkLayout::kCodeObjectRegistryOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->possibly_empty_buckets_) -
chunk->address(),
MemoryChunkLayout::kPossiblyEmptyBucketsOffset);
}
#endif
} // namespace internal
} // namespace v8
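ValidateOffsets compares member addresses at runtime instead of using
offsetof because, as its comment notes, MemoryChunk is not a standard-layout
type (it inherits data members from BasicMemoryChunk and adds its own, and
offsetof is only guaranteed for standard-layout classes). A minimal sketch of
the same check with hypothetical types:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Base { size_t size_; };             // data members here...
    struct Chunk : Base { void* slot_set_; };  // ...and here: not standard layout

    // offsetof(Chunk, slot_set_) is not portable here, so validate the
    // precomputed offset against real member addresses at runtime.
    void ValidateOffsets(Chunk* c, uintptr_t expected_slot_set_offset) {
      assert(reinterpret_cast<uintptr_t>(&c->slot_set_) -
                 reinterpret_cast<uintptr_t>(c) ==
             expected_slot_set_offset);
    }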
......@@ -15,6 +15,7 @@
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/slot-set.h"
namespace v8 {
......@@ -24,25 +25,6 @@ class CodeObjectRegistry;
class FreeListCategory;
class LocalArrayBufferTracker;
- class V8_EXPORT_PRIVATE MemoryChunkLayout {
-  public:
-   static size_t CodePageGuardStartOffset();
-   static size_t CodePageGuardSize();
-   static intptr_t ObjectStartOffsetInCodePage();
-   static intptr_t ObjectEndOffsetInCodePage();
-   static size_t AllocatableMemoryInCodePage();
-   static intptr_t ObjectStartOffsetInDataPage();
-   static size_t AllocatableMemoryInDataPage();
-   static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
-   static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
- };
- enum RememberedSetType {
-   OLD_TO_NEW,
-   OLD_TO_OLD,
-   NUMBER_OF_REMEMBERED_SET_TYPES
- };
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
......@@ -60,32 +42,10 @@ class MemoryChunk : public BasicMemoryChunk {
kInProgress,
};
-  static const size_t kHeaderSize =
-      BasicMemoryChunk::kHeaderSize  // Parent size.
-      + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
-      + kSizetSize   // size_t progress_bar_
-      + kIntptrSize  // intptr_t live_byte_count_
-      + kSystemPointerSize  // SlotSet* sweeping_slot_set_
-      + kSystemPointerSize *
-            NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
-      + kSystemPointerSize *
-            NUMBER_OF_REMEMBERED_SET_TYPES  // InvalidatedSlots* array
-      + kSystemPointerSize  // base::Mutex* mutex_
-      + kSystemPointerSize  // std::atomic<ConcurrentSweepingState>
-                            // concurrent_sweeping_
-      + kSystemPointerSize  // base::Mutex* page_protection_change_mutex_
-      + kSystemPointerSize  // uintptr_t write_unprotect_counter_
-      + kSizetSize * ExternalBackingStoreType::kNumTypes
-      // std::atomic<size_t> external_backing_store_bytes_
-      + kSystemPointerSize * 2  // heap::ListNode
-      + kSystemPointerSize      // FreeListCategory** categories_
-      + kSystemPointerSize      // LocalArrayBufferTracker* local_tracker_
-      + kIntptrSize  // std::atomic<intptr_t> young_generation_live_byte_count_
-      + kSystemPointerSize   // Bitmap* young_generation_bitmap_
-      + kSystemPointerSize   // CodeObjectRegistry* code_object_registry_
-      + kSystemPointerSize;  // PossiblyEmptyBuckets possibly_empty_buckets_
-  static const intptr_t kOldToNewSlotSetOffset = BasicMemoryChunk::kHeaderSize;
+  static const size_t kHeaderSize = MemoryChunkLayout::kMemoryChunkHeaderSize;
+  static const intptr_t kOldToNewSlotSetOffset =
+      MemoryChunkLayout::kSlotSetOffset;
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
......@@ -287,6 +247,9 @@ class MemoryChunk : public BasicMemoryChunk {
ConcurrentBitmap<mode>* young_generation_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
#ifdef DEBUG
static void ValidateOffsets(MemoryChunk* chunk);
#endif
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
......@@ -349,6 +312,7 @@ class MemoryChunk : public BasicMemoryChunk {
friend class MajorAtomicMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
friend class MemoryChunkValidator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
......
......@@ -85,9 +85,6 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
}
void ReadOnlyPage::MakeHeaderRelocatable() {
-  // Detached read-only space needs to have a valid marking bitmap. Instruct
-  // Lsan to ignore it if required.
-  LSAN_IGNORE_OBJECT(marking_bitmap_);
heap_ = nullptr;
owner_ = nullptr;
}
......
......@@ -817,15 +817,17 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
// Allocated objects size.
CHECK_EQ(faked_space.Size(), 16);
+  size_t committed_memory = RoundUp(
+      MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space.Size(),
+      allocator->GetCommitPageSize());
   // Amount of OS allocated memory.
-  CHECK_EQ(faked_space.CommittedMemory(), allocator->GetCommitPageSize());
-  CHECK_EQ(faked_space.CommittedPhysicalMemory(),
-           allocator->GetCommitPageSize());
+  CHECK_EQ(faked_space.CommittedMemory(), committed_memory);
+  CHECK_EQ(faked_space.CommittedPhysicalMemory(), committed_memory);
   // Capacity will be one OS page minus the page header.
   CHECK_EQ(faked_space.Capacity(),
-           allocator->GetCommitPageSize() -
-               MemoryChunkLayout::ObjectStartOffsetInDataPage());
+           committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
}
TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
......@@ -870,15 +872,16 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
// with pointer compression.
CHECK_EQ(faked_space.Size(), object_size + RoundUp(object_size, alignment));
// Amount of OS allocated memory will be 3 OS pages.
-  CHECK_EQ(faked_space.CommittedMemory(), 3 * allocator->GetCommitPageSize());
-  CHECK_EQ(faked_space.CommittedPhysicalMemory(),
-           3 * allocator->GetCommitPageSize());
+  size_t committed_memory = RoundUp(
+      MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space.Size(),
+      allocator->GetCommitPageSize());
+  CHECK_EQ(faked_space.CommittedMemory(), committed_memory);
+  CHECK_EQ(faked_space.CommittedPhysicalMemory(), committed_memory);
   // Capacity will be 3 OS pages minus the page header.
   CHECK_EQ(faked_space.Capacity(),
-           3 * allocator->GetCommitPageSize() -
-               MemoryChunkLayout::ObjectStartOffsetInDataPage());
+           committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
}
TEST(ReadOnlySpaceMetrics_TwoPages) {
......@@ -900,8 +903,10 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
// Allocate an object that's too big to have more than one on a page.
-  int object_size = static_cast<int>(
-      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE) / 2 + 16);
+  int object_size = RoundUp(
+      static_cast<int>(
+          MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE) / 2 + 16),
+      kTaggedSize);
CHECK_GT(object_size * 2,
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
faked_space.AllocateRaw(object_size, kWordAligned);
......
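The updated test expectations all follow one formula: committed memory is the
object start offset (header plus inline bitmap) plus the live size, rounded up
to the commit page size, and capacity is committed memory minus the object
start offset. With assumed numbers (object start offset 16704, 4 KiB commit
pages, 16 bytes allocated; not the real platform values):

    #include <cstddef>

    constexpr size_t RoundUpTo(size_t x, size_t multiple) {
      return (x + multiple - 1) / multiple * multiple;
    }

    constexpr size_t kObjectStart = 16704;  // assumed header + bitmap size
    constexpr size_t kCommitted = RoundUpTo(kObjectStart + 16, 4096);

    static_assert(kCommitted == 20480, "five 4 KiB OS pages committed");
    static_assert(kCommitted - kObjectStart == 3776, "remaining capacity");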