Commit 17890f67 authored by Hannes Payer, committed by Commit Bot

[heap] Externalize mark bitmap.

Change-Id: Idc52e3ed6af13b20569a412e98bae0841d32e009
Reviewed-on: https://chromium-review.googlesource.com/c/1254125
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56572}
parent 196bd1f0
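
In essence, the mark bitmap moves out of the MemoryChunk header: the chunk now stores only a Bitmap* at a fixed offset (kMarkBitmapOffset), and the bitmap itself is allocated on the C heap. A minimal sketch of that pattern, using illustrative types and sizes (the real AllocateMarkingBitmap/ReleaseMarkingBitmap appear in the spaces.cc hunk below):

  #include <cstddef>
  #include <cstdint>
  #include <cstdlib>

  // Illustrative stand-in; the real Bitmap::kSize depends on the page size
  // and the number of mark bits per tagged word.
  struct Bitmap {
    static const size_t kSize = 1 << 16;
  };

  struct ChunkSketch {
    size_t size_;
    uintptr_t flags_;
    Bitmap* marking_bitmap_;  // externalized: a pointer instead of inline storage

    void AllocateMarkingBitmap() {
      // calloc returns zero-filled memory, i.e. an all-clear (white) bitmap.
      marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    }
    void ReleaseMarkingBitmap() {
      free(marking_bitmap_);
      marking_bitmap_ = nullptr;
    }
  };
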
@@ -249,6 +249,8 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
void GetMarkBit(Node* object, Node** cell, Node** mask) {
Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
Node* bitmap = Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kMarkBitmapOffset));
{
// Temp variable to calculate cell offset in bitmap.
@@ -258,8 +260,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
r0 = WordShr(object, IntPtrConstant(shift));
r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
*cell = IntPtrAdd(IntPtrAdd(page, r0),
IntPtrConstant(MemoryChunk::kHeaderSize));
*cell = IntPtrAdd(bitmap, r0);
}
{
// Temp variable to calculate bit offset in cell.
......
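
A plain C++ restatement of the lookup the stub performs may help: mask off the page, read the external bitmap pointer at kMarkBitmapOffset, then derive the cell and bit from the word index. The helper name and constants below are illustrative (V8 used 512 KiB pages and 32-bit bitmap cells at the time):

  #include <cstdint>

  // Illustrative constants: 512 KiB pages, 8-byte tagged words, 32 bits/cell.
  constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;
  constexpr int kPointerSizeLog2 = 3;

  // bitmap_cells is the pointer loaded from the chunk at kMarkBitmapOffset.
  inline void GetMarkBitSketch(uintptr_t object, uint32_t* bitmap_cells,
                               uint32_t** cell, uint32_t* mask) {
    uintptr_t index = (object & kPageAlignmentMask) >> kPointerSizeLog2;
    *cell = bitmap_cells + (index >> 5);  // 32 mark bits per cell
    *mask = uint32_t{1} << (index & 31);
  }
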
@@ -34,7 +34,10 @@ class ConcurrentMarkingState final
: live_bytes_(live_bytes) {}
Bitmap* bitmap(const MemoryChunk* chunk) {
return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
......
@@ -3891,7 +3891,6 @@ void MinorMarkCompactCollector::MakeIterable(
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
DCHECK_EQ(0, free_start % (32 * kPointerSize));
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
......
@@ -77,13 +77,9 @@ class MarkingStateBase {
class MarkBitCellIterator {
public:
MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
DCHECK(Bitmap::IsCellAligned(
chunk_->AddressToMarkbitIndex(chunk_->area_start())));
DCHECK(Bitmap::IsCellAligned(
chunk_->AddressToMarkbitIndex(chunk_->area_end())));
last_cell_index_ =
Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
cell_base_ = chunk_->area_start();
cell_base_ = chunk_->address();
cell_index_ =
Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
cells_ = bitmap->cells();
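
The constructor now starts cell_base_ at the chunk base instead of area_start(): with the external bitmap, mark-bit indices are counted from the beginning of the chunk, and area_start()/area_end() no longer need to be cell-aligned (hence the removed DCHECKs). A sketch of the index mapping this relies on, with assumed constants:

  #include <cstdint>

  // Assumed: one mark bit per tagged word, counted from the chunk base.
  inline uint32_t AddressToMarkbitIndexSketch(uintptr_t chunk_base,
                                              uintptr_t addr) {
    constexpr int kPointerSizeLog2 = 3;  // 8-byte words, illustrative
    return static_cast<uint32_t>((addr - chunk_base) >> kPointerSizeLog2);
  }
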
@@ -342,7 +338,10 @@ class IncrementalMarkingState final
: public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap_;
}
// Concurrent marking uses local live bytes.
@@ -363,7 +362,10 @@ class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -384,7 +386,10 @@ class MajorNonAtomicMarkingState final
AccessMode::NON_ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
......
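
All four marking states now return the stored marking_bitmap_ pointer; the repeated DCHECK exists because the record-write stub (first hunk) loads that pointer through the raw kMarkBitmapOffset constant, so the C++ field offset must agree with it. A sketch of the equivalent check, assuming the field is accessible:

  #include <cstdint>

  template <typename Chunk>
  bool MarkBitmapOffsetMatches(const Chunk* chunk, intptr_t expected_offset) {
    // Byte distance from the chunk base to the marking_bitmap_ field.
    intptr_t actual = reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                      reinterpret_cast<intptr_t>(chunk);
    return actual == expected_offset;
  }
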
@@ -122,10 +122,6 @@ class V8_EXPORT_PRIVATE Bitmap {
return index & ~kBitIndexMask;
}
V8_INLINE static bool IsCellAligned(uint32_t index) {
return (index & kBitIndexMask) == 0;
}
V8_INLINE MarkBit::CellType* cells() {
return reinterpret_cast<MarkBit::CellType*>(this);
}
......
@@ -539,6 +539,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
chunk->marking_bitmap_ = nullptr;
chunk->local_tracker_ = nullptr;
chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
@@ -550,14 +551,15 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->categories_[i] = nullptr;
}
chunk->AllocateMarkingBitmap();
if (owner->identity() == RO_SPACE) {
heap->incremental_marking()
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
} else {
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(
chunk);
heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
0);
}
DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
@@ -1189,6 +1191,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
if (IsPagedSpace()) {
Page* page = static_cast<Page*>(this);
@@ -1330,6 +1333,17 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
void MemoryChunk::AllocateMarkingBitmap() {
DCHECK_NULL(marking_bitmap_);
marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
}
void MemoryChunk::ReleaseMarkingBitmap() {
DCHECK_NOT_NULL(marking_bitmap_);
free(marking_bitmap_);
marking_bitmap_ = nullptr;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation
......
@@ -346,18 +346,17 @@ class MemoryChunk {
static const intptr_t kSizeOffset = 0;
static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
static const intptr_t kMarkBitmapOffset = kFlagsOffset + kPointerSize;
static const intptr_t kReservationOffset = kMarkBitmapOffset + kPointerSize;
static const size_t kMinHeaderSize =
kSizeOffset // NOLINT
+ kSizetSize // size_t size
+ kUIntptrSize // uintptr_t flags_
+ kPointerSize // Bitmap* marking_bitmap_
+ 3 * kPointerSize // VirtualMemory reservation_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
+ 3 * kPointerSize // VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
+ kIntptrSize // intptr_t progress_bar_
@@ -383,21 +382,11 @@ class MemoryChunk {
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kPointerSize; // Bitmap* young_generation_bitmap_
// We add some more space to the computed header size to amount for missing
// alignment requirements in our computation.
// Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
static const size_t kHeaderSize = kMinHeaderSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
// The start offset of the object area in a page. Aligned to both maps and
// code alignment to be suitable for both. Also aligned to 32 words because
// the marking bitmap is arranged in 32 bit chunks.
static const int kObjectStartAlignment = 32 * kPointerSize;
// TODO(hpayer): Fix kObjectStartOffset and kAllocatableMemory for code pages.
static const int kObjectStartOffset =
kBodyOffset - 1 +
(kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
kHeaderSize + (kPointerSize - kHeaderSize % kPointerSize);
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
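
Since the bitmap no longer sits between the header and the object area, the old 32-word kObjectStartAlignment is gone and kObjectStartOffset is just the header rounded up to a pointer boundary. Restating the new expression as a sketch (note it always adds at least one pointer-sized slot, exactly as written above):

  #include <cstddef>

  constexpr size_t ObjectStartOffsetSketch(size_t header_size,
                                           size_t pointer_size) {
    return header_size + (pointer_size - header_size % pointer_size);
  }
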
@@ -524,6 +513,9 @@ class MemoryChunk {
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
void AllocateMarkingBitmap();
void ReleaseMarkingBitmap();
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
@@ -663,13 +655,15 @@ class MemoryChunk {
size_t size_;
uintptr_t flags_;
// Start and end of allocatable memory on this chunk.
Address area_start_;
Address area_end_;
Bitmap* marking_bitmap_;
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
// Start and end of allocatable memory on this chunk.
Address area_start_;
Address area_end_;
// The space owning this memory chunk.
std::atomic<Space*> owner_;
......
@@ -249,7 +249,6 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
DCHECK_EQ(0, free_start % (32 * kPointerSize));
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
......
@@ -36,16 +36,21 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
Isolate* isolate = heap->isolate();
const int kArraySize = 128;
const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
CHECK_EQ(Page::kAllocatableMemory % kArraySize, 0);
Handle<FixedArray> array;
for (int allocated = 0; allocated != (Page::kAllocatableMemory - remainder);
allocated += array->Size()) {
if (allocated == (Page::kAllocatableMemory - kArraySize)) {
array = isolate->factory()->NewFixedArray(
heap::FixedArrayLenFromSize(kArraySize - remainder), TENURED);
CHECK_EQ(kArraySize - remainder, array->Size());
int allocated = 0;
do {
if (allocated + kArraySize * 2 > MemoryChunk::kAllocatableMemory) {
int size =
kArraySize * 2 -
((allocated + kArraySize * 2) - MemoryChunk::kAllocatableMemory) -
remainder;
int last_array_len = heap::FixedArrayLenFromSize(size);
array = isolate->factory()->NewFixedArray(last_array_len, TENURED);
CHECK_EQ(size, array->Size());
allocated += array->Size() + remainder;
} else {
array = isolate->factory()->NewFixedArray(kArrayLen, TENURED);
allocated += array->Size();
CHECK_EQ(kArraySize, array->Size());
}
if (handles.empty()) {
@@ -54,7 +59,7 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
Page::FromAddress(array->address())->area_start());
}
handles.push_back(array);
}
} while (allocated < MemoryChunk::kAllocatableMemory);
return handles;
}
......
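
The size computed for the final array in the loop above simplifies to the space left on the page minus the requested remainder; a sketch of the algebra:

  // kArraySize * 2 - ((allocated + kArraySize * 2) - kAllocatableMemory) - remainder
  //   == kAllocatableMemory - allocated - remainder
  constexpr int LastArraySizeSketch(int allocatable, int allocated,
                                    int remainder) {
    return allocatable - allocated - remainder;
  }
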
@@ -6128,7 +6128,7 @@ size_t MemoryAllocatorSizeFromHeapCapacity(size_t capacity) {
double factor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
// Some tables (e.g. deoptimization table) are allocated directly with the
// memory allocator. Allow some slack to account for them.
size_t slack = 1 * MB;
size_t slack = 5 * MB;
return static_cast<size_t>(capacity * factor) + slack;
}
......
@@ -35,7 +35,6 @@ Page* HeapTester::AllocateByteArraysOnPage(
CHECK(AllocateByteArrayForTest(heap, kLength, TENURED).To(&byte_array));
byte_arrays->push_back(byte_array);
page = Page::FromAddress(byte_array->address());
CHECK_EQ(page->area_size() % kSize, 0u);
size_t n = page->area_size() / kSize;
for (size_t i = 1; i < n; i++) {
CHECK(AllocateByteArrayForTest(heap, kLength, TENURED).To(&byte_array));
......