Commit a8471820 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Initialize pages with placement-new

Define ctors for BasicMemoryChunk, ReadOnlyPage, MemoryChunk, Page and
LargePage, so that pages can be initialized with placement-new. A chunk
is now initialized in a single step as either a ReadOnlyPage, a Page or
a LargePage. Previously, initialization was spread across multiple
locations, starting with BasicMemoryChunk::Initialize.

Adding ctors to these classes should improve debugging, since debug
info for classes without ctors was removed with the compiler flag
`-fuse-ctor-homing`.
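
A minimal sketch of the pattern this change adopts, using simplified
stand-in types rather than the real V8 classes (field names and the
helper below are illustrative only):

  #include <cstddef>
  #include <cstdint>
  #include <new>

  // Stand-in for a page/chunk header.
  class ChunkHeader {
   public:
    ChunkHeader(size_t size, uintptr_t area_start, uintptr_t area_end)
        : size_(size), area_start_(area_start), area_end_(area_end) {}

   private:
    size_t size_;
    uintptr_t area_start_;
    uintptr_t area_end_;
  };

  // Previously the header at `base` was reinterpret_cast and its fields
  // were assigned one by one; now it is constructed in place, fully
  // initialized in a single step.
  ChunkHeader* InitializeChunk(void* base, size_t size,
                               uintptr_t area_start, uintptr_t area_end) {
    return new (base) ChunkHeader(size, area_start, area_end);
  }

The caller remains responsible for handing in suitably sized and aligned
memory for `base`, which in V8 is the reserved chunk itself.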

Change-Id: Ib842bb9b1e93a6576cad8299b7c5dbfe299baa33
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3545092
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79638}
parent bd7f4823
......@@ -910,7 +910,7 @@ enum class CompactionSpaceKind {
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum PageSize { kRegular, kLarge };
enum class PageSize { kRegular, kLarge };
enum class CodeFlushMode {
kFlushBytecode,
......
......@@ -9,6 +9,7 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/objects/heap-object.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
......@@ -49,32 +50,19 @@ constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kIsLargePageMask;
constexpr BasicMemoryChunk::MainThreadFlags
BasicMemoryChunk::kSkipEvacuationSlotsRecordingMask;
BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
Address area_end) {
size_ = size;
area_start_ = area_start;
area_end_ = area_end;
}
// static
BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
size_t size, Address area_start,
Address area_end,
BaseSpace* owner,
VirtualMemory reservation) {
BasicMemoryChunk* chunk = FromAddress(base);
DCHECK_EQ(base, chunk->address());
new (chunk) BasicMemoryChunk(size, area_start, area_end);
chunk->heap_ = heap;
chunk->set_owner(owner);
chunk->reservation_ = std::move(reservation);
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
return chunk;
BasicMemoryChunk::BasicMemoryChunk(Heap* heap, BaseSpace* space,
size_t chunk_size, Address area_start,
Address area_end, VirtualMemory reservation)
: size_(chunk_size),
heap_(heap),
area_start_(area_start),
area_end_(area_end),
allocated_bytes_(area_end - area_start),
wasted_memory_(0),
high_water_mark_(area_start - reinterpret_cast<Address>(this)),
owner_(space),
reservation_(std::move(reservation)) {
marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
}
bool BasicMemoryChunk::InOldSpace() const {
......
......@@ -129,7 +129,9 @@ class BasicMemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
BasicMemoryChunk(size_t size, Address area_start, Address area_end);
BasicMemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
Address area_start, Address area_end,
VirtualMemory reservation);
static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
......@@ -178,7 +180,7 @@ class BasicMemoryChunk {
void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
// Set or clear multiple flags at a time. `mask` indicates which flags
// should be replaced with new `flags`.
void SetFlags(MainThreadFlags flags, MainThreadFlags mask) {
void SetFlags(MainThreadFlags flags, MainThreadFlags mask = kAllFlagsMask) {
main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
}
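
// Illustration (not part of this change): with the defaulted mask, call
// sites that previously passed the mask explicitly, e.g.
//   page->SetFlags(other->GetFlags(), Page::kAllFlagsMask);
// can now be written as
//   page->SetFlags(other->GetFlags());
// which is what the SemiSpace call sites later in this change do.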
......@@ -254,11 +256,6 @@ class BasicMemoryChunk {
return addr >= area_start() && addr <= area_end();
}
static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
BaseSpace* owner,
VirtualMemory reservation);
size_t wasted_memory() const { return wasted_memory_; }
void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
size_t allocated_bytes() const { return allocated_bytes_; }
......
......@@ -30,6 +30,21 @@ namespace internal {
// order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
LargePage::LargePage(Heap* heap, BaseSpace* space, size_t chunk_size,
Address area_start, Address area_end,
VirtualMemory reservation, Executability executable)
: MemoryChunk(heap, space, chunk_size, area_start, area_end,
std::move(reservation), executable, PageSize::kLarge) {
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
if (executable && chunk_size > LargePage::kMaxCodePageSize) {
FATAL("Code page is too large.");
}
SetFlag(MemoryChunk::LARGE_PAGE);
list_node().Initialize();
}
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable) {
if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
......@@ -107,7 +122,8 @@ void LargeObjectSpace::TearDown() {
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
memory_chunk_list_.Remove(page);
heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, page);
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
page);
}
}
......@@ -195,7 +211,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
Executability executable) {
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
this, object_size, executable);
if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
......@@ -324,7 +340,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
} else {
RemovePage(current, size);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, current);
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
current);
}
current = next_current;
}
......@@ -544,7 +561,8 @@ void NewLargeObjectSpace::FreeDeadObjects(
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
page);
if (FLAG_concurrent_marking && is_marking) {
heap()->concurrent_marking()->ClearMemoryChunkData(page);
}
......
......@@ -31,6 +31,10 @@ class LargePage : public MemoryChunk {
// already imposes on x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
LargePage(Heap* heap, BaseSpace* space, size_t chunk_size, Address area_start,
Address area_end, VirtualMemory reservation,
Executability executable);
static LargePage* FromHeapObject(HeapObject o) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
......
This diff is collapsed.
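
The collapsed file is most likely src/heap/memory-allocator.cc, which
carries the bulk of the rework: the allocator now returns an
uninitialized, aligned memory region together with its reservation, and
the caller constructs the page header in place. The sketch below is
inferred from the declarations in memory-allocator.h further down, not
taken from the hidden file; names and error handling are simplified.

  // Hypothetical sketch -- not the actual MemoryAllocator::AllocatePage.
  Page* AllocatePageSketch(MemoryAllocator& allocator, Heap* heap,
                           PagedSpace* space, Executability executable) {
    base::Optional<MemoryChunkAllocationResult> chunk =
        allocator.AllocateUninitializedChunk(space, space->AreaSize(),
                                             executable, PageSize::kRegular);
    if (!chunk) return nullptr;
    // Construct the Page header in place in the reserved region, using the
    // new Page ctor declared later in this change.
    return new (chunk->start)
        Page(heap, space, chunk->size, chunk->area_start, chunk->area_end,
             std::move(chunk->reservation), executable);
  }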
......@@ -17,6 +17,8 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-range.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
......@@ -44,15 +46,15 @@ class MemoryAllocator {
Unmapper(Heap* heap, MemoryAllocator* allocator)
: heap_(heap), allocator_(allocator) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
chunks_[kPooled].reserve(kReservedQueueingSlots);
chunks_[ChunkQueueType::kRegular].reserve(kReservedQueueingSlots);
chunks_[ChunkQueueType::kPooled].reserve(kReservedQueueingSlots);
}
void AddMemoryChunkSafe(MemoryChunk* chunk) {
if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
AddMemoryChunkSafe(kRegular, chunk);
AddMemoryChunkSafe(ChunkQueueType::kRegular, chunk);
} else {
AddMemoryChunkSafe(kNonRegular, chunk);
AddMemoryChunkSafe(ChunkQueueType::kNonRegular, chunk);
}
}
......@@ -62,9 +64,9 @@ class MemoryAllocator {
// been uncommitted.
// (2) Try to steal any memory chunk of kPageSize that would've been
// uncommitted.
MemoryChunk* chunk = GetMemoryChunkSafe(kPooled);
MemoryChunk* chunk = GetMemoryChunkSafe(ChunkQueueType::kPooled);
if (chunk == nullptr) {
chunk = GetMemoryChunkSafe(kRegular);
chunk = GetMemoryChunkSafe(ChunkQueueType::kRegular);
if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory.
chunk->ReleaseAllAllocatedMemory();
......@@ -126,13 +128,13 @@ class MemoryAllocator {
Heap* const heap_;
MemoryAllocator* const allocator_;
base::Mutex mutex_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
std::vector<MemoryChunk*> chunks_[ChunkQueueType::kNumberOfChunkQueues];
std::unique_ptr<v8::JobHandle> job_handle_;
friend class MemoryAllocator;
};
enum AllocationMode {
enum class AllocationMode {
// Regular allocation path. Does not use pool.
kRegular,
......@@ -140,7 +142,7 @@ class MemoryAllocator {
kUsePool,
};
enum FreeMode {
enum class FreeMode {
// Frees page immediately on the main thread.
kImmediately,
......@@ -182,13 +184,14 @@ class MemoryAllocator {
// whether pooled allocation, which only works for MemoryChunk::kPageSize,
// should be tried first.
V8_EXPORT_PRIVATE Page* AllocatePage(
MemoryAllocator::AllocationMode alloc_mode, size_t size, Space* owner,
MemoryAllocator::AllocationMode alloc_mode, Space* space,
Executability executable);
LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
V8_EXPORT_PRIVATE LargePage* AllocateLargePage(LargeObjectSpace* space,
size_t object_size,
Executability executable);
ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space);
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
......@@ -216,15 +219,6 @@ class MemoryAllocator {
address >= highest_ever_allocated_;
}
// Returns a MemoryChunk in which the memory region from commit_area_size to
// reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
PageSize page_size,
BaseSpace* space);
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
// internally memory is freed from |start_free| to the end of the reservation.
// Additional memory beyond the page is not accounted though, so
......@@ -265,14 +259,29 @@ class MemoryAllocator {
void UnregisterReadOnlyPage(ReadOnlyPage* page);
private:
// Returns a BasicMemoryChunk in which the memory region from commit_area_size
// to reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
size_t reserve_area_size, size_t commit_area_size,
Executability executable, BaseSpace* space);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
// Used to store all data about MemoryChunk allocation, e.g. in
// AllocateUninitializedChunk.
struct MemoryChunkAllocationResult {
void* start;
size_t size;
size_t area_start;
size_t area_end;
VirtualMemory reservation;
};
// Computes the size of a MemoryChunk from the size of the object_area and
// whether the chunk is executable or not.
static size_t ComputeChunkSize(size_t area_size, Executability executable);
// Internal allocation method for all pages/memory chunks. Returns data about
// the uninitialized memory region.
V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
Executability executable, PageSize page_size);
// Internal raw allocation method that allocates an aligned MemoryChunk and
// sets the right memory permissions.
Address AllocateAlignedMemory(size_t chunk_size, size_t area_size,
size_t alignment, Executability executable,
void* hint, VirtualMemory* controller);
......@@ -280,10 +289,11 @@ class MemoryAllocator {
// it succeeded and false otherwise.
bool CommitMemory(VirtualMemory* reservation);
V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
Address start,
size_t commit_size,
size_t reserved_size);
// Sets memory permissions on executable memory chunks. This entails page
// header (RW), guard pages (no access) and the object area (code modification
// permissions).
V8_WARN_UNUSED_RESULT bool SetPermissionsOnExecutableMemoryChunk(
VirtualMemory* vm, Address start, size_t area_size, size_t reserved_size);
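
// Illustrative layout of an executable chunk implied by the comment above
// (an assumption for readability; see MemoryChunkLayout for the actual
// offsets, which are not part of this diff):
//
//   base                                                      base + size
//   | header (RW) | guard (no access) | code area | guard (no access) |
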
// Disallows any access on memory region owned by given reservation object.
// Returns true if it succeeded and false otherwise.
......@@ -304,7 +314,8 @@ class MemoryAllocator {
// See AllocatePage for public interface. Note that currently we only
// support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
MemoryChunk* AllocatePagePooled(Space* owner);
base::Optional<MemoryChunkAllocationResult> AllocateUninitializedPageFromPool(
Space* space);
// Frees a pooled page. Only used on tear-down and last-resort GCs.
void FreePooledChunk(MemoryChunk* chunk);
......@@ -314,7 +325,7 @@ class MemoryAllocator {
// collector to rebuild page headers in the from space, which is
// used as a marking stack and its page headers are destroyed.
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
PagedSpace* space);
void UpdateAllocatedSpaceLimits(Address low, Address high) {
// The use of atomic primitives does not guarantee correctness (wrt.
......@@ -385,7 +396,7 @@ class MemoryAllocator {
std::atomic<Address> lowest_ever_allocated_;
std::atomic<Address> highest_ever_allocated_;
VirtualMemory last_chunk_;
base::Optional<VirtualMemory> reserved_chunk_at_virtual_memory_limit_;
Unmapper unmapper_;
#ifdef DEBUG
......
......@@ -4,6 +4,7 @@
#include "src/heap/memory-chunk-layout.h"
#include "src/common/globals.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
......@@ -46,7 +47,7 @@ intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE) {
if (space == CODE_SPACE || space == CODE_LO_SPACE) {
return ObjectStartOffsetInCodePage();
}
return ObjectStartOffsetInDataPage();
......
......@@ -7,6 +7,7 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
......@@ -118,92 +119,83 @@ PageAllocator::Permission DefaultWritableCodePermissions() {
} // namespace
MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
Executability executable,
PageSize page_size) {
MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_SHARED],
nullptr);
MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
Address area_start, Address area_end,
VirtualMemory reservation, Executability executable,
PageSize page_size)
: BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
std::move(reservation)) {
base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_SHARED], nullptr);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_CODE],
nullptr);
base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
}
base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_SHARED],
base::AsAtomicPointer::Release_Store(&sweeping_slot_set_, nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_SHARED],
nullptr);
chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
invalidated_slots_[OLD_TO_NEW] = nullptr;
invalidated_slots_[OLD_TO_OLD] = nullptr;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// Not actually used but initialize anyway for predictability.
chunk->invalidated_slots_[OLD_TO_CODE] = nullptr;
invalidated_slots_[OLD_TO_CODE] = nullptr;
}
chunk->progress_bar_.Initialize();
chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
chunk->mutex_ = new base::Mutex();
chunk->young_generation_bitmap_ = nullptr;
progress_bar_.Initialize();
set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
page_protection_change_mutex_ = new base::Mutex();
write_unprotect_counter_ = 0;
mutex_ = new base::Mutex();
young_generation_bitmap_ = nullptr;
chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
0;
chunk->external_backing_store_bytes_
[ExternalBackingStoreType::kExternalString] = 0;
external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] = 0;
chunk->categories_ = nullptr;
categories_ = nullptr;
heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(this,
0);
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
SetFlag(IS_EXECUTABLE);
if (heap->write_protect_code_memory()) {
chunk->write_unprotect_counter_ =
write_unprotect_counter_ =
heap->code_space_memory_modification_scope_depth();
} else {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAligned(chunk->area_start(), page_size));
size_t area_size =
RoundUp(chunk->area_end() - chunk->area_start(), page_size);
CHECK(chunk->reservation_.SetPermissions(
chunk->area_start(), area_size, DefaultWritableCodePermissions()));
DCHECK(IsAligned(area_start_, page_size));
size_t area_size = RoundUp(area_end_ - area_start_, page_size);
CHECK(reservation_.SetPermissions(area_start_, area_size,
DefaultWritableCodePermissions()));
}
}
if (chunk->owner()->identity() == CODE_SPACE) {
chunk->code_object_registry_ = new CodeObjectRegistry();
if (owner()->identity() == CODE_SPACE) {
code_object_registry_ = new CodeObjectRegistry();
} else {
chunk->code_object_registry_ = nullptr;
code_object_registry_ = nullptr;
}
chunk->possibly_empty_buckets_.Initialize();
possibly_empty_buckets_.Initialize();
if (page_size == PageSize::kRegular) {
chunk->active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
MemoryAllocator::GetCommitPageSizeBits(),
chunk->size());
active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
MemoryAllocator::GetCommitPageSizeBits(), size());
} else {
// We do not track active system pages for large pages.
chunk->active_system_pages_.Clear();
active_system_pages_.Clear();
}
// All pages of a shared heap need to be marked with this flag.
if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
if (heap->IsShared()) SetFlag(MemoryChunk::IN_SHARED_HEAP);
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
chunk->object_start_bitmap_ = ObjectStartBitmap(chunk->area_start());
#endif
#ifdef DEBUG
ValidateOffsets(chunk);
ValidateOffsets(this);
#endif
return chunk;
}
size_t MemoryChunk::CommittedPhysicalMemory() {
......
......@@ -53,6 +53,10 @@ class MemoryChunk : public BasicMemoryChunk {
// Maximum number of nested code memory modification scopes.
static const int kMaxWriteUnprotectCounter = 3;
MemoryChunk(Heap* heap, BaseSpace* space, size_t size, Address area_start,
Address area_end, VirtualMemory reservation,
Executability executable, PageSize page_size);
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return cast(BasicMemoryChunk::FromAddress(a));
......@@ -219,9 +223,6 @@ class MemoryChunk : public BasicMemoryChunk {
#endif
protected:
static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
Executability executable, PageSize page_size);
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
void ReleaseAllAllocatedMemory();
......
......@@ -70,8 +70,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
current_page->ClearFlags(Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
current_page);
heap()->memory_allocator()->Free(
MemoryAllocator::FreeMode::kConcurrentlyAndPool, current_page);
current_page = next_current;
}
......@@ -81,16 +81,14 @@ bool SemiSpace::EnsureCurrentCapacity() {
while (actual_pages < expected_pages) {
actual_pages++;
current_page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::kUsePool,
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
AccountCommitted(Page::kPageSize);
IncrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
current_page->SetFlags(first_page()->GetFlags());
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
......@@ -128,8 +126,7 @@ bool SemiSpace::Commit() {
// collector. Therefore, they must be initialized with the same FreeList as
// old pages.
Page* new_page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::kUsePool,
MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
DCHECK(!IsCommitted());
......@@ -155,8 +152,8 @@ bool SemiSpace::Uncommit() {
MemoryChunk* chunk = memory_chunk_list_.front();
DecrementCommittedPhysicalMemory(chunk->CommittedPhysicalMemory());
memory_chunk_list_.Remove(chunk);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
chunk);
heap()->memory_allocator()->Free(
MemoryAllocator::FreeMode::kConcurrentlyAndPool, chunk);
}
current_page_ = nullptr;
current_capacity_ = 0;
......@@ -191,8 +188,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::kUsePool,
MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
......@@ -215,8 +211,8 @@ void SemiSpace::RewindPages(int num_pages) {
MemoryChunk* last = last_page();
memory_chunk_list_.Remove(last);
DecrementCommittedPhysicalMemory(last->CommittedPhysicalMemory());
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
last);
heap()->memory_allocator()->Free(
MemoryAllocator::FreeMode::kConcurrentlyAndPool, last);
num_pages--;
}
}
......@@ -278,7 +274,7 @@ void SemiSpace::RemovePage(Page* page) {
}
void SemiSpace::PrependPage(Page* page) {
page->SetFlags(current_page()->GetFlags(), Page::kAllFlagsMask);
page->SetFlags(current_page()->GetFlags());
page->set_owner(this);
memory_chunk_list_.PushFront(page);
current_capacity_ += Page::kPageSize;
......
......@@ -107,7 +107,8 @@ void PagedSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
chunk);
}
accounting_stats_.Clear();
}
......@@ -351,13 +352,9 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
}
}
Page* PagedSpace::AllocatePage() {
return heap()->memory_allocator()->AllocatePage(
MemoryAllocator::kRegular, AreaSize(), this, executable());
}
Page* PagedSpace::Expand() {
Page* page = AllocatePage();
Page* page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, this, executable());
if (page == nullptr) return nullptr;
ConcurrentAllocationMutex guard(this);
AddPage(page);
......@@ -368,7 +365,8 @@ Page* PagedSpace::Expand() {
base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
size_t size_in_bytes) {
Page* page = AllocatePage();
Page* page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, this, executable());
if (page == nullptr) return {};
base::MutexGuard lock(&space_mutex_);
AddPage(page);
......@@ -528,7 +526,8 @@ void PagedSpace::ReleasePage(Page* page) {
AccountUncommitted(page->size());
DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
accounting_stats_.DecreaseCapacity(page->area_size());
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
page);
}
void PagedSpace::SetReadable() {
......
......@@ -391,8 +391,6 @@ class V8_EXPORT_PRIVATE PagedSpace
base::Optional<std::pair<Address, size_t>> ExpandBackground(
size_t size_in_bytes);
Page* AllocatePage();
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
......
......@@ -329,6 +329,19 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}
ReadOnlyPage::ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
Address area_start, Address area_end,
VirtualMemory reservation)
: BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
std::move(reservation)) {
allocated_bytes_ = 0;
SetFlags(Flag::NEVER_EVACUATE | Flag::READ_ONLY_HEAP);
heap->incremental_marking()
->non_atomic_marking_state()
->bitmap(this)
->MarkAllBits();
}
void ReadOnlyPage::MakeHeaderRelocatable() {
heap_ = nullptr;
owner_ = nullptr;
......@@ -612,7 +625,7 @@ void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
FreeLinearAllocationArea();
BasicMemoryChunk* chunk =
heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);
heap()->memory_allocator()->AllocateReadOnlyPage(this);
capacity_ += AreaSize();
accounting_stats_.IncreaseCapacity(chunk->area_size());
......@@ -754,20 +767,6 @@ void ReadOnlySpace::ShrinkPages() {
limit_ = pages_.back()->area_end();
}
ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
page->allocated_bytes_ = 0;
page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
heap()
->incremental_marking()
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);
return page;
}
SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
: SharedReadOnlySpace(heap) {
......
......@@ -26,6 +26,9 @@ class SnapshotData;
class ReadOnlyPage : public BasicMemoryChunk {
public:
ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
Address area_start, Address area_end, VirtualMemory reservation);
// Clears any pointers in the header that point out of the page that would
// otherwise make the header non-relocatable.
void MakeHeaderRelocatable();
......@@ -229,8 +232,6 @@ class ReadOnlySpace : public BaseSpace {
// Return size of allocatable area on a page in this space.
int AreaSize() const { return static_cast<int>(area_size_); }
ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);
Address FirstPageAddress() const { return pages_.front()->address(); }
protected:
......
......@@ -50,6 +50,12 @@ STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
// static
constexpr Page::MainThreadFlags Page::kCopyOnFlipFlagsMask;
Page::Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
Address area_end, VirtualMemory reservation,
Executability executable)
: MemoryChunk(heap, space, size, area_start, area_end,
std::move(reservation), executable, PageSize::kRegular) {}
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
......
......@@ -224,6 +224,9 @@ class Page : public MemoryChunk {
MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);
Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
Address area_end, VirtualMemory reservation, Executability executable);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
......
......@@ -113,9 +113,8 @@ class V8_NODISCARD TestCodePageAllocatorScope {
static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* code_page_allocator,
size_t reserve_area_size, size_t commit_area_size,
Executability executable, PageSize page_size,
Space* space) {
size_t area_size, Executability executable,
PageSize page_size, LargeObjectSpace* space) {
TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
TestCodePageAllocatorScope test_code_page_allocator_scope(
......@@ -129,23 +128,23 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
size_t guard_size =
(executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
reserve_area_size, commit_area_size, executable, page_size, space);
MemoryChunk* memory_chunk =
memory_allocator->AllocateLargePage(space, area_size, executable);
size_t reserved_size =
((executable == EXECUTABLE))
? allocatable_memory_area_offset +
RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
RoundUp(area_size, page_allocator->CommitPageSize()) +
guard_size
: RoundUp(allocatable_memory_area_offset + reserve_area_size,
: RoundUp(allocatable_memory_area_offset + area_size,
page_allocator->CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
memory_chunk->address() + memory_chunk->size());
CHECK(memory_chunk->area_end() <=
memory_chunk->address() + memory_chunk->size());
CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
CHECK(static_cast<size_t>(memory_chunk->area_size()) == area_size);
memory_allocator->Free(MemoryAllocator::kImmediately, memory_chunk);
memory_allocator->Free(MemoryAllocator::FreeMode::kImmediately, memory_chunk);
}
static unsigned int PseudorandomAreaSize() {
......@@ -160,12 +159,10 @@ TEST(MemoryChunk) {
Heap* heap = isolate->heap();
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t reserve_area_size = 1 * MB;
size_t initial_commit_area_size;
size_t area_size;
for (int i = 0; i < 100; i++) {
initial_commit_area_size =
area_size =
RoundUp(PseudorandomAreaSize(), page_allocator->CommitPageSize());
// With CodeRange.
......@@ -179,13 +176,11 @@ TEST(MemoryChunk) {
code_range_reservation.size(), MemoryChunk::kAlignment,
base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, EXECUTABLE, PageSize::kLarge,
heap->code_space());
VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
EXECUTABLE, PageSize::kLarge, heap->code_lo_space());
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, NOT_EXECUTABLE,
PageSize::kLarge, heap->old_space());
VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
NOT_EXECUTABLE, PageSize::kLarge, heap->lo_space());
}
}
......@@ -203,7 +198,7 @@ TEST(MemoryAllocator) {
CHECK(!faked_space.first_page());
CHECK(!faked_space.last_page());
Page* first_page = memory_allocator->AllocatePage(
MemoryAllocator::kRegular, faked_space.AreaSize(),
MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
faked_space.memory_chunk_list().PushBack(first_page);
......@@ -216,7 +211,7 @@ TEST(MemoryAllocator) {
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(
MemoryAllocator::kRegular, faked_space.AreaSize(),
MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
total_pages++;
faked_space.memory_chunk_list().PushBack(other);
......@@ -813,7 +808,7 @@ TEST(NoMemoryForNewPage) {
LinearAllocationArea allocation_info;
OldSpace faked_space(heap, &allocation_info);
Page* page = memory_allocator->AllocatePage(
MemoryAllocator::kRegular, faked_space.AreaSize(),
MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
CHECK_NULL(page);
......
......@@ -311,16 +311,15 @@ bool SequentialUnmapperTest::old_flag_;
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
if (FLAG_enable_third_party_heap) return;
Page* page = allocator()->AllocatePage(
MemoryAllocator::kRegular,
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
Page* page =
allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
allocator()->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->FreeQueuedChunks();
......@@ -341,17 +340,16 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
if (FLAG_enable_third_party_heap) return;
Page* page = allocator()->AllocatePage(
MemoryAllocator::kRegular,
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
Page* page =
allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
allocator()->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->TearDown();
......