Commit a8471820 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Initialize pages with placement-new

Define ctors for BasicMemoryChunk, ReadOnlyPage, MemoryChunk, Page and
LargePage, and use them with placement-new to initialize pages. A chunk
is now initialized in a single step as either a ReadOnlyPage, Page or
LargePage, whereas initialization previously happened in multiple
locations, starting with BasicMemoryChunk::Initialize.

Adding ctors to these classes should also improve debugging, since
debug info for classes without ctors is omitted when building with
`-fuse-ctor-homing`.
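
[Editorial note] For readers unfamiliar with the pattern, here is a minimal sketch of what
initializing a page header with placement-new and a real constructor looks like. The type
and field names below are simplified stand-ins chosen for illustration, not V8's actual
classes:

#include <cstddef>
#include <cstdint>
#include <new>

// Simplified stand-in for a page header that lives at the very start of a
// page-aligned memory region reserved by the allocator.
struct ChunkHeader {
  size_t size;
  uintptr_t area_start;
  uintptr_t area_end;

  // A real constructor initializes every field in one place and gives the
  // class constructor-homed debug info under -fuse-ctor-homing.
  ChunkHeader(size_t size, uintptr_t area_start, uintptr_t area_end)
      : size(size), area_start(area_start), area_end(area_end) {}
};

// `base` points at raw, suitably aligned memory owned by the allocator.
// Placement-new constructs the header in that memory without allocating.
ChunkHeader* InitializeChunk(void* base, size_t size, uintptr_t area_start,
                             uintptr_t area_end) {
  return new (base) ChunkHeader(size, area_start, area_end);
}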

Change-Id: Ib842bb9b1e93a6576cad8299b7c5dbfe299baa33
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3545092
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79638}
parent bd7f4823
@@ -910,7 +910,7 @@ enum class CompactionSpaceKind {
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum PageSize { kRegular, kLarge };
+enum class PageSize { kRegular, kLarge };
 enum class CodeFlushMode {
   kFlushBytecode,
...
@@ -9,6 +9,7 @@
 #include "src/heap/heap-write-barrier-inl.h"
 #include "src/heap/incremental-marking.h"
 #include "src/objects/heap-object.h"
+#include "src/utils/allocation.h"
 namespace v8 {
 namespace internal {
@@ -49,32 +50,19 @@ constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kIsLargePageMask;
 constexpr BasicMemoryChunk::MainThreadFlags
     BasicMemoryChunk::kSkipEvacuationSlotsRecordingMask;
-BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
-                                   Address area_end) {
-  size_ = size;
-  area_start_ = area_start;
-  area_end_ = area_end;
-}
-
-// static
-BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
-                                               size_t size, Address area_start,
-                                               Address area_end,
-                                               BaseSpace* owner,
-                                               VirtualMemory reservation) {
-  BasicMemoryChunk* chunk = FromAddress(base);
-  DCHECK_EQ(base, chunk->address());
-  new (chunk) BasicMemoryChunk(size, area_start, area_end);
-  chunk->heap_ = heap;
-  chunk->set_owner(owner);
-  chunk->reservation_ = std::move(reservation);
-  chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
-  chunk->allocated_bytes_ = chunk->area_size();
-  chunk->wasted_memory_ = 0;
-  chunk->marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
-  return chunk;
-}
+BasicMemoryChunk::BasicMemoryChunk(Heap* heap, BaseSpace* space,
+                                   size_t chunk_size, Address area_start,
+                                   Address area_end, VirtualMemory reservation)
+    : size_(chunk_size),
+      heap_(heap),
+      area_start_(area_start),
+      area_end_(area_end),
+      allocated_bytes_(area_end - area_start),
+      wasted_memory_(0),
+      high_water_mark_(area_start - reinterpret_cast<Address>(this)),
+      owner_(space),
+      reservation_(std::move(reservation)) {
+  marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
+}
 bool BasicMemoryChunk::InOldSpace() const {
...
@@ -129,7 +129,9 @@ class BasicMemoryChunk {
   static const intptr_t kAlignmentMask = kAlignment - 1;
-  BasicMemoryChunk(size_t size, Address area_start, Address area_end);
+  BasicMemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
+                   Address area_start, Address area_end,
+                   VirtualMemory reservation);
   static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
@@ -178,7 +180,7 @@ class BasicMemoryChunk {
   void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
   // Set or clear multiple flags at a time. `mask` indicates which flags are
   // should be replaced with new `flags`.
-  void SetFlags(MainThreadFlags flags, MainThreadFlags mask) {
+  void SetFlags(MainThreadFlags flags, MainThreadFlags mask = kAllFlagsMask) {
     main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
   }
@@ -254,11 +256,6 @@ class BasicMemoryChunk {
     return addr >= area_start() && addr <= area_end();
   }
-  static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
-                                      Address area_start, Address area_end,
-                                      BaseSpace* owner,
-                                      VirtualMemory reservation);
   size_t wasted_memory() const { return wasted_memory_; }
   void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
   size_t allocated_bytes() const { return allocated_bytes_; }
...
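[Editorial note] The SetFlags change above gives `mask` a default of kAllFlagsMask, which is
why later call sites in this CL can drop the explicit Page::kAllFlagsMask argument. A small,
self-contained illustration of that kind of masked flag update (names here are illustrative,
not V8's):

#include <cstdint>

using Flags = uint32_t;
constexpr Flags kAllFlagsMask = ~Flags{0};

struct ChunkFlags {
  Flags bits = 0;
  // `mask` selects which bits are replaced by `flags`; by default all of them.
  void SetFlags(Flags flags, Flags mask = kAllFlagsMask) {
    bits = (bits & ~mask) | (flags & mask);
  }
};

int main() {
  ChunkFlags f;
  f.SetFlags(0b1010);          // replaces every bit, no explicit mask needed
  f.SetFlags(0b0001, 0b0011);  // rewrites only the low two bits
  return 0;
}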
@@ -30,6 +30,21 @@ namespace internal {
 // order to figure out if it's a cleared weak reference or not.
 STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
+LargePage::LargePage(Heap* heap, BaseSpace* space, size_t chunk_size,
+                     Address area_start, Address area_end,
+                     VirtualMemory reservation, Executability executable)
+    : MemoryChunk(heap, space, chunk_size, area_start, area_end,
+                  std::move(reservation), executable, PageSize::kLarge) {
+  STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+  if (executable && chunk_size > LargePage::kMaxCodePageSize) {
+    FATAL("Code page is too large.");
+  }
+  SetFlag(MemoryChunk::LARGE_PAGE);
+  list_node().Initialize();
+}
 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                  Executability executable) {
   if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
@@ -107,7 +122,8 @@ void LargeObjectSpace::TearDown() {
                 DeleteEvent("LargeObjectChunk",
                             reinterpret_cast<void*>(page->address())));
     memory_chunk_list_.Remove(page);
-    heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, page);
+    heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
+                                     page);
   }
 }
@@ -195,7 +211,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
 LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
                                                Executability executable) {
   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
-      object_size, this, executable);
+      this, object_size, executable);
   if (page == nullptr) return nullptr;
   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
@@ -324,7 +340,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       }
     } else {
       RemovePage(current, size);
-      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, current);
+      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+                                       current);
     }
     current = next_current;
   }
@@ -544,7 +561,8 @@ void NewLargeObjectSpace::FreeDeadObjects(
     if (is_dead(object)) {
       freed_pages = true;
       RemovePage(page, size);
-      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
+      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+                                       page);
       if (FLAG_concurrent_marking && is_marking) {
         heap()->concurrent_marking()->ClearMemoryChunkData(page);
       }
...
@@ -31,6 +31,10 @@ class LargePage : public MemoryChunk {
   // already imposes on x64 and ia32 architectures.
   static const int kMaxCodePageSize = 512 * MB;
+  LargePage(Heap* heap, BaseSpace* space, size_t chunk_size, Address area_start,
+            Address area_end, VirtualMemory reservation,
+            Executability executable);
   static LargePage* FromHeapObject(HeapObject o) {
     DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
     return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
...
This diff is collapsed.
@@ -17,6 +17,8 @@
 #include "src/base/macros.h"
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/semaphore.h"
+#include "src/common/globals.h"
+#include "src/heap/basic-memory-chunk.h"
 #include "src/heap/code-range.h"
 #include "src/heap/memory-chunk.h"
 #include "src/heap/spaces.h"
@@ -44,15 +46,15 @@ class MemoryAllocator {
     Unmapper(Heap* heap, MemoryAllocator* allocator)
         : heap_(heap), allocator_(allocator) {
-      chunks_[kRegular].reserve(kReservedQueueingSlots);
-      chunks_[kPooled].reserve(kReservedQueueingSlots);
+      chunks_[ChunkQueueType::kRegular].reserve(kReservedQueueingSlots);
+      chunks_[ChunkQueueType::kPooled].reserve(kReservedQueueingSlots);
     }
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
-        AddMemoryChunkSafe(kRegular, chunk);
+        AddMemoryChunkSafe(ChunkQueueType::kRegular, chunk);
       } else {
-        AddMemoryChunkSafe(kNonRegular, chunk);
+        AddMemoryChunkSafe(ChunkQueueType::kNonRegular, chunk);
       }
     }
@@ -62,9 +64,9 @@ class MemoryAllocator {
       // been uncommitted.
       // (2) Try to steal any memory chunk of kPageSize that would've been
      // uncommitted.
-      MemoryChunk* chunk = GetMemoryChunkSafe(kPooled);
+      MemoryChunk* chunk = GetMemoryChunkSafe(ChunkQueueType::kPooled);
       if (chunk == nullptr) {
-        chunk = GetMemoryChunkSafe(kRegular);
+        chunk = GetMemoryChunkSafe(ChunkQueueType::kRegular);
         if (chunk != nullptr) {
           // For stolen chunks we need to manually free any allocated memory.
           chunk->ReleaseAllAllocatedMemory();
@@ -126,13 +128,13 @@ class MemoryAllocator {
     Heap* const heap_;
     MemoryAllocator* const allocator_;
     base::Mutex mutex_;
-    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    std::vector<MemoryChunk*> chunks_[ChunkQueueType::kNumberOfChunkQueues];
     std::unique_ptr<v8::JobHandle> job_handle_;
     friend class MemoryAllocator;
   };
-  enum AllocationMode {
+  enum class AllocationMode {
     // Regular allocation path. Does not use pool.
     kRegular,
@@ -140,7 +142,7 @@ class MemoryAllocator {
     kUsePool,
   };
-  enum FreeMode {
+  enum class FreeMode {
     // Frees page immediately on the main thread.
     kImmediately,
@@ -182,13 +184,14 @@ class MemoryAllocator {
   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
   // should be tried first.
   V8_EXPORT_PRIVATE Page* AllocatePage(
-      MemoryAllocator::AllocationMode alloc_mode, size_t size, Space* owner,
+      MemoryAllocator::AllocationMode alloc_mode, Space* space,
       Executability executable);
-  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
-                               Executability executable);
-  ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
+  V8_EXPORT_PRIVATE LargePage* AllocateLargePage(LargeObjectSpace* space,
+                                                 size_t object_size,
+                                                 Executability executable);
+  ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space);
   std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
       ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
@@ -216,15 +219,6 @@ class MemoryAllocator {
            address >= highest_ever_allocated_;
   }
-  // Returns a MemoryChunk in which the memory region from commit_area_size to
-  // reserve_area_size of the chunk area is reserved but not committed, it
-  // could be committed later by calling MemoryChunk::CommitArea.
-  V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
-                                               size_t commit_area_size,
-                                               Executability executable,
-                                               PageSize page_size,
-                                               BaseSpace* space);
   // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
   // internally memory is freed from |start_free| to the end of the reservation.
   // Additional memory beyond the page is not accounted though, so
@@ -265,14 +259,29 @@ class MemoryAllocator {
   void UnregisterReadOnlyPage(ReadOnlyPage* page);
  private:
-  // Returns a BasicMemoryChunk in which the memory region from commit_area_size
-  // to reserve_area_size of the chunk area is reserved but not committed, it
-  // could be committed later by calling MemoryChunk::CommitArea.
-  V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
-      size_t reserve_area_size, size_t commit_area_size,
-      Executability executable, BaseSpace* space);
-  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+  // Used to store all data about MemoryChunk allocation, e.g. in
+  // AllocateUninitializedChunk.
+  struct MemoryChunkAllocationResult {
+    void* start;
+    size_t size;
+    size_t area_start;
+    size_t area_end;
+    VirtualMemory reservation;
+  };
+  // Computes the size of a MemoryChunk from the size of the object_area and
+  // whether the chunk is executable or not.
+  static size_t ComputeChunkSize(size_t area_size, Executability executable);
+  // Internal allocation method for all pages/memory chunks. Returns data about
+  // the unintialized memory region.
+  V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
+  AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
+                             Executability executable, PageSize page_size);
+  // Internal raw allocation method that allocates an aligned MemoryChunk and
+  // sets the right memory permissions.
+  Address AllocateAlignedMemory(size_t chunk_size, size_t area_size,
                                 size_t alignment, Executability executable,
                                 void* hint, VirtualMemory* controller);
@@ -280,10 +289,11 @@ class MemoryAllocator {
   // it succeeded and false otherwise.
   bool CommitMemory(VirtualMemory* reservation);
-  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
-                                                    Address start,
-                                                    size_t commit_size,
-                                                    size_t reserved_size);
+  // Sets memory permissions on executable memory chunks. This entails page
+  // header (RW), guard pages (no access) and the object area (code modification
+  // permissions).
+  V8_WARN_UNUSED_RESULT bool SetPermissionsOnExecutableMemoryChunk(
+      VirtualMemory* vm, Address start, size_t area_size, size_t reserved_size);
   // Disallows any access on memory region owned by given reservation object.
   // Returns true if it succeeded and false otherwise.
@@ -304,7 +314,8 @@ class MemoryAllocator {
   // See AllocatePage for public interface. Note that currently we only
   // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
-  MemoryChunk* AllocatePagePooled(Space* owner);
+  base::Optional<MemoryChunkAllocationResult> AllocateUninitializedPageFromPool(
+      Space* space);
   // Frees a pooled page. Only used on tear-down and last-resort GCs.
   void FreePooledChunk(MemoryChunk* chunk);
@@ -314,7 +325,7 @@ class MemoryAllocator {
   // collector to rebuild page headers in the from space, which is
   // used as a marking stack and its page headers are destroyed.
   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
-                               PagedSpace* owner);
+                               PagedSpace* space);
   void UpdateAllocatedSpaceLimits(Address low, Address high) {
     // The use of atomic primitives does not guarantee correctness (wrt.
@@ -385,7 +396,7 @@ class MemoryAllocator {
   std::atomic<Address> lowest_ever_allocated_;
   std::atomic<Address> highest_ever_allocated_;
-  VirtualMemory last_chunk_;
+  base::Optional<VirtualMemory> reserved_chunk_at_virtual_memory_limit_;
   Unmapper unmapper_;
 #ifdef DEBUG
...
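[Editorial note] To see how the pieces in this header are meant to compose, here is a heavily
simplified, hypothetical sketch of the new two-step flow: the allocator hands back a plain
descriptor of uninitialized memory, and the caller constructs the concrete page type in place
with placement-new. None of the names below are V8's real API; the descriptor is backed by
malloc purely so the sketch is self-contained:

#include <cstddef>
#include <cstdlib>
#include <new>
#include <optional>

// Stand-in for a "MemoryChunkAllocationResult": it only describes a region.
struct UninitializedChunk {
  void* start;
  size_t size;
  size_t area_start;  // offset of the allocatable area within the chunk
  size_t area_end;
};

// Stand-in page header with a constructor, as in this CL.
struct PageHeader {
  size_t size, area_start, area_end;
  PageHeader(size_t size, size_t area_start, size_t area_end)
      : size(size), area_start(area_start), area_end(area_end) {}
};

// Hypothetical low-level step; V8 instead reserves aligned virtual memory.
std::optional<UninitializedChunk> AllocateUninitializedChunk(size_t area_size) {
  const size_t header_size = sizeof(PageHeader);
  const size_t total = header_size + area_size;
  void* start = std::malloc(total);
  if (!start) return std::nullopt;
  return UninitializedChunk{start, total, header_size, total};
}

PageHeader* AllocatePage(size_t area_size) {
  std::optional<UninitializedChunk> chunk =
      AllocateUninitializedChunk(area_size);
  if (!chunk) return nullptr;
  // The header lives at the start of the region; placement-new constructs it.
  return new (chunk->start)
      PageHeader(chunk->size, chunk->area_start, chunk->area_end);
}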
@@ -4,6 +4,7 @@
 #include "src/heap/memory-chunk-layout.h"
+#include "src/common/globals.h"
 #include "src/heap/marking.h"
 #include "src/heap/memory-allocator.h"
 #include "src/heap/memory-chunk.h"
@@ -46,7 +47,7 @@ intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
 size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
     AllocationSpace space) {
-  if (space == CODE_SPACE) {
+  if (space == CODE_SPACE || space == CODE_LO_SPACE) {
     return ObjectStartOffsetInCodePage();
   }
   return ObjectStartOffsetInDataPage();
...
@@ -7,6 +7,7 @@
 #include "src/base/platform/platform.h"
 #include "src/base/platform/wrappers.h"
 #include "src/common/globals.h"
+#include "src/heap/basic-memory-chunk.h"
 #include "src/heap/code-object-registry.h"
 #include "src/heap/memory-allocator.h"
 #include "src/heap/memory-chunk-inl.h"
@@ -118,92 +119,83 @@ PageAllocator::Permission DefaultWritableCodePermissions() {
 }  // namespace
-MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
-                                     Executability executable,
-                                     PageSize page_size) {
-  MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
-  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
-  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
-  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_SHARED],
-                                       nullptr);
+MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
+                         Address area_start, Address area_end,
+                         VirtualMemory reservation, Executability executable,
+                         PageSize page_size)
+    : BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
+                       std::move(reservation)) {
+  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_NEW], nullptr);
+  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_OLD], nullptr);
+  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_SHARED], nullptr);
   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
-    base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_CODE],
-                                         nullptr);
+    base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
   }
-  base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
-  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
-                                       nullptr);
-  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
-                                       nullptr);
-  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_SHARED],
-                                       nullptr);
-  chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
-  chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
+  base::AsAtomicPointer::Release_Store(&sweeping_slot_set_, nullptr);
+  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_NEW], nullptr);
+  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_OLD], nullptr);
+  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_SHARED],
+                                       nullptr);
+  invalidated_slots_[OLD_TO_NEW] = nullptr;
+  invalidated_slots_[OLD_TO_OLD] = nullptr;
   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
     // Not actually used but initialize anyway for predictability.
-    chunk->invalidated_slots_[OLD_TO_CODE] = nullptr;
+    invalidated_slots_[OLD_TO_CODE] = nullptr;
   }
-  chunk->progress_bar_.Initialize();
-  chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
-  chunk->page_protection_change_mutex_ = new base::Mutex();
-  chunk->write_unprotect_counter_ = 0;
-  chunk->mutex_ = new base::Mutex();
-  chunk->young_generation_bitmap_ = nullptr;
-  chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
-      0;
-  chunk->external_backing_store_bytes_
-      [ExternalBackingStoreType::kExternalString] = 0;
-  chunk->categories_ = nullptr;
-  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+  progress_bar_.Initialize();
+  set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
+  page_protection_change_mutex_ = new base::Mutex();
+  write_unprotect_counter_ = 0;
+  mutex_ = new base::Mutex();
+  young_generation_bitmap_ = nullptr;
+  external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
+  external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] = 0;
+  categories_ = nullptr;
+  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(this,
                                                                         0);
   if (executable == EXECUTABLE) {
-    chunk->SetFlag(IS_EXECUTABLE);
+    SetFlag(IS_EXECUTABLE);
     if (heap->write_protect_code_memory()) {
-      chunk->write_unprotect_counter_ =
+      write_unprotect_counter_ =
           heap->code_space_memory_modification_scope_depth();
     } else {
       size_t page_size = MemoryAllocator::GetCommitPageSize();
-      DCHECK(IsAligned(chunk->area_start(), page_size));
-      size_t area_size =
-          RoundUp(chunk->area_end() - chunk->area_start(), page_size);
-      CHECK(chunk->reservation_.SetPermissions(
-          chunk->area_start(), area_size, DefaultWritableCodePermissions()));
+      DCHECK(IsAligned(area_start_, page_size));
+      size_t area_size = RoundUp(area_end_ - area_start_, page_size);
+      CHECK(reservation_.SetPermissions(area_start_, area_size,
+                                        DefaultWritableCodePermissions()));
     }
   }
-  if (chunk->owner()->identity() == CODE_SPACE) {
-    chunk->code_object_registry_ = new CodeObjectRegistry();
+  if (owner()->identity() == CODE_SPACE) {
+    code_object_registry_ = new CodeObjectRegistry();
   } else {
-    chunk->code_object_registry_ = nullptr;
+    code_object_registry_ = nullptr;
   }
-  chunk->possibly_empty_buckets_.Initialize();
+  possibly_empty_buckets_.Initialize();
   if (page_size == PageSize::kRegular) {
-    chunk->active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
-                                     MemoryAllocator::GetCommitPageSizeBits(),
-                                     chunk->size());
+    active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
+                              MemoryAllocator::GetCommitPageSizeBits(), size());
   } else {
     // We do not track active system pages for large pages.
-    chunk->active_system_pages_.Clear();
+    active_system_pages_.Clear();
   }
   // All pages of a shared heap need to be marked with this flag.
-  if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
+  if (heap->IsShared()) SetFlag(MemoryChunk::IN_SHARED_HEAP);
 #ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
   chunk->object_start_bitmap_ = ObjectStartBitmap(chunk->area_start());
 #endif
 #ifdef DEBUG
-  ValidateOffsets(chunk);
+  ValidateOffsets(this);
 #endif
-  return chunk;
 }
 size_t MemoryChunk::CommittedPhysicalMemory() {
...
@@ -53,6 +53,10 @@ class MemoryChunk : public BasicMemoryChunk {
   // Maximum number of nested code memory modification scopes.
   static const int kMaxWriteUnprotectCounter = 3;
+  MemoryChunk(Heap* heap, BaseSpace* space, size_t size, Address area_start,
+              Address area_end, VirtualMemory reservation,
+              Executability executable, PageSize page_size);
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return cast(BasicMemoryChunk::FromAddress(a));
@@ -219,9 +223,6 @@ class MemoryChunk : public BasicMemoryChunk {
 #endif
  protected:
-  static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
-                                 Executability executable, PageSize page_size);
   // Release all memory allocated by the chunk. Should be called when memory
   // chunk is about to be freed.
   void ReleaseAllAllocatedMemory();
...
@@ -70,8 +70,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
       // Clear new space flags to avoid this page being treated as a new
       // space page that is potentially being swept.
       current_page->ClearFlags(Page::kIsInYoungGenerationMask);
-      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
-                                       current_page);
+      heap()->memory_allocator()->Free(
+          MemoryAllocator::FreeMode::kConcurrentlyAndPool, current_page);
       current_page = next_current;
     }
@@ -81,16 +81,14 @@ bool SemiSpace::EnsureCurrentCapacity() {
     while (actual_pages < expected_pages) {
       actual_pages++;
       current_page = heap()->memory_allocator()->AllocatePage(
-          MemoryAllocator::kUsePool,
-          MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
-          NOT_EXECUTABLE);
+          MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
       if (current_page == nullptr) return false;
       DCHECK_NOT_NULL(current_page);
       AccountCommitted(Page::kPageSize);
       IncrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
       memory_chunk_list_.PushBack(current_page);
       marking_state->ClearLiveness(current_page);
-      current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
+      current_page->SetFlags(first_page()->GetFlags());
       heap()->CreateFillerObjectAt(current_page->area_start(),
                                    static_cast<int>(current_page->area_size()),
                                    ClearRecordedSlots::kNo);
@@ -128,8 +126,7 @@ bool SemiSpace::Commit() {
     // collector. Therefore, they must be initialized with the same FreeList as
     // old pages.
     Page* new_page = heap()->memory_allocator()->AllocatePage(
-        MemoryAllocator::kUsePool,
-        MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
+        MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
     if (new_page == nullptr) {
       if (pages_added) RewindPages(pages_added);
       DCHECK(!IsCommitted());
@@ -155,8 +152,8 @@ bool SemiSpace::Uncommit() {
     MemoryChunk* chunk = memory_chunk_list_.front();
     DecrementCommittedPhysicalMemory(chunk->CommittedPhysicalMemory());
     memory_chunk_list_.Remove(chunk);
-    heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
-                                     chunk);
+    heap()->memory_allocator()->Free(
+        MemoryAllocator::FreeMode::kConcurrentlyAndPool, chunk);
   }
   current_page_ = nullptr;
   current_capacity_ = 0;
@@ -191,8 +188,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
       heap()->incremental_marking()->non_atomic_marking_state();
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
     Page* new_page = heap()->memory_allocator()->AllocatePage(
-        MemoryAllocator::kUsePool,
-        MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
+        MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
     if (new_page == nullptr) {
       if (pages_added) RewindPages(pages_added);
       return false;
@@ -215,8 +211,8 @@ void SemiSpace::RewindPages(int num_pages) {
     MemoryChunk* last = last_page();
     memory_chunk_list_.Remove(last);
     DecrementCommittedPhysicalMemory(last->CommittedPhysicalMemory());
-    heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
-                                     last);
+    heap()->memory_allocator()->Free(
+        MemoryAllocator::FreeMode::kConcurrentlyAndPool, last);
     num_pages--;
   }
 }
@@ -278,7 +274,7 @@ void SemiSpace::RemovePage(Page* page) {
 }
 void SemiSpace::PrependPage(Page* page) {
-  page->SetFlags(current_page()->GetFlags(), Page::kAllFlagsMask);
+  page->SetFlags(current_page()->GetFlags());
   page->set_owner(this);
   memory_chunk_list_.PushFront(page);
   current_capacity_ += Page::kPageSize;
...
@@ -107,7 +107,8 @@ void PagedSpace::TearDown() {
   while (!memory_chunk_list_.Empty()) {
     MemoryChunk* chunk = memory_chunk_list_.front();
     memory_chunk_list_.Remove(chunk);
-    heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
+    heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
+                                     chunk);
   }
   accounting_stats_.Clear();
 }
@@ -351,13 +352,9 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
   }
 }
-Page* PagedSpace::AllocatePage() {
-  return heap()->memory_allocator()->AllocatePage(
-      MemoryAllocator::kRegular, AreaSize(), this, executable());
-}
 Page* PagedSpace::Expand() {
-  Page* page = AllocatePage();
+  Page* page = heap()->memory_allocator()->AllocatePage(
+      MemoryAllocator::AllocationMode::kRegular, this, executable());
   if (page == nullptr) return nullptr;
   ConcurrentAllocationMutex guard(this);
   AddPage(page);
@@ -368,7 +365,8 @@ Page* PagedSpace::Expand() {
 base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
     size_t size_in_bytes) {
-  Page* page = AllocatePage();
+  Page* page = heap()->memory_allocator()->AllocatePage(
+      MemoryAllocator::AllocationMode::kRegular, this, executable());
   if (page == nullptr) return {};
   base::MutexGuard lock(&space_mutex_);
   AddPage(page);
@@ -528,7 +526,8 @@ void PagedSpace::ReleasePage(Page* page) {
   AccountUncommitted(page->size());
   DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
   accounting_stats_.DecreaseCapacity(page->area_size());
-  heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
+  heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+                                   page);
 }
 void PagedSpace::SetReadable() {
...
@@ -391,8 +391,6 @@ class V8_EXPORT_PRIVATE PagedSpace
   base::Optional<std::pair<Address, size_t>> ExpandBackground(
       size_t size_in_bytes);
-  Page* AllocatePage();
   // Sets up a linear allocation area that fits the given number of bytes.
   // Returns false if there is not enough space and the caller has to retry
   // after collecting garbage.
...
@@ -329,6 +329,19 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
   artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
 }
+ReadOnlyPage::ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
+                           Address area_start, Address area_end,
+                           VirtualMemory reservation)
+    : BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
+                       std::move(reservation)) {
+  allocated_bytes_ = 0;
+  SetFlags(Flag::NEVER_EVACUATE | Flag::READ_ONLY_HEAP);
+  heap->incremental_marking()
+      ->non_atomic_marking_state()
+      ->bitmap(this)
+      ->MarkAllBits();
+}
 void ReadOnlyPage::MakeHeaderRelocatable() {
   heap_ = nullptr;
   owner_ = nullptr;
@@ -612,7 +625,7 @@ void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
   FreeLinearAllocationArea();
   BasicMemoryChunk* chunk =
-      heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);
+      heap()->memory_allocator()->AllocateReadOnlyPage(this);
   capacity_ += AreaSize();
   accounting_stats_.IncreaseCapacity(chunk->area_size());
@@ -754,20 +767,6 @@ void ReadOnlySpace::ShrinkPages() {
   limit_ = pages_.back()->area_end();
 }
-ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
-  ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
-  page->allocated_bytes_ = 0;
-  page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
-  heap()
-      ->incremental_marking()
-      ->non_atomic_marking_state()
-      ->bitmap(chunk)
-      ->MarkAllBits();
-  chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);
-  return page;
-}
 SharedReadOnlySpace::SharedReadOnlySpace(
     Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
     : SharedReadOnlySpace(heap) {
...
@@ -26,6 +26,9 @@ class SnapshotData;
 class ReadOnlyPage : public BasicMemoryChunk {
  public:
+  ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
+               Address area_start, Address area_end, VirtualMemory reservation);
   // Clears any pointers in the header that point out of the page that would
   // otherwise make the header non-relocatable.
   void MakeHeaderRelocatable();
@@ -229,8 +232,6 @@ class ReadOnlySpace : public BaseSpace {
   // Return size of allocatable area on a page in this space.
   int AreaSize() const { return static_cast<int>(area_size_); }
-  ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);
   Address FirstPageAddress() const { return pages_.front()->address(); }
  protected:
...
@@ -50,6 +50,12 @@ STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
 // static
 constexpr Page::MainThreadFlags Page::kCopyOnFlipFlagsMask;
+Page::Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
+           Address area_end, VirtualMemory reservation,
+           Executability executable)
+    : MemoryChunk(heap, space, size, area_start, area_end,
+                  std::move(reservation), executable, PageSize::kRegular) {}
 void Page::AllocateFreeListCategories() {
   DCHECK_NULL(categories_);
   categories_ =
...
@@ -224,6 +224,9 @@ class Page : public MemoryChunk {
       MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
       MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);
+  Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
+       Address area_end, VirtualMemory reservation, Executability executable);
   // Returns the page containing a given address. The address ranges
   // from [page_addr .. page_addr + kPageSize[. This only works if the object
   // is in fact in a page.
...
@@ -113,9 +113,8 @@ class V8_NODISCARD TestCodePageAllocatorScope {
 static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
                               v8::PageAllocator* code_page_allocator,
-                              size_t reserve_area_size, size_t commit_area_size,
-                              Executability executable, PageSize page_size,
-                              Space* space) {
+                              size_t area_size, Executability executable,
+                              PageSize page_size, LargeObjectSpace* space) {
   TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
   TestCodePageAllocatorScope test_code_page_allocator_scope(
@@ -129,23 +128,23 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
   size_t guard_size =
       (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
-  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
-      reserve_area_size, commit_area_size, executable, page_size, space);
+  MemoryChunk* memory_chunk =
+      memory_allocator->AllocateLargePage(space, area_size, executable);
   size_t reserved_size =
       ((executable == EXECUTABLE))
           ? allocatable_memory_area_offset +
-                RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
+                RoundUp(area_size, page_allocator->CommitPageSize()) +
                 guard_size
-          : RoundUp(allocatable_memory_area_offset + reserve_area_size,
+          : RoundUp(allocatable_memory_area_offset + area_size,
                     page_allocator->CommitPageSize());
   CHECK(memory_chunk->size() == reserved_size);
   CHECK(memory_chunk->area_start() <
         memory_chunk->address() + memory_chunk->size());
   CHECK(memory_chunk->area_end() <=
         memory_chunk->address() + memory_chunk->size());
-  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
-  memory_allocator->Free(MemoryAllocator::kImmediately, memory_chunk);
+  CHECK(static_cast<size_t>(memory_chunk->area_size()) == area_size);
+  memory_allocator->Free(MemoryAllocator::FreeMode::kImmediately, memory_chunk);
 }
 static unsigned int PseudorandomAreaSize() {
@@ -160,12 +159,10 @@ TEST(MemoryChunk) {
   Heap* heap = isolate->heap();
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
-  size_t reserve_area_size = 1 * MB;
-  size_t initial_commit_area_size;
+  size_t area_size;
   for (int i = 0; i < 100; i++) {
-    initial_commit_area_size =
+    area_size =
         RoundUp(PseudorandomAreaSize(), page_allocator->CommitPageSize());
     // With CodeRange.
@@ -179,13 +176,11 @@ TEST(MemoryChunk) {
         code_range_reservation.size(), MemoryChunk::kAlignment,
         base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
-    VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
-                      initial_commit_area_size, EXECUTABLE, PageSize::kLarge,
-                      heap->code_space());
-    VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
-                      initial_commit_area_size, NOT_EXECUTABLE,
-                      PageSize::kLarge, heap->old_space());
+    VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
+                      EXECUTABLE, PageSize::kLarge, heap->code_lo_space());
+    VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
+                      NOT_EXECUTABLE, PageSize::kLarge, heap->lo_space());
   }
 }
@@ -203,7 +198,7 @@ TEST(MemoryAllocator) {
   CHECK(!faked_space.first_page());
   CHECK(!faked_space.last_page());
   Page* first_page = memory_allocator->AllocatePage(
-      MemoryAllocator::kRegular, faked_space.AreaSize(),
+      MemoryAllocator::AllocationMode::kRegular,
       static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
   faked_space.memory_chunk_list().PushBack(first_page);
@@ -216,7 +211,7 @@ TEST(MemoryAllocator) {
   // Again, we should get n or n - 1 pages.
   Page* other = memory_allocator->AllocatePage(
-      MemoryAllocator::kRegular, faked_space.AreaSize(),
+      MemoryAllocator::AllocationMode::kRegular,
      static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
   total_pages++;
   faked_space.memory_chunk_list().PushBack(other);
@@ -813,7 +808,7 @@ TEST(NoMemoryForNewPage) {
   LinearAllocationArea allocation_info;
   OldSpace faked_space(heap, &allocation_info);
   Page* page = memory_allocator->AllocatePage(
-      MemoryAllocator::kRegular, faked_space.AreaSize(),
+      MemoryAllocator::AllocationMode::kRegular,
       static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
   CHECK_NULL(page);
...
@@ -311,16 +311,15 @@ bool SequentialUnmapperTest::old_flag_;
 // See v8:5945.
 TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
   if (FLAG_enable_third_party_heap) return;
-  Page* page = allocator()->AllocatePage(
-      MemoryAllocator::kRegular,
-      MemoryChunkLayout::AllocatableMemoryInDataPage(),
-      static_cast<PagedSpace*>(heap()->old_space()),
-      Executability::NOT_EXECUTABLE);
+  Page* page =
+      allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
+                                static_cast<PagedSpace*>(heap()->old_space()),
+                                Executability::NOT_EXECUTABLE);
   EXPECT_NE(nullptr, page);
   const size_t page_size = tracking_page_allocator()->AllocatePageSize();
   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
-  allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
+  allocator()->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);
   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
   unmapper()->FreeQueuedChunks();
@@ -341,9 +340,8 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
 // See v8:5945.
 TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
   if (FLAG_enable_third_party_heap) return;
-  Page* page = allocator()->AllocatePage(
-      MemoryAllocator::kRegular,
-      MemoryChunkLayout::AllocatableMemoryInDataPage(),
-      static_cast<PagedSpace*>(heap()->old_space()),
-      Executability::NOT_EXECUTABLE);
+  Page* page =
+      allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
+                                static_cast<PagedSpace*>(heap()->old_space()),
+                                Executability::NOT_EXECUTABLE);
   EXPECT_NE(nullptr, page);
@@ -351,7 +349,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
-  allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
+  allocator()->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);
   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
   unmapper()->TearDown();
...