Commit 32b911f9 authored by Dan Elphick, committed by Commit Bot

Reland "[heap] Make ReadOnlySpace use bump pointer allocation"

This reverts commit f78d69fa.

With https://chromium-review.googlesource.com/c/v8/v8/+/2243216,
incorrect MemoryChunk::FromHeapObject uses are now fixed.

Original change's description:
> Revert "[heap] Make ReadOnlySpace use bump pointer allocation"
> 
> This reverts commit 81c34968 and also
> 490f3580 which depends on the former.
> 
> Reason for revert: Breaks CFI tests in chromium https://ci.chromium.org/p/chromium/builders/ci/Linux%20CFI/17438
> 
> Original change's description:
> > [heap] Make ReadOnlySpace use bump pointer allocation
> >
> > This changes ReadOnlySpace to no longer be a PagedSpace but instead a
> > BaseSpace. BaseSpace is a new base class that Space inherits from and
> > which has no allocation methods and does not dictate how the pages
> > should be held.
> >
> > ReadOnlySpace, unlike Space, holds its pages as a
> > std::vector<ReadOnlyPage>, where ReadOnlyPage directly subclasses
> > BasicMemoryChunk, meaning the pages do not have prev_ and next_ pointers
> > and cannot be held in a heap::List. This is desirable since with pointer
> > compression we would like to remap these pages to different memory
> > addresses, which would be impossible with a heap::List.
> >
> > Since ReadOnlySpace no longer uses most of the code from the other
> > Spaces it makes sense to simplify its memory allocation to use a simple
> > bump pointer and always allocate a new page whenever an allocation
> > exceeds the remaining space on the final page.
> >
> > Change-Id: Iee6d9f96cfb174b4026ee671ee4f897909b38418
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2209060
> > Commit-Queue: Dan Elphick <delphick@chromium.org>
> > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#68137}
> 
> TBR=ulan@chromium.org,delphick@chromium.org
> 
> # Not skipping CQ checks because original CL landed > 1 day ago.
> 
> Change-Id: I68c9834872e55eb833be081f8ff99b786bfa9894
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2232552
> Commit-Queue: Dan Elphick <delphick@chromium.org>
> Reviewed-by: Dan Elphick <delphick@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#68211}

TBR=ulan@chromium.org,delphick@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Change-Id: Id5b3cce41b5dec1dca816c05848d183790b1cc05
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2250254
Reviewed-by: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68407}
parent af5f156d
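
Before reading the diff, here is a minimal, hypothetical C++ sketch of the bump-pointer allocation scheme the commit message describes: pages held in a plain std::vector rather than an intrusive heap::List, with a new page opened whenever an allocation exceeds the remaining space on the final page. All names here (ToyReadOnlySpace, ToyPage, kToyPageSize) and the malloc-backed pages are illustrative inventions, not V8 API; the sketch plays roughly the role that EnsureSpaceForAllocation and the AllocateRaw* methods play in the reworked ReadOnlySpace below.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <vector>

// Hypothetical sketch only; names and page sizes are not V8's.
constexpr size_t kToyPageSize = 4096;

struct ToyPage {
  uint8_t* start;  // first allocatable byte
  uint8_t* end;    // one past the last allocatable byte
};

class ToyReadOnlySpace {
 public:
  ~ToyReadOnlySpace() {
    for (ToyPage& page : pages_) std::free(page.start);
  }

  // Bump-pointer allocation: hand out top_ and advance it. A fresh page is
  // allocated whenever the request does not fit on the final page.
  // Assumes bytes <= kToyPageSize.
  void* Allocate(size_t bytes) {
    if (pages_.empty() || top_ + bytes > limit_) AddPage();
    void* result = top_;
    top_ += bytes;
    return result;
  }

 private:
  void AddPage() {
    uint8_t* mem = static_cast<uint8_t*>(std::malloc(kToyPageSize));
    pages_.push_back({mem, mem + kToyPageSize});
    top_ = mem;
    limit_ = mem + kToyPageSize;
  }

  // A plain vector, not an intrusive list: pages carry no prev_/next_
  // pointers, so in principle they could be remapped to other addresses.
  std::vector<ToyPage> pages_;
  uint8_t* top_ = nullptr;    // next free byte on the final page
  uint8_t* limit_ = nullptr;  // end of the final page
};

Sealing the space then amounts to write-protecting every page in pages_, which is why the space needs no free list or sweeping support.
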
@@ -8427,8 +8427,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
   i::ReadOnlySpace* ro_space = heap->read_only_space();
   heap_statistics->total_heap_size_ += ro_space->CommittedMemory();
   heap_statistics->total_physical_size_ += ro_space->CommittedPhysicalMemory();
-  heap_statistics->total_available_size_ += ro_space->Available();
-  heap_statistics->used_heap_size_ += ro_space->SizeOfObjects();
+  heap_statistics->used_heap_size_ += ro_space->Size();
 #endif  // V8_SHARED_RO_HEAP
   heap_statistics->total_heap_size_executable_ =
@@ -8462,18 +8461,26 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   i::Heap* heap = isolate->heap();
-  i::Space* space = heap->space(static_cast<int>(index));
   i::AllocationSpace allocation_space = static_cast<i::AllocationSpace>(index);
   space_statistics->space_name_ = i::Heap::GetSpaceName(allocation_space);
-  if (allocation_space == i::RO_SPACE && V8_SHARED_RO_HEAP_BOOL) {
-    // RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared.
-    space_statistics->space_size_ = 0;
-    space_statistics->space_used_size_ = 0;
-    space_statistics->space_available_size_ = 0;
-    space_statistics->physical_space_size_ = 0;
+  if (allocation_space == i::RO_SPACE) {
+    if (V8_SHARED_RO_HEAP_BOOL) {
+      // RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared.
+      space_statistics->space_size_ = 0;
+      space_statistics->space_used_size_ = 0;
+      space_statistics->space_available_size_ = 0;
+      space_statistics->physical_space_size_ = 0;
+    } else {
+      i::ReadOnlySpace* space = heap->read_only_space();
+      space_statistics->space_size_ = space->CommittedMemory();
+      space_statistics->space_used_size_ = space->Size();
+      space_statistics->space_available_size_ = 0;
+      space_statistics->physical_space_size_ = space->CommittedPhysicalMemory();
+    }
   } else {
+    i::Space* space = heap->space(static_cast<int>(index));
     space_statistics->space_size_ = space->CommittedMemory();
     space_statistics->space_used_size_ = space->SizeOfObjects();
     space_statistics->space_available_size_ = space->Available();
@@ -3291,15 +3291,15 @@ void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
                           AddressToString(isolate_address));
   const uintptr_t ro_space_firstpage_address =
-      reinterpret_cast<uintptr_t>(heap()->read_only_space()->first_page());
+      heap()->read_only_space()->FirstPageAddress();
   add_crash_key_callback_(v8::CrashKeyId::kReadonlySpaceFirstPageAddress,
                           AddressToString(ro_space_firstpage_address));
   const uintptr_t map_space_firstpage_address =
-      reinterpret_cast<uintptr_t>(heap()->map_space()->first_page());
+      heap()->map_space()->FirstPageAddress();
   add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress,
                           AddressToString(map_space_firstpage_address));
   const uintptr_t code_space_firstpage_address =
-      reinterpret_cast<uintptr_t>(heap()->code_space()->first_page());
+      heap()->code_space()->FirstPageAddress();
   add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress,
                           AddressToString(code_space_firstpage_address));
 }
@@ -42,7 +42,8 @@ void BasicMemoryChunk::ReleaseMarkingBitmap() {
 // static
 BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
                                                size_t size, Address area_start,
-                                               Address area_end, Space* owner,
+                                               Address area_end,
+                                               BaseSpace* owner,
                                                VirtualMemory reservation) {
   BasicMemoryChunk* chunk = FromAddress(base);
   DCHECK_EQ(base, chunk->address());
@@ -6,6 +6,7 @@
 #define V8_HEAP_BASIC_MEMORY_CHUNK_H_

 #include <type_traits>
+#include <unordered_map>

 #include "src/base/atomic-utils.h"
 #include "src/common/globals.h"
@@ -17,7 +18,7 @@
 namespace v8 {
 namespace internal {

-class Space;
+class BaseSpace;

 class BasicMemoryChunk {
  public:
@@ -151,9 +152,9 @@ class BasicMemoryChunk {
   }

   // Gets the chunk's owner or null if the space has been detached.
-  Space* owner() const { return owner_; }
+  BaseSpace* owner() const { return owner_; }

-  void set_owner(Space* space) { owner_ = space; }
+  void set_owner(BaseSpace* space) { owner_ = space; }

   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
   void SetFlag(Flag flag) {
@@ -265,7 +266,8 @@ class BasicMemoryChunk {
   static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                       Address area_start, Address area_end,
-                                      Space* owner, VirtualMemory reservation);
+                                      BaseSpace* owner,
+                                      VirtualMemory reservation);

   size_t wasted_memory() { return wasted_memory_; }
   void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
@@ -378,7 +380,7 @@ class BasicMemoryChunk {
   std::atomic<intptr_t> high_water_mark_;

   // The space owning this memory chunk.
-  std::atomic<Space*> owner_;
+  std::atomic<BaseSpace*> owner_;

   // If the chunk needs to remember its memory reservation, it is stored here.
   VirtualMemory reservation_;
@@ -241,8 +241,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
     DCHECK(!large_object);
     DCHECK(CanAllocateInReadOnlySpace());
     DCHECK_EQ(AllocationOrigin::kRuntime, origin);
-    allocation =
-        read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
+    allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
   } else {
     UNREACHABLE();
   }
@@ -483,8 +483,7 @@ void Heap::PrintShortHeapStatistics() {
                "Read-only space, used: %6zu KB"
                ", available: %6zu KB"
                ", committed: %6zu KB\n",
-               read_only_space_->Size() / KB,
-               read_only_space_->Available() / KB,
+               read_only_space_->Size() / KB, size_t{0},
                read_only_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_,
                "New space, used: %6zu KB"
@@ -535,8 +534,8 @@ void Heap::PrintShortHeapStatistics() {
                "All spaces, used: %6zu KB"
                ", available: %6zu KB"
                ", committed: %6zu KB\n",
-               (this->SizeOfObjects() + ro_space->SizeOfObjects()) / KB,
-               (this->Available() + ro_space->Available()) / KB,
+               (this->SizeOfObjects() + ro_space->Size()) / KB,
+               (this->Available()) / KB,
                (this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
   PrintIsolate(isolate_,
                "Unmapper buffering %zu chunks of committed: %6zu KB\n",
@@ -1977,6 +1976,9 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
 #else
         if (space == NEW_SPACE) {
           allocation = new_space()->AllocateRawUnaligned(size);
+        } else if (space == RO_SPACE) {
+          allocation = read_only_space()->AllocateRaw(
+              size, AllocationAlignment::kWordAligned);
         } else {
           // The deserializer will update the skip list.
           allocation = paged_space(space)->AllocateRawUnaligned(size);
@@ -3029,10 +3031,12 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
 #ifdef DEBUG
 void VerifyNoNeedToClearSlots(Address start, Address end) {
-  BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(start);
+  BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
+  if (basic_chunk->InReadOnlySpace()) return;
+  MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
   // TODO(ulan): Support verification of large pages.
   if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
-  Space* space = chunk->owner();
+  BaseSpace* space = chunk->owner();
   if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
   space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
 }
@@ -3095,7 +3099,7 @@ bool Heap::InOffThreadSpace(HeapObject heap_object) {
 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
   return false;  // currently unsupported
 #else
-  Space* owner = BasicMemoryChunk::FromHeapObject(heap_object)->owner();
+  BaseSpace* owner = BasicMemoryChunk::FromHeapObject(heap_object)->owner();
   if (owner->identity() == OLD_SPACE) {
     // TODO(leszeks): Should we exclude compaction spaces here?
     return static_cast<PagedSpace*>(owner)->is_off_thread_space();
@@ -4214,28 +4218,6 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
 }

 #ifdef VERIFY_HEAP
-class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
- public:
-  explicit VerifyReadOnlyPointersVisitor(Heap* heap)
-      : VerifyPointersVisitor(heap) {}
-
- protected:
-  void VerifyPointers(HeapObject host, MaybeObjectSlot start,
-                      MaybeObjectSlot end) override {
-    if (!host.is_null()) {
-      CHECK(ReadOnlyHeap::Contains(host.map()));
-    }
-    VerifyPointersVisitor::VerifyPointers(host, start, end);
-
-    for (MaybeObjectSlot current = start; current < end; ++current) {
-      HeapObject heap_object;
-      if ((*current)->GetHeapObject(&heap_object)) {
-        CHECK(ReadOnlyHeap::Contains(heap_object));
-      }
-    }
-  }
-};
-
 void Heap::Verify() {
   CHECK(HasBeenSetUp());
   SafepointScope safepoint_scope(this);
@@ -4275,8 +4257,7 @@ void Heap::Verify() {
 void Heap::VerifyReadOnlyHeap() {
   CHECK(!read_only_space_->writable());
-  VerifyReadOnlyPointersVisitor read_only_visitor(this);
-  read_only_space_->Verify(isolate(), &read_only_visitor);
+  read_only_space_->Verify(isolate());
 }

 class SlotVerifyingVisitor : public ObjectVisitor {
@@ -5353,13 +5334,15 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
   DCHECK_NOT_NULL(ro_heap);
   DCHECK_IMPLIES(read_only_space_ != nullptr,
                  read_only_space_ == ro_heap->read_only_space());
-  space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
+  space_[RO_SPACE] = nullptr;
+  read_only_space_ = ro_heap->read_only_space();
 }

 void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
   CHECK(V8_SHARED_RO_HEAP_BOOL);
   delete read_only_space_;
-  space_[RO_SPACE] = read_only_space_ = space;
+
+  read_only_space_ = space;
 }

 void Heap::SetUpSpaces() {
@@ -5660,7 +5643,7 @@ void Heap::TearDown() {
   tracer_.reset();

   isolate()->read_only_heap()->OnHeapTearDown();
-  space_[RO_SPACE] = read_only_space_ = nullptr;
+  read_only_space_ = nullptr;
   for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
     delete space_[i];
     space_[i] = nullptr;
@@ -7,11 +7,13 @@
 #include <cinttypes>

 #include "src/base/address-region.h"
+#include "src/common/globals.h"
 #include "src/execution/isolate.h"
 #include "src/flags/flags.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/memory-chunk.h"
+#include "src/heap/read-only-spaces.h"
 #include "src/logging/log.h"

 namespace v8 {
@@ -372,10 +374,9 @@ Address MemoryAllocator::AllocateAlignedMemory(
   return base;
 }

-MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
-                                            size_t commit_area_size,
-                                            Executability executable,
-                                            Space* owner) {
+V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
+    size_t reserve_area_size, size_t commit_area_size, Executability executable,
+    BaseSpace* owner) {
   DCHECK_LE(commit_area_size, reserve_area_size);
   size_t chunk_size;
@@ -483,19 +484,32 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
       size_executable_ -= chunk_size;
     }
     CHECK(last_chunk_.IsReserved());
-    return AllocateChunk(reserve_area_size, commit_area_size, executable,
-                         owner);
+    return AllocateBasicChunk(reserve_area_size, commit_area_size, executable,
+                              owner);
   }

+  BasicMemoryChunk* chunk =
+      BasicMemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+                                   owner, std::move(reservation));
+
+  return chunk;
+}
+
+MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
+                                            size_t commit_area_size,
+                                            Executability executable,
+                                            BaseSpace* owner) {
+  BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
+      reserve_area_size, commit_area_size, executable, owner);
+
   MemoryChunk* chunk =
-      MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
-                              executable, owner, std::move(reservation));
+      MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);

   if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
   return chunk;
 }

-void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
+void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
+                                        Address start_free,
                                         size_t bytes_to_free,
                                         Address new_area_end) {
   VirtualMemory* reservation = chunk->reserved_memory();
@@ -519,22 +533,42 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
   size_ -= released_bytes;
 }

-void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
+void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
+                                       Executability executable) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
   VirtualMemory* reservation = chunk->reserved_memory();
   const size_t size =
       reservation->IsReserved() ? reservation->size() : chunk->size();
   DCHECK_GE(size_, static_cast<size_t>(size));
   size_ -= size;
-  if (chunk->executable() == EXECUTABLE) {
+  if (executable == EXECUTABLE) {
     DCHECK_GE(size_executable_, size);
     size_executable_ -= size;
+    UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk));
   }
-  if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);

   chunk->SetFlag(MemoryChunk::UNREGISTERED);
 }

+void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
+  UnregisterMemory(chunk, chunk->executable());
+}
+
+void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
+  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+
+  UnregisterMemory(chunk);
+  chunk->SetFlag(MemoryChunk::PRE_FREED);
+  chunk->ReleaseMarkingBitmap();
+
+  VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    reservation->Free();
+  } else {
+    // Only read-only pages can have non-initialized reservation object.
+    FreeMemory(page_allocator(NOT_EXECUTABLE), chunk->address(), chunk->size());
+  }
+}
+
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
@@ -547,20 +581,15 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
   DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+  DCHECK(!chunk->InReadOnlySpace());
   chunk->ReleaseAllAllocatedMemory();

   VirtualMemory* reservation = chunk->reserved_memory();
   if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
     UncommitMemory(reservation);
   } else {
-    if (reservation->IsReserved()) {
-      reservation->Free();
-    } else {
-      // Only read-only pages can have non-initialized reservation object.
-      DCHECK_EQ(RO_SPACE, chunk->owner_identity());
-      FreeMemory(page_allocator(chunk->executable()), chunk->address(),
-                 chunk->size());
-    }
+    DCHECK(reservation->IsReserved());
+    reservation->Free();
   }
 }
@@ -630,6 +659,16 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
         size_t size, SemiSpace* owner, Executability executable);

+ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
+                                                    ReadOnlySpace* owner) {
+  BasicMemoryChunk* chunk = nullptr;
+  if (chunk == nullptr) {
+    chunk = AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
+  }
+  if (chunk == nullptr) return nullptr;
+  return owner->InitializePage(chunk);
+}
+
 LargePage* MemoryAllocator::AllocateLargePage(size_t size,
                                               LargeObjectSpace* owner,
                                               Executability executable) {
@@ -655,8 +694,10 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size, kZapValue);
   }
-  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
-                          NOT_EXECUTABLE, owner, std::move(reservation));
+  BasicMemoryChunk* basic_chunk =
+      BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
+                                   area_end, owner, std::move(reservation));
+  MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE);
   size_ += size;
   return chunk;
 }
@@ -27,6 +27,7 @@ namespace internal {

 class Heap;
 class Isolate;
+class ReadOnlyPage;

 // The process-wide singleton that keeps track of code range regions with the
 // intention to reuse free code range regions as a workaround for CFG memory
@@ -192,9 +193,12 @@ class MemoryAllocator {
   LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                                Executability executable);

+  ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
+
   template <MemoryAllocator::FreeMode mode = kFull>
   EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
   void Free(MemoryChunk* chunk);
+  void FreeReadOnlyPage(ReadOnlyPage* chunk);

   // Returns allocated spaces in bytes.
   size_t Size() const { return size_; }
@@ -215,13 +219,20 @@ class MemoryAllocator {
            address >= highest_ever_allocated_;
   }

+  // Returns a BasicMemoryChunk in which the memory region from commit_area_size
+  // to reserve_area_size of the chunk area is reserved but not committed, it
+  // could be committed later by calling MemoryChunk::CommitArea.
+  V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
+      size_t reserve_area_size, size_t commit_area_size,
+      Executability executable, BaseSpace* space);
+
   // Returns a MemoryChunk in which the memory region from commit_area_size to
   // reserve_area_size of the chunk area is reserved but not committed, it
   // could be committed later by calling MemoryChunk::CommitArea.
   V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
                                                size_t commit_area_size,
                                                Executability executable,
-                                               Space* space);
+                                               BaseSpace* space);

   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                 size_t alignment, Executability executable,
@@ -233,7 +244,7 @@ class MemoryAllocator {
   // internally memory is freed from |start_free| to the end of the reservation.
   // Additional memory beyond the page is not accounted though, so
   // |bytes_to_free| is computed by the caller.
-  void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
+  void PartialFreeMemory(BasicMemoryChunk* chunk, Address start_free,
                          size_t bytes_to_free, Address new_area_end);

   // Checks if an allocated MemoryChunk was intended to be used for executable
@@ -290,21 +301,24 @@ class MemoryAllocator {
   // Performs all necessary bookkeeping to free the memory, but does not free
   // it.
   void UnregisterMemory(MemoryChunk* chunk);
+  void UnregisterMemory(BasicMemoryChunk* chunk,
+                        Executability executable = NOT_EXECUTABLE);

  private:
   void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
                                    size_t requested);

-  // PreFreeMemory logically frees the object, i.e., it unregisters the memory,
-  // logs a delete event and adds the chunk to remembered unmapped pages.
+  // PreFreeMemory logically frees the object, i.e., it unregisters the
+  // memory, logs a delete event and adds the chunk to remembered unmapped
+  // pages.
   void PreFreeMemory(MemoryChunk* chunk);

   // PerformFreeMemory can be called concurrently when PreFree was executed
   // before.
   void PerformFreeMemory(MemoryChunk* chunk);

-  // See AllocatePage for public interface. Note that currently we only support
-  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
+  // See AllocatePage for public interface. Note that currently we only
+  // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
   template <typename SpaceType>
   MemoryChunk* AllocatePagePooled(SpaceType* owner);
@@ -350,15 +364,16 @@ class MemoryAllocator {
   VirtualMemory code_reservation_;

-  // Page allocator used for allocating data pages. Depending on the
-  // configuration it may be a page allocator instance provided by v8::Platform
-  // or a BoundedPageAllocator (when pointer compression is enabled).
+  // Page allocator used for allocating data pages. Depending on the
+  // configuration it may be a page allocator instance provided by
+  // v8::Platform or a BoundedPageAllocator (when pointer compression is
+  // enabled).
   v8::PageAllocator* data_page_allocator_;

-  // Page allocator used for allocating code pages. Depending on the
-  // configuration it may be a page allocator instance provided by v8::Platform
-  // or a BoundedPageAllocator (when pointer compression is enabled or
-  // on those 64-bit architectures where pc-relative 32-bit displacement
-  // can be used for call and jump instructions).
+  // Page allocator used for allocating code pages. Depending on the
+  // configuration it may be a page allocator instance provided by
+  // v8::Platform or a BoundedPageAllocator (when pointer compression is
+  // enabled or on those 64-bit architectures where pc-relative 32-bit
+  // displacement can be used for call and jump instructions).
   v8::PageAllocator* code_page_allocator_;

   // A part of the |code_reservation_| that may contain executable code
@@ -371,10 +386,11 @@ class MemoryAllocator {
   // optionally existing page in the beginning of the |code_range_|.
   // So, summarizing all above, the following conditions hold:
   // 1) |code_reservation_| >= |code_range_|
-  // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
-  // 3) |code_reservation_| is AllocatePageSize()-aligned
-  // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
-  // 5) |code_range_| is CommitPageSize()-aligned
+  // 2) |code_range_| >= |optional RW pages| +
+  // |code_page_allocator_instance_|. 3) |code_reservation_| is
+  // AllocatePageSize()-aligned 4) |code_page_allocator_instance_| is
+  // MemoryChunk::kAlignment-aligned 5) |code_range_| is
+  // CommitPageSize()-aligned
   std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

   // Maximum space size in bytes.
@@ -159,14 +159,9 @@ PageAllocator::Permission DefaultWritableCodePermissions() {

 }  // namespace

-MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
-                                     Address area_start, Address area_end,
-                                     Executability executable, Space* owner,
-                                     VirtualMemory reservation) {
-  MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(base);
-  DCHECK_EQ(base, chunk->address());
-  BasicMemoryChunk::Initialize(heap, base, size, area_start, area_end, owner,
-                               std::move(reservation));
+MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
+                                     Executability executable) {
+  MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);

   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
@@ -194,14 +189,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
                                                                         0);

-  if (owner->identity() == RO_SPACE) {
-    heap->incremental_marking()
-        ->non_atomic_marking_state()
-        ->bitmap(chunk)
-        ->MarkAllBits();
-    chunk->SetFlag(READ_ONLY_HEAP);
-  }
-
   if (executable == EXECUTABLE) {
     chunk->SetFlag(IS_EXECUTABLE);
     if (heap->write_protect_code_memory()) {
@@ -217,7 +204,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
     }
   }

-  if (owner->identity() == CODE_SPACE) {
+  if (chunk->owner()->identity() == CODE_SPACE) {
     chunk->code_object_registry_ = new CodeObjectRegistry();
   } else {
     chunk->code_object_registry_ = nullptr;
@@ -233,6 +233,10 @@ class MemoryChunk : public BasicMemoryChunk {
     return external_backing_store_bytes_[type];
   }

+  Space* owner() const {
+    return reinterpret_cast<Space*>(BasicMemoryChunk::owner());
+  }
+
   // Gets the chunk's allocation space, potentially dealing with a null owner_
   // (like read-only chunks have).
   inline AllocationSpace owner_identity() const;
@@ -267,10 +271,8 @@ class MemoryChunk : public BasicMemoryChunk {
   void ReleaseAllocatedMemoryNeededForWritableChunk();

  protected:
-  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
-                                 Address area_start, Address area_end,
-                                 Executability executable, Space* owner,
-                                 VirtualMemory reservation);
+  static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
+                                 Executability executable);

   // Release all memory allocated by the chunk. Should be called when memory
   // chunk is about to be freed.
@@ -380,7 +380,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {

 size_t NewSpace::CommittedPhysicalMemory() {
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = to_space_.CommittedPhysicalMemory();
   if (from_space_.is_committed()) {
     size += from_space_.CommittedPhysicalMemory();
@@ -469,7 +469,7 @@ void NewSpace::UpdateLinearAllocationArea() {
   DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());

   Address new_top = to_space_.page_low();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   allocation_info_.Reset(new_top, to_space_.page_high());
   // The order of the following two stores is important.
   // See the corresponding loads in ConcurrentMarking::Run.
@@ -33,9 +33,7 @@ HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
     DCHECK_LE(cur_addr_, cur_end_);
     if (!obj.IsFreeSpaceOrFiller()) {
       if (obj.IsCode()) {
-        DCHECK_IMPLIES(
-            space_->identity() != CODE_SPACE,
-            space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
+        DCHECK_EQ(space_->identity(), CODE_SPACE);
         DCHECK_CODEOBJECT_SIZE(obj_size, space_);
       } else {
         DCHECK_OBJECT_SIZE(obj_size);
@@ -127,7 +125,6 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
 AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
                                                   AllocationOrigin origin) {
-  DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
   if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
     return AllocationResult::Retry(identity());
   }
@@ -145,8 +142,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
 AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                 AllocationAlignment alignment,
                                                 AllocationOrigin origin) {
-  DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
-  DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
+  DCHECK_EQ(identity(), OLD_SPACE);
   int allocation_size = size_in_bytes;
   HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
   if (object.is_null()) {
@@ -49,8 +49,7 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
   heap->mark_compact_collector()->EnsureSweepingCompleted();
 #ifdef DEBUG
   AllocationSpace owner = page->owner_identity();
-  DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
-         owner == CODE_SPACE);
+  DCHECK(owner == OLD_SPACE || owner == MAP_SPACE || owner == CODE_SPACE);
 #endif  // DEBUG
 }
@@ -114,12 +113,11 @@ void PagedSpace::RefillFreeList() {
   // Any PagedSpace might invoke RefillFreeList. We filter all but our old
   // generation spaces out.
   if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
-      identity() != MAP_SPACE && identity() != RO_SPACE) {
+      identity() != MAP_SPACE) {
     return;
   }
   DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
   DCHECK_IMPLIES(is_local_space(), is_compaction_space());
-  DCHECK(!IsDetached());
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   size_t added = 0;
@@ -237,7 +235,7 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
 size_t PagedSpace::CommittedPhysicalMemory() {
   if (!base::OS::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
   for (Page* page : *this) {
     size += page->CommittedPhysicalMemory();
@@ -323,7 +321,7 @@ void PagedSpace::ResetFreeList() {
 void PagedSpace::ShrinkImmortalImmovablePages() {
   DCHECK(!heap()->deserialization_complete());
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   FreeLinearAllocationArea();
   ResetFreeList();
   for (Page* page : *this) {
@@ -693,15 +691,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
   }

   for (Page* page : *this) {
-#ifdef V8_SHARED_RO_HEAP
-    if (identity() == RO_SPACE) {
-      CHECK_NULL(page->owner());
-    } else {
-      CHECK_EQ(page->owner(), this);
-    }
-#else
     CHECK_EQ(page->owner(), this);
-#endif

     for (int i = 0; i < kNumTypes; i++) {
       external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
@@ -782,7 +772,6 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
 }

 void PagedSpace::VerifyLiveBytes() {
-  DCHECK_NE(identity(), RO_SPACE);
   IncrementalMarking::MarkingState* marking_state =
       heap()->incremental_marking()->marking_state();
   for (Page* page : *this) {
@@ -318,7 +318,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   void SetTopAndLimit(Address top, Address limit) {
     DCHECK(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
     allocation_info_.Reset(top, limit);
   }

   void DecreaseLimit(Address new_limit);
@@ -10,6 +10,7 @@
 #include "src/base/lazy-instance.h"
 #include "src/base/lsan.h"
 #include "src/base/platform/mutex.h"
+#include "src/heap/basic-memory-chunk.h"
 #include "src/heap/heap-write-barrier-inl.h"
 #include "src/heap/memory-chunk.h"
 #include "src/heap/read-only-spaces.h"
@@ -137,7 +138,7 @@ ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(
 void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
   DCHECK(!init_complete_);
-  read_only_space_->ShrinkImmortalImmovablePages();
+  read_only_space_->ShrinkPages();
 #ifdef V8_SHARED_RO_HEAP
   std::shared_ptr<ReadOnlyArtifacts> artifacts(*read_only_artifacts_.Pointer());
   read_only_space()->DetachPagesAndAddToArtifacts(artifacts);
@@ -174,7 +175,7 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
   if (artifacts) {
     auto ro_space = artifacts->shared_read_only_space();
     statistics->read_only_space_size_ = ro_space->CommittedMemory();
-    statistics->read_only_space_used_size_ = ro_space->SizeOfObjects();
+    statistics->read_only_space_used_size_ = ro_space->Size();
     statistics->read_only_space_physical_size_ =
         ro_space->CommittedPhysicalMemory();
   }
@@ -214,30 +215,33 @@ ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)

 ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
     : ro_space_(ro_space),
-      current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ? nullptr
-                                                    : ro_space->first_page()),
+      current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
+                        ? std::vector<ReadOnlyPage*>::iterator()
+                        : ro_space->pages().begin()),
       current_addr_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
                         ? Address()
-                        : current_page_->area_start()) {}
+                        : (*current_page_)->area_start()) {}

 HeapObject ReadOnlyHeapObjectIterator::Next() {
   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     return HeapObject();  // Unsupported
   }

-  if (current_page_ == nullptr) {
+  if (current_page_ == ro_space_->pages().end()) {
     return HeapObject();
   }

+  BasicMemoryChunk* current_page = *current_page_;
   for (;;) {
-    DCHECK_LE(current_addr_, current_page_->area_end());
-    if (current_addr_ == current_page_->area_end()) {
+    DCHECK_LE(current_addr_, current_page->area_end());
+    if (current_addr_ == current_page->area_end()) {
       // Progress to the next page.
-      current_page_ = current_page_->next_page();
-      if (current_page_ == nullptr) {
+      ++current_page_;
+      if (current_page_ == ro_space_->pages().end()) {
         return HeapObject();
       }
-      current_addr_ = current_page_->area_start();
+      current_page = *current_page_;
+      current_addr_ = current_page->area_start();
     }

     if (current_addr_ == ro_space_->top() &&
@@ -7,6 +7,7 @@

 #include <memory>
 #include <utility>
+#include <vector>

 #include "src/base/macros.h"
 #include "src/base/optional.h"
@@ -20,10 +21,12 @@ class SharedMemoryStatistics;

 namespace internal {

+class BasicMemoryChunk;
 class Isolate;
 class Page;
 class ReadOnlyArtifacts;
 class ReadOnlyDeserializer;
+class ReadOnlyPage;
 class ReadOnlySpace;

 // This class transparently manages read-only space, roots and cache creation
@@ -116,7 +119,7 @@ class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {

  private:
   ReadOnlySpace* const ro_space_;
-  Page* current_page_;
+  std::vector<ReadOnlyPage*>::const_iterator current_page_;
   Address current_addr_;
 };
...@@ -10,7 +10,9 @@ ...@@ -10,7 +10,9 @@
#include "include/v8-platform.h" #include "include/v8-platform.h"
#include "src/base/macros.h" #include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-stats.h" #include "src/heap/allocation-stats.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/list.h" #include "src/heap/list.h"
#include "src/heap/memory-chunk.h" #include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces.h" #include "src/heap/paged-spaces.h"
...@@ -22,12 +24,14 @@ namespace internal { ...@@ -22,12 +24,14 @@ namespace internal {
class MemoryAllocator; class MemoryAllocator;
class ReadOnlyHeap; class ReadOnlyHeap;
class ReadOnlyPage : public Page { class ReadOnlyPage : public BasicMemoryChunk {
public: public:
// Clears any pointers in the header that point out of the page that would // Clears any pointers in the header that point out of the page that would
// otherwise make the header non-relocatable. // otherwise make the header non-relocatable.
void MakeHeaderRelocatable(); void MakeHeaderRelocatable();
size_t ShrinkToHighWaterMark();
private: private:
friend class ReadOnlySpace; friend class ReadOnlySpace;
}; };
...@@ -48,8 +52,8 @@ class ReadOnlyArtifacts { ...@@ -48,8 +52,8 @@ class ReadOnlyArtifacts {
return shared_read_only_space_.get(); return shared_read_only_space_.get();
} }
heap::List<MemoryChunk>& pages() { return pages_; } std::vector<ReadOnlyPage*>& pages() { return pages_; }
void TransferPages(heap::List<MemoryChunk>&& pages) { void TransferPages(std::vector<ReadOnlyPage*>&& pages) {
pages_ = std::move(pages); pages_ = std::move(pages);
} }
...@@ -59,7 +63,7 @@ class ReadOnlyArtifacts { ...@@ -59,7 +63,7 @@ class ReadOnlyArtifacts {
ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); } ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); }
private: private:
heap::List<MemoryChunk> pages_; std::vector<ReadOnlyPage*> pages_;
AllocationStats stats_; AllocationStats stats_;
std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_; std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
   std::unique_ptr<ReadOnlyHeap> read_only_heap_;
@@ -67,7 +71,7 @@ class ReadOnlyArtifacts {
 // -----------------------------------------------------------------------------
 // Read Only space for all Immortal Immovable and Immutable objects
-class ReadOnlySpace : public PagedSpace {
+class ReadOnlySpace : public BaseSpace {
  public:
   explicit ReadOnlySpace(Heap* heap);
@@ -76,13 +80,19 @@ class ReadOnlySpace : public PagedSpace {
   void DetachPagesAndAddToArtifacts(
       std::shared_ptr<ReadOnlyArtifacts> artifacts);
-  ~ReadOnlySpace() override { Unseal(); }
+  ~ReadOnlySpace() override;
+
+  bool IsDetached() const { return heap_ == nullptr; }
 
   bool writable() const { return !is_marked_read_only_; }
 
   bool Contains(Address a) = delete;
   bool Contains(Object o) = delete;
 
+  V8_EXPORT_PRIVATE
+  AllocationResult AllocateRaw(size_t size_in_bytes,
+                               AllocationAlignment alignment);
+
   V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
 
   enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
@@ -93,10 +103,32 @@ class ReadOnlySpace : public PagedSpace {
   void Seal(SealMode ro_mode);
 
   // During boot the free_space_map is created, and afterwards we may need
-  // to write it into the free list nodes that were already created.
-  void RepairFreeListsAfterDeserialization();
+  // to write it into the free space nodes that were already created.
+  void RepairFreeSpacesAfterDeserialization();
+
+  size_t Size() override { return area_size_; }
+  size_t CommittedPhysicalMemory() override;
+
+  const std::vector<ReadOnlyPage*>& pages() const { return pages_; }
+  Address top() const { return top_; }
+  Address limit() const { return limit_; }
+  size_t Capacity() const { return capacity_; }
+
+  bool ContainsSlow(Address addr);
+  void ShrinkPages();
+#ifdef VERIFY_HEAP
+  void Verify(Isolate* isolate);
+#ifdef DEBUG
+  void VerifyCounters(Heap* heap);
+#endif  // DEBUG
+#endif  // VERIFY_HEAP
+
+  // Return size of allocatable area on a page in this space.
+  int AreaSize() { return static_cast<int>(area_size_); }
+
+  ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);
+
-  size_t Available() override { return 0; }
+  Address FirstPageAddress() const { return pages_.front()->address(); }
 
  protected:
   void SetPermissionsForPages(MemoryAllocator* memory_allocator,
@@ -104,16 +136,36 @@ class ReadOnlySpace : public PagedSpace {
   bool is_marked_read_only_ = false;
 
+  // Accounting information for this space.
+  AllocationStats accounting_stats_;
+
+  std::vector<ReadOnlyPage*> pages_;
+
+  Address top_;
+  Address limit_;
+
  private:
-  // Unseal the space after is has been sealed, by making it writable.
+  // Unseal the space after it has been sealed, by making it writable.
+  // TODO(v8:7464): Only possible if the space hasn't been detached.
   void Unseal();
 
+  void DetachFromHeap() { heap_ = nullptr; }
+
+  AllocationResult AllocateRawUnaligned(int size_in_bytes);
+  AllocationResult AllocateRawAligned(int size_in_bytes,
+                                      AllocationAlignment alignment);
+
+  HeapObject TryAllocateLinearlyAligned(int size_in_bytes,
+                                        AllocationAlignment alignment);
+  void EnsureSpaceForAllocation(int size_in_bytes);
+  void FreeLinearAllocationArea();
+
-  // String padding must be cleared just before serialization and therefore the
-  // string padding in the space will already have been cleared if the space was
-  // deserialized.
+  // String padding must be cleared just before serialization and therefore
+  // the string padding in the space will already have been cleared if the
+  // space was deserialized.
   bool is_string_padding_cleared_;
+
+  size_t capacity_;
+  size_t area_size_;
 };
 
 class SharedReadOnlySpace : public ReadOnlySpace {
...
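The new members above (top_, limit_, AllocateRaw, EnsureSpaceForAllocation, FreeLinearAllocationArea, and the pages_ vector) outline a linear bump-pointer allocator: allocation advances top_ toward limit_, and a fresh page is appended when the remaining gap on the final page is too small. Below is a self-contained sketch of that idea with illustrative names; it is not V8's actual implementation.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Minimal bump-pointer sketch (illustrative only, not V8 code): hand out
// addresses from [top_, limit_) and grab a new page when the request does
// not fit, analogous to EnsureSpaceForAllocation() in the hunk above.
class BumpPointerSpace {
 public:
  uintptr_t Allocate(size_t size_in_bytes) {
    if (top_ + size_in_bytes > limit_) EnsureSpace(size_in_bytes);
    uintptr_t result = top_;
    top_ += size_in_bytes;  // The "bump".
    return result;
  }

 private:
  static constexpr size_t kPageSize = 256 * 1024;  // Illustrative page size.

  void EnsureSpace(size_t size_in_bytes) {
    pages_.emplace_back(std::max(kPageSize, size_in_bytes));
    top_ = reinterpret_cast<uintptr_t>(pages_.back().data());
    limit_ = top_ + pages_.back().size();
  }

  std::vector<std::vector<char>> pages_;  // Stand-in for std::vector<ReadOnlyPage*>.
  uintptr_t top_ = 0;
  uintptr_t limit_ = 0;
};
```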
@@ -130,11 +130,6 @@ void Page::MergeOldToNewRememberedSets() {
   sweeping_slot_set_ = nullptr;
 }
 
-void Page::ResetAllocationStatistics() {
-  allocated_bytes_ = area_size();
-  wasted_memory_ = 0;
-}
-
 void Page::AllocateLocalTracker() {
   DCHECK_NULL(local_tracker_);
   local_tracker_ = new LocalArrayBufferTracker(this);
...
@@ -105,15 +105,70 @@ class SemiSpace;
   DCHECK((0 < size) && (size <= code_space->AreaSize()))
 
 // ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class V8_EXPORT_PRIVATE Space : public Malloced {
+// BaseSpace is the abstract superclass for all allocation spaces.
+class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
+ public:
+  Heap* heap() const {
+    DCHECK_NOT_NULL(heap_);
+    return heap_;
+  }
+
+  AllocationSpace identity() { return id_; }
+
+  const char* name() { return Heap::GetSpaceName(id_); }
+
+  void AccountCommitted(size_t bytes) {
+    DCHECK_GE(committed_ + bytes, committed_);
+    committed_ += bytes;
+    if (committed_ > max_committed_) {
+      max_committed_ = committed_;
+    }
+  }
+
+  void AccountUncommitted(size_t bytes) {
+    DCHECK_GE(committed_, committed_ - bytes);
+    committed_ -= bytes;
+  }
+
+  // Return the total amount committed memory for this space, i.e., allocatable
+  // memory and page headers.
+  virtual size_t CommittedMemory() { return committed_; }
+
+  virtual size_t MaximumCommittedMemory() { return max_committed_; }
+
+  // Approximate amount of physical memory committed for this space.
+  virtual size_t CommittedPhysicalMemory() = 0;
+
+  // Returns allocated size.
+  virtual size_t Size() = 0;
+
+ protected:
+  BaseSpace(Heap* heap, AllocationSpace id)
+      : heap_(heap), id_(id), committed_(0), max_committed_(0) {}
+
+  // Even though this has no virtual functions, this ensures that pointers are
+  // stable through casting.
+  virtual ~BaseSpace() = default;
+
+ protected:
+  Heap* heap_;
+  AllocationSpace id_;
+
+  // Keeps track of committed memory in a space.
+  std::atomic<size_t> committed_;
+  size_t max_committed_;
+
+  DISALLOW_COPY_AND_ASSIGN(BaseSpace);
+};
+
+// ----------------------------------------------------------------------------
+// Space is the abstract superclass for all allocation spaces that are not
+// sealed after startup (i.e. not ReadOnlySpace).
+class V8_EXPORT_PRIVATE Space : public BaseSpace {
  public:
   Space(Heap* heap, AllocationSpace id, FreeList* free_list)
-      : allocation_observers_paused_(false),
-        heap_(heap),
-        id_(id),
-        committed_(0),
-        max_committed_(0),
+      : BaseSpace(heap, id),
+        allocation_observers_paused_(false),
         free_list_(std::unique_ptr<FreeList>(free_list)) {
     external_backing_store_bytes_ =
         new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
@@ -125,22 +180,11 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
   static inline void MoveExternalBackingStoreBytes(
       ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
 
-  virtual ~Space() {
+  ~Space() override {
     delete[] external_backing_store_bytes_;
     external_backing_store_bytes_ = nullptr;
   }
 
-  Heap* heap() const {
-    DCHECK_NOT_NULL(heap_);
-    return heap_;
-  }
-
-  bool IsDetached() const { return heap_ == nullptr; }
-
-  AllocationSpace identity() { return id_; }
-
-  const char* name() { return Heap::GetSpaceName(id_); }
-
   virtual void AddAllocationObserver(AllocationObserver* observer);
   virtual void RemoveAllocationObserver(AllocationObserver* observer);
@@ -158,22 +202,10 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
   // single allocation-folding group.
   void AllocationStepAfterMerge(Address first_object_in_chunk, int size);
 
-  // Return the total amount committed memory for this space, i.e., allocatable
-  // memory and page headers.
-  virtual size_t CommittedMemory() { return committed_; }
-
-  virtual size_t MaximumCommittedMemory() { return max_committed_; }
-
-  // Returns allocated size.
-  virtual size_t Size() = 0;
-
   // Returns size of objects. Can differ from the allocated size
   // (e.g. see OldLargeObjectSpace).
   virtual size_t SizeOfObjects() { return Size(); }
 
-  // Approximate amount of physical memory committed for this space.
-  virtual size_t CommittedPhysicalMemory() = 0;
-
   // Return the available bytes without growing.
   virtual size_t Available() = 0;
@@ -187,19 +219,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
   virtual std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) = 0;
 
-  void AccountCommitted(size_t bytes) {
-    DCHECK_GE(committed_ + bytes, committed_);
-    committed_ += bytes;
-    if (committed_ > max_committed_) {
-      max_committed_ = committed_;
-    }
-  }
-
-  void AccountUncommitted(size_t bytes) {
-    DCHECK_GE(committed_, committed_ - bytes);
-    committed_ -= bytes;
-  }
-
   inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                  size_t amount);
@@ -212,8 +231,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
     return external_backing_store_bytes_[type];
   }
 
-  void* GetRandomMmapAddr();
-
   MemoryChunk* first_page() { return memory_chunk_list_.front(); }
   MemoryChunk* last_page() { return memory_chunk_list_.back(); }
@@ -224,6 +241,8 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
   FreeList* free_list() { return free_list_.get(); }
 
+  Address FirstPageAddress() const { return first_page()->address(); }
+
 #ifdef DEBUG
   virtual void Print() = 0;
 #endif
@@ -234,8 +253,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
     return !allocation_observers_paused_ && !allocation_observers_.empty();
   }
 
-  void DetachFromHeap() { heap_ = nullptr; }
-
   std::vector<AllocationObserver*> allocation_observers_;
 
   // The List manages the pages that belong to the given space.
@@ -245,12 +262,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
   std::atomic<size_t>* external_backing_store_bytes_;
 
   bool allocation_observers_paused_;
 
-  Heap* heap_;
-  AllocationSpace id_;
-
-  // Keeps track of committed memory in a space.
-  std::atomic<size_t> committed_;
-  size_t max_committed_;
-
   std::unique_ptr<FreeList> free_list_;
@@ -327,17 +338,6 @@ class Page : public MemoryChunk {
     }
   }
 
-  // Returns the offset of a given address to this page.
-  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
-
-  // Returns the address for a given offset to the this page.
-  Address OffsetToAddress(size_t offset) {
-    Address address_in_page = address() + offset;
-    DCHECK_GE(address_in_page, area_start());
-    DCHECK_LT(address_in_page, area_end());
-    return address_in_page;
-  }
-
   void AllocateLocalTracker();
   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
   bool contains_array_buffers();
@@ -353,21 +353,6 @@ class Page : public MemoryChunk {
     return categories_[type];
   }
 
-  size_t wasted_memory() { return wasted_memory_; }
-  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
-  size_t allocated_bytes() { return allocated_bytes_; }
-  void IncreaseAllocatedBytes(size_t bytes) {
-    DCHECK_LE(bytes, area_size());
-    allocated_bytes_ += bytes;
-  }
-  void DecreaseAllocatedBytes(size_t bytes) {
-    DCHECK_LE(bytes, area_size());
-    DCHECK_GE(allocated_bytes(), bytes);
-    allocated_bytes_ -= bytes;
-  }
-  void ResetAllocationStatistics();
-
   size_t ShrinkToHighWaterMark();
 
   V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
...
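In the hunk above, the AccountCommitted/AccountUncommitted pair and the committed_/max_committed_ counters move verbatim from Space into the new BaseSpace, so ReadOnlySpace gets the same accounting without inheriting the rest of Space's machinery. A standalone usage sketch of the invariant they maintain (illustrative code, not part of the change):

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>

// Mirrors the moved accounting: committed_ tracks the current total and
// max_committed_ records the high-water mark, as in BaseSpace above.
class CommittedAccounting {
 public:
  void AccountCommitted(size_t bytes) {
    committed_ += bytes;
    if (committed_ > max_committed_) max_committed_ = committed_;
  }
  void AccountUncommitted(size_t bytes) {
    assert(committed_ >= bytes);
    committed_ -= bytes;
  }
  size_t CommittedMemory() const { return committed_; }
  size_t MaximumCommittedMemory() const { return max_committed_; }

 private:
  std::atomic<size_t> committed_{0};
  size_t max_committed_ = 0;
};

int main() {
  CommittedAccounting stats;
  stats.AccountCommitted(64 * 1024);    // Commit one 64 KiB page.
  stats.AccountUncommitted(64 * 1024);  // Release it again.
  assert(stats.CommittedMemory() == 0);
  assert(stats.MaximumCommittedMemory() == 64 * 1024);  // High-water mark persists.
}
```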
@@ -107,14 +107,14 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
   // create a back reference that encodes the page number as the chunk_index and
   // the offset within the page as the chunk_offset.
   Address address = obj.address();
-  Page* page = Page::FromAddress(address);
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
   uint32_t chunk_index = 0;
   ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
-  for (Page* p : *read_only_space) {
-    if (p == page) break;
+  for (ReadOnlyPage* page : read_only_space->pages()) {
+    if (chunk == page) break;
     ++chunk_index;
   }
-  uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
+  uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
   SerializerReference back_reference = SerializerReference::BackReference(
       SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
   reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
...
@@ -364,12 +364,9 @@ HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
       uint32_t chunk_index = source_.GetInt();
       uint32_t chunk_offset = source_.GetInt();
       if (is_off_thread() || isolate()->heap()->deserialization_complete()) {
-        PagedSpace* read_only_space =
+        ReadOnlySpace* read_only_space =
             local_isolate()->heap()->read_only_space();
-        Page* page = read_only_space->first_page();
-        for (uint32_t i = 0; i < chunk_index; ++i) {
-          page = page->next_page();
-        }
+        ReadOnlyPage* page = read_only_space->pages()[chunk_index];
         Address address = page->OffsetToAddress(chunk_offset);
         obj = HeapObject::FromAddress(address);
       } else {
...
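The serializer and deserializer hunks above are the two halves of one addressing scheme: a read-only object is identified by the index of the page containing it plus its offset within that page, and because pages() is now a std::vector, the deserializer can index the page in O(1) instead of walking next_page() links. A hedged sketch of the round trip, with illustrative names rather than V8's types:

```cpp
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct PageRef {
  uintptr_t base;
  size_t size;
};

// Encode: linear scan for the containing page, as SerializeReadOnlyObject
// does over read_only_space->pages().
std::pair<uint32_t, uint32_t> Encode(const std::vector<PageRef>& pages,
                                     uintptr_t addr) {
  for (uint32_t index = 0; index < pages.size(); ++index) {
    const PageRef& p = pages[index];
    if (addr >= p.base && addr < p.base + p.size) {
      return {index, static_cast<uint32_t>(addr - p.base)};
    }
  }
  return {0, 0};  // Unreachable for valid input; real code would assert.
}

// Decode: direct page lookup, replacing the old next_page() walk.
uintptr_t Decode(const std::vector<PageRef>& pages, uint32_t chunk_index,
                 uint32_t chunk_offset) {
  return pages[chunk_index].base + chunk_offset;
}
```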
@@ -39,7 +39,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
     ReadOnlyRoots roots(isolate);
     roots.Iterate(this);
-    ro_heap->read_only_space()->RepairFreeListsAfterDeserialization();
+    ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
 
     // Deserialize the Read-only Object Cache.
     for (size_t i = 0;; ++i) {
...
@@ -111,7 +111,9 @@ TEST(PagedSpaceIterator) {
 TEST(SpaceIterator) {
   auto* const read_only_space = CcTest::read_only_heap()->read_only_space();
   for (SpaceIterator it(CcTest::heap()); it.HasNext();) {
-    CHECK_NE(it.Next(), reinterpret_cast<Space*>(read_only_space));
+    // ReadOnlySpace is not actually a Space but is instead a BaseSpace, but
+    // ensure it's not been inserted incorrectly.
+    CHECK_NE(it.Next(), reinterpret_cast<BaseSpace*>(read_only_space));
   }
 }
...
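The updated CHECK_NE reflects the new hierarchy: ReadOnlySpace and Space are now siblings under BaseSpace, so their pointer types are not directly comparable and the comparison must go through the common base. A toy illustration of that constraint (the test itself keeps the reinterpret_cast carried over from its previous form; a static_cast through BaseSpace* is the minimal requirement):

```cpp
// Sketch only: stand-ins for the real classes, to show why the cast changed.
struct BaseSpace {};
struct Space : BaseSpace {};
struct ReadOnlySpace : BaseSpace {};

bool SamePlace(Space* space, ReadOnlySpace* ro_space) {
  // return space == ro_space;  // Ill-formed: the pointer types are unrelated.
  return static_cast<BaseSpace*>(space) == static_cast<BaseSpace*>(ro_space);
}
```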
@@ -182,8 +182,8 @@ TEST(GetObjectProperties) {
                    : Contains(props->brief, "maybe EmptyFixedArray"));
 
   // Provide a heap first page so the API can be more sure.
-  heap_addresses.read_only_space_first_page = reinterpret_cast<uintptr_t>(
-      i_isolate->heap()->read_only_space()->first_page());
+  heap_addresses.read_only_space_first_page =
+      i_isolate->heap()->read_only_space()->FirstPageAddress();
   props =
       d::GetObjectProperties(properties_or_hash, &ReadMemory, heap_addresses);
   CHECK(props->type_check_result ==
...
@@ -95,14 +95,20 @@ static void DumpKnownObject(FILE* out, i::Heap* heap, const char* space_name,
 #undef RO_ROOT_LIST_CASE
 }
 
-static void DumpSpaceFirstPageAddress(FILE* out, i::PagedSpace* space) {
+static void DumpSpaceFirstPageAddress(FILE* out, i::BaseSpace* space,
+                                      i::Address first_page) {
   const char* name = space->name();
-  i::Address first_page = reinterpret_cast<i::Address>(space->first_page());
   i::Tagged_t compressed = i::CompressTagged(first_page);
   uintptr_t unsigned_compressed = static_cast<uint32_t>(compressed);
   i::PrintF(out, "  0x%08" V8PRIxPTR ": \"%s\",\n", unsigned_compressed, name);
 }
 
+template <typename SpaceT>
+static void DumpSpaceFirstPageAddress(FILE* out, SpaceT* space) {
+  i::Address first_page = space->FirstPageAddress();
+  DumpSpaceFirstPageAddress(out, space, first_page);
+}
+
 static int DumpHeapConstants(FILE* out, const char* argv0) {
   // Start up V8.
   std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
...
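The template overload above exists because Space and ReadOnlySpace no longer share a page representation; they share only the FirstPageAddress() accessor added in this change, so the template dispatches on that interface alone. A minimal standalone illustration of the duck-typed dispatch (names are invented for the example):

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Two unrelated space types that happen to share the FirstPageAddress()
// interface, standing in for Space and ReadOnlySpace.
struct ListBackedSpace {
  uintptr_t FirstPageAddress() const { return 0x10000; }
  const char* name() const { return "old_space"; }
};
struct VectorBackedSpace {
  uintptr_t FirstPageAddress() const { return 0x20000; }
  const char* name() const { return "read_only_space"; }
};

// Duck-typed dispatch: compiles for any type with FirstPageAddress()/name().
template <typename SpaceT>
void DumpSpaceFirstPageAddress(std::FILE* out, SpaceT* space) {
  std::fprintf(out, "  0x%08" PRIxPTR ": \"%s\",\n", space->FirstPageAddress(),
               space->name());
}

int main() {
  ListBackedSpace old_space;
  VectorBackedSpace ro_space;
  DumpSpaceFirstPageAddress(stdout, &old_space);
  DumpSpaceFirstPageAddress(stdout, &ro_space);
}
```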