Commit 3d76e88f authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Explicitly specify page allocator instance for VirtualMemory.

The provided page allocator serves all memory requests made by the
VirtualMemory object.
This is a necessary cleanup before introducing BoundedPageAllocator.

Bug: v8:8096
Change-Id: I95477d67e5f532013322a991db3ee1a1f2e821e6
Reviewed-on: https://chromium-review.googlesource.com/1210122
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55703}
parent 23077519
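
At a glance, the new calling convention: every VirtualMemory and every
Alloc*VirtualMemory call now names the v8::PageAllocator it draws from. A
minimal hedged sketch of a call site (the error handling is illustrative,
not part of the commit):

v8::PageAllocator* page_allocator = GetPlatformPageAllocator();

// Reserve one page worth of address space through an explicit allocator.
VirtualMemory reservation(page_allocator,
                          page_allocator->AllocatePageSize(),
                          /*hint=*/nullptr);
if (reservation.IsReserved()) {
  // ... use reservation.address() / reservation.size() ...
  reservation.Free();  // Pages go back to the same allocator.
}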
@@ -220,15 +220,10 @@ bool OnCriticalMemoryPressure(size_t length) {
   return true;
 }
 
-VirtualMemory::VirtualMemory()
-    : page_allocator_(GetPlatformPageAllocator()),
-      address_(kNullAddress),
-      size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
-    : page_allocator_(GetPlatformPageAllocator()),
-      address_(kNullAddress),
-      size_(0) {
+VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                             void* hint, size_t alignment)
+    : page_allocator_(page_allocator), address_(kNullAddress), size_(0) {
+  DCHECK_NOT_NULL(page_allocator);
   size_t page_size = page_allocator_->AllocatePageSize();
   size_t alloc_size = RoundUp(size, page_size);
   address_ = reinterpret_cast<Address>(AllocatePages(
@@ -245,6 +240,7 @@ VirtualMemory::~VirtualMemory() {
 }
 
 void VirtualMemory::Reset() {
+  page_allocator_ = nullptr;
   address_ = kNullAddress;
   size_ = 0;
 }
@@ -277,14 +273,15 @@ void VirtualMemory::Free() {
   DCHECK(IsReserved());
   // Notice: Order is important here. The VirtualMemory object might live
   // inside the allocated region.
+  v8::PageAllocator* page_allocator = page_allocator_;
   Address address = address_;
   size_t size = size_;
   CHECK(InVM(address, size));
   Reset();
   // FreePages expects size to be aligned to allocation granularity. Trimming
   // may leave size at only commit granularity. Align it here.
-  CHECK(FreePages(page_allocator_, reinterpret_cast<void*>(address),
-                  RoundUp(size, page_allocator_->AllocatePageSize())));
+  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(address),
+                  RoundUp(size, page_allocator->AllocatePageSize())));
 }
 
 void VirtualMemory::TakeControl(VirtualMemory* from) {
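
The ordering in Free() above matters for two reasons: Reset() now also clears
page_allocator_, so Free() must stash the pointer in a local before resetting,
and the VirtualMemory object itself may live inside the region being released,
so no member may be touched after the pages are freed. A self-contained sketch
of the same pattern (the Region type and free_pages callback are illustrative
stand-ins, not V8 code):

#include <cstddef>

// Illustrative stand-ins; the real code uses v8::PageAllocator and FreePages.
struct Region {
  void* allocator;
  void* base;
  std::size_t size;
  void Reset() { allocator = nullptr; base = nullptr; size = 0; }
};

void FreeRegion(Region* r, void (*free_pages)(void*, void*, std::size_t)) {
  // Copy members first: after free_pages(), *r may be unmapped memory.
  void* allocator = r->allocator;
  void* base = r->base;
  std::size_t size = r->size;
  r->Reset();  // Last touch of *r.
  free_pages(allocator, base, size);
}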
@@ -295,8 +292,9 @@ void VirtualMemory::TakeControl(VirtualMemory* from) {
   from->Reset();
 }
 
-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
-  VirtualMemory vm(size, hint);
+bool AllocVirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                        void* hint, VirtualMemory* result) {
+  VirtualMemory vm(page_allocator, size, hint);
   if (vm.IsReserved()) {
     result->TakeControl(&vm);
     return true;
@@ -304,9 +302,10 @@ bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
   return false;
 }
 
-bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
+bool AlignedAllocVirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                               size_t alignment, void* hint,
                                VirtualMemory* result) {
-  VirtualMemory vm(size, hint, alignment);
+  VirtualMemory vm(page_allocator, size, hint, alignment);
   if (vm.IsReserved()) {
     result->TakeControl(&vm);
     return true;
@@ -152,22 +152,24 @@ V8_WARN_UNUSED_RESULT byte* AllocatePage(v8::PageAllocator* page_allocator,
 V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
 
 // Represents and controls an area of reserved memory.
-class V8_EXPORT_PRIVATE VirtualMemory {
+class V8_EXPORT_PRIVATE VirtualMemory final {
  public:
   // Empty VirtualMemory object, controlling no reserved memory.
-  VirtualMemory();
+  VirtualMemory()
+      : page_allocator_(nullptr), address_(kNullAddress), size_(0) {}
 
   // Reserves virtual memory containing an area of the given size that is
   // aligned per alignment. This may not be at the position returned by
   // address().
-  VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());
+  VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
+                size_t alignment = AllocatePageSize());
 
   // Construct a virtual memory by assigning it some already mapped address
   // and size.
-  VirtualMemory(Address address, size_t size)
-      : page_allocator_(GetPlatformPageAllocator()),
-        address_(address),
-        size_(size) {}
+  VirtualMemory(v8::PageAllocator* page_allocator, Address address, size_t size)
+      : page_allocator_(page_allocator), address_(address), size_(size) {
+    DCHECK_NOT_NULL(page_allocator);
+  }
 
   // Releases the reserved memory, if any, controlled by this VirtualMemory
   // object.
@@ -227,8 +229,10 @@ class V8_EXPORT_PRIVATE VirtualMemory {
   size_t size_;  // Size of the virtual memory.
 };
 
-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
-bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
+bool AllocVirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                        void* hint, VirtualMemory* result);
+bool AlignedAllocVirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                               size_t alignment, void* hint,
                                VirtualMemory* result);
 
 }  // namespace internal
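
With the new signatures, every call site threads its allocator through
explicitly. A hedged usage sketch mirroring the updated tests further down
(the sizes and error handling are illustrative):

v8::PageAllocator* page_allocator = GetPlatformPageAllocator();

VirtualMemory plain;
if (AllocVirtualMemory(page_allocator, 4 * MB, /*hint=*/nullptr, &plain)) {
  // plain controls the reservation and frees it through page_allocator.
}

VirtualMemory aligned;
if (AlignedAllocVirtualMemory(page_allocator, 4 * MB,
                              page_allocator->AllocatePageSize(),
                              /*hint=*/nullptr, &aligned)) {
  // aligned.address() is a multiple of AllocatePageSize().
}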
@@ -4729,7 +4729,8 @@ void Heap::SetUp() {
   space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
   space_[NEW_SPACE] = new_space_ =
-      new NewSpace(this, initial_semispace_size_, max_semi_space_size_);
+      new NewSpace(this, memory_allocator_->data_page_allocator(),
+                   initial_semispace_size_, max_semi_space_size_);
   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
@@ -100,7 +100,8 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
 static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
     LAZY_INSTANCE_INITIALIZER;
 
-CodeRange::CodeRange(Isolate* isolate, size_t requested)
+CodeRange::CodeRange(Isolate* isolate, v8::PageAllocator* page_allocator,
+                     size_t requested)
     : isolate_(isolate),
       free_list_(0),
       allocation_list_(0),
@@ -135,8 +136,9 @@ CodeRange::CodeRange(Isolate* isolate, size_t requested)
   VirtualMemory reservation;
   void* hint = code_range_address_hint.Pointer()->GetAddressHint(requested);
   if (!AlignedAllocVirtualMemory(
-          requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()), hint,
-          &reservation)) {
+          page_allocator, requested,
+          Max(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize()),
+          hint, &reservation)) {
     V8::FatalProcessOutOfMemory(isolate,
                                 "CodeRange setup: allocate virtual memory");
   }
@@ -313,7 +315,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
       lowest_ever_allocated_(static_cast<Address>(-1ll)),
       highest_ever_allocated_(kNullAddress),
       unmapper_(isolate->heap(), this) {
-  code_range_ = new CodeRange(isolate_, code_range_size);
+  code_range_ = new CodeRange(isolate_, code_page_allocator_, code_range_size);
 }
@@ -526,28 +528,18 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
   }
 }
 
-Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
-                                              void* hint,
-                                              VirtualMemory* controller) {
-  VirtualMemory reservation;
-  if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation)) {
-    return kNullAddress;
-  }
-
-  Address result = reservation.address();
-  size_ += reservation.size();
-  controller->TakeControl(&reservation);
-  return result;
-}
-
 Address MemoryAllocator::AllocateAlignedMemory(
     size_t reserve_size, size_t commit_size, size_t alignment,
     Executability executable, void* hint, VirtualMemory* controller) {
+  v8::PageAllocator* page_allocator = this->page_allocator(executable);
   DCHECK(commit_size <= reserve_size);
   VirtualMemory reservation;
-  Address base =
-      ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
-  if (base == kNullAddress) return kNullAddress;
+  if (!AlignedAllocVirtualMemory(page_allocator, reserve_size, alignment, hint,
+                                 &reservation)) {
+    return kNullAddress;
+  }
+  Address base = reservation.address();
+  size_ += reservation.size();
 
   if (executable == EXECUTABLE) {
     if (!CommitExecutableMemory(&reservation, base, commit_size,
@@ -614,8 +606,10 @@ void MemoryChunk::SetReadAndExecutable() {
     size_t protect_size = RoundUp(area_size(), page_size);
     // TODO(ishell): use reservation_.SetPermissions() once it's always
     // initialized.
-    CHECK(SetPermissions(reservation_.page_allocator(), protect_start,
-                         protect_size, PageAllocator::kReadExecute));
+    v8::PageAllocator* page_allocator =
+        heap()->memory_allocator()->code_page_allocator();
+    CHECK(SetPermissions(page_allocator, protect_start, protect_size,
+                         PageAllocator::kReadExecute));
   }
 }
@@ -635,8 +629,10 @@ void MemoryChunk::SetReadAndWritable() {
     size_t unprotect_size = RoundUp(area_size(), page_size);
     // TODO(ishell): use reservation_.SetPermissions() once it's always
     // initialized.
-    CHECK(SetPermissions(reservation_.page_allocator(), unprotect_start,
-                         unprotect_size, PageAllocator::kReadWrite));
+    v8::PageAllocator* page_allocator =
+        heap()->memory_allocator()->code_page_allocator();
+    CHECK(SetPermissions(page_allocator, unprotect_start, unprotect_size,
+                         PageAllocator::kReadWrite));
   }
 }
@@ -706,7 +702,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
     size_t area_size = RoundUp(area_end - area_start, page_size);
     // TODO(ishell): use reservation->SetPermissions() once it's always
     // initialized.
-    CHECK(SetPermissions(reservation->page_allocator(), area_start, area_size,
-                         PageAllocator::kReadWriteExecute));
+    v8::PageAllocator* page_allocator =
+        heap->memory_allocator()->page_allocator(executable);
+    CHECK(SetPermissions(page_allocator, area_start, area_size,
+                         PageAllocator::kReadWriteExecute));
   }
 }
@@ -1230,7 +1228,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
   if (!CommitBlock(start, size)) {
     return nullptr;
   }
-  VirtualMemory reservation(start, size);
+  VirtualMemory reservation(data_page_allocator(), start, size);
   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                           NOT_EXECUTABLE, owner, &reservation);
   size_ += size;
@@ -2134,12 +2132,12 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
 // -----------------------------------------------------------------------------
 // NewSpace implementation
 
-NewSpace::NewSpace(Heap* heap, size_t initial_semispace_capacity,
+NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+                   size_t initial_semispace_capacity,
                    size_t max_semispace_capacity)
     : SpaceWithLinearArea(heap, NEW_SPACE),
       to_space_(heap, kToSpace),
-      from_space_(heap, kFromSpace),
-      reservation_() {
+      from_space_(heap, kFromSpace) {
   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
   DCHECK(
       base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
@@ -3357,10 +3355,13 @@ void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
     if (access == PageAllocator::kRead) {
       page->MakeHeaderRelocatable();
     }
-    // TODO(ishell): use p->reserved_memory()->SetPermissions() once it's always
-    // initialized.
-    CHECK(SetPermissions(page->reserved_memory()->page_allocator(),
-                         page->address() + area_start_offset,
+    // TODO(ishell): use page->reserved_memory()->SetPermissions() once it's
+    // always initialized.
+    v8::PageAllocator* page_allocator =
+        page->IsFlagSet(Page::IS_EXECUTABLE)
+            ? heap()->memory_allocator()->code_page_allocator()
+            : heap()->memory_allocator()->data_page_allocator();
+    CHECK(SetPermissions(page_allocator, page->address() + area_start_offset,
                          page->size() - area_start_offset, access));
   }
 }
@@ -1086,7 +1086,8 @@ class MemoryChunkValidator {
 // manages a range of virtual memory.
 class CodeRange {
  public:
-  CodeRange(Isolate* isolate, size_t requested_size);
+  CodeRange(Isolate* isolate, v8::PageAllocator* page_allocator,
+            size_t requested_size);
   ~CodeRange();
 
   bool valid() { return virtual_memory_.IsReserved(); }
@@ -1425,8 +1426,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
                              Executability executable, Space* space);
 
-  Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
-                               VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                 size_t alignment, Executability executable,
                                 void* hint, VirtualMemory* controller);
@@ -1478,6 +1477,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   // Guaranteed to be a valid pointer.
   v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
 
+  // Returns page allocator suitable for allocating pages with requested
+  // executability.
+  v8::PageAllocator* page_allocator(Executability executable) {
+    return executable == EXECUTABLE ? code_page_allocator_
+                                    : data_page_allocator_;
+  }
+
   CodeRange* code_range() { return code_range_; }
   Unmapper* unmapper() { return &unmapper_; }
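
The permission-change hunks above (MemoryChunk::SetReadAndExecutable,
SetReadAndWritable, MemoryChunk::Initialize, and
ReadOnlySpace::SetPermissionsForPages) now route through this accessor or the
code/data pair it wraps. A hedged illustration of the dispatch (variable names
are illustrative):

// Pages that hold code are re-protected through the code page allocator;
// everything else goes through the data page allocator.
v8::PageAllocator* allocator =
    heap->memory_allocator()->page_allocator(executable);
CHECK(SetPermissions(allocator, start, size, PageAllocator::kReadWrite));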
@@ -2619,8 +2625,8 @@ class NewSpace : public SpaceWithLinearArea {
 public:
  typedef PageIterator iterator;
 
-  NewSpace(Heap* heap, size_t initial_semispace_capacity,
-           size_t max_semispace_capacity);
+  NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+           size_t initial_semispace_capacity, size_t max_semispace_capacity);
 
   ~NewSpace() override { TearDown(); }
@@ -31,15 +31,16 @@ StoreBuffer::StoreBuffer(Heap* heap)
 }
 
 void StoreBuffer::SetUp() {
+  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   const size_t requested_size = kStoreBufferSize * kStoreBuffers;
   // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
   // use a bit test to detect the ends of the buffers.
   const size_t alignment =
-      std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+      std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
   void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
   VirtualMemory reservation;
-  if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
-                                 &reservation)) {
+  if (!AlignedAllocVirtualMemory(page_allocator, requested_size, alignment,
+                                 hint, &reservation)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
@@ -804,12 +804,14 @@ void WasmCodeManager::AssignRanges(Address start, Address end,
 }
 
 void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
+  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   DCHECK_GT(size, 0);
-  size = RoundUp(size, AllocatePageSize());
-  if (hint == nullptr) hint = GetRandomMmapAddr();
+  size = RoundUp(size, page_allocator->AllocatePageSize());
+  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
 
-  if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
-                                 hint, ret)) {
+  if (!AlignedAllocVirtualMemory(page_allocator, size,
+                                 page_allocator->AllocatePageSize(), hint,
+                                 ret)) {
     DCHECK(!ret->IsReserved());
   }
   TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
@@ -206,7 +206,7 @@ TEST(CodeRange) {
   const size_t code_range_size = 32*MB;
   CcTest::InitializeVM();
   CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()),
-                       code_range_size);
+                       GetPlatformPageAllocator(), code_range_size);
   size_t current_allocated = 0;
   size_t total_allocated = 0;
   std::vector<Block> blocks;
@@ -133,7 +133,8 @@ TEST(Regress3540) {
   TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
   size_t code_range_size =
       kMinimumCodeRangeSize > 0 ? kMinimumCodeRangeSize : 3 * Page::kPageSize;
-  CodeRange* code_range = new CodeRange(isolate, code_range_size);
+  CodeRange* code_range = new CodeRange(
+      isolate, memory_allocator->code_page_allocator(), code_range_size);
   Address address;
   size_t size;
@@ -168,16 +169,19 @@ TEST(MemoryChunk) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
 
+  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
   size_t reserve_area_size = 1 * MB;
   size_t initial_commit_area_size;
 
   for (int i = 0; i < 100; i++) {
     initial_commit_area_size =
-        RoundUp(PseudorandomAreaSize(), CommitPageSize());
+        RoundUp(PseudorandomAreaSize(), page_allocator->CommitPageSize());
 
     // With CodeRange.
     const size_t code_range_size = 32 * MB;
-    CodeRange* code_range = new CodeRange(isolate, code_range_size);
+    CodeRange* code_range =
+        new CodeRange(isolate, page_allocator, code_range_size);
     VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                       initial_commit_area_size, EXECUTABLE, heap->code_space());
@@ -246,7 +250,8 @@ TEST(NewSpace) {
       new MemoryAllocator(isolate, heap->MaxReserved(), 0);
   TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
 
-  NewSpace new_space(heap, CcTest::heap()->InitialSemiSpaceSize(),
+  NewSpace new_space(heap, memory_allocator->data_page_allocator(),
+                     CcTest::heap()->InitialSemiSpaceSize(),
                      CcTest::heap()->InitialSemiSpaceSize());
 
   CHECK(new_space.MaximumCapacity());
@@ -143,7 +143,8 @@ TEST(AllocVirtualMemoryOOM) {
   CHECK(!platform.oom_callback_called);
   v8::internal::VirtualMemory result;
   bool success =
-      v8::internal::AllocVirtualMemory(GetHugeMemoryAmount(), nullptr, &result);
+      v8::internal::AllocVirtualMemory(v8::internal::GetPlatformPageAllocator(),
+                                       GetHugeMemoryAmount(), nullptr, &result);
   // On a few systems, allocation somehow succeeds.
   CHECK_IMPLIES(success, result.IsReserved());
   CHECK_IMPLIES(!success, !result.IsReserved() && platform.oom_callback_called);
@@ -154,8 +155,8 @@ TEST(AlignedAllocVirtualMemoryOOM) {
   CHECK(!platform.oom_callback_called);
   v8::internal::VirtualMemory result;
   bool success = v8::internal::AlignedAllocVirtualMemory(
-      GetHugeMemoryAmount(), v8::internal::AllocatePageSize(), nullptr,
-      &result);
+      v8::internal::GetPlatformPageAllocator(), GetHugeMemoryAmount(),
+      v8::internal::AllocatePageSize(), nullptr, &result);
   // On a few systems, allocation somehow succeeds.
   CHECK_IMPLIES(success, result.IsReserved());
   CHECK_IMPLIES(!success, !result.IsReserved() && platform.oom_callback_called);