Commit 3d76e88f authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Explicitly specify page allocator instance for VirtualMemory.

The provided page allocator serves all memory requests made through the
VirtualMemory object.
This is a necessary cleanup before introducing BoundedPageAllocator.
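
For illustration, a minimal usage sketch of the new constructor, assuming the
platform page allocator (the names and sizes below are illustrative, not part
of this change):

    // Sketch only: reserve and release a region through an explicitly
    // provided page allocator; every request the VirtualMemory makes
    // (permission changes, freeing) goes through that allocator.
    v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
    VirtualMemory reservation(page_allocator, 1 * MB,
                              page_allocator->GetRandomMmapAddr());
    if (reservation.IsReserved()) {
      // ... use reservation.address() / reservation.size() ...
      reservation.Free();
    }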

Bug: v8:8096
Change-Id: I95477d67e5f532013322a991db3ee1a1f2e821e6
Reviewed-on: https://chromium-review.googlesource.com/1210122
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55703}
parent 23077519
@@ -220,15 +220,10 @@ bool OnCriticalMemoryPressure(size_t length) {
   return true;
 }
 
-VirtualMemory::VirtualMemory()
-    : page_allocator_(GetPlatformPageAllocator()),
-      address_(kNullAddress),
-      size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
-    : page_allocator_(GetPlatformPageAllocator()),
-      address_(kNullAddress),
-      size_(0) {
+VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                             void* hint, size_t alignment)
+    : page_allocator_(page_allocator), address_(kNullAddress), size_(0) {
+  DCHECK_NOT_NULL(page_allocator);
   size_t page_size = page_allocator_->AllocatePageSize();
   size_t alloc_size = RoundUp(size, page_size);
   address_ = reinterpret_cast<Address>(AllocatePages(
@@ -245,6 +240,7 @@ VirtualMemory::~VirtualMemory() {
 }
 
 void VirtualMemory::Reset() {
+  page_allocator_ = nullptr;
   address_ = kNullAddress;
   size_ = 0;
 }
@@ -277,14 +273,15 @@ void VirtualMemory::Free() {
   DCHECK(IsReserved());
   // Notice: Order is important here. The VirtualMemory object might live
   // inside the allocated region.
+  v8::PageAllocator* page_allocator = page_allocator_;
   Address address = address_;
   size_t size = size_;
   CHECK(InVM(address, size));
   Reset();
   // FreePages expects size to be aligned to allocation granularity. Trimming
   // may leave size at only commit granularity. Align it here.
-  CHECK(FreePages(page_allocator_, reinterpret_cast<void*>(address),
-                  RoundUp(size, page_allocator_->AllocatePageSize())));
+  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(address),
+                  RoundUp(size, page_allocator->AllocatePageSize())));
 }
 
 void VirtualMemory::TakeControl(VirtualMemory* from) {
@@ -295,8 +292,9 @@ void VirtualMemory::TakeControl(VirtualMemory* from) {
   from->Reset();
 }
 
-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
-  VirtualMemory vm(size, hint);
+bool AllocVirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                        void* hint, VirtualMemory* result) {
+  VirtualMemory vm(page_allocator, size, hint);
   if (vm.IsReserved()) {
     result->TakeControl(&vm);
     return true;
@@ -304,9 +302,10 @@ bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
   return false;
 }
 
-bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
-                               VirtualMemory* result) {
-  VirtualMemory vm(size, hint, alignment);
+bool AlignedAllocVirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                               size_t alignment, void* hint,
+                               VirtualMemory* result) {
+  VirtualMemory vm(page_allocator, size, hint, alignment);
   if (vm.IsReserved()) {
     result->TakeControl(&vm);
     return true;
......
@@ -152,22 +152,24 @@ V8_WARN_UNUSED_RESULT byte* AllocatePage(v8::PageAllocator* page_allocator,
 V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
 
 // Represents and controls an area of reserved memory.
-class V8_EXPORT_PRIVATE VirtualMemory {
+class V8_EXPORT_PRIVATE VirtualMemory final {
  public:
   // Empty VirtualMemory object, controlling no reserved memory.
-  VirtualMemory();
+  VirtualMemory()
+      : page_allocator_(nullptr), address_(kNullAddress), size_(0) {}
 
   // Reserves virtual memory containing an area of the given size that is
   // aligned per alignment. This may not be at the position returned by
   // address().
-  VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());
+  VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
+                size_t alignment = AllocatePageSize());
 
   // Construct a virtual memory by assigning it some already mapped address
   // and size.
-  VirtualMemory(Address address, size_t size)
-      : page_allocator_(GetPlatformPageAllocator()),
-        address_(address),
-        size_(size) {}
+  VirtualMemory(v8::PageAllocator* page_allocator, Address address, size_t size)
+      : page_allocator_(page_allocator), address_(address), size_(size) {
+    DCHECK_NOT_NULL(page_allocator);
+  }
 
   // Releases the reserved memory, if any, controlled by this VirtualMemory
   // object.
@@ -227,8 +229,10 @@ class V8_EXPORT_PRIVATE VirtualMemory {
   size_t size_;  // Size of the virtual memory.
 };
 
-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
-bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
+bool AllocVirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                        void* hint, VirtualMemory* result);
+bool AlignedAllocVirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+                               size_t alignment, void* hint,
                                VirtualMemory* result);
 
 }  // namespace internal
......
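
To illustrate the updated helper signatures above, here is a hedged sketch of
the reserve-then-take-control pattern used by the call sites in this change
(assuming the platform page allocator; the local names are hypothetical, not
code from this CL):

    // Sketch only: reserve an aligned region through an explicit allocator,
    // then move ownership into a longer-lived VirtualMemory on success.
    v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
    const size_t size = 4 * MB;
    const size_t alignment = page_allocator->AllocatePageSize();
    void* hint = page_allocator->GetRandomMmapAddr();
    VirtualMemory reservation;
    if (AlignedAllocVirtualMemory(page_allocator, size, alignment, hint,
                                  &reservation)) {
      // e.g. some_long_lived_virtual_memory_.TakeControl(&reservation);
    }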
@@ -4729,7 +4729,8 @@ void Heap::SetUp() {
   space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
   space_[NEW_SPACE] = new_space_ =
-      new NewSpace(this, initial_semispace_size_, max_semi_space_size_);
+      new NewSpace(this, memory_allocator_->data_page_allocator(),
+                   initial_semispace_size_, max_semi_space_size_);
   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
......
@@ -100,7 +100,8 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
 static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
     LAZY_INSTANCE_INITIALIZER;
 
-CodeRange::CodeRange(Isolate* isolate, size_t requested)
+CodeRange::CodeRange(Isolate* isolate, v8::PageAllocator* page_allocator,
+                     size_t requested)
     : isolate_(isolate),
       free_list_(0),
       allocation_list_(0),
@@ -135,8 +136,9 @@ CodeRange::CodeRange(Isolate* isolate, size_t requested)
   VirtualMemory reservation;
   void* hint = code_range_address_hint.Pointer()->GetAddressHint(requested);
   if (!AlignedAllocVirtualMemory(
-          requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()), hint,
-          &reservation)) {
+          page_allocator, requested,
+          Max(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize()),
+          hint, &reservation)) {
     V8::FatalProcessOutOfMemory(isolate,
                                 "CodeRange setup: allocate virtual memory");
   }
@@ -313,7 +315,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
       lowest_ever_allocated_(static_cast<Address>(-1ll)),
       highest_ever_allocated_(kNullAddress),
       unmapper_(isolate->heap(), this) {
-  code_range_ = new CodeRange(isolate_, code_range_size);
+  code_range_ = new CodeRange(isolate_, code_page_allocator_, code_range_size);
 }
@@ -526,28 +528,18 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
   }
 }
 
-Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
-                                              void* hint,
-                                              VirtualMemory* controller) {
-  VirtualMemory reservation;
-  if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation)) {
-    return kNullAddress;
-  }
-
-  Address result = reservation.address();
-  size_ += reservation.size();
-  controller->TakeControl(&reservation);
-  return result;
-}
-
 Address MemoryAllocator::AllocateAlignedMemory(
     size_t reserve_size, size_t commit_size, size_t alignment,
     Executability executable, void* hint, VirtualMemory* controller) {
+  v8::PageAllocator* page_allocator = this->page_allocator(executable);
   DCHECK(commit_size <= reserve_size);
   VirtualMemory reservation;
-  Address base =
-      ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
-  if (base == kNullAddress) return kNullAddress;
+  if (!AlignedAllocVirtualMemory(page_allocator, reserve_size, alignment, hint,
+                                 &reservation)) {
+    return kNullAddress;
+  }
+  Address base = reservation.address();
+  size_ += reservation.size();
 
   if (executable == EXECUTABLE) {
     if (!CommitExecutableMemory(&reservation, base, commit_size,
@@ -614,8 +606,10 @@ void MemoryChunk::SetReadAndExecutable() {
     size_t protect_size = RoundUp(area_size(), page_size);
     // TODO(ishell): use reservation_.SetPermissions() once it's always
     // initialized.
-    CHECK(SetPermissions(reservation_.page_allocator(), protect_start,
-                         protect_size, PageAllocator::kReadExecute));
+    v8::PageAllocator* page_allocator =
+        heap()->memory_allocator()->code_page_allocator();
+    CHECK(SetPermissions(page_allocator, protect_start, protect_size,
+                         PageAllocator::kReadExecute));
   }
 }
@@ -635,8 +629,10 @@ void MemoryChunk::SetReadAndWritable() {
     size_t unprotect_size = RoundUp(area_size(), page_size);
     // TODO(ishell): use reservation_.SetPermissions() once it's always
     // initialized.
-    CHECK(SetPermissions(reservation_.page_allocator(), unprotect_start,
-                         unprotect_size, PageAllocator::kReadWrite));
+    v8::PageAllocator* page_allocator =
+        heap()->memory_allocator()->code_page_allocator();
+    CHECK(SetPermissions(page_allocator, unprotect_start, unprotect_size,
+                         PageAllocator::kReadWrite));
   }
 }
@@ -706,7 +702,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
       size_t area_size = RoundUp(area_end - area_start, page_size);
       // TODO(ishell): use reservation->SetPermissions() once it's always
       // initialized.
-      CHECK(SetPermissions(reservation->page_allocator(), area_start, area_size,
+      v8::PageAllocator* page_allocator =
+          heap->memory_allocator()->page_allocator(executable);
+      CHECK(SetPermissions(page_allocator, area_start, area_size,
                            PageAllocator::kReadWriteExecute));
     }
   }
@@ -1230,7 +1228,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
   if (!CommitBlock(start, size)) {
     return nullptr;
   }
-  VirtualMemory reservation(start, size);
+  VirtualMemory reservation(data_page_allocator(), start, size);
   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                           NOT_EXECUTABLE, owner, &reservation);
   size_ += size;
@@ -2134,12 +2132,12 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
 // -----------------------------------------------------------------------------
 // NewSpace implementation
 
-NewSpace::NewSpace(Heap* heap, size_t initial_semispace_capacity,
+NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+                   size_t initial_semispace_capacity,
                    size_t max_semispace_capacity)
     : SpaceWithLinearArea(heap, NEW_SPACE),
       to_space_(heap, kToSpace),
-      from_space_(heap, kFromSpace),
-      reservation_() {
+      from_space_(heap, kFromSpace) {
   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
   DCHECK(
       base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
@@ -3357,10 +3355,13 @@ void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
     if (access == PageAllocator::kRead) {
       page->MakeHeaderRelocatable();
     }
-    // TODO(ishell): use p->reserved_memory()->SetPermissions() once it's always
-    // initialized.
-    CHECK(SetPermissions(page->reserved_memory()->page_allocator(),
-                         page->address() + area_start_offset,
-                         page->size() - area_start_offset, access));
+    // TODO(ishell): use page->reserved_memory()->SetPermissions() once it's
+    // always initialized.
+    v8::PageAllocator* page_allocator =
+        page->IsFlagSet(Page::IS_EXECUTABLE)
+            ? heap()->memory_allocator()->code_page_allocator()
+            : heap()->memory_allocator()->data_page_allocator();
+    CHECK(SetPermissions(page_allocator, page->address() + area_start_offset,
+                         page->size() - area_start_offset, access));
   }
 }
......
@@ -1086,7 +1086,8 @@ class MemoryChunkValidator {
 // manages a range of virtual memory.
 class CodeRange {
  public:
-  CodeRange(Isolate* isolate, size_t requested_size);
+  CodeRange(Isolate* isolate, v8::PageAllocator* page_allocator,
+            size_t requested_size);
   ~CodeRange();
 
   bool valid() { return virtual_memory_.IsReserved(); }
@@ -1425,8 +1426,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
                              Executability executable, Space* space);
 
-  Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
-                               VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                 size_t alignment, Executability executable,
                                 void* hint, VirtualMemory* controller);
@@ -1478,6 +1477,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   // Guaranteed to be a valid pointer.
   v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
 
+  // Returns page allocator suitable for allocating pages with requested
+  // executability.
+  v8::PageAllocator* page_allocator(Executability executable) {
+    return executable == EXECUTABLE ? code_page_allocator_
+                                    : data_page_allocator_;
+  }
+
   CodeRange* code_range() { return code_range_; }
   Unmapper* unmapper() { return &unmapper_; }
@@ -2619,8 +2625,8 @@ class NewSpace : public SpaceWithLinearArea {
  public:
   typedef PageIterator iterator;
 
-  NewSpace(Heap* heap, size_t initial_semispace_capacity,
-           size_t max_semispace_capacity);
+  NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+           size_t initial_semispace_capacity, size_t max_semispace_capacity);
 
   ~NewSpace() override { TearDown(); }
......
@@ -31,15 +31,16 @@ StoreBuffer::StoreBuffer(Heap* heap)
 }
 
 void StoreBuffer::SetUp() {
+  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   const size_t requested_size = kStoreBufferSize * kStoreBuffers;
   // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
   // use a bit test to detect the ends of the buffers.
   const size_t alignment =
-      std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+      std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
   void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
   VirtualMemory reservation;
-  if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
-                                 &reservation)) {
+  if (!AlignedAllocVirtualMemory(page_allocator, requested_size, alignment,
+                                 hint, &reservation)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
......
@@ -804,12 +804,14 @@ void WasmCodeManager::AssignRanges(Address start, Address end,
 }
 
 void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
+  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   DCHECK_GT(size, 0);
-  size = RoundUp(size, AllocatePageSize());
-  if (hint == nullptr) hint = GetRandomMmapAddr();
-  if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
-                                 hint, ret)) {
+  size = RoundUp(size, page_allocator->AllocatePageSize());
+  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
+  if (!AlignedAllocVirtualMemory(page_allocator, size,
+                                 page_allocator->AllocatePageSize(), hint,
+                                 ret)) {
     DCHECK(!ret->IsReserved());
   }
   TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
......
@@ -206,7 +206,7 @@ TEST(CodeRange) {
   const size_t code_range_size = 32*MB;
   CcTest::InitializeVM();
   CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()),
-                       code_range_size);
+                       GetPlatformPageAllocator(), code_range_size);
   size_t current_allocated = 0;
   size_t total_allocated = 0;
   std::vector<Block> blocks;
......
@@ -133,7 +133,8 @@ TEST(Regress3540) {
   TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
   size_t code_range_size =
       kMinimumCodeRangeSize > 0 ? kMinimumCodeRangeSize : 3 * Page::kPageSize;
-  CodeRange* code_range = new CodeRange(isolate, code_range_size);
+  CodeRange* code_range = new CodeRange(
+      isolate, memory_allocator->code_page_allocator(), code_range_size);
   Address address;
   size_t size;
@@ -168,16 +169,19 @@ TEST(MemoryChunk) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
 
+  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
   size_t reserve_area_size = 1 * MB;
   size_t initial_commit_area_size;
 
   for (int i = 0; i < 100; i++) {
     initial_commit_area_size =
-        RoundUp(PseudorandomAreaSize(), CommitPageSize());
+        RoundUp(PseudorandomAreaSize(), page_allocator->CommitPageSize());
 
     // With CodeRange.
     const size_t code_range_size = 32 * MB;
-    CodeRange* code_range = new CodeRange(isolate, code_range_size);
+    CodeRange* code_range =
+        new CodeRange(isolate, page_allocator, code_range_size);
     VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                       initial_commit_area_size, EXECUTABLE, heap->code_space());
@@ -246,7 +250,8 @@ TEST(NewSpace) {
       new MemoryAllocator(isolate, heap->MaxReserved(), 0);
   TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
 
-  NewSpace new_space(heap, CcTest::heap()->InitialSemiSpaceSize(),
+  NewSpace new_space(heap, memory_allocator->data_page_allocator(),
+                     CcTest::heap()->InitialSemiSpaceSize(),
                      CcTest::heap()->InitialSemiSpaceSize());
   CHECK(new_space.MaximumCapacity());
......
@@ -143,7 +143,8 @@ TEST(AllocVirtualMemoryOOM) {
   CHECK(!platform.oom_callback_called);
   v8::internal::VirtualMemory result;
   bool success =
-      v8::internal::AllocVirtualMemory(GetHugeMemoryAmount(), nullptr, &result);
+      v8::internal::AllocVirtualMemory(v8::internal::GetPlatformPageAllocator(),
+                                       GetHugeMemoryAmount(), nullptr, &result);
   // On a few systems, allocation somehow succeeds.
   CHECK_IMPLIES(success, result.IsReserved());
   CHECK_IMPLIES(!success, !result.IsReserved() && platform.oom_callback_called);
@@ -154,8 +155,8 @@ TEST(AlignedAllocVirtualMemoryOOM) {
   CHECK(!platform.oom_callback_called);
   v8::internal::VirtualMemory result;
   bool success = v8::internal::AlignedAllocVirtualMemory(
-      GetHugeMemoryAmount(), v8::internal::AllocatePageSize(), nullptr,
-      &result);
+      v8::internal::GetPlatformPageAllocator(), GetHugeMemoryAmount(),
+      v8::internal::AllocatePageSize(), nullptr, &result);
   // On a few systems, allocation somehow succeeds.
   CHECK_IMPLIES(success, result.IsReserved());
   CHECK_IMPLIES(!success, !result.IsReserved() && platform.oom_callback_called);
......