Commit 16816e53 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Introduce BoundedPageAllocator and use it instead of CodeRange.

Bug: v8:8096
Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: If44c1a9a76c517fe329485d385f445b2be9f5ec2
Reviewed-on: https://chromium-review.googlesource.com/1213186
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55744}
parent 2e661ba1
@@ -2991,6 +2991,8 @@ v8_component("v8_libbase") {
     "src/base/base-export.h",
     "src/base/bits.cc",
     "src/base/bits.h",
+    "src/base/bounded-page-allocator.cc",
+    "src/base/bounded-page-allocator.h",
     "src/base/build_config.h",
     "src/base/compiler-specific.h",
     "src/base/cpu.cc",
...
@@ -161,7 +161,9 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
     if (!OnCriticalMemoryPressure(request_size)) break;
   }
 #if defined(LEAK_SANITIZER)
-  if (result != nullptr) {
+  if (result != nullptr && page_allocator == GetPlatformPageAllocator()) {
+    // Notify LSAN only about the platform memory allocations or we will
+    // "allocate"/"deallocate" certain parts of memory twice.
     __lsan_register_root_region(result, size);
   }
 #endif
@@ -174,7 +176,9 @@ bool FreePages(v8::PageAllocator* page_allocator, void* address,
   DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
   bool result = page_allocator->FreePages(address, size);
 #if defined(LEAK_SANITIZER)
-  if (result) {
+  if (result && page_allocator == GetPlatformPageAllocator()) {
+    // Notify LSAN only about the platform memory allocations or we will
+    // "allocate"/"deallocate" certain parts of memory twice.
     __lsan_unregister_root_region(address, size);
   }
 #endif
@@ -187,7 +191,9 @@ bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
   DCHECK_LT(new_size, size);
   bool result = page_allocator->ReleasePages(address, size, new_size);
 #if defined(LEAK_SANITIZER)
-  if (result) {
+  if (result && page_allocator == GetPlatformPageAllocator()) {
+    // Notify LSAN only about the platform memory allocations or we will
+    // "allocate"/"deallocate" certain parts of memory twice.
    __lsan_unregister_root_region(address, size);
    __lsan_register_root_region(address, new_size);
   }
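Why the extra page_allocator == GetPlatformPageAllocator() guard matters: the bounded allocator added below hands out sub-ranges of a reservation that the platform allocator has already reported to LSAN, so reporting those sub-ranges again would track the same bytes twice. A toy, self-contained sketch of that double-registration hazard (no real LSAN involved; every name below is illustrative, not V8 code):

// Toy root-region registry standing in for LSAN.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

class ToyRootRegionRegistry {
 public:
  // Returns false if any part of [start, start + size) is already tracked,
  // i.e. the double registration the guard above avoids.
  bool Register(uintptr_t start, size_t size) {
    for (const auto& r : regions_) {
      uintptr_t r_end = r.first + r.second;
      if (start < r_end && r.first < start + size) return false;  // overlap
    }
    regions_[start] = size;
    return true;
  }

 private:
  std::map<uintptr_t, size_t> regions_;
};

int main() {
  ToyRootRegionRegistry lsan;
  const uintptr_t kReservation = 0x10000000;        // made-up base address
  const size_t kReservationSize = 8 * 1024 * 1024;  // made-up size

  // Platform-allocator path: the whole reservation is registered once.
  assert(lsan.Register(kReservation, kReservationSize));

  // Bounded-allocator path: pages carved out of the reservation must stay
  // silent; registering them again would track the same memory twice.
  assert(!lsan.Register(kReservation + 0x1000, 0x1000));
  return 0;
}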
@@ -225,11 +231,12 @@ VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
     : page_allocator_(page_allocator), address_(kNullAddress), size_(0) {
   DCHECK_NOT_NULL(page_allocator);
   size_t page_size = page_allocator_->AllocatePageSize();
-  size_t alloc_size = RoundUp(size, page_size);
+  alignment = RoundUp(alignment, page_size);
+  size = RoundUp(size, page_size);
   address_ = reinterpret_cast<Address>(AllocatePages(
-      page_allocator_, hint, alloc_size, alignment, PageAllocator::kNoAccess));
+      page_allocator_, hint, size, alignment, PageAllocator::kNoAccess));
   if (address_ != kNullAddress) {
-    size_ = alloc_size;
+    size_ = size;
   }
 }
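For a sense of what the reworked constructor does with its arguments: both the requested size and the requested alignment are now rounded up to the allocator's allocate-page size, which is what lets the default alignment in allocation.h (further down) drop to 1. A standalone sketch with invented numbers, not V8 code:

// Standalone illustration of the two RoundUp calls in the constructor.
#include <cstddef>
#include <cstdio>

constexpr std::size_t RoundUpTo(std::size_t value, std::size_t multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

int main() {
  const std::size_t kAllocatePageSize = 64 * 1024;  // assumed allocate-page granule
  std::size_t size = 100 * 1024;                    // caller-requested reservation
  std::size_t alignment = 1;                        // new default from allocation.h

  alignment = RoundUpTo(alignment, kAllocatePageSize);  // -> 65536
  size = RoundUpTo(size, kAllocatePageSize);            // -> 131072

  std::printf("alignment=%zu size=%zu\n", alignment, size);
  return 0;
}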
@@ -260,12 +267,13 @@ size_t VirtualMemory::Release(Address free_start) {
   // Notice: Order is important here. The VirtualMemory object might live
   // inside the allocated region.
   const size_t free_size = size_ - (free_start - address_);
+  size_t old_size = size_;
   CHECK(InVM(free_start, free_size));
   DCHECK_LT(address_, free_start);
   DCHECK_LT(free_start, address_ + size_);
-  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(address_), size_,
-                     size_ - free_size));
   size_ -= free_size;
+  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(address_),
+                     old_size, size_));
   return free_size;
 }
...
@@ -158,10 +158,11 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
   VirtualMemory() = default;
   // Reserves virtual memory containing an area of the given size that is
-  // aligned per alignment. This may not be at the position returned by
-  // address().
+  // aligned per |alignment| rounded up to the |page_allocator|'s allocate page
+  // size.
+  // This may not be at the position returned by address().
   VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
-                size_t alignment = AllocatePageSize());
+                size_t alignment = 1);
   // Construct a virtual memory by assigning it some already mapped address
   // and size.
...
@@ -8703,11 +8703,10 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) {
 void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  if (isolate->heap()->memory_allocator()->code_range()->valid()) {
-    *start = reinterpret_cast<void*>(
-        isolate->heap()->memory_allocator()->code_range()->start());
-    *length_in_bytes =
-        isolate->heap()->memory_allocator()->code_range()->size();
+  i::MemoryAllocator* memory_allocator = isolate->heap()->memory_allocator();
+  if (memory_allocator->code_range_valid()) {
+    *start = reinterpret_cast<void*>(memory_allocator->code_range_start());
+    *length_in_bytes = memory_allocator->code_range_size();
   } else {
     *start = nullptr;
     *length_in_bytes = 0;
...
@@ -63,7 +63,7 @@ AssemblerOptions AssemblerOptions::Default(
   options.inline_offheap_trampolines = !serializer;
 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
   options.code_range_start =
-      isolate->heap()->memory_allocator()->code_range()->start();
+      isolate->heap()->memory_allocator()->code_range_start();
 #endif
   return options;
 }
...
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bounded-page-allocator.h"

namespace v8 {
namespace base {

BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
                                           Address start, size_t size,
                                           size_t allocate_page_size)
    : allocate_page_size_(allocate_page_size),
      commit_page_size_(page_allocator->CommitPageSize()),
      page_allocator_(page_allocator),
      region_allocator_(start, size, allocate_page_size_) {
  CHECK_NOT_NULL(page_allocator);
  CHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
  CHECK(IsAligned(allocate_page_size_, commit_page_size_));
}

void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
                                          size_t alignment,
                                          PageAllocator::Permission access) {
  LockGuard<Mutex> guard(&mutex_);
  CHECK(IsAligned(alignment, region_allocator_.page_size()));
  // Region allocator does not support alignments bigger than its own
  // allocation alignment.
  CHECK_LE(alignment, allocate_page_size_);
  // TODO(ishell): Consider using a randomized version here.
  Address address = region_allocator_.AllocateRegion(size);
  if (address == RegionAllocator::kAllocationFailure) {
    return nullptr;
  }
  CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
                                        access));
  return reinterpret_cast<void*>(address);
}

bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
  LockGuard<Mutex> guard(&mutex_);
  Address address = reinterpret_cast<Address>(raw_address);
  size_t freed_size = region_allocator_.FreeRegion(address);
  if (freed_size != size) return false;
  CHECK(page_allocator_->SetPermissions(raw_address, size,
                                        PageAllocator::kNoAccess));
  return true;
}

bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
                                        size_t new_size) {
  Address address = reinterpret_cast<Address>(raw_address);
#ifdef DEBUG
  {
    CHECK_LT(new_size, size);
    CHECK(IsAligned(size - new_size, commit_page_size_));
    // There must be an allocated region at the given |address| of a size not
    // smaller than |size|.
    LockGuard<Mutex> guard(&mutex_);
    size_t used_region_size = region_allocator_.CheckRegion(address);
    CHECK_LE(size, used_region_size);
  }
#endif
  // Keep the region in "used" state, just uncommit some pages.
  Address free_address = address + new_size;
  size_t free_size = size - new_size;
  return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
                                         free_size, PageAllocator::kNoAccess);
}

bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
                                          PageAllocator::Permission access) {
  DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));
  DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
  return page_allocator_->SetPermissions(address, size, access);
}

}  // namespace base
}  // namespace v8
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
#define V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_

#include "include/v8-platform.h"
#include "src/base/platform/mutex.h"
#include "src/base/region-allocator.h"

namespace v8 {
namespace base {

// This is a v8::PageAllocator implementation that allocates pages within a
// pre-reserved region of virtual address space. This class requires the
// virtual space to be kept reserved during the lifetime of this object.
// The main applications of the bounded page allocator are:
//  - V8 heap pointer compression, which requires the whole V8 heap to be
//    allocated within a contiguous range of virtual address space,
//  - executable page allocation, which allows using PC-relative 32-bit code
//    displacements on certain 64-bit platforms.
// The bounded page allocator delegates the actual page allocations to another
// page allocator instance.
// The implementation is thread-safe.
class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
 public:
  typedef uintptr_t Address;

  BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
                       size_t size, size_t allocate_page_size);
  ~BoundedPageAllocator() override = default;

  Address begin() const { return region_allocator_.begin(); }
  size_t size() const { return region_allocator_.size(); }

  // Returns true if the given address is in the range controlled by this
  // bounded page allocator instance.
  bool contains(Address address) const {
    return region_allocator_.contains(address);
  }

  size_t AllocatePageSize() override { return allocate_page_size_; }

  size_t CommitPageSize() override { return commit_page_size_; }

  void SetRandomMmapSeed(int64_t seed) override {
    page_allocator_->SetRandomMmapSeed(seed);
  }

  void* GetRandomMmapAddr() override {
    return page_allocator_->GetRandomMmapAddr();
  }

  void* AllocatePages(void* address, size_t size, size_t alignment,
                      PageAllocator::Permission access) override;

  bool FreePages(void* address, size_t size) override;

  bool ReleasePages(void* address, size_t size, size_t new_size) override;

  bool SetPermissions(void* address, size_t size,
                      PageAllocator::Permission access) override;

 private:
  v8::base::Mutex mutex_;
  const size_t allocate_page_size_;
  const size_t commit_page_size_;
  v8::PageAllocator* const page_allocator_;
  v8::base::RegionAllocator region_allocator_;

  DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator);
};

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
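A rough usage sketch of the new allocator, mirroring what the MemoryAllocator changes and the updated TEST(MemoryChunk) below do. The sizes are made up and the snippet leans on V8-internal helpers from this patch (GetPlatformPageAllocator(), VirtualMemory), so treat it as illustrative rather than copy-pasteable:

// Reserve a contiguous range with the platform allocator, then let a
// BoundedPageAllocator hand out pages only from inside that range.
v8::PageAllocator* platform = v8::internal::GetPlatformPageAllocator();
const size_t kPageSize = platform->AllocatePageSize();
const size_t kReservationSize = 32 * kPageSize;  // made-up reservation size

v8::internal::VirtualMemory reservation(platform, kReservationSize, nullptr);
CHECK(reservation.IsReserved());

v8::base::BoundedPageAllocator bounded(platform, reservation.address(),
                                       reservation.size(), kPageSize);

void* page = bounded.AllocatePages(nullptr, kPageSize, kPageSize,
                                   v8::PageAllocator::kReadWrite);
CHECK(bounded.contains(reinterpret_cast<uintptr_t>(page)));
CHECK(bounded.FreePages(page, kPageSize));
// |reservation| is released when it goes out of scope; the bounded allocator
// never owns the underlying virtual address space.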
@@ -61,6 +61,10 @@ class V8_BASE_EXPORT RegionAllocator final {
     return whole_region_.contains(address);
   }

+  bool contains(Address address, size_t size) const {
+    return whole_region_.contains(address, size);
+  }
+
   // Total size of not yet aquired regions.
   size_t free_size() const { return free_size_; }
@@ -84,6 +88,12 @@ class V8_BASE_EXPORT RegionAllocator final {
     return (address - begin()) < size();
   }

+  bool contains(Address address, size_t size) const {
+    STATIC_ASSERT(std::is_unsigned<Address>::value);
+    Address offset = address - begin();
+    return (offset < size_) && (offset <= size_ - size);
+  }
+
   bool is_used() const { return is_used_; }
   void set_is_used(bool used) { is_used_ = used; }
@@ -155,6 +165,7 @@ class V8_BASE_EXPORT RegionAllocator final {
   FRIEND_TEST(RegionAllocatorTest, AllocateRegionRandom);
   FRIEND_TEST(RegionAllocatorTest, Fragmentation);
   FRIEND_TEST(RegionAllocatorTest, FindRegion);
+  FRIEND_TEST(RegionAllocatorTest, Contains);

   DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
 };
...
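The two-argument Region::contains() compares offsets rather than end addresses on purpose: with an unsigned Address, address + size can wrap for regions touching the top of the address space, which is exactly what the {-447, 447} case in the new RegionAllocatorTest.Contains below exercises. A standalone sketch of the same check (not V8 code, values invented):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>

using Address = uintptr_t;

// Same shape as Region::contains(address, size): offset arithmetic only.
bool Contains(Address begin, size_t region_size, Address address, size_t size) {
  static_assert(std::is_unsigned<Address>::value, "offset math must not wrap");
  Address offset = address - begin;
  return (offset < region_size) && (offset <= region_size - size);
}

int main() {
  // A region ending exactly at the top of the address space.
  const size_t kRegionSize = 447;
  Address begin = static_cast<Address>(0) - kRegionSize;

  assert(Contains(begin, kRegionSize, begin, kRegionSize));       // whole region
  assert(Contains(begin, kRegionSize, begin, 1));                 // first byte
  assert(!Contains(begin, kRegionSize, begin - 17, 17));          // before start
  assert(!Contains(begin, kRegionSize, begin + kRegionSize, 1));  // past the end

  // A naive "address + size <= begin + region_size" check would misfire here:
  // begin + 447 wraps to 0, so even the first-byte query above would be
  // rejected when its (huge) end address is compared against 0.
  return 0;
}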
@@ -49,10 +49,10 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
     return options;
   }

-  CodeRange* code_range = isolate->heap()->memory_allocator()->code_range();
   bool pc_relative_calls_fit_in_code_range =
-      code_range->valid() &&
-      code_range->size() <= kMaxPCRelativeCodeRangeInMB * MB;
+      isolate->heap()->memory_allocator()->code_range_valid() &&
+      isolate->heap()->memory_allocator()->code_range_size() <=
+          kMaxPCRelativeCodeRangeInMB * MB;

   options.isolate_independent_code = true;
   options.use_pc_relative_calls_and_jumps = pc_relative_calls_fit_in_code_range;
...
@@ -63,8 +63,8 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
                     bool is_turbofanned, int stack_slots,
                     int safepoint_table_offset, int handler_table_offset) {
   DCHECK(IsAligned(code->address(), kCodeAlignment));
-  DCHECK(!heap->memory_allocator()->code_range()->valid() ||
-         heap->memory_allocator()->code_range()->contains(code->address()) ||
+  DCHECK(!heap->memory_allocator()->code_range_valid() ||
+         heap->memory_allocator()->code_range_contains(code->address()) ||
          object_size <= heap->code_space()->AreaSize());
   bool has_unwinding_info = desc.unwinding_info != nullptr;
@@ -2663,8 +2663,8 @@ Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
   heap->ZapCodeObject(result->address(), size);
   result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
   DCHECK(IsAligned(result->address(), kCodeAlignment));
-  DCHECK(!heap->memory_allocator()->code_range()->valid() ||
-         heap->memory_allocator()->code_range()->contains(result->address()) ||
+  DCHECK(!heap->memory_allocator()->code_range_valid() ||
+         heap->memory_allocator()->code_range_contains(result->address()) ||
          static_cast<int>(size) <= heap->code_space()->AreaSize());
   return handle(Code::cast(result), isolate());
 }
@@ -2727,9 +2727,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
   if (FLAG_verify_heap) new_code->ObjectVerify(isolate());
 #endif
   DCHECK(IsAligned(new_code->address(), kCodeAlignment));
-  DCHECK(
-      !heap->memory_allocator()->code_range()->valid() ||
-      heap->memory_allocator()->code_range()->contains(new_code->address()) ||
+  DCHECK(!heap->memory_allocator()->code_range_valid() ||
+         heap->memory_allocator()->code_range_contains(new_code->address()) ||
          obj_size <= heap->code_space()->AreaSize());
   return new_code;
 }
...
@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_SPACES_INL_H_
 #define V8_HEAP_SPACES_INL_H_

+#include "src/base/bounded-page-allocator.h"
 #include "src/base/v8-fallthrough.h"
 #include "src/heap/incremental-marking.h"
 #include "src/heap/spaces.h"
@@ -501,6 +502,28 @@ bool LocalAllocationBuffer::TryFreeLast(HeapObject* object, int object_size) {
   return false;
 }

+// -----------------------------------------------------------------------------
+// MemoryAllocator
+
+bool MemoryAllocator::code_range_valid() const {
+  return code_page_allocator_instance_.get() != nullptr;
+}
+
+Address MemoryAllocator::code_range_start() const {
+  DCHECK(code_range_valid());
+  return code_page_allocator_instance_->begin();
+}
+
+size_t MemoryAllocator::code_range_size() const {
+  DCHECK(code_range_valid());
+  return code_page_allocator_instance_->size();
+}
+
+bool MemoryAllocator::code_range_contains(Address address) const {
+  DCHECK(code_range_valid());
+  return code_page_allocator_instance_->contains(address);
+}
+
 }  // namespace internal
 }  // namespace v8
...
This diff is collapsed.
@@ -14,6 +14,7 @@
 #include "src/allocation.h"
 #include "src/base/atomic-utils.h"
+#include "src/base/bounded-page-allocator.h"
 #include "src/base/iterator.h"
 #include "src/base/list.h"
 #include "src/base/platform/mutex.h"
@@ -32,7 +33,7 @@ namespace internal {
 namespace heap {
 class HeapTester;
-class TestCodeRangeScope;
+class TestCodePageAllocatorScope;
 }  // namespace heap

 class AllocationObserver;
@@ -1077,95 +1078,6 @@ class MemoryChunkValidator {
 };

-// ----------------------------------------------------------------------------
-// All heap objects containing executable code (code objects) must be allocated
-// from a 2 GB range of memory, so that they can call each other using 32-bit
-// displacements. This happens automatically on 32-bit platforms, where 32-bit
-// displacements cover the entire 4GB virtual address space. On 64-bit
-// platforms, we support this using the CodeRange object, which reserves and
-// manages a range of virtual memory.
-class CodeRange {
- public:
-  CodeRange(Isolate* isolate, v8::PageAllocator* page_allocator,
-            size_t requested_size);
-  ~CodeRange();
-
-  bool valid() { return virtual_memory_.IsReserved(); }
-  Address start() {
-    DCHECK(valid());
-    return virtual_memory_.address();
-  }
-  size_t size() {
-    DCHECK(valid());
-    return virtual_memory_.size();
-  }
-  bool contains(Address address) {
-    if (!valid()) return false;
-    Address start = virtual_memory_.address();
-    return start <= address && address < start + virtual_memory_.size();
-  }
-
-  // Allocates a chunk of memory from the large-object portion of
-  // the code range. On platforms with no separate code range, should
-  // not be called.
-  V8_WARN_UNUSED_RESULT Address AllocateRawMemory(const size_t requested_size,
-                                                  const size_t commit_size,
-                                                  size_t* allocated);
-  void FreeRawMemory(Address buf, size_t length);
-
- private:
-  class FreeBlock {
-   public:
-    FreeBlock() : start(0), size(0) {}
-    FreeBlock(Address start_arg, size_t size_arg)
-        : start(start_arg), size(size_arg) {
-      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
-      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
-    }
-    FreeBlock(void* start_arg, size_t size_arg)
-        : start(reinterpret_cast<Address>(start_arg)), size(size_arg) {
-      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
-      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
-    }
-
-    Address start;
-    size_t size;
-  };
-
-  // Finds a block on the allocation list that contains at least the
-  // requested amount of memory. If none is found, sorts and merges
-  // the existing free memory blocks, and searches again.
-  // If none can be found, returns false.
-  bool GetNextAllocationBlock(size_t requested);
-  // Compares the start addresses of two free blocks.
-  static bool CompareFreeBlockAddress(const FreeBlock& left,
-                                      const FreeBlock& right);
-  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
-  void ReleaseBlock(const FreeBlock* block);
-
-  Isolate* isolate_;
-
-  // The reserved range of virtual memory that all code objects are put in.
-  VirtualMemory virtual_memory_;
-
-  // The global mutex guards free_list_ and allocation_list_ as GC threads may
-  // access both lists concurrently to the main thread.
-  base::Mutex code_range_mutex_;
-
-  // Freed blocks of memory are added to the free list. When the allocation
-  // list is exhausted, the free list is sorted and merged to make the new
-  // allocation list.
-  std::vector<FreeBlock> free_list_;
-
-  // Memory is allocated from the free blocks on the allocation list.
-  // The block at current_allocation_block_index_ is the current block.
-  std::vector<FreeBlock> allocation_list_;
-  size_t current_allocation_block_index_;
-  size_t requested_code_range_size_;
-
-  DISALLOW_COPY_AND_ASSIGN(CodeRange);
-};
-
 // The process-wide singleton that keeps track of code range regions with the
 // intention to reuse free code range regions as a workaround for CFG memory
 // leaks (see crbug.com/870054).
@@ -1484,10 +1396,17 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
                : data_page_allocator_;
   }

-  CodeRange* code_range() { return code_range_; }
+  V8_INLINE bool code_range_valid() const;
+  V8_INLINE Address code_range_start() const;
+  V8_INLINE size_t code_range_size() const;
+  V8_INLINE bool code_range_contains(Address address) const;
+
   Unmapper* unmapper() { return &unmapper_; }

  private:
+  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
+                                   size_t requested);
+
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.
   void PreFreeMemory(MemoryChunk* chunk);
@@ -1536,10 +1455,30 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   Isolate* isolate_;

+  // This object controls the virtual space reserved for the V8 heap instance.
+  // Depending on the configuration it may contain the following:
+  // - no reservation (on 32-bit architectures)
+  // - code range reservation used by the bounded code page allocator (on
+  //   64-bit architectures without pointer compression in the V8 heap)
+  // - data + code range reservation (on 64-bit architectures with pointer
+  //   compression in the V8 heap)
+  VirtualMemory heap_reservation_;
+
+  // Page allocator used for allocating data pages. Depending on the
+  // configuration it may be a page allocator instance provided by v8::Platform
+  // or a BoundedPageAllocator (when pointer compression is enabled).
   v8::PageAllocator* data_page_allocator_;

+  // Page allocator used for allocating code pages. Depending on the
+  // configuration it may be a page allocator instance provided by v8::Platform
+  // or a BoundedPageAllocator (when pointer compression is enabled or on
+  // those 64-bit architectures where pc-relative 32-bit displacements can be
+  // used for call and jump instructions).
   v8::PageAllocator* code_page_allocator_;

-  CodeRange* code_range_;
+  // This unique pointer owns the instance of the bounded code allocator
+  // that controls executable page allocation.
+  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

   // Maximum space size in bytes.
   size_t capacity_;
@@ -1563,7 +1502,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   // Data structure to remember allocated executable memory chunks.
   std::unordered_set<MemoryChunk*> executable_memory_;

-  friend class heap::TestCodeRangeScope;
+  friend class heap::TestCodePageAllocatorScope;

   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
...
@@ -205,8 +205,8 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
   // Check whether we interrupted setup/teardown of a stack frame in JS code.
   // Avoid this check for C++ code, as that would trigger false positives.
-  if (regs->pc &&
-      isolate->heap()->memory_allocator()->code_range()->contains(
+  if (regs->pc && isolate->heap()->memory_allocator()->code_range_valid() &&
+      isolate->heap()->memory_allocator()->code_range_contains(
           reinterpret_cast<i::Address>(regs->pc)) &&
       IsNoFrameRegion(reinterpret_cast<i::Address>(regs->pc))) {
     // The frame is not setup, so it'd be hard to iterate the stack. Bailout.
...
@@ -172,80 +172,6 @@ TEST(StressJS) {
   env->Exit();
 }

-// CodeRange test.
-// Tests memory management in a CodeRange by allocating and freeing blocks,
-// using a pseudorandom generator to choose block sizes geometrically
-// distributed between 2 * Page::kPageSize and 2^5 + 1 * Page::kPageSize.
-// Ensure that the freed chunks are collected and reused by allocating (in
-// total) more than the size of the CodeRange.
-
-// This pseudorandom generator does not need to be particularly good.
-// Use the lower half of the V8::Random() generator.
-unsigned int Pseudorandom() {
-  static uint32_t lo = 2345;
-  lo = 18273 * (lo & 0xFFFF) + (lo >> 16);  // Provably not 0.
-  return lo & 0xFFFF;
-}
-
-namespace {
-
-// Plain old data class. Represents a block of allocated memory.
-class Block {
- public:
-  Block(Address base_arg, int size_arg)
-      : base(base_arg), size(size_arg) {}
-
-  Address base;
-  int size;
-};
-
-}  // namespace
-
-TEST(CodeRange) {
-  const size_t code_range_size = 32*MB;
-  CcTest::InitializeVM();
-  CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()),
-                       GetPlatformPageAllocator(), code_range_size);
-  size_t current_allocated = 0;
-  size_t total_allocated = 0;
-  std::vector<Block> blocks;
-  blocks.reserve(1000);
-
-  while (total_allocated < 5 * code_range_size) {
-    if (current_allocated < code_range_size / 10) {
-      // Allocate a block.
-      // Geometrically distributed sizes, greater than
-      // kMaxRegularHeapObjectSize (which is greater than code page area).
-      // TODO(gc): instead of using 3 use some contant based on code_range_size
-      // kMaxRegularHeapObjectSize.
-      size_t requested = (kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
-                         Pseudorandom() % 5000 + 1;
-      requested = RoundUp(requested, MemoryAllocator::GetCommitPageSize());
-      size_t allocated = 0;
-
-      // The request size has to be at least 2 code guard pages larger than the
-      // actual commit size.
-      Address base = code_range.AllocateRawMemory(
-          requested, requested - (2 * MemoryAllocator::CodePageGuardSize()),
-          &allocated);
-      CHECK_NE(base, kNullAddress);
-      blocks.emplace_back(base, static_cast<int>(allocated));
-      current_allocated += static_cast<int>(allocated);
-      total_allocated += static_cast<int>(allocated);
-    } else {
-      // Free a block.
-      size_t index = Pseudorandom() % blocks.size();
-      code_range.FreeRawMemory(blocks[index].base, blocks[index].size);
-      current_allocated -= blocks[index].size;
-      if (index < blocks.size() - 1) {
-        blocks[index] = blocks.back();
-      }
-      blocks.pop_back();
-    }
-  }
-}
-
 }  // namespace heap
 }  // namespace internal
 }  // namespace v8
@@ -27,6 +27,7 @@
 #include <stdlib.h>

+#include "src/base/bounded-page-allocator.h"
 #include "src/base/platform/platform.h"
 #include "src/heap/factory.h"
 #include "src/heap/spaces-inl.h"
@@ -59,41 +60,43 @@ class TestMemoryAllocatorScope {
   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
 };

-// Temporarily sets a given code range in an isolate.
-class TestCodeRangeScope {
+// Temporarily sets a given code page allocator in an isolate.
+class TestCodePageAllocatorScope {
  public:
-  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
+  TestCodePageAllocatorScope(Isolate* isolate,
+                             v8::PageAllocator* code_page_allocator)
       : isolate_(isolate),
-        old_code_range_(isolate->heap()->memory_allocator()->code_range()) {
-    isolate->heap()->memory_allocator()->code_range_ = code_range;
+        old_code_page_allocator_(
+            isolate->heap()->memory_allocator()->code_page_allocator()) {
+    isolate->heap()->memory_allocator()->code_page_allocator_ =
+        code_page_allocator;
   }

-  ~TestCodeRangeScope() {
-    isolate_->heap()->memory_allocator()->code_range_ = old_code_range_;
+  ~TestCodePageAllocatorScope() {
+    isolate_->heap()->memory_allocator()->code_page_allocator_ =
+        old_code_page_allocator_;
   }

  private:
   Isolate* isolate_;
-  CodeRange* old_code_range_;
+  v8::PageAllocator* old_code_page_allocator_;

-  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
+  DISALLOW_COPY_AND_ASSIGN(TestCodePageAllocatorScope);
 };

 static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
-                              CodeRange* code_range, size_t reserve_area_size,
-                              size_t commit_area_size, Executability executable,
-                              Space* space) {
+                              v8::PageAllocator* code_page_allocator,
+                              size_t reserve_area_size, size_t commit_area_size,
+                              Executability executable, Space* space) {
   MemoryAllocator* memory_allocator =
       new MemoryAllocator(isolate, heap->MaxReserved(), 0);
   {
     TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
-    TestCodeRangeScope test_code_range_scope(isolate, code_range);
+    TestCodePageAllocatorScope test_code_page_allocator_scope(
+        isolate, code_page_allocator);

-    v8::PageAllocator* data_page_allocator =
-        memory_allocator->data_page_allocator();
-    v8::PageAllocator* code_page_allocator =
-        memory_allocator->code_page_allocator();
+    v8::PageAllocator* page_allocator =
+        memory_allocator->page_allocator(executable);

     size_t header_size = (executable == EXECUTABLE)
                              ? MemoryAllocator::CodePageGuardStartOffset()
@@ -103,15 +106,12 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
     MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
         reserve_area_size, commit_area_size, executable, space);
-    size_t alignment = code_range != nullptr && code_range->valid()
-                           ? MemoryChunk::kAlignment
-                           : code_page_allocator->CommitPageSize();
     size_t reserved_size =
         ((executable == EXECUTABLE))
             ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
-                      alignment)
+                      page_allocator->CommitPageSize())
             : RoundUp(header_size + reserve_area_size,
-                      data_page_allocator->CommitPageSize());
+                      page_allocator->CommitPageSize());
     CHECK(memory_chunk->size() == reserved_size);
     CHECK(memory_chunk->area_start() <
           memory_chunk->address() + memory_chunk->size());
@@ -125,39 +125,6 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
   delete memory_allocator;
 }

-TEST(Regress3540) {
-  Isolate* isolate = CcTest::i_isolate();
-  Heap* heap = isolate->heap();
-  MemoryAllocator* memory_allocator =
-      new MemoryAllocator(isolate, heap->MaxReserved(), 0);
-  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
-  size_t code_range_size =
-      kMinimumCodeRangeSize > 0 ? kMinimumCodeRangeSize : 3 * Page::kPageSize;
-  CodeRange* code_range = new CodeRange(
-      isolate, memory_allocator->code_page_allocator(), code_range_size);
-
-  Address address;
-  size_t size;
-  size_t request_size = code_range_size - Page::kPageSize;
-  address = code_range->AllocateRawMemory(
-      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
-      &size);
-  CHECK_NE(address, kNullAddress);
-
-  Address null_address;
-  size_t null_size;
-  request_size = code_range_size - Page::kPageSize;
-  null_address = code_range->AllocateRawMemory(
-      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
-      &null_size);
-  CHECK_EQ(null_address, kNullAddress);
-
-  code_range->FreeRawMemory(address, size);
-  delete code_range;
-  memory_allocator->TearDown();
-  delete memory_allocator;
-}
-
 static unsigned int PseudorandomAreaSize() {
   static uint32_t lo = 2345;
   lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
@@ -180,16 +147,21 @@ TEST(MemoryChunk) {
     // With CodeRange.
     const size_t code_range_size = 32 * MB;
-    CodeRange* code_range =
-        new CodeRange(isolate, page_allocator, code_range_size);
+    VirtualMemory code_range_reservation;
+    CHECK(AlignedAllocVirtualMemory(page_allocator, code_range_size,
+                                    MemoryChunk::kAlignment, nullptr,
+                                    &code_range_reservation));
+
+    base::BoundedPageAllocator code_page_allocator(
+        page_allocator, code_range_reservation.address(),
+        code_range_reservation.size(), MemoryChunk::kAlignment);

-    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
+    VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
                       initial_commit_area_size, EXECUTABLE, heap->code_space());

-    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
+    VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
                       initial_commit_area_size, NOT_EXECUTABLE,
                       heap->old_space());
-    delete code_range;
   }
 }
...
@@ -322,5 +322,56 @@ TEST(RegionAllocatorTest, FindRegion) {
   }
 }

+TEST(RegionAllocatorTest, Contains) {
+  using Region = RegionAllocator::Region;
+
+  struct {
+    Address start;
+    size_t size;
+  } test_cases[] = {{153, 771}, {0, 227}, {-447, 447}};
+
+  for (size_t i = 0; i < arraysize(test_cases); i++) {
+    Address start = test_cases[i].start;
+    size_t size = test_cases[i].size;
+    Address end = start + size;  // exclusive
+
+    Region region(start, size, true);
+
+    // Test single-argument contains().
+    CHECK(!region.contains(start - 1041));
+    CHECK(!region.contains(start - 1));
+    CHECK(!region.contains(end));
+    CHECK(!region.contains(end + 1));
+    CHECK(!region.contains(end + 113));
+
+    CHECK(region.contains(start));
+    CHECK(region.contains(start + 1));
+    CHECK(region.contains(start + size / 2));
+    CHECK(region.contains(end - 1));
+
+    // Test two-argument contains().
+    CHECK(!region.contains(start - 17, 17));
+    CHECK(!region.contains(start - 17, size * 2));
+    CHECK(!region.contains(end, 1));
+    CHECK(!region.contains(end, static_cast<size_t>(0 - end)));
+
+    CHECK(region.contains(start, size));
+    CHECK(region.contains(start, 10));
+    CHECK(region.contains(start + 11, 120));
+    CHECK(region.contains(end - 13, 13));
+    CHECK(!region.contains(end, 0));
+
+    // Zero-size queries.
+    CHECK(!region.contains(start - 10, 0));
+    CHECK(!region.contains(start - 1, 0));
+    CHECK(!region.contains(end, 0));
+    CHECK(!region.contains(end + 10, 0));
+
+    CHECK(region.contains(start, 0));
+    CHECK(region.contains(start + 10, 0));
+    CHECK(region.contains(end - 1, 0));
+  }
+}
+
 }  // namespace base
 }  // namespace v8