Commit 92da5a47 authored by Bill Budge, committed by Commit Bot

[Memory] Don't heap allocate instances of base::VirtualMemory.

- Changes some instance fields from base::VirtualMemory pointers to by-value base::VirtualMemory instances.
- Changes some comments so they correctly describe the code.

Bug: v8:6635
Change-Id: I9ec93ef0b09d541c966caa6482c5832cd6b1e149
Reviewed-on: https://chromium-review.googlesource.com/584931
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46880}
parent 57031e82
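The change applies one pattern throughout: a base::VirtualMemory that was held by pointer and allocated with new becomes a by-value member, and setup code reserves memory into a local, checks it, and then moves ownership into the member with TakeControl. A minimal sketch of the before/after shape, using a hypothetical Owner class and assuming only the base::VirtualMemory API visible in this diff (a (size, hint) constructor, IsReserved(), TakeControl(), Release(), and a default constructor that controls no memory):

#include <cstddef>
#include "src/base/platform/platform.h"  // assumed header for base::VirtualMemory

// Before: the reservation lives on the heap behind a raw pointer.
class OwnerBefore {
 public:
  OwnerBefore() : backing_store_(nullptr) {}
  void SetUp(size_t size, void* hint) {
    backing_store_ = new v8::base::VirtualMemory(size, hint);
  }
  void TearDown() { delete backing_store_; }

 private:
  v8::base::VirtualMemory* backing_store_;
};

// After: the reservation is a by-value member; ownership is moved in only
// once the local reservation is known to be good.
class OwnerAfter {
 public:
  bool SetUp(size_t size, void* hint) {
    v8::base::VirtualMemory reservation(size, hint);
    if (!reservation.IsReserved()) return false;  // nothing to clean up
    backing_store_.TakeControl(&reservation);     // member now owns the memory
    return true;
  }
  void TearDown() {
    if (backing_store_.IsReserved()) backing_store_.Release();
  }

 private:
  v8::base::VirtualMemory backing_store_;  // empty until SetUp succeeds
};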
@@ -285,11 +285,10 @@ class V8_BASE_EXPORT OS {
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
// Represents and controls an area of reserved memory.
// Control of the reserved memory can be assigned to another VirtualMemory
// object by assignment or copy-contructing. This removes the reserved memory
// from the original object.
// object by calling TakeControl. This removes the reserved memory from the
// 'from' instance.
class V8_BASE_EXPORT VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
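The rewritten comment describes an ownership transfer: after TakeControl, the 'from' instance no longer controls the reservation. A short usage sketch of that semantics, relying only on what the comment and the call sites below state:

void TakeControlExample() {
  v8::base::VirtualMemory from(4096, nullptr);  // 'from' controls a reservation
  v8::base::VirtualMemory to;                   // empty, controls no memory
  if (from.IsReserved()) {
    to.TakeControl(&from);
    // Per the comment above, 'from' controls nothing now; the reservation is
    // released through 'to'.
    if (to.IsReserved()) to.Release();
  }
}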
@@ -13,15 +13,17 @@ namespace v8 {
namespace internal {
void SequentialMarkingDeque::SetUp() {
backing_store_ =
new base::VirtualMemory(kMaxSize, heap_->GetRandomMmapAddr());
backing_store_committed_size_ = 0;
if (backing_store_ == nullptr) {
base::VirtualMemory reservation(kMaxSize, heap_->GetRandomMmapAddr());
if (!reservation.IsReserved()) {
V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
}
backing_store_committed_size_ = 0;
backing_store_.TakeControl(&reservation);
}
void SequentialMarkingDeque::TearDown() { delete backing_store_; }
void SequentialMarkingDeque::TearDown() {
if (backing_store_.IsReserved()) backing_store_.Release();
}
void SequentialMarkingDeque::StartUsing() {
base::LockGuard<base::Mutex> guard(&mutex_);
@@ -32,7 +34,7 @@ void SequentialMarkingDeque::StartUsing() {
}
in_use_ = true;
EnsureCommitted();
array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
array_ = reinterpret_cast<HeapObject**>(backing_store_.address());
size_t size = FLAG_force_marking_deque_overflows
? 64 * kPointerSize
: backing_store_committed_size_;
@@ -64,8 +66,8 @@ void SequentialMarkingDeque::Clear() {
void SequentialMarkingDeque::Uncommit() {
DCHECK(!in_use_);
bool success = backing_store_->Uncommit(backing_store_->address(),
backing_store_committed_size_);
bool success = backing_store_.Uncommit(backing_store_.address(),
backing_store_committed_size_);
backing_store_committed_size_ = 0;
CHECK(success);
}
@@ -75,7 +77,7 @@ void SequentialMarkingDeque::EnsureCommitted() {
if (backing_store_committed_size_ > 0) return;
for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
if (backing_store_->Commit(backing_store_->address(), size, false)) {
if (backing_store_.Commit(backing_store_.address(), size, false)) {
backing_store_committed_size_ = size;
break;
}
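EnsureCommitted above tries to commit the largest size it can and halves on failure; the same back-off idea as a free-standing sketch (the helper name and the max/min parameters are illustrative, not part of this CL):

// Commit as much of a reservation as the OS will allow, halving on failure.
// Returns the committed size, or 0 if even 'min' could not be committed.
size_t CommitWithBackoff(v8::base::VirtualMemory* reservation, size_t max,
                         size_t min) {
  for (size_t size = max; size >= min; size /= 2) {
    if (reservation->Commit(reservation->address(), size, false)) {  // not executable
      return size;
    }
  }
  return 0;
}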
@@ -23,8 +23,7 @@ class HeapObject;
class SequentialMarkingDeque {
public:
explicit SequentialMarkingDeque(Heap* heap)
: backing_store_(nullptr),
backing_store_committed_size_(0),
: backing_store_committed_size_(0),
array_(nullptr),
top_(0),
bottom_(0),
@@ -132,7 +131,7 @@ class SequentialMarkingDeque {
base::Mutex mutex_;
base::VirtualMemory* backing_store_;
base::VirtualMemory backing_store_;
size_t backing_store_committed_size_;
HeapObject** array_;
// array_[(top - 1) & mask_] is the top element in the deque. The Deque is
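The trailing comment describes mask-based ring-buffer indexing: with a power-of-two capacity and mask_ == capacity - 1, positions wrap by masking rather than by a modulo, so array_[(top - 1) & mask_] is the most recently pushed element. A tiny illustration with an arbitrary capacity:

#include <cstddef>

int main() {
  const size_t capacity = 8;  // illustrative power of two
  const size_t mask = capacity - 1;
  size_t top = 0;
  for (int i = 0; i < 10; i++) top = (top + 1) & mask;  // push 10 times, wrapping
  size_t top_index = (top - 1) & mask;  // index of the top element, as above
  return top_index == 1 ? 0 : 1;        // 10 pushes wrap around to top == 2
}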
@@ -88,17 +88,14 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
// -----------------------------------------------------------------------------
// CodeRange
CodeRange::CodeRange(Isolate* isolate)
: isolate_(isolate),
code_range_(NULL),
free_list_(0),
allocation_list_(0),
current_allocation_block_index_(0) {}
bool CodeRange::SetUp(size_t requested) {
DCHECK(code_range_ == NULL);
DCHECK(!virtual_memory_.IsReserved());
if (requested == 0) {
// When a target requires the code range feature, we put all code objects
@@ -122,38 +119,31 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
code_range_ = new base::VirtualMemory(
base::VirtualMemory reservation(
requested,
Max(kCodeRangeAreaAlignment,
static_cast<size_t>(base::OS::AllocateAlignment())),
base::OS::GetRandomMmapAddr());
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
code_range_ = NULL;
return false;
}
if (!reservation.IsReserved()) return false;
// We are sure that we have mapped a block of requested addresses.
DCHECK(code_range_->size() == requested);
Address base = reinterpret_cast<Address>(code_range_->address());
DCHECK(reservation.size() == requested);
Address base = reinterpret_cast<Address>(reservation.address());
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space.
if (reserved_area > 0) {
if (!code_range_->Commit(base, reserved_area, true)) {
delete code_range_;
code_range_ = NULL;
return false;
}
if (!reservation.Commit(base, reserved_area, true)) return false;
base += reserved_area;
}
Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
size_t size = reservation.size() - (aligned_base - base) - reserved_area;
allocation_list_.Add(FreeBlock(aligned_base, size));
current_allocation_block_index_ = 0;
LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
LOG(isolate_, NewEvent("CodeRange", reservation.address(), requested));
virtual_memory_.TakeControl(&reservation);
return true;
}
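What makes the simplified early 'return false' paths in SetUp safe is that 'reservation' is now a local: if SetUp bails out before the final TakeControl, the still-owned reservation is cleaned up when the local is destroyed (assuming, as the old 'delete code_range_' cleanup implies, that a VirtualMemory that still controls memory releases it on destruction), so no manual delete/NULL bookkeeping is needed. A condensed sketch of that control flow, with hypothetical names and a simplified commit step:

bool SetUpSketch(size_t requested, v8::base::VirtualMemory* member) {
  v8::base::VirtualMemory reservation(requested, nullptr);
  if (!reservation.IsReserved()) return false;  // nothing was mapped
  if (!reservation.Commit(reservation.address(), requested, true)) {
    return false;  // the local still owns the mapping and cleans it up here
  }
  member->TakeControl(&reservation);  // success: the member owns it from now on
  return true;
}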
@@ -224,7 +214,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
DCHECK(*allocated <= current.size);
DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
code_range_, current.start, commit_size, *allocated)) {
&virtual_memory_, current.start, commit_size, *allocated)) {
*allocated = 0;
ReleaseBlock(&current);
return NULL;
@@ -240,7 +230,7 @@ bool CodeRange::CommitRawMemory(Address start, size_t length) {
bool CodeRange::UncommitRawMemory(Address start, size_t length) {
return code_range_->Uncommit(start, length);
return virtual_memory_.Uncommit(start, length);
}
@@ -248,13 +238,12 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.Add(FreeBlock(address, length));
code_range_->Uncommit(address, length);
virtual_memory_.Uncommit(address, length);
}
void CodeRange::TearDown() {
delete code_range_; // Frees all memory in the virtual memory range.
code_range_ = NULL;
if (virtual_memory_.IsReserved()) virtual_memory_.Release();
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.Free();
allocation_list_.Free();
@@ -1037,19 +1037,19 @@ class CodeRange {
// Returns false on failure.
bool SetUp(size_t requested_size);
bool valid() { return code_range_ != NULL; }
bool valid() { return virtual_memory_.IsReserved(); }
Address start() {
DCHECK(valid());
return static_cast<Address>(code_range_->address());
return static_cast<Address>(virtual_memory_.address());
}
size_t size() {
DCHECK(valid());
return code_range_->size();
return virtual_memory_.size();
}
bool contains(Address address) {
if (!valid()) return false;
Address start = static_cast<Address>(code_range_->address());
return start <= address && address < start + code_range_->size();
Address start = static_cast<Address>(virtual_memory_.address());
return start <= address && address < start + virtual_memory_.size();
}
// Allocates a chunk of memory from the large-object portion of
@@ -1099,7 +1099,7 @@ class CodeRange {
Isolate* isolate_;
// The reserved range of virtual memory that all code objects are put in.
base::VirtualMemory* code_range_;
base::VirtualMemory virtual_memory_;
// The global mutex guards free_list_ and allocation_list_ as GC threads may
// access both lists concurrently to the main thread.
@@ -16,11 +16,7 @@ namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
: heap_(heap),
top_(nullptr),
current_(0),
mode_(NOT_IN_GC),
virtual_memory_(nullptr) {
: heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
limit_[i] = nullptr;
@@ -35,10 +31,9 @@ void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
virtual_memory_ =
new base::VirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr());
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());
base::VirtualMemory reservation(kStoreBufferSize * 3,
heap_->GetRandomMmapAddr());
uintptr_t start_as_int = reinterpret_cast<uintptr_t>(reservation.address());
start_[0] =
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
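A small worked example of the over-reserve-and-align trick the comment above describes: reserving 3x the buffer size guarantees that, after rounding the start up to a buffer-size boundary, two full aligned buffers still fit, and the aligned limits are what make the bit-test end check possible. The buffer size and start address below are illustrative; the real kStoreBufferSize is defined in store-buffer.h and is not shown in this diff.

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kBufferSize = uintptr_t{1} << 16;  // illustrative: 64 KB
  const uintptr_t reservation_start = 0x12345678;    // pretend reservation base
  const uintptr_t reservation_end = reservation_start + 3 * kBufferSize;
  // RoundUp(start, kBufferSize): loses at most kBufferSize - 1 bytes.
  uintptr_t start0 = (reservation_start + kBufferSize - 1) & ~(kBufferSize - 1);
  uintptr_t start1 = start0 + kBufferSize;
  bool fits = start1 + kBufferSize <= reservation_end;
  std::printf("buffer0 at %#lx, buffer1 at %#lx, both fit: %s\n",
              static_cast<unsigned long>(start0),
              static_cast<unsigned long>(start1), fits ? "yes" : "no");
  // Each limit (start + kBufferSize) lands on a kBufferSize boundary, so its
  // low bits are zero, which is the property the DCHECKs below verify and
  // what the "bit test to detect the end" relies on.
  return fits ? 0 : 1;
}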
@@ -46,30 +41,30 @@ void StoreBuffer::SetUp() {
limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
Address* vm_limit = reinterpret_cast<Address*>(
reinterpret_cast<char*>(virtual_memory_->address()) +
virtual_memory_->size());
reinterpret_cast<char*>(reservation.address()) + reservation.size());
USE(vm_limit);
for (int i = 0; i < kStoreBuffers; i++) {
DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
DCHECK(start_[i] <= vm_limit);
DCHECK(limit_[i] <= vm_limit);
DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
}
if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers,
false)) { // Not executable.
if (!reservation.Commit(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers,
false)) { // Not executable.
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;
top_ = start_[current_];
virtual_memory_.TakeControl(&reservation);
}
void StoreBuffer::TearDown() {
delete virtual_memory_;
if (virtual_memory_.IsReserved()) virtual_memory_.Release();
top_ = nullptr;
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
@@ -208,7 +208,7 @@ class StoreBuffer {
// IN_GC mode.
StoreBufferMode mode_;
base::VirtualMemory* virtual_memory_;
base::VirtualMemory virtual_memory_;
// Callbacks are more efficient than reading out the gc state for every
// store buffer operation.