Commit d0468ded authored by Bill Budge, committed by Commit Bot

[heap] Fix StoreBuffer setup.

- Solves a problem for PPC in configurations where the commit page size
  is 64K (see https://chromium-review.googlesource.com/c/v8/v8/+/1149515).
- Uses existing VM allocation code to get properly aligned memory.
- Makes sure the size passed to SetPermissions is a multiple of the
  system page size (a rough sketch of the rounding follows below).
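
A standalone sketch of that rounding idea, assuming a 64K commit page size; the constant and helper below are hypothetical stand-ins for illustration, not V8's actual CommitPageSize()/RoundUp helpers:

  #include <cstddef>

  // Hypothetical stand-in for the platform's commit page size; on the PPC
  // configuration mentioned above this would be 64K. V8 queries the real
  // value from its page allocator at runtime.
  constexpr size_t kAssumedCommitPageSize = 64 * 1024;

  // Round `size` up to the next multiple of the commit page size so that a
  // SetPermissions-style call always receives a page-granular length.
  // Requires a power-of-two page size.
  constexpr size_t RoundUpToCommitPage(size_t size) {
    return (size + kAssumedCommitPageSize - 1) & ~(kAssumedCommitPageSize - 1);
  }

  static_assert(RoundUpToCommitPage(1) == kAssumedCommitPageSize,
                "sub-page requests grow to one full commit page");
  static_assert(RoundUpToCommitPage(kAssumedCommitPageSize) ==
                    kAssumedCommitPageSize,
                "already page-aligned sizes are unchanged");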

Bug: chromium:756050

Change-Id: Ib3799ab7a3bb44b0091c234234c1cc47938379c2
Reviewed-on: https://chromium-review.googlesource.com/1161210
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54930}
parent 083c5a6c
@@ -31,22 +31,28 @@ StoreBuffer::StoreBuffer(Heap* heap)
 }
 
 void StoreBuffer::SetUp() {
-  // Allocate 3x the buffer size, so that we can start the new store buffer
-  // aligned to 2x the size. This lets us use a bit test to detect the end of
-  // the area.
+  const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+  // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
+  // use a bit test to detect the ends of the buffers.
+  const size_t alignment =
+      std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+  void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
   VirtualMemory reservation;
-  if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
-                          &reservation)) {
+  if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
+                                 &reservation)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
 
   Address start = reservation.address();
-  start_[0] = reinterpret_cast<Address*>(::RoundUp(start, kStoreBufferSize));
+  const size_t allocated_size = reservation.size();
+
+  start_[0] = reinterpret_cast<Address*>(start);
   limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
   start_[1] = limit_[0];
   limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
 
-  Address* vm_limit = reinterpret_cast<Address*>(start + reservation.size());
+  // Sanity check the buffers.
+  Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
   USE(vm_limit);
   for (int i = 0; i < kStoreBuffers; i++) {
     DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
@@ -56,8 +62,9 @@ void StoreBuffer::SetUp() {
     DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
   }
 
-  if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
-                                  kStoreBufferSize * kStoreBuffers,
+  // Set RW permissions only on the pages we use.
+  const size_t used_size = RoundUp(requested_size, CommitPageSize());
+  if (!reservation.SetPermissions(start, used_size,
                                   PageAllocator::kReadWrite)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
@@ -66,7 +73,6 @@ void StoreBuffer::SetUp() {
   virtual_memory_.TakeControl(&reservation);
 }
 
 void StoreBuffer::TearDown() {
   if (virtual_memory_.IsReserved()) virtual_memory_.Free();
   top_ = nullptr;
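
A note on the "bit test" mentioned in the new comment: because each store buffer now starts on an address aligned to kStoreBufferSize (a power of two), the write pointer lands on such a boundary again only when a buffer has been filled completely. A minimal sketch of that check, using hypothetical constants rather than V8's real kStoreBufferSize/kStoreBufferMask:

  #include <cassert>
  #include <cstdint>

  // Hypothetical values for illustration only.
  constexpr uintptr_t kBufSize = uintptr_t{1} << 16;  // power-of-two buffer size
  constexpr uintptr_t kBufMask = kBufSize - 1;        // low bits of an in-buffer offset

  // After bumping the write pointer past a freshly written slot, a single mask
  // test tells us whether we just crossed a buffer boundary. This only works
  // because the buffer itself starts on a kBufSize-aligned address.
  inline bool CrossedBufferEnd(uintptr_t top_after_write) {
    return (top_after_write & kBufMask) == 0;
  }

  int main() {
    const uintptr_t aligned_start = 4 * kBufSize;  // pretend reservation address
    uintptr_t top = aligned_start;
    for (int slots = 0; slots < 10; ++slots) {
      top += sizeof(uintptr_t);        // "write" one slot
      assert(!CrossedBufferEnd(top));  // still far from the end
    }
    uintptr_t full = aligned_start + kBufSize;  // pointer just past the last slot
    assert(CrossedBufferEnd(full));             // end detected by the bit test
    return 0;
  }

This is why the reservation must be aligned at least to kStoreBufferSize: without that alignment, a full buffer would not end on a mask boundary and the check would misfire.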