Commit a9aeaa65 authored by Bill Budge, committed by Commit Bot

[memory] Change OS::Allocate on Windows to not over allocate.

- Changes OS::Allocate to first try an exact-size aligned allocation,
  and to fall back to padded allocations only if that fails. All padded
  allocations are trimmed to the requested size (a sketch of the strategy
  follows the commit metadata below).

Bug: chromium:800511
Change-Id: Iccab2eddbf2a3b08d2b83b95f96c766c9fad7a82
Reviewed-on: https://chromium-review.googlesource.com/875242
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Commit-Queue: Bill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50706}
parent 2d72051d
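
Illustrating the strategy end to end, a minimal standalone sketch (not the patch itself): AllocateAligned and RoundUpPtr are hypothetical names, raw VirtualAlloc/VirtualFree stand in for V8's RandomizedVirtualAlloc and Free helpers, and alignment is assumed to be a power of two that is a multiple of the 64 KiB allocation granularity.

// Sketch of the exact-then-padded allocation strategy using raw Win32 calls.
// Hypothetical stand-ins for V8 helpers; not V8's actual code.
#include <windows.h>
#include <cstdint>
#include <cstdio>

static uint8_t* RoundUpPtr(uint8_t* p, size_t alignment) {
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<uint8_t*>((v + alignment - 1) & ~(alignment - 1));
}

void* AllocateAligned(size_t size, size_t alignment) {
  const DWORD flags = MEM_RESERVE | MEM_COMMIT;
  const DWORD protect = PAGE_READWRITE;

  // First, try an exact size allocation; with luck it is already aligned.
  uint8_t* base =
      static_cast<uint8_t*>(VirtualAlloc(nullptr, size, flags, protect));
  if (base == nullptr) return nullptr;  // OOM.
  if (RoundUpPtr(base, alignment) == base) return base;

  // Not aligned: free it and fall back to padded allocations. Pad by the
  // maximum possible misalignment of a granularity-aligned base.
  VirtualFree(base, 0, MEM_RELEASE);
  SYSTEM_INFO info;
  GetSystemInfo(&info);
  size_t padded_size = size + (alignment - info.dwAllocationGranularity);

  const int kMaxAttempts = 3;
  for (int i = 0; i < kMaxAttempts; ++i) {
    base = static_cast<uint8_t*>(
        VirtualAlloc(nullptr, padded_size, flags, protect));
    if (base == nullptr) return nullptr;  // OOM.
    uint8_t* aligned_base = RoundUpPtr(base, alignment);
    // Trim: release the whole padded block, then immediately re-allocate
    // exactly `size` bytes at the aligned address inside it.
    VirtualFree(base, 0, MEM_RELEASE);
    void* result = VirtualAlloc(aligned_base, size, flags, protect);
    // Another thread can grab the range between the free and the re-alloc;
    // in that case result is nullptr and we retry.
    if (result != nullptr) return result;
  }
  return nullptr;  // Lost the race kMaxAttempts times.
}

int main() {
  void* p = AllocateAligned(1u << 20, 1u << 20);  // 1 MiB, 1 MiB-aligned.
  std::printf("allocated at %p\n", p);
  if (p != nullptr) VirtualFree(p, 0, MEM_RELEASE);
  return 0;
}

When the exact-size attempt happens to come back aligned, no extra address space is requested at all, which is the point of the change.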
@@ -100,34 +100,44 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   DCHECK_EQ(0, alignment % page_size);
   DCHECK_LE(page_size, alignment);
   address = AlignedAddress(address, alignment);
 
-  // Add the maximum misalignment so we are guaranteed an aligned base address.
-  size_t padded_size = size + (alignment - page_size);
-  int flags = (access == OS::MemoryPermission::kNoAccess)
-                  ? MEM_RESERVE
-                  : MEM_RESERVE | MEM_COMMIT;
-  int protect = GetProtectionFromMemoryPermission(access);
+  DWORD flags = (access == OS::MemoryPermission::kNoAccess)
+                    ? MEM_RESERVE
+                    : MEM_RESERVE | MEM_COMMIT;
+  DWORD protect = GetProtectionFromMemoryPermission(access);
 
+  // First, try an exact size aligned allocation.
+  uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+  if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
+
+  // If address is suitably aligned, we're done.
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  if (base == aligned_base) return reinterpret_cast<void*>(base);
+
+  // Otherwise, free it and try a larger allocation.
+  CHECK(Free(base, size));
+
+  // Clear the hint. It's unlikely we can allocate at this address.
+  address = nullptr;
+
+  // Add the maximum misalignment so we are guaranteed an aligned base address
+  // in the allocated region.
+  size_t padded_size = size + (alignment - page_size);
   const int kMaxAttempts = 3;
-  uint8_t* base = nullptr;
-  uint8_t* aligned_base = nullptr;
+  aligned_base = nullptr;
   for (int i = 0; i < kMaxAttempts; ++i) {
     base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
-    // If we can't allocate, we're OOM.
-    if (base == nullptr) break;
-    aligned_base = RoundUp(base, alignment);
-    // If address is suitably aligned, we're done.
-    if (base == aligned_base) break;
-    // Try to trim the unaligned prefix by freeing the entire padded allocation
-    // and then calling VirtualAlloc at the aligned_base.
+    if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
+
+    // Try to trim the allocation by freeing the padded allocation and then
+    // calling VirtualAlloc at the aligned base.
     CHECK(Free(base, padded_size));
+    aligned_base = RoundUp(base, alignment);
     base = reinterpret_cast<uint8_t*>(
         VirtualAlloc(aligned_base, size, flags, protect));
     // We might not get the reduced allocation due to a race. In that case,
     // base will be nullptr.
     if (base != nullptr) break;
-    // Clear the hint. It's unlikely we can allocate at this address.
-    address = nullptr;
   }
   DCHECK_EQ(base, aligned_base);
   return reinterpret_cast<void*>(base);
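A quick sanity check of the padding formula padded_size = size + (alignment - page_size): VirtualAlloc returns bases aligned to the allocation granularity, so only the base's offset within one alignment period matters. A hypothetical standalone check, not part of the patch:

// Exhaustively checks that RoundUp(base, alignment) + size fits inside
// [base, base + padded_size) for every granularity-aligned base offset.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t page_size = 0x10000;   // 64 KiB allocation granularity.
  const uintptr_t alignment = 0x100000;  // 1 MiB.
  const uintptr_t size = 0x300000;       // 3 MiB.
  const uintptr_t padded_size = size + (alignment - page_size);
  // Only base % alignment matters, so one alignment's worth of offsets
  // covers every case.
  for (uintptr_t base = 0; base < alignment; base += page_size) {
    uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
    assert(aligned + size <= base + padded_size);
  }
  std::printf("padding suffices for all %ju offsets\n",
              static_cast<uintmax_t>(alignment / page_size));
  return 0;
}

The worst case is a base one granularity past an alignment boundary; RoundUp then consumes the full alignment - page_size bytes of padding, which is exactly what the formula provides.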
@@ -790,34 +790,44 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   DCHECK_EQ(0, alignment % page_size);
   DCHECK_LE(page_size, alignment);
   address = AlignedAddress(address, alignment);
 
-  // Add the maximum misalignment so we are guaranteed an aligned base address.
-  size_t padded_size = size + (alignment - page_size);
   DWORD flags = (access == OS::MemoryPermission::kNoAccess)
                     ? MEM_RESERVE
                     : MEM_RESERVE | MEM_COMMIT;
   DWORD protect = GetProtectionFromMemoryPermission(access);
 
+  // First, try an exact size aligned allocation.
+  uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+  if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
+
+  // If address is suitably aligned, we're done.
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  if (base == aligned_base) return reinterpret_cast<void*>(base);
+
+  // Otherwise, free it and try a larger allocation.
+  CHECK(Free(base, size));
+
+  // Clear the hint. It's unlikely we can allocate at this address.
+  address = nullptr;
+
+  // Add the maximum misalignment so we are guaranteed an aligned base address
+  // in the allocated region.
+  size_t padded_size = size + (alignment - page_size);
   const int kMaxAttempts = 3;
-  uint8_t* base = nullptr;
-  uint8_t* aligned_base = nullptr;
+  aligned_base = nullptr;
   for (int i = 0; i < kMaxAttempts; ++i) {
     base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
-    // If we can't allocate, we're OOM.
-    if (base == nullptr) break;
-    aligned_base = RoundUp(base, alignment);
-    // If address is suitably aligned, we're done.
-    if (base == aligned_base) break;
-    // Try to trim the unaligned prefix by freeing the entire padded allocation
-    // and then calling VirtualAlloc at the aligned_base.
+    if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
+
+    // Try to trim the allocation by freeing the padded allocation and then
+    // calling VirtualAlloc at the aligned base.
     CHECK(Free(base, padded_size));
+    aligned_base = RoundUp(base, alignment);
     base = reinterpret_cast<uint8_t*>(
         VirtualAlloc(aligned_base, size, flags, protect));
     // We might not get the reduced allocation due to a race. In that case,
     // base will be nullptr.
     if (base != nullptr) break;
-    // Clear the hint. It's unlikely we can allocate at this address.
-    address = nullptr;
  }
   DCHECK_EQ(base, aligned_base);
   return reinterpret_cast<void*>(base);
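The free-then-reallocate dance is forced by the Win32 API: VirtualFree with MEM_RELEASE cannot release a sub-range of an allocation; it requires the original base address and a dwSize of zero. A hypothetical standalone demonstration, not part of the patch:

// Shows that releasing only the unaligned prefix of a reservation fails,
// which is why the whole padded block must be freed and re-allocated.
#include <windows.h>
#include <cstdio>

int main() {
  const size_t kSize = 4 * 1024 * 1024;
  void* base = VirtualAlloc(nullptr, kSize, MEM_RESERVE, PAGE_NOACCESS);
  if (base == nullptr) return 1;

  // Attempt to release just the first 64 KiB of the reservation.
  BOOL ok = VirtualFree(base, 64 * 1024, MEM_RELEASE);
  // Fails: MEM_RELEASE requires dwSize == 0 and the original base address.
  std::printf("partial release: %s (error %lu)\n", ok ? "ok" : "failed",
              GetLastError());

  // The whole reservation must be released instead, after which a chosen
  // sub-range can be re-reserved (racily) at a specific address.
  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}

Hence the patch releases the entire padded block and immediately re-allocates at aligned_base, accepting the small window in which another thread can take the range; the kMaxAttempts loop absorbs that race.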