Commit a31ffef8 authored by Igor Sheludko, committed by Commit Bot

[heap] Take expected OS page size into account for kStoreBufferSize

This CL fixes an allocation size alignment violation when allocating the
store buffer. If the actual CommitPageSize() happens to be bigger than
kMinExpectedOSPageSize, we waste a bit of memory, but that is a fair
trade-off for keeping the store buffer overflow check in write barriers fast.
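In essence, the buffer reservation request must be a multiple of the
allocator's commit page size, so the CL rounds the raw size up before
reserving. A minimal sketch of that adjustment, using a hypothetical
RoundUpTo helper rather than V8's own utilities:

#include <cstddef>

// Hypothetical helper: round x up to the next multiple of a power of two.
constexpr size_t RoundUpTo(size_t x, size_t pow2) {
  return (x + pow2 - 1) & ~(pow2 - 1);
}

// Sketch of the fix: pad the raw request (buffer size times buffer count)
// to the commit page size. With two 32 KB buffers and 64 KB commit pages
// the request is already aligned; with 128 KB commit pages it would be
// padded to 128 KB, the "bit of memory wastage" the message above accepts.
size_t AlignedStoreBufferRequest(size_t buffer_size, size_t num_buffers,
                                 size_t commit_page_size) {
  return RoundUpTo(buffer_size * num_buffers, commit_page_size);
}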

Change-Id: I1d775aa8b203cb198e8332477b0bc2befcd9b006
Reviewed-on: https://chromium-review.googlesource.com/c/1351007
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57871}
parent 33ff811d
@@ -162,13 +162,13 @@ constexpr uintptr_t kUintptrAllBitsSet = uintptr_t{0xFFFFFFFFFFFFFFFF};
constexpr bool kRequiresCodeRange = true;
#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr size_t kMaximalCodeRangeSize = 512 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
+constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
#elif V8_TARGET_ARCH_ARM64
constexpr size_t kMaximalCodeRangeSize = 128 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#else
constexpr size_t kMaximalCodeRangeSize = 128 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#endif
#if V8_OS_WIN
constexpr size_t kMinimumCodeRangeSize = 4 * MB;
@@ -186,17 +186,17 @@ constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
constexpr bool kRequiresCodeRange = true;
constexpr size_t kMaximalCodeRangeSize = 256 * MB;
constexpr size_t kMinimumCodeRangeSize = 3 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr bool kRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
+constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
#else
constexpr bool kRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#endif
constexpr size_t kReservedCodeRangePages = 0;
#endif
......
@@ -162,7 +162,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
// Fulfilling both the reserved pages requirement and huge code area
// alignments is not supported (requires re-implementation).
-DCHECK_LE(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize());
+DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
}
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
@@ -171,7 +171,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
page_allocator->AllocatePageSize());
VirtualMemory reservation(
page_allocator, requested, reinterpret_cast<void*>(hint),
-Max(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize()));
+Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
if (!reservation.IsReserved()) {
V8::FatalProcessOutOfMemory(isolate_,
"CodeRange setup: allocate virtual memory");
@@ -198,7 +198,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
size_t size =
RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
MemoryChunk::kPageSize);
-DCHECK(IsAligned(aligned_base, kCodeRangeAreaAlignment));
+DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
LOG(isolate_,
NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
......
@@ -6,6 +6,7 @@
#include <algorithm>
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/template-utils.h"
#include "src/counters.h"
@@ -32,9 +33,16 @@ StoreBuffer::StoreBuffer(Heap* heap)
void StoreBuffer::SetUp() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
-const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+// Round up the requested size in order to fulfill the VirtualMemory's
+// requirements on the requested size alignment. This may cause a bit of
+// memory wastage if the actual CommitPageSize() is bigger than the
+// kMinExpectedOSPageSize value, but this is a trade-off for keeping the
+// store buffer overflow check in write barriers cheap.
+const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
+                                      page_allocator->CommitPageSize());
// Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
// use a bit test to detect the ends of the buffers.
STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
const size_t alignment =
std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
......
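The "bit test" mentioned in the SetUp() comment above works because
kStoreBufferSize is a power of two and the buffer start is aligned to it:
the one-past-the-end address is the only address the insertion pointer can
reach whose low bits are all zero. A self-contained sketch of the idea,
with illustrative constants rather than V8's exact ones:

#include <cstdint>

// Illustrative values; V8 derives kStoreBufferSize as in the header diff
// below, and SetUp() guarantees the alignment this trick depends on.
constexpr uintptr_t kBufferSize = uintptr_t{1} << 14;  // power of two
constexpr uintptr_t kBufferMask = kBufferSize - 1;

// The insertion pointer starts at the (kBufferSize-aligned) buffer start
// and is bumped by one slot per recorded address. The first time its low
// bits are all zero again, it has reached the end of the buffer, so the
// write barrier's overflow check is a single mask-and-compare with no
// separately stored limit pointer to load.
inline bool StoreBufferIsFull(uintptr_t top_after_bump) {
  return (top_after_bump & kBufferMask) == 0;
}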
@@ -27,9 +27,11 @@ class StoreBuffer {
public:
enum StoreBufferMode { IN_GC, NOT_IN_GC };
-static const int kStoreBufferSize = 1 << (11 + kPointerSizeLog2);
-static const int kStoreBufferMask = kStoreBufferSize - 1;
static const int kStoreBuffers = 2;
+static const int kStoreBufferSize =
+    Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
+        1 << (11 + kPointerSizeLog2));
+static const int kStoreBufferMask = kStoreBufferSize - 1;
static const intptr_t kDeletionTag = 1;
V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
......
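With the new definition, kStoreBufferSize keeps its old value where OS
pages are small and grows to half of kMinExpectedOSPageSize where they are
large: on 64-bit PPC Linux (kPointerSizeLog2 == 3, 64 KB pages) it grows
from 16 KB to 32 KB, so the two buffers together fill exactly one OS page.
A compile-time check of that arithmetic, assuming that configuration:

#include <cstddef>

constexpr size_t KB = 1024;
constexpr size_t kPointerSizeLog2 = 3;              // 64-bit target (assumed)
constexpr size_t kMinExpectedOSPageSize = 64 * KB;  // PPC Linux value
constexpr size_t kStoreBuffers = 2;

constexpr size_t kStoreBufferSize =
    kMinExpectedOSPageSize / kStoreBuffers >
            (size_t{1} << (11 + kPointerSizeLog2))
        ? kMinExpectedOSPageSize / kStoreBuffers
        : size_t{1} << (11 + kPointerSizeLog2);

static_assert(kStoreBufferSize == 32 * KB,
              "grows from the old 16 KB to half a 64 KB OS page");
static_assert(kStoreBufferSize * kStoreBuffers == kMinExpectedOSPageSize,
              "the two buffers fill exactly one expected OS page");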