Commit 3996bd02 authored by Igor Sheludko and committed by V8 LUCI CQ

[ext-code-space] Fix external code space on Windows

... and introduce CodeRange::GetWritableReservedAreaSize() as a single
bottleneck for calculating the size of the writable area used for unwind
information.

Bug: v8:11880
Change-Id: Ifa2a3f74ce994cffb6bb8bef12ab17b69dabd706
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3244409
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Auto-Submit: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77555}
parent 4ea30516
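The essence of the change: every place that needs the size of the writable reserved area (used for unwind information) now goes through the single static accessor CodeRange::GetWritableReservedAreaSize() instead of repeating the kReservedCodeRangePages * GetCommitPageSize() multiplication. Below is a minimal self-contained sketch of that pattern; the constant's value and the stand-in GetCommitPageSize() are illustrative assumptions, not V8's actual definitions.

```cpp
// Sketch of the "bottleneck accessor" pattern this commit introduces. All
// names and values here are stand-ins for illustration, not V8's real code.
#include <cstddef>
#include <cstdio>

namespace sketch {

// Assumed value, chosen only for the sketch.
constexpr std::size_t kReservedCodeRangePages = 1;

// Stand-in for MemoryAllocator::GetCommitPageSize(); 4 KiB is typical.
std::size_t GetCommitPageSize() { return 4 * 1024; }

class CodeRange {
 public:
  // The single place where the writable reserved-area size is computed.
  static std::size_t GetWritableReservedAreaSize() {
    return kReservedCodeRangePages * GetCommitPageSize();
  }
};

class Heap {
 public:
  // Callers delegate to the bottleneck instead of duplicating the arithmetic.
  static std::size_t GetCodeRangeReservedAreaSize() {
    return CodeRange::GetWritableReservedAreaSize();
  }
};

}  // namespace sketch

int main() {
  std::printf("writable reserved area: %zu bytes\n",
              sketch::Heap::GetCodeRangeReservedAreaSize());
}
```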
@@ -507,6 +507,9 @@ assert(!v8_enable_map_packing || !v8_enable_pointer_compression,
 assert(!v8_enable_map_packing || v8_current_cpu == "x64",
        "Map packing is only supported on x64")
 
+assert(!v8_enable_external_code_space || v8_enable_pointer_compression,
+       "External code space feature requires pointer compression")
+
 assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
        "V8 Heap Sandbox requires pointer compression")
@@ -86,6 +86,11 @@ void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
 CodeRange::~CodeRange() { Free(); }
 
+// static
+size_t CodeRange::GetWritableReservedAreaSize() {
+  return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+}
+
 bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
                                 size_t requested) {
   DCHECK_NE(requested, 0);
@@ -96,8 +101,7 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
   if (requested <= kMinimumCodeRangeSize) {
     requested = kMinimumCodeRangeSize;
   }
-  const size_t reserved_area =
-      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+  const size_t reserved_area = GetWritableReservedAreaSize();
   if (requested < (kMaximalCodeRangeSize - reserved_area)) {
     requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
     // Fullfilling both reserved pages requirement and huge code area
@@ -113,14 +117,15 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
   // base_alignment should be kAnyBaseAlignment when V8_ENABLE_NEAR_CODE_RANGE
   // is enabled so that InitReservation would not break the alignment in
   // GetAddressHint().
+  const size_t allocate_page_size = page_allocator->AllocatePageSize();
   params.base_alignment =
       V8_EXTERNAL_CODE_SPACE_BOOL
           ? base::bits::RoundUpToPowerOfTwo(requested)
           : VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
-  params.base_bias_size = reserved_area;
+  params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
   params.page_size = MemoryChunk::kPageSize;
-  params.requested_start_hint = GetCodeRangeAddressHint()->GetAddressHint(
-      requested, page_allocator->AllocatePageSize());
+  params.requested_start_hint =
+      GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
 
   if (!VirtualMemoryCage::InitReservation(params)) return false;
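The base_bias_size line above is the core of the Windows fix: the bias used to be sized in commit pages (typically 4 KiB), but the cage reservation is carved out in units of the allocator's page size, which on Windows is the 64 KiB allocation granularity, so the bias is now rounded up to a whole allocation page. A small self-contained illustration with assumed page sizes:

```cpp
// Illustrates why rounding the bias up to the allocation page size matters.
// The page sizes below are typical assumptions (Windows: 64 KiB allocation
// granularity, 4 KiB commit pages), not values queried from a real OS.
#include <cstddef>
#include <cstdio>

std::size_t RoundUp(std::size_t value, std::size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

int main() {
  const std::size_t commit_page_size = 4 * 1024;     // assumed 4 KiB
  const std::size_t allocate_page_size = 64 * 1024;  // assumed 64 KiB
  const std::size_t reserved_area = 1 * commit_page_size;  // one reserved page
  // Unrounded, 4096 bytes is not a multiple of the 64 KiB granularity; the
  // rounded bias (65536) keeps the biased base allocation-page-aligned.
  std::printf("bias: %zu -> %zu bytes\n", reserved_area,
              RoundUp(reserved_area, allocate_page_size));
}
```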
@@ -68,7 +68,11 @@ class CodeRangeAddressHint {
 // 4) |base()| is CommitPageSize()-aligned
 class CodeRange final : public VirtualMemoryCage {
  public:
-  V8_EXPORT_PRIVATE ~CodeRange();
+  V8_EXPORT_PRIVATE ~CodeRange() override;
+
+  // Returns the size of the initial area of a code-range, which is marked
+  // writable and reserved to contain unwind information.
+  static size_t GetWritableReservedAreaSize();
 
   uint8_t* embedded_blob_code_copy() const {
     // remap_embedded_builtins_mutex_ is designed to protect write contention to
@@ -3045,7 +3045,7 @@ int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
 }
 
 size_t Heap::GetCodeRangeReservedAreaSize() {
-  return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+  return CodeRange::GetWritableReservedAreaSize();
 }
 
 HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {