Commit 721e3825 authored by Lei Zhang, committed by Commit Bot

Fix 32-bit MSVC build.

In backing-store.cc, define GetGuardedRegion(), which is only used on
64-bit platforms, only when V8_TARGET_ARCH_64_BIT evaluates to true.
Then add GetRegion(), which returns the appropriate region depending on
whether guard regions are enabled, and use it to simplify the only
caller of GetGuardedRegion().

Similarly, define |kFullGuardSize| on 64-bit only, and add
GetReservationSize() so that |kFullGuardSize| is only accessed where it
is defined. (A minimal sketch of this pattern follows the commit
metadata below.)

Change-Id: Iefae7969a6138118d466a9d48e0ea62d94ff07f3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1899547
Commit-Queue: Lei Zhang <thestig@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64782}
parent add50f20
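
Before the diff: a minimal standalone sketch of the pattern the commit
message describes, assuming only that the build defines
V8_TARGET_ARCH_64_BIT to 1 on 64-bit targets. The names kBigConstant and
GetSize() are hypothetical stand-ins for |kFullGuardSize| and
GetReservationSize(), and plain assert() stands in for V8's DCHECK().

#include <cassert>
#include <cstddef>

// Sketch only. Assume the build defines V8_TARGET_ARCH_64_BIT to 1 on
// 64-bit targets (as V8's build configuration does).
#if V8_TARGET_ARCH_64_BIT
// Hypothetical 64-bit-only constant, fenced like |kFullGuardSize|.
constexpr size_t kBigConstant = size_t{10} * 1024 * 1024 * 1024;  // 10 GiB
#endif

// Centralizes the #if so callers never name the constant; mirrors the
// shape of GetReservationSize() in the diff below.
inline size_t GetSize(bool use_big, size_t fallback) {
#if V8_TARGET_ARCH_64_BIT
  if (use_big) return kBigConstant;
#else
  assert(!use_big);  // 32-bit builds must never request the big size.
#endif
  return fallback;
}

The point of the helper is that 32-bit translation units never reference
the 64-bit-only constant, while call sites stay free of preprocessor
conditionals.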
@@ -37,7 +37,10 @@ constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
 constexpr uint64_t kOneGiB = 1024 * 1024 * 1024;
 constexpr uint64_t kNegativeGuardSize = 2 * kOneGiB;
+#if V8_TARGET_ARCH_64_BIT
 constexpr uint64_t kFullGuardSize = 10 * kOneGiB;
+#endif
 std::atomic<uint64_t> reserved_address_space_{0};
@@ -55,6 +58,7 @@ enum class AllocationStatus {
   kOtherFailure  // Failed for an unknown reason
 };
+#if V8_TARGET_ARCH_64_BIT
 base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) {
   // Guard regions always look like this:
   // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
@@ -68,6 +72,29 @@ base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) {
   return base::AddressRegion(start - (2 * kOneGiB),
                              static_cast<size_t>(kFullGuardSize));
 }
+#endif
+
+base::AddressRegion GetRegion(bool has_guard_regions, void* buffer_start,
+                              size_t byte_length, size_t byte_capacity) {
+#if V8_TARGET_ARCH_64_BIT
+  if (has_guard_regions) return GetGuardedRegion(buffer_start, byte_length);
+#else
+  DCHECK(!has_guard_regions);
+#endif
+
+  return base::AddressRegion(reinterpret_cast<Address>(buffer_start),
+                             byte_capacity);
+}
+
+size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity) {
+#if V8_TARGET_ARCH_64_BIT
+  if (has_guard_regions) return kFullGuardSize;
+#else
+  DCHECK(!has_guard_regions);
+#endif
+
+  return byte_capacity;
+}
+
 void RecordStatus(Isolate* isolate, AllocationStatus status) {
   isolate->counters()->wasm_memory_allocation_result()->AddSample(
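
As a sanity check on the constants above: the guard layout comment shows
2 GiB of leading guard, a 4 GiB addressable window, and 4 GiB of trailing
guard, so the full reservation is 2 + 4 + 4 = 10 GiB, matching
kFullGuardSize and the start - (2 * kOneGiB) origin in GetGuardedRegion().
A self-contained snippet with the constants copied from the first hunk:

#include <cstdint>

constexpr uint64_t kOneGiB = 1024 * 1024 * 1024;
constexpr uint64_t kNegativeGuardSize = 2 * kOneGiB;
constexpr uint64_t kFullGuardSize = 10 * kOneGiB;

// 2 GiB leading guard + 4 GiB addressable window + 4 GiB trailing guard.
static_assert(kNegativeGuardSize + 4 * kOneGiB + 4 * kOneGiB == kFullGuardSize,
              "guard layout sums to the full 10 GiB reservation");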
@@ -140,18 +167,16 @@ BackingStore::~BackingStore() {
   }
   // Wasm memories are always allocated through the page allocator.
-  auto region =
-      has_guard_regions_
-          ? GetGuardedRegion(buffer_start_, byte_length_)
-          : base::AddressRegion(reinterpret_cast<Address>(buffer_start_),
-                                byte_capacity_);
+  auto region = GetRegion(has_guard_regions_, buffer_start_, byte_length_,
+                          byte_capacity_);
+
   bool pages_were_freed =
       region.size() == 0 /* no need to free any pages */ ||
       FreePages(GetPlatformPageAllocator(),
                 reinterpret_cast<void*>(region.begin()), region.size());
   CHECK(pages_were_freed);
-  BackingStore::ReleaseReservation(has_guard_regions_ ? kFullGuardSize
-                                                      : byte_capacity_);
+  BackingStore::ReleaseReservation(
+      GetReservationSize(has_guard_regions_, byte_capacity_));
   Clear();
   return;
 }
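
Net effect on the destructor: both ternaries are replaced by the new
helpers, removing one of the two direct references to kFullGuardSize (the
other goes away in the next hunk), so no 32-bit code path names the
64-bit-only constant; on 32-bit, the helpers fall through to
byte_capacity_ and their DCHECKs enforce that guard regions were never
requested.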
@@ -279,8 +304,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
   size_t engine_max_pages = wasm::max_mem_pages();
   size_t byte_capacity =
       std::min(engine_max_pages, maximum_pages) * wasm::kWasmPageSize;
-  size_t reservation_size =
-      guards ? static_cast<size_t>(kFullGuardSize) : byte_capacity;
+  size_t reservation_size = GetReservationSize(guards, byte_capacity);

   //--------------------------------------------------------------------------
   // 1. Enforce maximum address space reservation per engine.
...