Commit 25548464 authored by Marja Hölttä, committed by V8 LUCI CQ

[rab/gsab] Fix the v8_enable_webassembly=false build

This moves the definitions that RAB / GSAB need out of #if V8_ENABLE_WEBASSEMBLY.

Everything related to guards is still behind V8_ENABLE_WEBASSEMBLY,
since RAB / GSAB don't use guards.
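
For context, a minimal sketch of the resulting #if layering, assuming the usual V8 constants (GB is redefined locally and the includes added so the sketch stands alone; the real code is in the diff below):

#include <cstddef>
#include <cstdint>

constexpr uint64_t GB = 1024 * 1024 * 1024;  // stand-in for V8's GB constant

// Needed by both Wasm memories and RAB / GSAB backing stores, so this is
// now compiled unconditionally:
constexpr size_t kAddressSpaceLimit = 0xC0000000;  // 3 GiB (32-bit targets)

#if V8_ENABLE_WEBASSEMBLY
// Guard regions are Wasm-only, so the guard-size constants stay gated:
constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
#if V8_TARGET_ARCH_64_BIT
constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
#endif
#endif  // V8_ENABLE_WEBASSEMBLY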

Bug: v8:11111
Change-Id: I9b9fd0dbdcdc1f3c70d6e4f5eb1c70d1bab98e68
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2880221
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Marja Hölttä <marja@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74476}
parent d3d4a896
@@ -37,6 +37,14 @@ namespace {
 constexpr size_t kPlatformMaxPages =
     std::numeric_limits<size_t>::max() / wasm::kWasmPageSize;
 
+constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
+
+#if V8_TARGET_ARCH_64_BIT
+constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
+#endif
+
+#endif  // V8_ENABLE_WEBASSEMBLY
+
 #if V8_TARGET_ARCH_MIPS64
 // MIPS64 has a user space of 2^40 bytes on most processors,
 // address space limits needs to be smaller.
@@ -50,12 +58,6 @@ constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB
 constexpr size_t kAddressSpaceLimit = 0xC0000000;  // 3 GiB
 #endif
 
-constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
-
-#if V8_TARGET_ARCH_64_BIT
-constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
-#endif
-
 std::atomic<uint64_t> reserved_address_space_{0};
 
 // Allocation results are reported to UMA
@@ -75,7 +77,7 @@ enum class AllocationStatus {
 base::AddressRegion GetReservedRegion(bool has_guard_regions,
                                       void* buffer_start,
                                       size_t byte_capacity) {
-#if V8_TARGET_ARCH_64_BIT
+#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
   if (has_guard_regions) {
     // Guard regions always look like this:
     // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
@@ -97,7 +99,7 @@ base::AddressRegion GetReservedRegion(bool has_guard_regions,
 }
 
 size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity) {
-#if V8_TARGET_ARCH_64_BIT
+#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
   if (has_guard_regions) return kFullGuardSize;
 #else
   DCHECK(!has_guard_regions);
@@ -110,7 +112,6 @@ void RecordStatus(Isolate* isolate, AllocationStatus status) {
   isolate->counters()->wasm_memory_allocation_result()->AddSample(
       static_cast<int>(status));
 }
-#endif  // V8_ENABLE_WEBASSEMBLY
 
 inline void DebugCheckZero(void* start, size_t byte_length) {
 #if DEBUG
@@ -301,25 +302,6 @@ void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
 }
 
 #if V8_ENABLE_WEBASSEMBLY
-bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
-  uint64_t reservation_limit = kAddressSpaceLimit;
-  uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
-  while (true) {
-    if (old_count > reservation_limit) return false;
-    if (reservation_limit - old_count < num_bytes) return false;
-    if (reserved_address_space_.compare_exchange_weak(
-            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
-      return true;
-    }
-  }
-}
-
-void BackingStore::ReleaseReservation(uint64_t num_bytes) {
-  uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
-  USE(old_reserved);
-  DCHECK_LE(num_bytes, old_reserved);
-}
-
 // Allocate a backing store for a Wasm memory. Always use the page allocator
 // and add guard regions.
 std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
@@ -341,6 +323,25 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
 }
 #endif  // V8_ENABLE_WEBASSEMBLY
 
+bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
+  uint64_t reservation_limit = kAddressSpaceLimit;
+  uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
+  while (true) {
+    if (old_count > reservation_limit) return false;
+    if (reservation_limit - old_count < num_bytes) return false;
+    if (reserved_address_space_.compare_exchange_weak(
+            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
+      return true;
+    }
+  }
+}
+
+void BackingStore::ReleaseReservation(uint64_t num_bytes) {
+  uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
+  USE(old_reserved);
+  DCHECK_LE(num_bytes, old_reserved);
+}
+
 std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
     Isolate* isolate, size_t byte_length, size_t page_size,
     size_t initial_pages, size_t maximum_pages, bool is_wasm_memory,
@@ -415,9 +416,13 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
   // Get a pointer to the start of the buffer, skipping negative guard region
   // if necessary.
+#if V8_ENABLE_WEBASSEMBLY
   byte* buffer_start = reinterpret_cast<byte*>(allocation_base) +
                        (guards ? kNegativeGuardSize : 0);
+#else
+  DCHECK(!guards);
+  byte* buffer_start = reinterpret_cast<byte*>(allocation_base);
+#endif
 
   //--------------------------------------------------------------------------
   // 3. Commit the initial pages (allow read/write).
   //--------------------------------------------------------------------------
......
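
The relocated ReserveAddressSpace is a plain compare-and-swap accounting loop. A self-contained sketch of the same pattern, with hypothetical names (g_reserved, kLimit, TryReserve) standing in for V8's reserved_address_space_, kAddressSpaceLimit, and the BackingStore methods:

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for V8's global address-space accounting.
std::atomic<uint64_t> g_reserved{0};
constexpr uint64_t kLimit = uint64_t{3} * 1024 * 1024 * 1024;  // 3 GiB

bool TryReserve(uint64_t num_bytes) {
  uint64_t old_count = g_reserved.load(std::memory_order_relaxed);
  while (true) {
    // Testing "limit - old < bytes" instead of "old + bytes > limit"
    // avoids unsigned overflow when num_bytes is very large.
    if (old_count > kLimit || kLimit - old_count < num_bytes) return false;
    // compare_exchange_weak may fail spuriously; on failure it reloads the
    // current value into old_count, so the loop retries with fresh state.
    if (g_reserved.compare_exchange_weak(old_count, old_count + num_bytes,
                                         std::memory_order_acq_rel)) {
      return true;
    }
  }
}

void Release(uint64_t num_bytes) { g_reserved.fetch_sub(num_bytes); }

The release side needs no retry loop: an unconditional fetch_sub cannot race past the limit check, which only guards new reservations.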
@@ -125,12 +125,6 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
   static void BroadcastSharedWasmMemoryGrow(Isolate* isolate,
                                             std::shared_ptr<BackingStore>);
 
-  // TODO(wasm): address space limitations should be enforced in page alloc.
-  // These methods enforce a limit on the total amount of address space,
-  // which is used for both backing stores and wasm memory.
-  static bool ReserveAddressSpace(uint64_t num_bytes);
-  static void ReleaseReservation(uint64_t num_bytes);
-
   // Remove all memory objects in the given isolate that refer to this
   // backing store.
   static void RemoveSharedWasmMemoryObjects(Isolate* isolate);
@@ -139,6 +133,12 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
   static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
 #endif  // V8_ENABLE_WEBASSEMBLY
 
+  // TODO(wasm): address space limitations should be enforced in page alloc.
+  // These methods enforce a limit on the total amount of address space,
+  // which is used for both backing stores and wasm memory.
+  static bool ReserveAddressSpace(uint64_t num_bytes);
+  static void ReleaseReservation(uint64_t num_bytes);
+
   // Returns the size of the external memory owned by this backing store.
   // It is used for triggering GCs based on the external memory pressure.
   size_t PerIsolateAccountingLength() {
......