Commit 965e688d authored by Clemens Backes, committed by V8 LUCI CQ

[wasm] Do not allocate guard regions for memory64

Memory64 currently does not use trap handling, so we should not allocate
a guard region (10GB total reservation).
This is implemented by adding a {WasmMemoryFlag} enum in the backing
store header, which replaces the previous {MemoryIndexType}. The flag is
not stored with the backing store, as the backing store does not care
about the index type, and we might want to share the same backing store
for memory32 and memory64 (if sizes permit this).
Instead, we (still) store the flag with the WasmMemoryObject and pass it
to the backing store methods.

R=jkummerow@chromium.org

Bug: v8:10949
Change-Id: I284b85b98d181ba5e8d454b24bfa48f6ac201be5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3789506
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82038}
parent 25506bc8
......@@ -95,10 +95,9 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
&page_size, &initial_pages, &max_pages),
ReadOnlyRoots(isolate).exception());
constexpr bool kIsWasmMemory = false;
backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
isolate, byte_length, max_byte_length, page_size, initial_pages,
max_pages, kIsWasmMemory, shared);
max_pages, WasmMemoryFlag::kNotWasm, shared);
}
if (!backing_store) {
// Allocation of backing store failed.
......
......@@ -81,7 +81,11 @@ base::AddressRegion GetReservedRegion(bool has_guard_regions,
size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity) {
#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
if (has_guard_regions) return kFullGuardSize;
if (has_guard_regions) {
static_assert(kFullGuardSize > size_t{4} * GB);
DCHECK_LE(byte_capacity, size_t{4} * GB);
return kFullGuardSize;
}
#else
DCHECK(!has_guard_regions);
#endif
......@@ -312,7 +316,7 @@ void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
Isolate* isolate, size_t byte_length, size_t max_byte_length,
size_t page_size, size_t initial_pages, size_t maximum_pages,
bool is_wasm_memory, SharedFlag shared) {
WasmMemoryFlag wasm_memory, SharedFlag shared) {
// Enforce engine limitation on the maximum number of pages.
if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) {
return nullptr;
......@@ -324,10 +328,11 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages);
#if V8_ENABLE_WEBASSEMBLY
bool guards = is_wasm_memory && trap_handler::IsTrapHandlerEnabled();
bool guards = wasm_memory == WasmMemoryFlag::kWasmMemory32 &&
trap_handler::IsTrapHandlerEnabled();
#else
CHECK(!is_wasm_memory);
bool guards = false;
CHECK_EQ(WasmMemoryFlag::kNotWasm, wasm_memory);
constexpr bool guards = false;
#endif // V8_ENABLE_WEBASSEMBLY
// For accounting purposes, whether a GC was necessary.
......@@ -400,6 +405,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
: AllocationStatus::kSuccess);
const bool is_wasm_memory = wasm_memory != WasmMemoryFlag::kNotWasm;
ResizableFlag resizable =
is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
......@@ -428,16 +434,20 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
Isolate* isolate, size_t initial_pages, size_t maximum_pages,
SharedFlag shared) {
WasmMemoryFlag wasm_memory, SharedFlag shared) {
// Wasm pages must be a multiple of the allocation page size.
DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize());
DCHECK_LE(initial_pages, maximum_pages);
auto TryAllocate = [isolate, initial_pages, shared](size_t maximum_pages) {
DCHECK(wasm_memory == WasmMemoryFlag::kWasmMemory32 ||
wasm_memory == WasmMemoryFlag::kWasmMemory64);
auto TryAllocate = [isolate, initial_pages, wasm_memory,
shared](size_t maximum_pages) {
auto result = TryAllocateAndPartiallyCommitMemory(
isolate, initial_pages * wasm::kWasmPageSize,
maximum_pages * wasm::kWasmPageSize, wasm::kWasmPageSize, initial_pages,
maximum_pages, true, shared);
maximum_pages, wasm_memory, shared);
if (result && shared == SharedFlag::kShared) {
result->type_specific_data_.shared_wasm_memory_data =
new SharedWasmMemoryData();
......@@ -459,14 +469,14 @@ std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
return backing_store;
}
std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
size_t new_pages,
size_t max_pages) {
std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(
Isolate* isolate, size_t new_pages, size_t max_pages,
WasmMemoryFlag wasm_memory) {
// Note that we could allocate uninitialized to save initialization cost here,
// but since Wasm memories are allocated by the page allocator, the zeroing
// cost is already built-in.
auto new_backing_store = BackingStore::AllocateWasmMemory(
isolate, new_pages, max_pages,
isolate, new_pages, max_pages, wasm_memory,
is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared);
if (!new_backing_store ||
......
......@@ -18,6 +18,9 @@ namespace internal {
class Isolate;
class WasmMemoryObject;
// Whether a backing store holds Wasm memory, and if so, whether it is a
// 32-bit (memory32) or 64-bit (memory64) indexed memory. Only kWasmMemory32
// allocates trap-handler guard regions (memory64 does not use trap handling),
// so callers must pass the accurate flag to the allocation methods.
enum class WasmMemoryFlag : uint8_t { kNotWasm, kWasmMemory32, kWasmMemory64 };
// Whether the backing store is shared or not (e.g. shared Wasm memory /
// SharedArrayBuffer); shared Wasm backing stores additionally carry
// SharedWasmMemoryData.
enum class SharedFlag : uint8_t { kNotShared, kShared };
......@@ -53,17 +56,16 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
#if V8_ENABLE_WEBASSEMBLY
// Allocate the backing store for a Wasm memory.
static std::unique_ptr<BackingStore> AllocateWasmMemory(Isolate* isolate,
size_t initial_pages,
size_t maximum_pages,
SharedFlag shared);
static std::unique_ptr<BackingStore> AllocateWasmMemory(
Isolate* isolate, size_t initial_pages, size_t maximum_pages,
WasmMemoryFlag wasm_memory, SharedFlag shared);
#endif // V8_ENABLE_WEBASSEMBLY
// Tries to allocate `maximum_pages` of memory and commit `initial_pages`.
static std::unique_ptr<BackingStore> TryAllocateAndPartiallyCommitMemory(
Isolate* isolate, size_t byte_length, size_t max_byte_length,
size_t page_size, size_t initial_pages, size_t maximum_pages,
bool is_wasm_memory, SharedFlag shared);
WasmMemoryFlag wasm_memory, SharedFlag shared);
// Create a backing store that wraps existing allocated memory.
// If {free_on_destruct} is {true}, the memory will be freed using the
......@@ -126,7 +128,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// contents of this backing store into it.
std::unique_ptr<BackingStore> CopyWasmMemory(Isolate* isolate,
size_t new_pages,
size_t max_pages);
size_t max_pages,
WasmMemoryFlag wasm_memory);
// Attach the given memory object to this backing store. The memory object
// will be updated if this backing store is grown.
......
......@@ -1135,10 +1135,10 @@ int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
&initial_pages, &max_pages);
DCHECK(result.FromJust());
USE(result);
constexpr bool kIsWasmMemory = false;
backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
main_thread_isolate(), byte_length, max_byte_length, page_size,
initial_pages, max_pages, kIsWasmMemory, SharedFlag::kNotShared);
initial_pages, max_pages, WasmMemoryFlag::kNotWasm,
SharedFlag::kNotShared);
}
CHECK_NOT_NULL(backing_store);
source_.CopyRaw(backing_store->buffer_start(), byte_length);
......
......@@ -1686,9 +1686,10 @@ bool InstanceBuilder::AllocateMemory() {
? SharedFlag::kShared
: SharedFlag::kNotShared;
auto mem_type = module_->is_memory64 ? WasmMemoryFlag::kWasmMemory64
: WasmMemoryFlag::kWasmMemory32;
if (!WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared,
module_->is_memory64 ? WasmMemoryObject::kMemory64
: WasmMemoryObject::kMemory32)
mem_type)
.ToHandle(&memory_object_)) {
thrower_->RangeError(
"Out of memory: Cannot allocate Wasm memory for new instance");
......
......@@ -816,7 +816,7 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(
Isolate* isolate, Handle<JSArrayBuffer> buffer, int maximum,
MemoryIndexType index_type) {
WasmMemoryFlag memory_type) {
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor(), isolate);
......@@ -824,7 +824,7 @@ MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(
isolate->factory()->NewJSObject(memory_ctor, AllocationType::kOld));
memory_object->set_array_buffer(*buffer);
memory_object->set_maximum_pages(maximum);
memory_object->set_is_memory64(index_type == MemoryIndexType::kMemory64);
memory_object->set_is_memory64(memory_type == WasmMemoryFlag::kWasmMemory64);
if (buffer->is_shared()) {
auto backing_store = buffer->GetBackingStore();
......@@ -841,10 +841,10 @@ MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(
MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(
Isolate* isolate, int initial, int maximum, SharedFlag shared,
MemoryIndexType index_type) {
WasmMemoryFlag memory_type) {
bool has_maximum = maximum != kNoMaximum;
int engine_maximum = index_type == MemoryIndexType::kMemory64
int engine_maximum = memory_type == WasmMemoryFlag::kWasmMemory64
? static_cast<int>(wasm::max_mem64_pages())
: static_cast<int>(wasm::max_mem32_pages());
......@@ -875,7 +875,7 @@ MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(
#endif
auto backing_store = BackingStore::AllocateWasmMemory(
isolate, initial, heuristic_maximum, shared);
isolate, initial, heuristic_maximum, memory_type, shared);
if (!backing_store) return {};
......@@ -1009,7 +1009,10 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
size_t min_growth = old_pages + 8 + (old_pages >> 3);
size_t new_capacity = std::clamp(new_pages, min_growth, max_pages);
std::unique_ptr<BackingStore> new_backing_store =
backing_store->CopyWasmMemory(isolate, new_pages, new_capacity);
backing_store->CopyWasmMemory(isolate, new_pages, new_capacity,
memory_object->is_memory64()
? WasmMemoryFlag::kWasmMemory64
: WasmMemoryFlag::kWasmMemory32);
if (!new_backing_store) {
// Crash on out-of-memory if the correctness fuzzer is running.
if (FLAG_correctness_fuzzer_suppressions) {
......
......@@ -253,9 +253,6 @@ class WasmTableObject
class WasmMemoryObject
: public TorqueGeneratedWasmMemoryObject<WasmMemoryObject, JSObject> {
public:
// Whether this memory object is a 64-bit memory.
enum MemoryIndexType { kMemory32, kMemory64 };
DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
// Add an instance to the internal (weak) list.
......@@ -266,12 +263,12 @@ class WasmMemoryObject
V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(
Isolate* isolate, Handle<JSArrayBuffer> buffer, int maximum,
MemoryIndexType index_type = MemoryIndexType::kMemory32);
WasmMemoryFlag memory_type = WasmMemoryFlag::kWasmMemory32);
V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(
Isolate* isolate, int initial, int maximum,
SharedFlag shared = SharedFlag::kNotShared,
MemoryIndexType index_type = MemoryIndexType::kMemory32);
WasmMemoryFlag memory_type = WasmMemoryFlag::kWasmMemory32);
static constexpr int kNoMaximum = -1;
......
......@@ -3397,7 +3397,7 @@ void WebSnapshotDeserializer::DeserializeArrayBuffers() {
}
backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
isolate_, byte_length, max_byte_length, page_size, initial_pages,
max_pages, false, shared);
max_pages, WasmMemoryFlag::kNotWasm, shared);
}
if (!backing_store) {
Throw("Create array buffer failed");
......
......@@ -74,8 +74,8 @@ TEST(BackingStore_Reclaim) {
// Make sure we can allocate memories without running out of address space.
Isolate* isolate = CcTest::InitIsolateOnce();
for (int i = 0; i < 256; ++i) {
auto backing_store =
BackingStore::AllocateWasmMemory(isolate, 1, 1, SharedFlag::kNotShared);
auto backing_store = BackingStore::AllocateWasmMemory(
isolate, 1, 1, WasmMemoryFlag::kWasmMemory32, SharedFlag::kNotShared);
CHECK(backing_store);
}
}
......
......@@ -2468,7 +2468,8 @@ class ValueSerializerTestWithSharedArrayBufferClone
auto pages = byte_length / i::wasm::kWasmPageSize;
auto i_isolate = reinterpret_cast<i::Isolate*>(isolate());
auto backing_store = i::BackingStore::AllocateWasmMemory(
i_isolate, pages, pages, i::SharedFlag::kShared);
i_isolate, pages, pages, i::WasmMemoryFlag::kWasmMemory32,
i::SharedFlag::kShared);
memcpy(backing_store->buffer_start(), data, byte_length);
i::Handle<i::JSArrayBuffer> buffer =
i_isolate->factory()->NewJSSharedArrayBuffer(
......
......@@ -13,8 +13,8 @@ namespace internal {
class BackingStoreTest : public TestWithIsolate {};
TEST_F(BackingStoreTest, GrowWasmMemoryInPlace) {
auto backing_store =
BackingStore::AllocateWasmMemory(isolate(), 1, 2, SharedFlag::kNotShared);
auto backing_store = BackingStore::AllocateWasmMemory(
isolate(), 1, 2, WasmMemoryFlag::kWasmMemory32, SharedFlag::kNotShared);
CHECK(backing_store);
EXPECT_TRUE(backing_store->is_wasm_memory());
EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
......@@ -28,8 +28,8 @@ TEST_F(BackingStoreTest, GrowWasmMemoryInPlace) {
}
TEST_F(BackingStoreTest, GrowWasmMemoryInPlace_neg) {
auto backing_store =
BackingStore::AllocateWasmMemory(isolate(), 1, 2, SharedFlag::kNotShared);
auto backing_store = BackingStore::AllocateWasmMemory(
isolate(), 1, 2, WasmMemoryFlag::kWasmMemory32, SharedFlag::kNotShared);
CHECK(backing_store);
EXPECT_TRUE(backing_store->is_wasm_memory());
EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
......@@ -42,8 +42,8 @@ TEST_F(BackingStoreTest, GrowWasmMemoryInPlace_neg) {
}
TEST_F(BackingStoreTest, GrowSharedWasmMemoryInPlace) {
auto backing_store =
BackingStore::AllocateWasmMemory(isolate(), 2, 3, SharedFlag::kShared);
auto backing_store = BackingStore::AllocateWasmMemory(
isolate(), 2, 3, WasmMemoryFlag::kWasmMemory32, SharedFlag::kShared);
CHECK(backing_store);
EXPECT_TRUE(backing_store->is_wasm_memory());
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_length());
......@@ -57,14 +57,15 @@ TEST_F(BackingStoreTest, GrowSharedWasmMemoryInPlace) {
}
TEST_F(BackingStoreTest, CopyWasmMemory) {
auto bs1 =
BackingStore::AllocateWasmMemory(isolate(), 1, 2, SharedFlag::kNotShared);
auto bs1 = BackingStore::AllocateWasmMemory(
isolate(), 1, 2, WasmMemoryFlag::kWasmMemory32, SharedFlag::kNotShared);
CHECK(bs1);
EXPECT_TRUE(bs1->is_wasm_memory());
EXPECT_EQ(1 * wasm::kWasmPageSize, bs1->byte_length());
EXPECT_EQ(2 * wasm::kWasmPageSize, bs1->byte_capacity());
auto bs2 = bs1->CopyWasmMemory(isolate(), 3, 3);
auto bs2 =
bs1->CopyWasmMemory(isolate(), 3, 3, WasmMemoryFlag::kWasmMemory32);
EXPECT_TRUE(bs2->is_wasm_memory());
EXPECT_EQ(3 * wasm::kWasmPageSize, bs2->byte_length());
EXPECT_EQ(3 * wasm::kWasmPageSize, bs2->byte_capacity());
......@@ -111,6 +112,7 @@ TEST_F(BackingStoreTest, RacyGrowWasmMemoryInPlace) {
std::shared_ptr<BackingStore> backing_store =
BackingStore::AllocateWasmMemory(isolate(), 0, kMaxPages,
WasmMemoryFlag::kWasmMemory32,
SharedFlag::kShared);
for (int i = 0; i < kNumThreads; i++) {
......
......@@ -81,8 +81,9 @@ class TrapHandlerTest : public TestWithIsolate,
void SetUp() override {
InstallFallbackHandler();
SetupTrapHandler(GetParam());
backing_store_ = BackingStore::AllocateWasmMemory(i_isolate(), 1, 1,
SharedFlag::kNotShared);
backing_store_ = BackingStore::AllocateWasmMemory(
i_isolate(), 1, 1, WasmMemoryFlag::kWasmMemory32,
SharedFlag::kNotShared);
CHECK(backing_store_);
EXPECT_TRUE(backing_store_->has_guard_regions());
// The allocated backing store ends with a guard page.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment