Commit 5ead3a55 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Refactor code space allocation

Make {AllocateForCode} return an actual buffer, and move the OOM check
into that method. This allows us to generate more precise OOM messages.

R=mstarzinger@chromium.org

Change-Id: Ie9ed81248fe8068c92eec29a4911ffef43032de2
Reviewed-on: https://chromium-review.googlesource.com/1245769
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56258}
parent 1c2d6111
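
In outline, the refactoring moves the OOM check out of the caller ({AddOwnedCode}) and into {AllocateForCode} itself, which then reports precisely which step ran out of memory. A minimal sketch of the new contract, using hypothetical stand-ins rather than V8's real types:

// Sketch only: ByteVector and FatalProcessOutOfMemory are simplified
// stand-ins for V8's Vector<byte> and V8::FatalProcessOutOfMemory.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct ByteVector {
  uint8_t* start;  // pointer and size travel together
  size_t size;
};

[[noreturn]] void FatalProcessOutOfMemory(const char* location) {
  std::fprintf(stderr, "OOM in %s\n", location);
  std::abort();
}

// New contract: return a usable buffer or crash with a message naming the
// failing step; callers never see a null result.
ByteVector AllocateForCode(size_t size) {
  uint8_t* mem = static_cast<uint8_t*>(std::malloc(size));  // placeholder store
  if (mem == nullptr) {
    FatalProcessOutOfMemory("NativeModule::AllocateForCode reservation");
  }
  return {mem, size};
}

int main() {
  ByteVector buf = AllocateForCode(64);  // no null check at the call site
  std::printf("got %zu bytes at %p\n", buf.size, static_cast<void*>(buf.start));
  std::free(buf.start);
}

The call site no longer branches on a null result, and the fatal message pinpoints the allocation step instead of naming only the generic caller.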
@@ -381,18 +381,13 @@ WasmCode* NativeModule::AddOwnedCode(
   // Both allocation and insertion in owned_code_ happen in the same critical
   // section, thus ensuring owned_code_'s elements are rarely if ever moved.
   base::LockGuard<base::Mutex> lock(&allocation_mutex_);
-  Address executable_buffer = AllocateForCode(instructions.size());
-  if (executable_buffer == kNullAddress) {
-    V8::FatalProcessOutOfMemory(nullptr, "NativeModule::AddOwnedCode");
-    UNREACHABLE();
-  }
+  Vector<byte> executable_buffer = AllocateForCode(instructions.size());
   // Ownership will be transferred to {owned_code_} below.
-  code = new WasmCode(
-      this, index,
-      {reinterpret_cast<byte*>(executable_buffer), instructions.size()},
-      stack_slots, safepoint_table_offset, handler_table_offset,
+  code = new WasmCode(this, index, executable_buffer, stack_slots,
+                      safepoint_table_offset, handler_table_offset,
                       constant_pool_offset, std::move(protected_instructions),
-      std::move(reloc_info), std::move(source_position_table), kind, tier);
+                      std::move(reloc_info), std::move(source_position_table),
+                      kind, tier);
   if (owned_code_.empty() ||
       code->instruction_start() > owned_code_.back()->instruction_start()) {
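
Note how the hunk above removes the hand-built pair {reinterpret_cast<byte*>(executable_buffer), instructions.size()}: the allocator's own (pointer, length) pair now travels as one value. A small hedged sketch of why that matters (hypothetical helper names, not V8 code):

#include <cstddef>
#include <cstdint>
#include <cstring>

struct ByteVector {  // stand-in for Vector<byte>
  uint8_t* start;
  size_t size;
};

// Before: pointer and length arrive separately, so a call site can pair the
// buffer with a length the allocator never granted.
void CopyCodeUnsafe(uint8_t* dst, const uint8_t* src, size_t len) {
  std::memcpy(dst, src, len);
}

// After: the buffer carries its own size, so the copy is bounded by what was
// actually allocated.
void CopyCode(ByteVector dst, const uint8_t* src) {
  std::memcpy(dst.start, src, dst.size);
}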
@@ -655,21 +650,29 @@ void NativeModule::InstallCode(WasmCode* code) {
                        WasmCode::kFlushICache);
 }
 
-Address NativeModule::AllocateForCode(size_t size) {
+Vector<byte> NativeModule::AllocateForCode(size_t size) {
   DCHECK_LT(0, size);
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   // This happens under a lock assumed by the caller.
   size = RoundUp(size, kCodeAlignment);
   base::AddressRegion mem = free_code_space_.Allocate(size);
   if (mem.is_empty()) {
-    if (!can_request_more_memory_) return kNullAddress;
+    if (!can_request_more_memory_) {
+      V8::FatalProcessOutOfMemory(nullptr,
+                                  "NativeModule::AllocateForCode reservation");
+      UNREACHABLE();
+    }
     Address hint = owned_code_space_.empty() ? kNullAddress
                                              : owned_code_space_.back().end();
     VirtualMemory new_mem =
         wasm_code_manager_->TryAllocate(size, reinterpret_cast<void*>(hint));
-    if (!new_mem.IsReserved()) return kNullAddress;
+    if (!new_mem.IsReserved()) {
+      V8::FatalProcessOutOfMemory(nullptr,
+                                  "NativeModule::AllocateForCode reservation");
+      UNREACHABLE();
+    }
     wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
     free_code_space_.Merge(new_mem.region());
@@ -703,24 +706,27 @@ Address NativeModule::AllocateForCode(size_t size) {
       size_t commit_size = static_cast<size_t>(commit_end - start);
       DCHECK(IsAligned(commit_size, page_allocator->AllocatePageSize()));
       if (!wasm_code_manager_->Commit(start, commit_size)) {
-        return kNullAddress;
+        V8::FatalProcessOutOfMemory(nullptr,
+                                    "NativeModule::AllocateForCode commit");
+        UNREACHABLE();
       }
       committed_code_space_.fetch_add(commit_size);
       commit_end = start;
     }
 #else
     size_t commit_size = static_cast<size_t>(commit_end - commit_start);
     DCHECK(IsAligned(commit_size, page_allocator->AllocatePageSize()));
     if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
-      return kNullAddress;
+      V8::FatalProcessOutOfMemory(nullptr,
+                                  "NativeModule::AllocateForCode commit");
+      UNREACHABLE();
     }
+    committed_code_space_.fetch_add(commit_size);
 #endif
-    committed_code_space_.fetch_add(commit_end - commit_start);
   }
   DCHECK(IsAligned(mem.begin(), kCodeAlignment));
   allocated_code_space_.Merge(mem);
-  TRACE_HEAP("Code alloc for %p: %" PRIuPTR ",+%zu\n", this, mem.begin(), size);
-  return mem.begin();
+  TRACE_HEAP("Code alloc for %p: %" PRIxPTR ",+%zu\n", this, mem.begin(), size);
+  return {reinterpret_cast<byte*>(mem.begin()), mem.size()};
 }
 WasmCode* NativeModule::Lookup(Address pc) const {
...
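
For readers outside V8: the implementation above is a reserve-then-commit allocator. Address space is reserved in large regions (TryAllocate), merged into the free_code_space_ pool, and actual pages are committed only when an allocation first touches them; on Windows a commit may not straddle separate reservations, hence the extra loop. A simplified POSIX-only sketch of the same flow, with hypothetical helpers standing in for WasmCodeManager and VirtualMemory:

#include <sys/mman.h>  // POSIX-only sketch
#include <cstddef>
#include <cstdio>
#include <cstdlib>

constexpr size_t kPageSize = 4096;     // assumed page size for the sketch
constexpr size_t kCodeAlignment = 16;  // mirrors the idea of V8's kCodeAlignment

size_t RoundUp(size_t value, size_t align) {  // align must be a power of two
  return (value + align - 1) & ~(align - 1);
}

// Reserve address space without backing pages (like VirtualMemory).
void* Reserve(size_t size) {
  void* p = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}

// Commit a sub-range of a reservation (make its pages accessible).
bool Commit(void* start, size_t size) {
  return mprotect(start, size, PROT_READ | PROT_WRITE) == 0;
}

int main() {
  size_t request = RoundUp(100, kCodeAlignment);
  size_t reserve_size = RoundUp(request, kPageSize);
  void* region = Reserve(reserve_size);
  if (region == nullptr) {
    std::fprintf(stderr, "OOM in sketch reservation\n");  // the "reservation" step
    std::abort();
  }
  if (!Commit(region, reserve_size)) {
    std::fprintf(stderr, "OOM in sketch commit\n");       // the "commit" step
    std::abort();
  }
  std::printf("code buffer at %p, +%zu\n", region, request);
  return 0;
}

Splitting the two steps is what lets the commit report "reservation" and "commit" failures separately.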
@@ -348,7 +348,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
       std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
   WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
 
-  Address AllocateForCode(size_t size);
+  // Allocate code space. Returns a valid buffer or fails with OOM (crash).
+  Vector<byte> AllocateForCode(size_t size);
 
   // Primitive for adding code to the native module. All code added to a native
   // module is owned by that module. Various callers get to decide on how the
...
@@ -198,7 +198,7 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
   CHECK_EQ(0, manager.remaining_uncommitted_code_space());
   ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
-                            "OOM in NativeModule::AddOwnedCode");
+                            "OOM in NativeModule::AllocateForCode commit");
 }
 
 TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
@@ -221,9 +221,12 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
   CHECK_NOT_NULL(code);
   CHECK_EQ(0, manager.remaining_uncommitted_code_space());
+  // This fails in "reservation" if we cannot extend the code space, or in
+  // "commit" if we can (since we hit the allocation limit in the
+  // WasmCodeManager). Hence don't check for that part of the OOM message.
   ASSERT_DEATH_IF_SUPPORTED(
       AddCode(native_module.get(), index++, 1 * kCodeAlignment),
-      "OOM in NativeModule::AddOwnedCode");
+      "OOM in NativeModule::AllocateForCode");
 }
 
 TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
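
For context, ASSERT_DEATH_IF_SUPPORTED runs the statement in a child process and treats the second argument as a regular expression matched against the child's stderr; that is why matching only the "NativeModule::AllocateForCode" prefix above tolerates either suffix. A standalone hedged sketch of the same pattern (plain googletest, outside V8's test harness):

#include <cstdio>
#include <cstdlib>
#include "gtest/gtest.h"

[[noreturn]] void FatalOom(const char* location) {
  std::fprintf(stderr, "OOM in %s\n", location);  // simplified OOM message
  std::abort();
}

TEST(DeathTestSketch, MatchesOomLocationPrefix) {
  // Matching the function name but not the "reservation"/"commit" suffix
  // keeps the assertion valid whichever failure path triggers first.
  ASSERT_DEATH_IF_SUPPORTED(FatalOom("NativeModule::AllocateForCode commit"),
                            "OOM in NativeModule::AllocateForCode");
}

Linked against gtest_main, this passes where death tests are supported and degrades to a warning where they are not.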
@@ -235,7 +238,7 @@ TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
   WasmCode* code = AddCode(nm1.get(), 0, 2 * page() - kJumpTableSize);
   CHECK_NOT_NULL(code);
   ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * page() - kJumpTableSize),
-                            "OOM in NativeModule::AddOwnedCode");
+                            "OOM in NativeModule::AllocateForCode commit");
 }
 
 TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
@@ -262,7 +265,7 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
     // grow.
     ASSERT_DEATH_IF_SUPPORTED(
         AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment),
-        "OOM in NativeModule::AddOwnedCode");
+        "OOM in NativeModule::AllocateForCode");
   } else {
     // The module grows by one page. One page remains uncommitted.
     CHECK_NOT_NULL(
...