Commit c5a16a39 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Factor out {WasmCodeAllocator}

This factors out a {WasmCodeAllocator} which manages all code
reservations and allocations for a {NativeModule}. This will allow for
better testing of this component (tests will be added in a separate CL).

R=titzer@chromium.org

Change-Id: I09727c30afc533e95569276147792d0e641b0507
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1605738
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61445}
parent 5343d789
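For orientation before the diff: {NativeModule} no longer keeps allocation bookkeeping of its own; every code-space request is forwarded to the embedded {WasmCodeAllocator}, which takes the {NativeModule} as an explicit argument. A minimal C++ sketch of that delegation shape (simplified stand-in types, not the exact V8 code):

#include <cstddef>
#include <cstdint>

using byte = uint8_t;
class NativeModule;

class WasmCodeAllocator {
 public:
  // The NativeModule is passed in explicitly, since the allocator is now a
  // separate component rather than part of NativeModule itself.
  byte* AllocateForCode(NativeModule*, size_t size) {
    static byte buffer[4096];  // stand-in for a real code reservation
    return size <= sizeof(buffer) ? buffer : nullptr;
  }
};

class NativeModule {
 public:
  byte* AddCodeSpace(size_t size) {
    // All allocation requests are forwarded to the owned allocator.
    return code_allocator_.AllocateForCode(this, size);
  }
 private:
  WasmCodeAllocator code_allocator_;
};

int main() { NativeModule m; return m.AddCodeSpace(64) ? 0 : 1; }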
@@ -272,6 +272,64 @@ class V8_EXPORT_PRIVATE WasmCode final {
// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);
// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
public:
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
bool can_request_more);
~WasmCodeAllocator();
size_t committed_code_space() const {
return committed_code_space_.load(std::memory_order_acquire);
}
size_t generated_code_size() const {
return generated_code_size_.load(std::memory_order_acquire);
}
size_t freed_code_size() const {
return freed_code_size_.load(std::memory_order_acquire);
}
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
Vector<byte> AllocateForCode(NativeModule*, size_t size);
// Sets permissions of all owned code space to executable, or read-write (if
// {executable} is false). Returns true on success.
V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
// Free memory pages of all given code objects. Used for wasm code GC.
void FreeCode(Vector<WasmCode* const>);
private:
// The engine-wide wasm code manager.
WasmCodeManager* const code_manager_;
mutable base::Mutex mutex_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
// Code space that was reserved and is available for allocations (subset of
// {owned_code_space_}).
DisjointAllocationPool free_code_space_;
// Code space that was allocated for code (subset of {owned_code_space_}).
DisjointAllocationPool allocated_code_space_;
// Code space that was allocated before but is dead now. Full pages within
// this region are discarded. It's still a subset of {owned_code_space_}.
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
std::atomic<size_t> committed_code_space_{0};
std::atomic<size_t> generated_code_size_{0};
std::atomic<size_t> freed_code_size_{0};
bool is_executable_ = false;
const bool can_request_more_memory_;
};
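The three {DisjointAllocationPool} members encode an invariant: {free_code_space_}, {allocated_code_space_}, and {freed_code_space_} are disjoint views into the reservations held in {owned_code_space_}, and {AllocateForCode} moves a range from the free pool into the allocated pool. A standalone sketch of that bookkeeping ({SimplePool} is a simplified stand-in, not V8's real {DisjointAllocationPool} API):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

using Address = uintptr_t;

// Tracks disjoint [start, start + size) regions, keyed by start address.
class SimplePool {
 public:
  void Merge(Address start, size_t size) { regions_[start] = size; }
  // Carve {size} bytes off the first region that is large enough.
  Address Allocate(size_t size) {
    for (auto it = regions_.begin(); it != regions_.end(); ++it) {
      if (it->second < size) continue;
      Address result = it->first;
      size_t remaining = it->second - size;
      regions_.erase(it);
      if (remaining > 0) regions_[result + size] = remaining;
      return result;
    }
    return 0;  // out of space; the real allocator OOM-crashes instead
  }
 private:
  std::map<Address, size_t> regions_;
};

int main() {
  // One 64 KiB reservation; the free and allocated pools are subsets of it.
  SimplePool free_space, allocated_space;
  free_space.Merge(0x10000, 0x10000);
  Address code = free_space.Allocate(0x100);  // moves bytes free -> allocated
  allocated_space.Merge(code, 0x100);
  assert(code == 0x10000);
}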
class V8_EXPORT_PRIVATE NativeModule final {
public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
@@ -357,7 +415,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// above {GetCallTargetForFunction} returns) to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
bool SetExecutable(bool executable);
bool SetExecutable(bool executable) {
return code_allocator_.SetExecutable(executable);
}
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
@@ -384,7 +444,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; }
size_t committed_code_space() const { return committed_code_space_.load(); }
size_t committed_code_space() const {
return code_allocator_.committed_code_space();
}
WasmEngine* engine() const { return engine_; }
void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
@@ -442,8 +504,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Add and publish anonymous code.
WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind,
const char* name = nullptr);
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
Vector<byte> AllocateForCode(size_t size);
WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
@@ -471,6 +531,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
byte |= 1 << (bitset_idx % kBitsPerByte);
}
// {WasmCodeAllocator} manages all code reservations and allocations for this
// {NativeModule}.
WasmCodeAllocator code_allocator_;
// Features enabled for this module. We keep a copy of the features that
// were enabled at the time of the creation of this native module,
// to be consistent across asynchronous compilations later.
@@ -517,27 +581,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
// this module marking those functions that have been redirected.
std::unique_ptr<uint8_t[]> interpreter_redirections_;
// Code space that was reserved and is available for allocations (subset of
// {owned_code_space_}).
DisjointAllocationPool free_code_space_;
// Code space that was allocated for code (subset of {owned_code_space_}).
DisjointAllocationPool allocated_code_space_;
// Code space that was allocated before but is dead now. Full pages within
// this region are discarded. It's still a subset of {owned_code_space_}.
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
WasmEngine* const engine_;
std::atomic<size_t> committed_code_space_{0};
std::atomic<size_t> generated_code_size_{0};
std::atomic<size_t> freed_code_size_{0};
int modification_scope_depth_ = 0;
bool can_request_more_memory_;
UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
DISALLOW_COPY_AND_ASSIGN(NativeModule);
@@ -577,7 +626,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
private:
friend class NativeModule;
friend class WasmCodeAllocator;
friend class WasmEngine;
std::shared_ptr<NativeModule> NewNativeModule(
@@ -592,7 +641,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// for the freed memory size. We do that in FreeNativeModule.
// There's no separate Uncommit.
void FreeNativeModule(NativeModule*);
void FreeNativeModule(Vector<VirtualMemory> owned_code,
size_t committed_size);
void AssignRanges(Address start, Address end, NativeModule*);
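The new {FreeNativeModule} overload matches the ownership move: the {WasmCodeAllocator} holds the {VirtualMemory} reservations, so on teardown it can hand the manager exactly the regions to release plus the byte count to subtract from the committed-space budget. A plausible call site (hypothetical body; only the declarations above are taken from the diff):

// Hypothetical sketch: on destruction, return all reservations to the
// engine-wide manager and credit back the committed bytes. {VectorOf} is
// assumed to adapt a std::vector into a Vector view.
WasmCodeAllocator::~WasmCodeAllocator() {
  code_manager_->FreeNativeModule(VectorOf(owned_code_space_),
                                  committed_code_space());
}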
@@ -614,50 +614,46 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
}
void WasmEngine::FreeNativeModule(NativeModule* native_module) {
{
base::MutexGuard guard(&mutex_);
auto it = native_modules_.find(native_module);
DCHECK_NE(native_modules_.end(), it);
for (Isolate* isolate : it->second->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
IsolateInfo* info = isolates_[isolate].get();
DCHECK_EQ(1, info->native_modules.count(native_module));
info->native_modules.erase(native_module);
// If there are {WasmCode} objects of the deleted {NativeModule}
// outstanding to be logged in this isolate, remove them. Decrementing the
// ref count is not needed, since the {NativeModule} dies anyway.
size_t remaining = info->code_to_log.size();
if (remaining > 0) {
for (size_t i = 0; i < remaining; ++i) {
while (i < remaining &&
info->code_to_log[i]->native_module() == native_module) {
// Move the last remaining item to this slot (this can be the same
// as {i}, which is OK).
info->code_to_log[i] = info->code_to_log[--remaining];
}
base::MutexGuard guard(&mutex_);
auto it = native_modules_.find(native_module);
DCHECK_NE(native_modules_.end(), it);
for (Isolate* isolate : it->second->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
IsolateInfo* info = isolates_[isolate].get();
DCHECK_EQ(1, info->native_modules.count(native_module));
info->native_modules.erase(native_module);
// If there are {WasmCode} objects of the deleted {NativeModule}
// outstanding to be logged in this isolate, remove them. Decrementing the
// ref count is not needed, since the {NativeModule} dies anyway.
size_t remaining = info->code_to_log.size();
if (remaining > 0) {
for (size_t i = 0; i < remaining; ++i) {
while (i < remaining &&
info->code_to_log[i]->native_module() == native_module) {
// Move the last remaining item to this slot (this can be the same
// as {i}, which is OK).
info->code_to_log[i] = info->code_to_log[--remaining];
}
info->code_to_log.resize(remaining);
}
info->code_to_log.resize(remaining);
}
// If there is a GC running which has references to code contained in the
// deleted {NativeModule}, remove those references.
if (current_gc_info_) {
for (auto it = current_gc_info_->dead_code.begin(),
end = current_gc_info_->dead_code.end();
it != end;) {
if ((*it)->native_module() == native_module) {
it = current_gc_info_->dead_code.erase(it);
} else {
++it;
}
}
// If there is a GC running which has references to code contained in the
// deleted {NativeModule}, remove those references.
if (current_gc_info_) {
for (auto it = current_gc_info_->dead_code.begin(),
end = current_gc_info_->dead_code.end();
it != end;) {
if ((*it)->native_module() == native_module) {
it = current_gc_info_->dead_code.erase(it);
} else {
++it;
}
TRACE_CODE_GC(
"Native module %p died, reducing dead code objects to %zu.\n",
native_module, current_gc_info_->dead_code.size());
}
native_modules_.erase(it);
TRACE_CODE_GC("Native module %p died, reducing dead code objects to %zu.\n",
native_module, current_gc_info_->dead_code.size());
}
code_manager_.FreeNativeModule(native_module);
native_modules_.erase(it);
}
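The {code_to_log} cleanup in this function (unchanged here apart from indentation) uses a swap-remove filter: each matching entry is overwritten with the last live element, compacting the vector in linear time without preserving order. A standalone illustration of the idiom:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 2, 3, 2, 4};
  size_t remaining = v.size();
  for (size_t i = 0; i < remaining; ++i) {
    while (i < remaining && v[i] == 2) {
      // Move the last remaining item into this slot (may be slot i itself).
      v[i] = v[--remaining];
    }
  }
  v.resize(remaining);
  for (int x : v) printf("%d ", x);  // prints: 1 4 3 (order not preserved)
  return 0;
}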
namespace {
@@ -213,7 +213,7 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
CHECK_EQ(0, manager()->committed_code_space());
ASSERT_DEATH_IF_SUPPORTED(AllocModule(page_size, GetParam()),
"OOM in NativeModule::AllocateForCode commit");
"OOM in wasm code commit");
}
TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
@@ -244,7 +244,7 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
// WasmCodeManager). Hence don't check for that part of the OOM message.
ASSERT_DEATH_IF_SUPPORTED(
AddCode(native_module.get(), index++, 1 * kCodeAlignment),
"OOM in NativeModule::AllocateForCode");
"OOM in wasm code");
}
TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
@@ -260,7 +260,7 @@ TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
CHECK_NOT_NULL(code);
ASSERT_DEATH_IF_SUPPORTED(
AddCode(nm2.get(), 0, 2 * page_size - kJumpTableSize),
"OOM in NativeModule::AllocateForCode commit");
"OOM in wasm code commit");
}
TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
@@ -275,7 +275,7 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
// grow.
ASSERT_DEATH_IF_SUPPORTED(
AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment),
"OOM in NativeModule::AllocateForCode");
"OOM in wasm code reservation");
} else {
// The module grows by one page. One page remains uncommitted.
WasmCodeRefScope code_ref_scope;