Commit eb7a36be authored by Clemens Backes, committed by Commit Bot

[wasm] Replace RecursiveMutex by Mutex

Some of the methods in {WasmCodeAllocator} can be called either without
holding the lock or while already holding it. To support both
situations, we have been using a {RecursiveMutex} so far.
This CL refactors this back to a plain {Mutex} and instead passes a
{WasmCodeAllocator::OptionalLock} object which records whether the lock
is already held.
Note that taking the lock twice now fails immediately in debug builds,
whereas forgetting to take the lock might only be caught by TSan.

The alternative would be to duplicate all methods, with one variant
that expects the lock to be held and one that assumes it is unlocked.
That would mean duplicating multiple methods across both
{NativeModule} and {WasmCodeAllocator}, hence I went for the
{OptionalLock} instead.
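
For illustration, below is a minimal, self-contained sketch of the
{OptionalLock} pattern. It uses std::mutex as a stand-in for
v8::base::Mutex, and the {Allocator} class with its {Allocate} method are
hypothetical names invented for this sketch; only the shape of
{OptionalLock} mirrors the CL.

// Minimal sketch of the {OptionalLock} pattern (assumption: std::mutex as a
// stand-in for v8::base::Mutex; {Allocator}/{Allocate} are hypothetical).
#include <cassert>
#include <mutex>

class Allocator {
 public:
  class OptionalLock {
   public:
    // External users can only construct a non-locked {OptionalLock}.
    OptionalLock() = default;
    ~OptionalLock() {
      if (allocator_) allocator_->mutex_.unlock();
    }
    bool is_locked() const { return allocator_ != nullptr; }

   private:
    friend class Allocator;
    void Lock(Allocator* allocator) {
      assert(!is_locked());  // Taking the same lock twice is a bug.
      allocator_ = allocator;
      allocator->mutex_.lock();
    }
    Allocator* allocator_ = nullptr;
  };

  // Callers that already hold the lock pass their {OptionalLock}; all other
  // callers pass a default-constructed one and the lock is taken here.
  void Allocate(const OptionalLock& optional_lock) {
    OptionalLock new_lock;
    if (!optional_lock.is_locked()) new_lock.Lock(this);
    const OptionalLock& locked =
        optional_lock.is_locked() ? optional_lock : new_lock;
    assert(locked.is_locked());
    // ... perform the allocation; helpers that need the lock receive {locked}
    // instead of re-acquiring the (non-recursive) mutex ...
    (void)locked;
  }

 private:
  std::mutex mutex_;
};

int main() {
  Allocator allocator;
  // The lock is not held yet, so {Allocate} takes it via a fresh OptionalLock.
  allocator.Allocate(Allocator::OptionalLock{});
}

In the CL itself, {WasmCodeAllocator::AllocateForCodeInRegion} follows this
shape and passes the locked reference on to {NativeModule::AddCodeSpace},
which may call back into the allocator (e.g. for jump tables) without
re-acquiring the now non-recursive mutex.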

Bug: v8:9477

R=mstarzinger@chromium.org

Change-Id: I4e32286cdb93385ac655d408191b330efdd7ad66
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1825338
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64137}
parent c8ebe89d
@@ -430,6 +430,16 @@ void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
if (engine) engine->FreeDeadCode(dead_code);
}
WasmCodeAllocator::OptionalLock::~OptionalLock() {
if (allocator_) allocator_->mutex_.Unlock();
}
void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
DCHECK(!is_locked());
allocator_ = allocator;
allocator->mutex_.Lock();
}
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
VirtualMemory code_space,
bool can_request_more,
@@ -448,6 +458,11 @@ WasmCodeAllocator::~WasmCodeAllocator() {
committed_code_space());
}
void WasmCodeAllocator::Init(NativeModule* native_module) {
DCHECK_EQ(1, owned_code_space_.size());
native_module->AddCodeSpace(owned_code_space_[0].region(), {});
}
namespace {
// On Windows, we cannot commit a region that straddles different reservations
// of virtual memory. Because we bump-allocate, and because, if we need more
@@ -539,12 +554,18 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
size_t size) {
return AllocateForCodeInRegion(
native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()});
native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()},
WasmCodeAllocator::OptionalLock{});
}
Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
NativeModule* native_module, size_t size, base::AddressRegion region) {
base::RecursiveMutexGuard lock(&mutex_);
NativeModule* native_module, size_t size, base::AddressRegion region,
const WasmCodeAllocator::OptionalLock& optional_lock) {
OptionalLock new_lock;
if (!optional_lock.is_locked()) new_lock.Lock(this);
const auto& locked_lock =
optional_lock.is_locked() ? optional_lock : new_lock;
DCHECK(locked_lock.is_locked());
DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
DCHECK_LT(0, size);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
@@ -579,7 +600,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
code_manager_->AssignRange(new_region, native_module);
free_code_space_.Merge(new_region);
owned_code_space_.emplace_back(std::move(new_mem));
native_module->AddCodeSpace(new_region);
native_module->AddCodeSpace(new_region, locked_lock);
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
@@ -619,7 +640,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
}
bool WasmCodeAllocator::SetExecutable(bool executable) {
base::RecursiveMutexGuard lock(&mutex_);
base::MutexGuard lock(&mutex_);
if (is_executable_ == executable) return true;
TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
@@ -682,7 +703,7 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
freed_code_size_.fetch_add(code_size);
// Merge {freed_regions} into {freed_code_space_} and discard full pages.
base::RecursiveMutexGuard guard(&mutex_);
base::MutexGuard guard(&mutex_);
PageAllocator* allocator = GetPlatformPageAllocator();
size_t commit_page_size = allocator->CommitPageSize();
for (auto region : freed_regions.regions()) {
@@ -706,16 +727,10 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
}
size_t WasmCodeAllocator::GetNumCodeSpaces() const {
base::RecursiveMutexGuard lock(&mutex_);
base::MutexGuard lock(&mutex_);
return owned_code_space_.size();
}
base::AddressRegion WasmCodeAllocator::GetSingleCodeRegion() const {
base::RecursiveMutexGuard lock(&mutex_);
DCHECK_EQ(1, owned_code_space_.size());
return owned_code_space_[0].region();
}
NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
bool can_request_more, VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
@@ -742,7 +757,7 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
code_table_ =
std::make_unique<WasmCode*[]>(module_->num_declared_functions);
}
AddCodeSpace(code_allocator_.GetSingleCodeRegion());
code_allocator_.Init(this);
}
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
@@ -764,7 +779,7 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
// Re-allocate jump table.
main_jump_table_ = CreateEmptyJumpTableInRegion(
JumpTableAssembler::SizeForNumberOfSlots(max_functions),
single_code_space_region);
single_code_space_region, WasmCodeAllocator::OptionalLock{});
base::MutexGuard guard(&allocation_mutex_);
code_space_data_[0].jump_table = main_jump_table_;
}
@@ -894,7 +909,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
}
lazy_compile_table_ = CreateEmptyJumpTableInRegion(
JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
single_code_space_region);
single_code_space_region, WasmCodeAllocator::OptionalLock{});
JumpTableAssembler::GenerateLazyCompileTable(
lazy_compile_table_->instruction_start(), num_slots,
module_->num_imported_functions,
@@ -1130,11 +1145,12 @@ WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
}
WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
uint32_t jump_table_size, base::AddressRegion region) {
uint32_t jump_table_size, base::AddressRegion region,
const WasmCodeAllocator::OptionalLock& allocator_lock) {
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
Vector<uint8_t> code_space =
code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
this, jump_table_size, region, allocator_lock);
DCHECK(!code_space.empty());
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{new WasmCode{
@@ -1195,7 +1211,9 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
target);
}
void NativeModule::AddCodeSpace(base::AddressRegion region) {
void NativeModule::AddCodeSpace(
base::AddressRegion region,
const WasmCodeAllocator::OptionalLock& allocator_lock) {
#ifndef V8_EMBEDDED_BUILTINS
// The far jump table contains far jumps to the embedded builtins. This
// requires a build with embedded builtins enabled.
@@ -1222,8 +1240,8 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
size_t size = Heap::GetCodeRangeReservedAreaSize();
DCHECK_LT(0, size);
Vector<byte> padding =
code_allocator_.AllocateForCodeInRegion(this, size, region);
Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
this, size, region, allocator_lock);
CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
win64_unwindinfo::RegisterNonABICompliantCodeRange(
reinterpret_cast<void*>(region.begin()), region.size());
@@ -1243,7 +1261,8 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
if (needs_jump_table) {
jump_table = CreateEmptyJumpTableInRegion(
JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
allocator_lock);
CHECK(region.contains(jump_table->instruction_start()));
}
@@ -1252,7 +1271,7 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
far_jump_table = CreateEmptyJumpTableInRegion(
JumpTableAssembler::SizeForNumberOfFarJumpSlots(
WasmCode::kRuntimeStubCount, num_function_slots),
region);
region, allocator_lock);
CHECK(region.contains(far_jump_table->instruction_start()));
EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define RUNTIME_STUB(Name) Builtins::k##Name,
......
@@ -280,11 +280,33 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
public:
// {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
// indicate that the lock on the {WasmCodeAllocator} is already taken. It's
// optional to allow to also call methods without holding the lock.
class OptionalLock {
public:
// External users can only instantiate a non-locked {OptionalLock}.
OptionalLock() = default;
~OptionalLock();
bool is_locked() const { return allocator_ != nullptr; }
private:
friend class WasmCodeAllocator;
// {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
// is passed.
void Lock(WasmCodeAllocator*);
WasmCodeAllocator* allocator_ = nullptr;
};
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
bool can_request_more,
std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
// Call before use, after the {NativeModule} is set up completely.
void Init(NativeModule*);
size_t committed_code_space() const {
return committed_code_space_.load(std::memory_order_acquire);
}
@@ -301,7 +323,8 @@ class WasmCodeAllocator {
// Allocate code space within a specific region. Returns a valid buffer or
// fails with OOM (crash).
Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
base::AddressRegion);
base::AddressRegion,
const WasmCodeAllocator::OptionalLock&);
// Sets permissions of all owned code space to executable, or read-write (if
// {executable} is false). Returns true on success.
@@ -313,18 +336,11 @@ class WasmCodeAllocator {
// Retrieve the number of separately reserved code spaces.
size_t GetNumCodeSpaces() const;
// Returns the region of the single code space managed by this code allocator.
// Will fail if more than one code space has been created.
base::AddressRegion GetSingleCodeRegion() const;
private:
// The engine-wide wasm code manager.
WasmCodeManager* const code_manager_;
// TODO(clemensb): Try to make this non-recursive again. It's recursive
// currently because {AllocateForCodeInRegion} might create a new code space,
// which recursively calls {AllocateForCodeInRegion} for the jump table.
mutable base::RecursiveMutex mutex_;
mutable base::Mutex mutex_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
@@ -547,8 +563,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
ExecutionTier tier, Vector<uint8_t> code_space,
const JumpTablesRef& jump_tables_ref);
WasmCode* CreateEmptyJumpTableInRegion(uint32_t jump_table_size,
base::AddressRegion);
WasmCode* CreateEmptyJumpTableInRegion(
uint32_t jump_table_size, base::AddressRegion,
const WasmCodeAllocator::OptionalLock&);
// Hold the {allocation_mutex_} when calling one of these methods.
// {slot_index} is the index in the declared functions, i.e. function index
@@ -558,7 +575,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
Address target);
// Called by the {WasmCodeAllocator} to register a new code space.
void AddCodeSpace(base::AddressRegion);
void AddCodeSpace(base::AddressRegion,
const WasmCodeAllocator::OptionalLock&);
// Hold the {allocation_mutex_} when calling this method.
bool has_interpreter_redirection(uint32_t func_index) {