Commit 2eefd6a1 authored by Clemens Backes, committed by Commit Bot

[wasm] Merge two related Mutexes into one

This changes the interaction between {NativeModule} and
{WasmCodeAllocator}. The {WasmCodeAllocator} is a field of
{NativeModule}, and is only called directly by the {NativeModule}. So
far, two mutexes were involved: the {allocation_mutex_} in
{NativeModule} and the {mutex_} in {WasmCodeAllocator}. This caused
problems with lock order inversion.

This CL thus merges the two mutexes by always locking the mutex in
{NativeModule} when calling a non-atomic method in {WasmCodeAllocator}.
This serializes slightly more code, but none of it should be
performance-critical.

This removes the awkward {OptionalLock} class and adds the "Locked"
suffix to a few methods to document that those can only be called
while holding the allocation mutex.
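
To illustrate the convention, here is a minimal sketch with simplified
stand-in classes ({Module}, {CodeAllocator} and the byte-buffer
allocation are made up for illustration, not the actual V8 code): the
owner holds the single mutex, its public entry points take it, and
helpers carrying the "Locked" suffix assume it is already held.

  #include <cstddef>
  #include <cstdint>
  #include <mutex>
  #include <vector>

  class CodeAllocator {
   public:
    // "Locked" suffix: the caller must already hold the owning module's
    // allocation mutex.
    std::vector<std::uint8_t>& AllocateForCodeLocked(std::size_t size) {
      buffers_.emplace_back(size);
      return buffers_.back();
    }

   private:
    // No mutex of its own any more; all fields are protected by the
    // owner's mutex.
    std::vector<std::vector<std::uint8_t>> buffers_;
  };

  class Module {
   public:
    // Public entry point: takes the single mutex, then calls the
    // "Locked" helper.
    std::vector<std::uint8_t>& AllocateForCode(std::size_t size) {
      std::lock_guard<std::mutex> guard(allocation_mutex_);
      return code_allocator_.AllocateForCodeLocked(size);
    }

   private:
    std::mutex allocation_mutex_;
    CodeAllocator code_allocator_;
  };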

R=jkummerow@chromium.org
CC=dlehmann@google.com

Bug: v8:11663
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_isolates_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_linux_arm64_gc_stress_dbg_ng
Cq-Include-Trybots: luci.v8.try:v8_linux_gc_stress_dbg_ng
Change-Id: I8895d61fef23a57b218e068532375bac941a5a77
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2831477
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74026}
parent 6c40a668
@@ -398,31 +398,11 @@ class WasmCodeAllocator {
static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif
// {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
// indicate that the lock on the {WasmCodeAllocator} is already taken. It's
// optional to allow to also call methods without holding the lock.
class OptionalLock {
public:
// External users can only instantiate a non-locked {OptionalLock}.
OptionalLock() = default;
~OptionalLock();
bool is_locked() const { return allocator_ != nullptr; }
private:
friend class WasmCodeAllocator;
// {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
// is passed.
void Lock(WasmCodeAllocator*);
WasmCodeAllocator* allocator_ = nullptr;
};
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
std::shared_ptr<Counters> async_counters);
WasmCodeAllocator(WasmCodeManager*, std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
// Call before use, after the {NativeModule} is set up completely.
void Init(NativeModule*);
void Init(VirtualMemory code_space);
size_t committed_code_space() const {
return committed_code_space_.load(std::memory_order_acquire);
@@ -435,22 +415,26 @@ class WasmCodeAllocator {
}
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
Vector<byte> AllocateForCode(NativeModule*, size_t size);
// Allocate code space within a specific region. Returns a valid buffer or
// fails with OOM (crash).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
base::AddressRegion,
const WasmCodeAllocator::OptionalLock&);
base::AddressRegion);
// Sets permissions of all owned code space to executable, or read-write (if
// {executable} is false). Returns true on success.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
void FreeCode(Vector<WasmCode* const>);
// Retrieve the number of separately reserved code spaces.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
size_t GetNumCodeSpaces() const;
private:
@@ -462,10 +446,8 @@ class WasmCodeAllocator {
// The engine-wide wasm code manager.
WasmCodeManager* const code_manager_;
mutable base::Mutex mutex_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
// These fields are protected by the mutex in {NativeModule}.
// Code space that was reserved and is available for allocations (subset of
// {owned_code_space_}).
@@ -524,7 +506,15 @@ class V8_EXPORT_PRIVATE NativeModule final {
// table and jump table via another {PublishCode}.
void ReinstallDebugCode(WasmCode*);
Vector<uint8_t> AllocateForDeserializedCode(size_t total_code_size);
struct JumpTablesRef {
Address jump_table_start = kNullAddress;
Address far_jump_table_start = kNullAddress;
bool is_valid() const { return far_jump_table_start != kNullAddress; }
};
std::pair<Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
size_t total_code_size);
std::unique_ptr<WasmCode> AddDeserializedCode(
int index, Vector<byte> instructions, int stack_slots,
@@ -565,26 +555,19 @@ class V8_EXPORT_PRIVATE NativeModule final {
// the first jump table).
Address GetCallTargetForFunction(uint32_t func_index) const;
struct JumpTablesRef {
Address jump_table_start = kNullAddress;
Address far_jump_table_start = kNullAddress;
bool is_valid() const { return far_jump_table_start != kNullAddress; }
};
// Finds the jump tables that should be used for given code region. This
// information is then passed to {GetNearCallTargetForFunction} and
// {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
// up there. Return an empty struct if no suitable jump tables exist.
JumpTablesRef FindJumpTablesForRegion(base::AddressRegion) const;
JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;
// Similarly to {GetCallTargetForFunction}, but uses the jump table previously
// looked up via {FindJumpTablesForRegion}.
// looked up via {FindJumpTablesForRegionLocked}.
Address GetNearCallTargetForFunction(uint32_t func_index,
const JumpTablesRef&) const;
// Get a runtime stub entry (which is a far jump table slot) in the jump table
// previously looked up via {FindJumpTablesForRegion}.
// previously looked up via {FindJumpTablesForRegionLocked}.
Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
const JumpTablesRef&) const;
@@ -593,6 +576,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
bool SetExecutable(bool executable) {
base::MutexGuard guard{&allocation_mutex_};
return code_allocator_.SetExecutable(executable);
}
@@ -727,9 +711,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
ExecutionTier tier, ForDebugging for_debugging,
Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
WasmCode* CreateEmptyJumpTableInRegion(
int jump_table_size, base::AddressRegion,
const WasmCodeAllocator::OptionalLock&);
WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
base::AddressRegion);
void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);
@@ -741,8 +724,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
Address target);
// Called by the {WasmCodeAllocator} to register a new code space.
void AddCodeSpace(base::AddressRegion,
const WasmCodeAllocator::OptionalLock&);
void AddCodeSpaceLocked(base::AddressRegion);
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
@@ -712,11 +712,9 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
constexpr size_t kMaxReservation =
RoundUp<kCodeAlignment>(WasmCodeAllocator::kMaxCodeSpaceSize * 9 / 10);
size_t code_space_size = std::min(kMaxReservation, remaining_code_size_);
current_code_space_ =
std::tie(current_code_space_, current_jump_tables_) =
native_module_->AllocateForDeserializedCode(code_space_size);
DCHECK_EQ(current_code_space_.size(), code_space_size);
current_jump_tables_ = native_module_->FindJumpTablesForRegion(
base::AddressRegionOf(current_code_space_));
DCHECK(current_jump_tables_.is_valid());
}