Commit 2eefd6a1 authored by Clemens Backes, committed by Commit Bot

[wasm] Merge two related Mutexes into one

This changes the interaction between {NativeModule} and
{WasmCodeAllocator}. The {WasmCodeAllocator} is a field of
{NativeModule} and is only called directly by the {NativeModule}. So far,
there were two mutexes involved, the {allocation_mutex_} in
{NativeModule}, and {mutex_} in {WasmCodeAllocator}. This caused
problems with lock order inversion.
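
To illustrate the hazard: with two mutexes, one code path may lock
{allocation_mutex_} and then the allocator's {mutex_}, while another
path locks them in the opposite order; run concurrently, each can take
its first mutex and block forever on the second. A minimal standalone
sketch of such an inversion (hypothetical names, std::mutex standing in
for V8's base::Mutex):

```cpp
#include <mutex>

// Hypothetical stand-ins for NativeModule::allocation_mutex_ and
// WasmCodeAllocator::mutex_.
std::mutex allocation_mutex;
std::mutex allocator_mutex;

// Path 1: enters through {NativeModule}, then calls into the allocator.
void NativeModulePath() {
  std::lock_guard<std::mutex> outer(allocation_mutex);
  std::lock_guard<std::mutex> inner(allocator_mutex);  // allocation -> allocator
}

// Path 2: enters through the allocator, which calls back into {NativeModule}.
void AllocatorPath() {
  std::lock_guard<std::mutex> outer(allocator_mutex);
  std::lock_guard<std::mutex> inner(allocation_mutex);  // allocator -> allocation
}

int main() {
  // Run sequentially this is fine; on two threads, each path can grab its
  // first mutex and then wait forever for the other: lock order inversion.
  NativeModulePath();
  AllocatorPath();
}
```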

This CL thus merges the two mutexes by always locking the mutex in
{NativeModule} when calling a non-atomic method in {WasmCodeAllocator}.
This serializes slightly more code, but none of this should be
performance-critical.

This removes the awkward {OptionalLock} class and adds the "Locked"
suffix to a few methods to document that they may only be called
while holding the allocation mutex.
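
The resulting pattern: public entry points take {allocation_mutex_}
once, and internal "Locked" helpers merely assert that it is already
held (the CL enforces this via DCHECK(!allocation_mutex_.TryLock()), as
seen in the diff below). A rough standalone sketch of the convention,
with hypothetical names and an owner-tracking wrapper instead of V8's
base::Mutex:

```cpp
#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

// Simplified stand-in for base::Mutex that can assert "held by this thread".
class CheckedMutex {
 public:
  void Lock() {
    mutex_.lock();
    holder_.store(std::this_thread::get_id());
  }
  void Unlock() {
    holder_.store(std::thread::id{});
    mutex_.unlock();
  }
  // Debug-only analogue of DCHECK(!mutex.TryLock()) in the CL.
  void AssertHeld() const {
    assert(holder_.load() == std::this_thread::get_id());
  }

 private:
  std::mutex mutex_;
  std::atomic<std::thread::id> holder_{};
};

CheckedMutex allocation_mutex;  // stands in for NativeModule::allocation_mutex_

// "Locked" suffix documents the precondition; the assertion enforces it.
void AddCodeSpaceLocked() {
  allocation_mutex.AssertHeld();
  // ... register the new code space under the single merged lock ...
}

// Public entry point: takes the one mutex, then calls the *Locked helper.
void AddCodeSpace() {
  allocation_mutex.Lock();
  AddCodeSpaceLocked();
  allocation_mutex.Unlock();
}

int main() { AddCodeSpace(); }
```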

R=jkummerow@chromium.org
CC=dlehmann@google.com

Bug: v8:11663
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_isolates_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_linux_arm64_gc_stress_dbg_ng
Cq-Include-Trybots: luci.v8.try:v8_linux_gc_stress_dbg_ng
Change-Id: I8895d61fef23a57b218e068532375bac941a5a77
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2831477
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74026}
parent 6c40a668
@@ -502,28 +502,14 @@ int WasmCode::GetSourcePositionBefore(int offset) {
return position;
}
- WasmCodeAllocator::OptionalLock::~OptionalLock() {
- if (allocator_) allocator_->mutex_.Unlock();
- }
- void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
- DCHECK(!is_locked());
- allocator_ = allocator;
- allocator->mutex_.Lock();
- }
// static
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
- VirtualMemory code_space,
std::shared_ptr<Counters> async_counters)
: code_manager_(code_manager),
- free_code_space_(code_space.region()),
async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(4);
- owned_code_space_.emplace_back(std::move(code_space));
- async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
WasmCodeAllocator::~WasmCodeAllocator() {
@@ -531,9 +517,12 @@ WasmCodeAllocator::~WasmCodeAllocator() {
committed_code_space());
}
- void WasmCodeAllocator::Init(NativeModule* native_module) {
- DCHECK_EQ(1, owned_code_space_.size());
- native_module->AddCodeSpace(owned_code_space_[0].region(), {});
+ void WasmCodeAllocator::Init(VirtualMemory code_space) {
+ DCHECK(owned_code_space_.empty());
+ DCHECK(free_code_space_.IsEmpty());
+ free_code_space_.Merge(code_space.region());
+ owned_code_space_.emplace_back(std::move(code_space));
+ async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
namespace {
@@ -625,18 +614,11 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
size_t size) {
- return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion,
- WasmCodeAllocator::OptionalLock{});
+ return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion);
}
Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
- NativeModule* native_module, size_t size, base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& optional_lock) {
- OptionalLock new_lock;
- if (!optional_lock.is_locked()) new_lock.Lock(this);
- const auto& locked_lock =
- optional_lock.is_locked() ? optional_lock : new_lock;
- DCHECK(locked_lock.is_locked());
+ NativeModule* native_module, size_t size, base::AddressRegion region) {
DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
DCHECK_LT(0, size);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
@@ -667,7 +649,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
code_manager_->AssignRange(new_region, native_module);
free_code_space_.Merge(new_region);
owned_code_space_.emplace_back(std::move(new_mem));
- native_module->AddCodeSpace(new_region, locked_lock);
+ native_module->AddCodeSpaceLocked(new_region);
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
@@ -704,7 +686,6 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
}
bool WasmCodeAllocator::SetExecutable(bool executable) {
- base::MutexGuard lock(&mutex_);
if (is_executable_ == executable) return true;
TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
@@ -768,19 +749,16 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
DisjointAllocationPool regions_to_decommit;
PageAllocator* allocator = GetPlatformPageAllocator();
size_t commit_page_size = allocator->CommitPageSize();
- {
- base::MutexGuard guard(&mutex_);
- for (auto region : freed_regions.regions()) {
- auto merged_region = freed_code_space_.Merge(region);
- Address discard_start =
- std::max(RoundUp(merged_region.begin(), commit_page_size),
- RoundDown(region.begin(), commit_page_size));
- Address discard_end =
- std::min(RoundDown(merged_region.end(), commit_page_size),
- RoundUp(region.end(), commit_page_size));
- if (discard_start >= discard_end) continue;
- regions_to_decommit.Merge({discard_start, discard_end - discard_start});
- }
+ for (auto region : freed_regions.regions()) {
+ auto merged_region = freed_code_space_.Merge(region);
+ Address discard_start =
+ std::max(RoundUp(merged_region.begin(), commit_page_size),
+ RoundDown(region.begin(), commit_page_size));
+ Address discard_end =
+ std::min(RoundDown(merged_region.end(), commit_page_size),
+ RoundUp(region.end(), commit_page_size));
+ if (discard_start >= discard_end) continue;
+ regions_to_decommit.Merge({discard_start, discard_end - discard_start});
}
for (auto region : regions_to_decommit.regions()) {
@@ -795,7 +773,6 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
}
size_t WasmCodeAllocator::GetNumCodeSpaces() const {
- base::MutexGuard lock(&mutex_);
return owned_code_space_.size();
}
@@ -809,8 +786,7 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::shared_ptr<NativeModule>* shared_this)
: engine_(engine),
engine_scope_(engine->GetBarrierForBackgroundCompile()->TryLock()),
- code_allocator_(engine->code_manager(), std::move(code_space),
- async_counters),
+ code_allocator_(engine->code_manager(), async_counters),
enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
@@ -838,7 +814,14 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::fill_n(num_liftoff_function_calls_.get(),
module_->num_declared_functions, kCounterStart);
}
- code_allocator_.Init(this);
+ // Even though there cannot be another thread using this object (since we are
+ // just constructing it), we need to hold the mutex to fulfill the
+ // precondition of {WasmCodeAllocator::Init}, which calls
+ // {NativeModule::AddCodeSpaceLocked}.
+ base::MutexGuard guard{&allocation_mutex_};
+ auto initial_region = code_space.region();
+ code_allocator_.Init(std::move(code_space));
+ AddCodeSpaceLocked(initial_region);
}
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
@@ -858,10 +841,10 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
single_code_space_region = code_space_data_[0].region;
}
// Re-allocate jump table.
- main_jump_table_ = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfSlots(max_functions),
- single_code_space_region, WasmCodeAllocator::OptionalLock{});
+ base::MutexGuard guard(&allocation_mutex_);
+ main_jump_table_ = CreateEmptyJumpTableInRegionLocked(
+ JumpTableAssembler::SizeForNumberOfSlots(max_functions),
+ single_code_space_region);
code_space_data_[0].jump_table = main_jump_table_;
}
@@ -929,6 +912,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
const int constant_pool_offset = base_offset + code->constant_pool_offset();
const int code_comments_offset = base_offset + code->code_comments_offset();
+ base::MutexGuard guard{&allocation_mutex_};
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
base::Memcpy(dst_code_bytes.begin(), instructions.begin(),
@@ -940,7 +924,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
int mode_mask =
RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
auto jump_tables_ref =
- FindJumpTablesForRegion(base::AddressRegionOf(dst_code_bytes));
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(dst_code_bytes));
Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
Address constant_pool_start = dst_code_addr + constant_pool_offset;
RelocIterator orig_it(*code, mode_mask);
@@ -982,7 +966,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
new_code->MaybePrint();
new_code->Validate();
- return PublishCode(std::move(new_code));
+ return PublishCodeLocked(std::move(new_code));
}
void NativeModule::UseLazyStub(uint32_t func_index) {
@@ -990,25 +974,23 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
+ base::MutexGuard guard(&allocation_mutex_);
if (!lazy_compile_table_) {
uint32_t num_slots = module_->num_declared_functions;
WasmCodeRefScope code_ref_scope;
CODE_SPACE_WRITE_SCOPE
- base::AddressRegion single_code_space_region;
- {
- base::MutexGuard guard(&allocation_mutex_);
- DCHECK_EQ(1, code_space_data_.size());
- single_code_space_region = code_space_data_[0].region;
- }
- lazy_compile_table_ = CreateEmptyJumpTableInRegion(
+ DCHECK_EQ(1, code_space_data_.size());
+ base::AddressRegion single_code_space_region = code_space_data_[0].region;
+ lazy_compile_table_ = CreateEmptyJumpTableInRegionLocked(
JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
- single_code_space_region, WasmCodeAllocator::OptionalLock{});
+ single_code_space_region);
JumpTableAssembler::GenerateLazyCompileTable(
lazy_compile_table_->instruction_start(), num_slots,
module_->num_imported_functions,
- GetNearRuntimeStubEntry(WasmCode::kWasmCompileLazy,
- FindJumpTablesForRegion(base::AddressRegionOf(
- lazy_compile_table_->instructions()))));
+ GetNearRuntimeStubEntry(
+ WasmCode::kWasmCompileLazy,
+ FindJumpTablesForRegionLocked(
+ base::AddressRegionOf(lazy_compile_table_->instructions()))));
}
// Add jump table entry for jump to the lazy compile stub.
@@ -1017,7 +999,6 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
Address lazy_compile_target =
lazy_compile_table_->instruction_start() +
JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
- base::MutexGuard guard(&allocation_mutex_);
PatchJumpTablesLocked(slot_index, lazy_compile_target);
}
@@ -1026,10 +1007,14 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging) {
- Vector<byte> code_space =
- code_allocator_.AllocateForCode(this, desc.instr_size);
- auto jump_table_ref =
- FindJumpTablesForRegion(base::AddressRegionOf(code_space));
+ Vector<byte> code_space;
+ NativeModule::JumpTablesRef jump_table_ref;
+ {
+ base::MutexGuard guard{&allocation_mutex_};
+ code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
+ jump_table_ref =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ }
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
protected_instructions_data,
source_position_table, kind, tier, for_debugging,
@@ -1230,9 +1215,14 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
- Vector<uint8_t> NativeModule::AllocateForDeserializedCode(
- size_t total_code_size) {
- return code_allocator_.AllocateForCode(this, total_code_size);
+ std::pair<Vector<uint8_t>, NativeModule::JumpTablesRef>
+ NativeModule::AllocateForDeserializedCode(size_t total_code_size) {
+ base::MutexGuard guard{&allocation_mutex_};
+ Vector<uint8_t> code_space =
+ code_allocator_.AllocateForCode(this, total_code_size);
+ auto jump_tables =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ return {code_space, jump_tables};
}
std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
@@ -1289,13 +1279,14 @@ WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
return source_map_.get();
}
- WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
- int jump_table_size, base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& allocator_lock) {
+ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
+ int jump_table_size, base::AddressRegion region) {
+ // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
+ DCHECK(!allocation_mutex_.TryLock());
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
- Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
- this, jump_table_size, region, allocator_lock);
+ Vector<uint8_t> code_space =
+ code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
DCHECK(!code_space.empty());
UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
CODE_SPACE_WRITE_SCOPE
@@ -1317,7 +1308,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
WasmCode::kJumpTable, // kind
ExecutionTier::kNone, // tier
kNoDebugging}}; // for_debugging
- return PublishCode(std::move(code));
+ return PublishCodeLocked(std::move(code));
}
void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
@@ -1369,9 +1360,9 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
target);
}
- void NativeModule::AddCodeSpace(
- base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& allocator_lock) {
+ void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
+ // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
+ DCHECK(!allocation_mutex_.TryLock());
// Each code space must be at least twice as large as the overhead per code
// space. Otherwise, we are wasting too much memory.
DCHECK_GE(region.size(),
@@ -1387,8 +1378,8 @@ void NativeModule::AddCodeSpace(
->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
size_t size = Heap::GetCodeRangeReservedAreaSize();
DCHECK_LT(0, size);
- Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
- this, size, region, allocator_lock);
+ Vector<byte> padding =
+ code_allocator_.AllocateForCodeInRegion(this, size, region);
CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
win64_unwindinfo::RegisterNonABICompliantCodeRange(
reinterpret_cast<void*>(region.begin()), region.size());
@@ -1402,23 +1393,23 @@ void NativeModule::AddCodeSpace(
const uint32_t num_wasm_functions = module_->num_declared_functions;
const bool is_first_code_space = code_space_data_.empty();
// We always need a far jump table, because it contains the runtime stubs.
- const bool needs_far_jump_table = !FindJumpTablesForRegion(region).is_valid();
+ const bool needs_far_jump_table =
+ !FindJumpTablesForRegionLocked(region).is_valid();
const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;
if (needs_jump_table) {
- jump_table = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
- allocator_lock);
+ jump_table = CreateEmptyJumpTableInRegionLocked(
+ JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
CHECK(region.contains(jump_table->instruction_start()));
}
if (needs_far_jump_table) {
int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
- far_jump_table = CreateEmptyJumpTableInRegion(
+ far_jump_table = CreateEmptyJumpTableInRegionLocked(
JumpTableAssembler::SizeForNumberOfFarJumpSlots(
WasmCode::kRuntimeStubCount,
NumWasmFunctionsInFarJumpTable(num_function_slots)),
- region, allocator_lock);
+ region);
CHECK(region.contains(far_jump_table->instruction_start()));
EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define RUNTIME_STUB(Name) Builtins::k##Name,
@@ -1446,7 +1437,6 @@ void NativeModule::AddCodeSpace(
main_far_jump_table_ = far_jump_table;
}
- base::MutexGuard guard(&allocation_mutex_);
code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
if (jump_table && !is_first_code_space) {
@@ -1566,8 +1556,10 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
return main_jump_table_->instruction_start() + slot_offset;
}
- NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
+ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegionLocked(
base::AddressRegion code_region) const {
+ // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
+ DCHECK(!allocation_mutex_.TryLock());
auto jump_table_usable = [code_region](const WasmCode* jump_table) {
Address table_start = jump_table->instruction_start();
Address table_end = table_start + jump_table->instructions().size();
@@ -1583,18 +1575,6 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
};
- // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
- // Access to these fields is possible without locking, since these fields are
- // initialized on construction of the {NativeModule}.
- if (main_far_jump_table_ && jump_table_usable(main_far_jump_table_) &&
- (main_jump_table_ == nullptr || jump_table_usable(main_jump_table_))) {
- return {
- main_jump_table_ ? main_jump_table_->instruction_start() : kNullAddress,
- main_far_jump_table_->instruction_start()};
- }
- // Otherwise, take the mutex and look for another suitable jump table.
- base::MutexGuard guard(&allocation_mutex_);
for (auto& code_space_data : code_space_data_) {
DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
if (!code_space_data.far_jump_table) continue;
@@ -2002,10 +1982,15 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
DCHECK(result.succeeded());
total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
}
- Vector<byte> code_space =
- code_allocator_.AllocateForCode(this, total_code_space);
- // Lookup the jump tables to use once, then use for all code objects.
- auto jump_tables = FindJumpTablesForRegion(base::AddressRegionOf(code_space));
+ Vector<byte> code_space;
+ NativeModule::JumpTablesRef jump_tables;
+ {
+ base::MutexGuard guard{&allocation_mutex_};
+ code_space = code_allocator_.AllocateForCode(this, total_code_space);
+ // Lookup the jump tables to use once, then use for all code objects.
+ jump_tables =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ }
// If we happen to have a {total_code_space} which is bigger than
// {kMaxCodeSpaceSize}, we would not find valid jump tables for the whole
// region. If this ever happens, we need to handle this case (by splitting the
@@ -2110,19 +2095,16 @@ std::vector<int> NativeModule::FindFunctionsToRecompile(
}
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
+ base::MutexGuard guard(&allocation_mutex_);
// Free the code space.
code_allocator_.FreeCode(codes);
- DebugInfo* debug_info = nullptr;
- {
- base::MutexGuard guard(&allocation_mutex_);
- if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
- debug_info = debug_info_.get();
- // Free the {WasmCode} objects. This will also unregister trap handler data.
- for (WasmCode* code : codes) {
- DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
- owned_code_.erase(code->instruction_start());
- }
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
+ DebugInfo* debug_info = debug_info_.get();
+ // Free the {WasmCode} objects. This will also unregister trap handler data.
+ for (WasmCode* code : codes) {
+ DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
+ owned_code_.erase(code->instruction_start());
+ }
// Remove debug side tables for all removed code objects, after releasing our
// lock. This is to avoid lock order inversion.
@@ -2130,6 +2112,7 @@ void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
}
size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
+ base::MutexGuard guard{&allocation_mutex_};
return code_allocator_.GetNumCodeSpaces();
}
@@ -398,31 +398,11 @@ class WasmCodeAllocator {
static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif
- // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
- // indicate that the lock on the {WasmCodeAllocator} is already taken. It's
- // optional to allow to also call methods without holding the lock.
- class OptionalLock {
- public:
- // External users can only instantiate a non-locked {OptionalLock}.
- OptionalLock() = default;
- ~OptionalLock();
- bool is_locked() const { return allocator_ != nullptr; }
- private:
- friend class WasmCodeAllocator;
- // {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
- // is passed.
- void Lock(WasmCodeAllocator*);
- WasmCodeAllocator* allocator_ = nullptr;
- };
- WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
- std::shared_ptr<Counters> async_counters);
+ WasmCodeAllocator(WasmCodeManager*, std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
// Call before use, after the {NativeModule} is set up completely.
- void Init(NativeModule*);
+ void Init(VirtualMemory code_space);
size_t committed_code_space() const {
return committed_code_space_.load(std::memory_order_acquire);
@@ -435,22 +415,26 @@
}
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
Vector<byte> AllocateForCode(NativeModule*, size_t size);
// Allocate code space within a specific region. Returns a valid buffer or
// fails with OOM (crash).
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
- base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ base::AddressRegion);
// Sets permissions of all owned code space to executable, or read-write (if
// {executable} is false). Returns true on success.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
// Free memory pages of all given code objects. Used for wasm code GC.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
void FreeCode(Vector<WasmCode* const>);
// Retrieve the number of separately reserved code spaces.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
size_t GetNumCodeSpaces() const;
private:
@@ -462,10 +446,8 @@ class WasmCodeAllocator {
// The engine-wide wasm code manager.
WasmCodeManager* const code_manager_;
- mutable base::Mutex mutex_;
//////////////////////////////////////////////////////////////////////////////
- // Protected by {mutex_}:
+ // These fields are protected by the mutex in {NativeModule}.
// Code space that was reserved and is available for allocations (subset of
// {owned_code_space_}).
@@ -524,7 +506,15 @@ class V8_EXPORT_PRIVATE NativeModule final {
// table and jump table via another {PublishCode}.
void ReinstallDebugCode(WasmCode*);
- Vector<uint8_t> AllocateForDeserializedCode(size_t total_code_size);
+ struct JumpTablesRef {
+ Address jump_table_start = kNullAddress;
+ Address far_jump_table_start = kNullAddress;
+ bool is_valid() const { return far_jump_table_start != kNullAddress; }
+ };
+ std::pair<Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
+ size_t total_code_size);
std::unique_ptr<WasmCode> AddDeserializedCode(
int index, Vector<byte> instructions, int stack_slots,
@@ -565,26 +555,19 @@ class V8_EXPORT_PRIVATE NativeModule final {
// the first jump table).
Address GetCallTargetForFunction(uint32_t func_index) const;
- struct JumpTablesRef {
- Address jump_table_start = kNullAddress;
- Address far_jump_table_start = kNullAddress;
- bool is_valid() const { return far_jump_table_start != kNullAddress; }
- };
// Finds the jump tables that should be used for given code region. This
// information is then passed to {GetNearCallTargetForFunction} and
// {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
// up there. Return an empty struct if no suitable jump tables exist.
- JumpTablesRef FindJumpTablesForRegion(base::AddressRegion) const;
+ JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;
// Similarly to {GetCallTargetForFunction}, but uses the jump table previously
- // looked up via {FindJumpTablesForRegion}.
+ // looked up via {FindJumpTablesForRegionLocked}.
Address GetNearCallTargetForFunction(uint32_t func_index,
const JumpTablesRef&) const;
// Get a runtime stub entry (which is a far jump table slot) in the jump table
- // previously looked up via {FindJumpTablesForRegion}.
+ // previously looked up via {FindJumpTablesForRegionLocked}.
Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
const JumpTablesRef&) const;
@@ -593,6 +576,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
bool SetExecutable(bool executable) {
+ base::MutexGuard guard{&allocation_mutex_};
return code_allocator_.SetExecutable(executable);
}
@@ -727,9 +711,8 @@
ExecutionTier tier, ForDebugging for_debugging,
Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
- WasmCode* CreateEmptyJumpTableInRegion(
- int jump_table_size, base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
+ base::AddressRegion);
void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);
@@ -741,8 +724,7 @@
Address target);
// Called by the {WasmCodeAllocator} to register a new code space.
- void AddCodeSpace(base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ void AddCodeSpaceLocked(base::AddressRegion);
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
@@ -712,11 +712,9 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
constexpr size_t kMaxReservation =
RoundUp<kCodeAlignment>(WasmCodeAllocator::kMaxCodeSpaceSize * 9 / 10);
size_t code_space_size = std::min(kMaxReservation, remaining_code_size_);
- current_code_space_ =
+ std::tie(current_code_space_, current_jump_tables_) =
native_module_->AllocateForDeserializedCode(code_space_size);
DCHECK_EQ(current_code_space_.size(), code_space_size);
- current_jump_tables_ = native_module_->FindJumpTablesForRegion(
- base::AddressRegionOf(current_code_space_));
DCHECK(current_jump_tables_.is_valid());
}