Commit 8a3c4881 authored by Daniel Lehmann, committed by V8 LUCI CQ

[wasm] Merge code space write scope implementations

Merges `NativeModuleModificationScope` (with an implementation using
Intel PKU, if available, and mprotect otherwise) and
`CodeSpaceWriteScope` (for Apple Silicon, where switching to RWX with
mprotect is disallowed anyway, so MAP_JIT and thread-local switching
must be used).

Because `CodeSpaceWriteScope` sounded better (and is shorter), we kept
its name (which unfortunately makes the diff a bit harder to read).
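
To make the merge concrete at the call sites this CL touches, here is a minimal illustration (stub class, not V8's real declarations): before, writers of code space needed both the M1-only `CODE_SPACE_WRITE_SCOPE` macro and the non-M1 scope object; after, a single RAII scope works on every platform.

```cpp
// Illustration only; the stub below is not V8's real declaration.
class NativeModule;

class CodeSpaceWriteScope final {
 public:
  explicit CodeSpaceWriteScope(NativeModule*) { /* make code space writable */ }
  ~CodeSpaceWriteScope() { /* restore executable permissions */ }
};

void AddCodeExample(NativeModule* native_module) {
  // Before this CL (two mechanisms):
  //   CODE_SPACE_WRITE_SCOPE
  //   NativeModuleModificationScope native_module_modification_scope(native_module);
  // After this CL (one RAII scope on all platforms):
  CodeSpaceWriteScope code_space_write_scope(native_module);
  // ... copy or patch generated machine code while the scope is open ...
}
```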

R=clemensb@chromium.org
CC=jkummerow@chromium.org

Bug: v8:11714

Cq-Include-Trybots: luci.v8.try:v8_linux64_fyi_rel_ng
Change-Id: Ib2a7d18e72797a725ed34b904c70769166d811dd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2972911
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Daniel Lehmann <dlehmann@google.com>
Cr-Commit-Position: refs/heads/master@{#75272}
parent 6d91aaa7
@@ -10,9 +10,31 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
-#if !(defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64))
-NativeModuleModificationScope::NativeModuleModificationScope(
-    NativeModule* native_module)
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+
+thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+
+// The {NativeModule} argument is unused; it is just here for a common API with
+// the non-M1 implementation.
+// TODO(jkummerow): Background threads could permanently stay in
+// writable mode; only the main thread has to switch back and forth.
+CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule*) {
+  if (code_space_write_nesting_level_ == 0) {
+    SwitchMemoryPermissionsToWritable();
+  }
+  code_space_write_nesting_level_++;
+}
+
+CodeSpaceWriteScope::~CodeSpaceWriteScope() {
+  code_space_write_nesting_level_--;
+  if (code_space_write_nesting_level_ == 0) {
+    SwitchMemoryPermissionsToExecutable();
+  }
+}
+
+#else  // Not on MacOS on ARM64 (M1 hardware): Use Intel PKU and/or mprotect.
+
+CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
     : native_module_(native_module) {
   DCHECK_NOT_NULL(native_module_);
   if (FLAG_wasm_memory_protection_keys) {
@@ -28,7 +50,7 @@ NativeModuleModificationScope::NativeModuleModificationScope(
   }
 }
 
-NativeModuleModificationScope::~NativeModuleModificationScope() {
+CodeSpaceWriteScope::~CodeSpaceWriteScope() {
   if (FLAG_wasm_memory_protection_keys) {
     bool success = native_module_->SetThreadWritable(false);
     if (!success && FLAG_wasm_write_protect_code_memory) {
@@ -41,7 +63,8 @@ NativeModuleModificationScope::~NativeModuleModificationScope() {
     CHECK(success);
   }
 }
-#endif  // !(defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64))
+
+#endif  // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
 
 }  // namespace wasm
 }  // namespace internal
...
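
The thread-local nesting counter in the M1 implementation above means only the outermost scope actually switches permissions; nested scopes are free. A standalone sketch of that behavior, with the `Switch*` bodies replaced by prints for illustration:

```cpp
#include <cstdio>

namespace {

void SwitchMemoryPermissionsToWritable() { std::puts("-> writable"); }
void SwitchMemoryPermissionsToExecutable() { std::puts("-> executable"); }

class CodeSpaceWriteScope final {
 public:
  CodeSpaceWriteScope() {
    // Only the outermost scope on this thread pays for a switch.
    if (nesting_level_ == 0) SwitchMemoryPermissionsToWritable();
    nesting_level_++;
  }
  ~CodeSpaceWriteScope() {
    nesting_level_--;
    if (nesting_level_ == 0) SwitchMemoryPermissionsToExecutable();
  }

 private:
  static thread_local int nesting_level_;
};

thread_local int CodeSpaceWriteScope::nesting_level_ = 0;

}  // namespace

int main() {
  CodeSpaceWriteScope outer;      // prints "-> writable"
  { CodeSpaceWriteScope inner; }  // nested: no permission switch
  // "-> executable" is printed when {outer} is destructed at end of main.
}
```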
@@ -20,44 +20,58 @@ namespace wasm {
 
 class NativeModule;
 
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
-// Arm64 on MacOS (M1 hardware) uses CodeSpaceWriteScope to switch permissions.
-// TODO(wasm): Merge NativeModuleModificationScope and CodeSpaceWriteScope.
-class V8_NODISCARD NativeModuleModificationScope final {
- public:
-  explicit NativeModuleModificationScope(NativeModule*) {}
-};
-#else
-// Within the scope, the native_module is writable and not executable.
-// At the scope's destruction, the native_module is executable and not writable.
-// The states inside the scope and at the scope termination are irrespective of
-// native_module's state when entering the scope.
-// We currently mark the entire module's memory W^X:
+// Within the scope, the code space is writable (and for Apple M1 also not
+// executable). After the last (nested) scope is destructed, the code space is
+// not writable.
+// This uses three different implementations, depending on the platform, flags,
+// and runtime support:
+// - On MacOS on ARM64 ("Apple M1"/Apple Silicon), it uses APRR/MAP_JIT to
+//   switch only the calling thread between writable and executable. This
+//   achieves "real" W^X and is thread-local and fast.
+// - When Intel PKU (aka. memory protection keys) are available, it switches
+//   the protection keys' permission between writable and not writable. The
+//   executable permission cannot be retracted with PKU. That is, this "only"
+//   achieves write-protection, but is similarly thread-local and fast.
+// - As a fallback, we switch with {mprotect()} between R-X and RWX (due to
+//   concurrent compilation and execution). This is slow and process-wide. With
+//   {mprotect()}, we currently switch permissions for the entire module's
+//   memory:
 // - for AOT, that's as efficient as it can be.
 // - for Lazy, we don't have a heuristic for functions that may need patching,
 //   and even if we did, the resulting set of pages may be fragmented.
 //   Currently, we try and keep the number of syscalls low.
 // - similar argument for debug time.
-class V8_NODISCARD NativeModuleModificationScope final {
+// MAP_JIT on Apple M1 cannot switch permissions for smaller ranges of memory,
+// and for PKU we would need multiple keys, so both of them also switch
+// permissions for all code pages.
+class V8_NODISCARD CodeSpaceWriteScope final {
  public:
-  explicit NativeModuleModificationScope(NativeModule* native_module);
-  ~NativeModuleModificationScope();
+  explicit CodeSpaceWriteScope(NativeModule* native_module);
+  ~CodeSpaceWriteScope();
 
   // Disable copy constructor and copy-assignment operator, since this manages
   // a resource and implicit copying of the scope can yield surprising errors.
-  NativeModuleModificationScope(const NativeModuleModificationScope&) = delete;
-  NativeModuleModificationScope& operator=(
-      const NativeModuleModificationScope&) = delete;
+  CodeSpaceWriteScope(const CodeSpaceWriteScope&) = delete;
+  CodeSpaceWriteScope& operator=(const CodeSpaceWriteScope&) = delete;
 
  private:
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+  static thread_local int code_space_write_nesting_level_;
+#else  // On non-M1 hardware:
+  // The M1 implementation knows implicitly from the {MAP_JIT} flag during
+  // allocation which region to switch permissions for. On non-M1 hardware,
+  // however, we either need the protection key or code space from the
+  // {native_module_}.
   NativeModule* native_module_;
-};
 #endif  // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+};
 
 }  // namespace wasm
 
 #if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
 
+// Low-level API for switching MAP_JIT pages between writable and executable.
+// TODO(wasm): Access to these functions is only needed in tests. Remove?
+
 // Ignoring this warning is considered better than relying on
 // __builtin_available.
 #pragma clang diagnostic push
@@ -70,42 +84,13 @@ inline void SwitchMemoryPermissionsToExecutable() {
 }
 #pragma clang diagnostic pop
 
-namespace wasm {
-
-class V8_NODISCARD CodeSpaceWriteScope {
- public:
-  // TODO(jkummerow): Background threads could permanently stay in
-  // writable mode; only the main thread has to switch back and forth.
-  CodeSpaceWriteScope() {
-    if (code_space_write_nesting_level_ == 0) {
-      SwitchMemoryPermissionsToWritable();
-    }
-    code_space_write_nesting_level_++;
-  }
-  ~CodeSpaceWriteScope() {
-    code_space_write_nesting_level_--;
-    if (code_space_write_nesting_level_ == 0) {
-      SwitchMemoryPermissionsToExecutable();
-    }
-  }
-
- private:
-  static thread_local int code_space_write_nesting_level_;
-};
-
-#define CODE_SPACE_WRITE_SCOPE CodeSpaceWriteScope _write_access_;
-
-}  // namespace wasm
-
 #else  // Not Mac-on-arm64.
 // Nothing to do, we map code memory with rwx permissions.
 inline void SwitchMemoryPermissionsToWritable() {}
 inline void SwitchMemoryPermissionsToExecutable() {}
-#define CODE_SPACE_WRITE_SCOPE
-#endif  // V8_OS_MACOSX && V8_HOST_ARCH_ARM64
+#endif  // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
 
 }  // namespace internal
 }  // namespace v8
...
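
The bodies of the two `Switch*` functions are elided in the hunk above. On Apple Silicon they plausibly wrap the per-thread MAP_JIT toggle from Apple's documented API; the following sketch is an assumption to that effect, not a verbatim copy of V8's implementation:

```cpp
// Assumption: the elided bodies wrap pthread_jit_write_protect_np() (macOS
// 11+, arm64), which toggles write protection of all MAP_JIT pages for the
// calling thread only.
#include <pthread.h>

inline void SwitchMemoryPermissionsToWritable() {
  pthread_jit_write_protect_np(/*enabled=*/0);  // pages writable, not executable
}

inline void SwitchMemoryPermissionsToExecutable() {
  pthread_jit_write_protect_np(/*enabled=*/1);  // pages executable, not writable
}
```

Because the toggle is per-thread, this is exactly what makes the M1 path thread-local and fast compared to the process-wide `mprotect()` fallback.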
@@ -1110,7 +1110,7 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
   Counters* counters = isolate->counters();
 
   DCHECK(!native_module->lazy_compile_frozen());
-  NativeModuleModificationScope native_module_modification_scope(native_module);
+  CodeSpaceWriteScope code_space_write_scope(native_module);
 
   TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
@@ -1422,8 +1422,7 @@ void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
   uint32_t start = module->num_imported_functions;
   uint32_t end = start + module->num_declared_functions;
 
-  base::Optional<NativeModuleModificationScope>
-      lazy_native_module_modification_scope;
+  base::Optional<CodeSpaceWriteScope> lazy_code_space_write_scope;
   for (uint32_t func_index = start; func_index < end; func_index++) {
     if (prefer_liftoff) {
       builder.AddRecompilationUnit(func_index, ExecutionTier::kLiftoff);
@@ -1436,8 +1435,8 @@ void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
     } else {
       // Open a single scope for all following calls to {UseLazyStub()}, instead
      // of flipping page permissions for each {func_index} individually.
-      if (!lazy_native_module_modification_scope.has_value()) {
-        lazy_native_module_modification_scope.emplace(native_module);
+      if (!lazy_code_space_write_scope.has_value()) {
+        lazy_code_space_write_scope.emplace(native_module);
       }
       if (strategy == CompileStrategy::kLazy) {
        native_module->UseLazyStub(func_index);
...
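
The `lazy_code_space_write_scope` pattern above, shown in isolation: the scope is constructed only when the first function actually needs a write, and that single scope then covers all remaining iterations instead of flipping permissions per function. A self-contained sketch with `std::optional` standing in for V8's `base::Optional` and hypothetical stub types:

```cpp
#include <cstdint>
#include <optional>

// Minimal stubs so the sketch compiles standalone.
struct NativeModule {
  void UseLazyStub(uint32_t /*func_index*/) { /* patch jump-table slot */ }
};
struct CodeSpaceWriteScope {
  explicit CodeSpaceWriteScope(NativeModule*) { /* code space -> writable */ }
  ~CodeSpaceWriteScope() { /* code space -> executable */ }
};
// Hypothetical predicate: does this function need a lazy stub at all?
static bool NeedsLazyStub(uint32_t func_index) { return func_index % 2 == 0; }

void InitializeLazyStubs(NativeModule* native_module, uint32_t start,
                         uint32_t end) {
  std::optional<CodeSpaceWriteScope> lazy_scope;
  for (uint32_t func_index = start; func_index < end; func_index++) {
    if (!NeedsLazyStub(func_index)) continue;
    // Open one scope on first use instead of one per function.
    if (!lazy_scope.has_value()) lazy_scope.emplace(native_module);
    native_module->UseLazyStub(func_index);
  }
}  // If opened, the scope closes here and write permission is withdrawn.
```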
@@ -630,7 +630,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
     instance->set_indirect_function_tables(*tables);
   }
 
-  NativeModuleModificationScope native_modification_scope(native_module);
+  CodeSpaceWriteScope native_modification_scope(native_module);
 
   //--------------------------------------------------------------------------
   // Process the imports for the module.
...
@@ -52,10 +52,6 @@ namespace wasm {
 
 using trap_handler::ProtectedInstructionData;
 
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
-thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
-#endif
-
 base::AddressRegion DisjointAllocationPool::Merge(
     base::AddressRegion new_region) {
   // Find the possible insertion position by identifying the first region whose
@@ -702,12 +698,11 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
 }
 
 // TODO(dlehmann): Do not return the success as a bool, but instead fail hard.
-// That is, pull the CHECK from {NativeModuleModificationScope} in here and
-// return void.
+// That is, pull the CHECK from {CodeSpaceWriteScope} in here and return void.
 // TODO(dlehmann): Ensure {SetWritable(true)} is always paired up with a
 // {SetWritable(false)}, such that eventually the code space is write protected.
 // One solution is to make the API foolproof by hiding {SetWritable()} and
-// allowing change of permissions only through {NativeModuleModificationScope}.
+// allowing change of permissions only through {CodeSpaceWriteScope}.
 // TODO(dlehmann): Add tests that ensure the code space is eventually write-
 // protected.
 bool WasmCodeAllocator::SetWritable(bool writable) {
@@ -933,8 +928,7 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
 }
 
 WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
-  CODE_SPACE_WRITE_SCOPE
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   const size_t relocation_size = code->relocation_size();
   base::OwnedVector<byte> reloc_info;
   if (relocation_size > 0) {
@@ -1031,8 +1025,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
             module_->num_imported_functions + module_->num_declared_functions);
 
   base::RecursiveMutexGuard guard(&allocation_mutex_);
-  CODE_SPACE_WRITE_SCOPE
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   if (!lazy_compile_table_) {
     uint32_t num_slots = module_->num_declared_functions;
     WasmCodeRefScope code_ref_scope;
@@ -1073,8 +1066,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
     jump_table_ref =
         FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
   }
-  CODE_SPACE_WRITE_SCOPE
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
                               protected_instructions_data,
                               source_position_table, kind, tier, for_debugging,
@@ -1154,8 +1146,7 @@ WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
                "wasm.PublishCode");
   base::RecursiveMutexGuard lock(&allocation_mutex_);
-  CODE_SPACE_WRITE_SCOPE
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   return PublishCodeLocked(std::move(code));
 }
@@ -1166,10 +1157,9 @@ std::vector<WasmCode*> NativeModule::PublishCode(
   std::vector<WasmCode*> published_code;
   published_code.reserve(codes.size());
   base::RecursiveMutexGuard lock(&allocation_mutex_);
-  CODE_SPACE_WRITE_SCOPE
   // Get writable permission already here (and not inside the loop in
   // {PatchJumpTablesLocked}), to avoid switching for each {code} individually.
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   // The published code is put into the top-most surrounding {WasmCodeRefScope}.
   for (auto& code : codes) {
     published_code.push_back(PublishCodeLocked(std::move(code)));
@@ -1278,8 +1268,7 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
   code_table_[slot_idx] = code;
   code->IncRef();
 
-  CODE_SPACE_WRITE_SCOPE
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   PatchJumpTablesLocked(slot_idx, code->instruction_start());
 }
@@ -1357,8 +1346,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
       code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
   DCHECK(!code_space.empty());
   UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
-  CODE_SPACE_WRITE_SCOPE
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
   std::unique_ptr<WasmCode> code{
       new WasmCode{this,  // native_module
@@ -1452,8 +1440,7 @@ void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
 #endif  // V8_OS_WIN64
 
   WasmCodeRefScope code_ref_scope;
-  CODE_SPACE_WRITE_SCOPE
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   WasmCode* jump_table = nullptr;
   WasmCode* far_jump_table = nullptr;
   const uint32_t num_wasm_functions = module_->num_declared_functions;
@@ -1772,13 +1759,12 @@ void WasmCodeManager::Commit(base::AddressRegion region) {
   // TODO(dlehmann): This allocates initially as writable and executable, and
   // as such is not safe-by-default. In particular, if
   // {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g.,
-  // because no {NativeModuleModificationScope} is created), the writable
-  // permission is never withdrawn.
+  // because no {CodeSpaceWriteScope} is created), the writable permission is
+  // never withdrawn.
   // One potential fix is to allocate initially with kReadExecute only, which
-  // forces all compilation threads to add the missing
-  // {NativeModuleModificationScope}s before modification; and/or adding
-  // DCHECKs that {NativeModuleModificationScope} is open when calling this
-  // method.
+  // forces all compilation threads to add the missing {CodeSpaceWriteScope}s
+  // before modification; and/or adding DCHECKs that {CodeSpaceWriteScope} is
+  // open when calling this method.
   PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;
 
   bool success;
@@ -2108,11 +2094,10 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
   generated_code.reserve(results.size());
 
   // Now copy the generated code into the code space and relocate it.
-  CODE_SPACE_WRITE_SCOPE
   // Get writable permission already here (and not inside the loop in
   // {AddCodeWithCodeSpace}), to avoid lock contention on the
   // {allocator_mutex_} if we try to switch for each code individually.
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   for (auto& result : results) {
     DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
     size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
@@ -2176,10 +2161,9 @@ std::vector<int> NativeModule::FindFunctionsToRecompile(
     TieringState new_tiering_state) {
   WasmCodeRefScope code_ref_scope;
   base::RecursiveMutexGuard guard(&allocation_mutex_);
-  CODE_SPACE_WRITE_SCOPE
   // Get writable permission already here (and not inside the loop in
   // {PatchJumpTablesLocked}), to avoid switching for each slot individually.
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   std::vector<int> function_indexes;
   int imported = module()->num_imported_functions;
   int declared = module()->num_declared_functions;
@@ -2216,11 +2200,10 @@ std::vector<int> NativeModule::FindFunctionsToRecompile(
 void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {
   base::RecursiveMutexGuard guard(&allocation_mutex_);
-  CODE_SPACE_WRITE_SCOPE
   // Get writable permission already here (and not inside the loop in
   // {WasmCodeAllocator::FreeCode}), to avoid switching for each {code}
   // individually.
-  NativeModuleModificationScope native_module_modification_scope(this);
+  CodeSpaceWriteScope code_space_write_scope(this);
   // Free the code space.
   code_allocator_.FreeCode(codes);
...
@@ -778,7 +778,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
   friend class WasmCode;
   friend class WasmCodeAllocator;
   friend class WasmCodeManager;
-  friend class NativeModuleModificationScope;
+  friend class CodeSpaceWriteScope;
 
   struct CodeSpaceData {
     base::AddressRegion region;
@@ -880,10 +880,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
 
   // This mutex protects concurrent calls to {AddCode} and friends.
   // TODO(dlehmann): Revert this to a regular {Mutex} again.
-  // This needs to be a {RecursiveMutex} only because of
-  // {NativeModuleModificationScope} usages, which are (1) either at places
-  // that already hold the {allocation_mutex_} or (2) because of multiple open
-  // {NativeModuleModificationScope}s in the call hierarchy. Both are fixable.
+  // This needs to be a {RecursiveMutex} only because of {CodeSpaceWriteScope}
+  // usages, which are (1) either at places that already hold the
+  // {allocation_mutex_} or (2) because of multiple open {CodeSpaceWriteScope}s
+  // in the call hierarchy. Both are fixable.
   mutable base::RecursiveMutex allocation_mutex_;
 
 //////////////////////////////////////////////////////////////////////////////
...
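
A standalone illustration of the `{RecursiveMutex}` remark above (simplified, with hypothetical class and method names, not V8's real call graph): a nested call path that re-acquires an already-held lock deadlocks with a plain mutex but is fine with a recursive one, which is why the TODO to revert to a regular `{Mutex}` first requires untangling such call paths.

```cpp
#include <mutex>

class NativeModuleLike {
 public:
  void PublishCode() {
    std::lock_guard<std::recursive_mutex> lock(allocation_mutex_);
    PublishCodeLocked();
  }
  void AddCodeSpace() {
    std::lock_guard<std::recursive_mutex> lock(allocation_mutex_);
    // Nested call re-acquires {allocation_mutex_}: fine for a recursive
    // mutex, an instant deadlock for a plain std::mutex.
    PublishCode();
  }

 private:
  void PublishCodeLocked() { /* assumes allocation_mutex_ is held */ }
  std::recursive_mutex allocation_mutex_;
};

int main() {
  NativeModuleLike m;
  m.AddCodeSpace();  // acquires the mutex twice on the same thread
}
```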
@@ -568,9 +568,7 @@ class CopyAndRelocTask : public JobTask {
         publish_handle_(std::move(publish_handle)) {}
 
   void Run(JobDelegate* delegate) override {
-    CODE_SPACE_WRITE_SCOPE
-    NativeModuleModificationScope native_module_modification_scope(
-        deserializer_->native_module_);
+    CodeSpaceWriteScope code_space_write_scope(deserializer_->native_module_);
     do {
       auto batch = from_queue_->Pop();
       if (batch.empty()) break;
...