Commit de6a07dc authored by Clemens Hammacher, committed by Commit Bot

[wasm] Fix data race on code table

The {code_table_} in {NativeModule} is protected by the
{allocation_mutex_}. The {code} and {code_table} accessors did not
acquire this lock though.
This CL removes the unsafe {code_table} accessor, renames {code} to
{GetCode} and protects it by a lock.

R=mstarzinger@chromium.org

Bug: v8:9112
Change-Id: Id2df68460b4c10291a49b4016b9574e02744e8b9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1561315
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60779}
parent e741a164
...@@ -1226,9 +1226,8 @@ RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) { ...@@ -1226,9 +1226,8 @@ RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
wasm::NativeModule* native_module = wasm::NativeModule* native_module =
exp_fun->instance()->module_object()->native_module(); exp_fun->instance()->module_object()->native_module();
uint32_t func_index = exp_fun->function_index(); uint32_t func_index = exp_fun->function_index();
return isolate->heap()->ToBoolean( wasm::WasmCode* code = native_module->GetCode(func_index);
native_module->has_code(func_index) && return isolate->heap()->ToBoolean(code && code->is_liftoff());
native_module->code(func_index)->is_liftoff());
} }
RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) { RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
......
...@@ -474,8 +474,10 @@ void NativeModule::LogWasmCodes(Isolate* isolate) { ...@@ -474,8 +474,10 @@ void NativeModule::LogWasmCodes(Isolate* isolate) {
// TODO(titzer): we skip the logging of the import wrappers // TODO(titzer): we skip the logging of the import wrappers
// here, but they should be included somehow. // here, but they should be included somehow.
for (WasmCode* code : code_table()) { int start = module()->num_imported_functions;
if (code != nullptr) code->LogCode(isolate); int end = start + module()->num_declared_functions;
for (int func_index = start; func_index < end; ++func_index) {
if (WasmCode* code = GetCode(func_index)) code->LogCode(isolate);
} }
} }
...@@ -828,10 +830,9 @@ WasmCode* NativeModule::AddDeserializedCode( ...@@ -828,10 +830,9 @@ WasmCode* NativeModule::AddDeserializedCode(
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const { std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
base::MutexGuard lock(&allocation_mutex_); base::MutexGuard lock(&allocation_mutex_);
std::vector<WasmCode*> result; WasmCode** start = code_table_.get();
result.reserve(code_table().size()); WasmCode** end = start + module_->num_declared_functions;
for (WasmCode* code : code_table()) result.push_back(code); return std::vector<WasmCode*>{start, end};
return result;
} }
WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) { WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
......
...@@ -319,13 +319,14 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -319,13 +319,14 @@ class V8_EXPORT_PRIVATE NativeModule final {
// to get a consistent view of the table (e.g. used by the serializer). // to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const; std::vector<WasmCode*> SnapshotCodeTable() const;
WasmCode* code(uint32_t index) const { WasmCode* GetCode(uint32_t index) const {
base::MutexGuard guard(&allocation_mutex_);
DCHECK_LT(index, num_functions()); DCHECK_LT(index, num_functions());
DCHECK_LE(module_->num_imported_functions, index); DCHECK_LE(module_->num_imported_functions, index);
return code_table_[index - module_->num_imported_functions]; return code_table_[index - module_->num_imported_functions];
} }
bool has_code(uint32_t index) const { return code(index) != nullptr; } bool has_code(uint32_t index) const { return GetCode(index) != nullptr; }
Address runtime_stub_entry(WasmCode::RuntimeStubId index) const { Address runtime_stub_entry(WasmCode::RuntimeStubId index) const {
DCHECK_LT(index, WasmCode::kRuntimeStubCount); DCHECK_LT(index, WasmCode::kRuntimeStubCount);
...@@ -449,10 +450,6 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -449,10 +450,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size); WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
Vector<WasmCode*> code_table() const {
return {code_table_.get(), module_->num_declared_functions};
}
// Hold the {mutex_} when calling this method. // Hold the {mutex_} when calling this method.
bool has_interpreter_redirection(uint32_t func_index) { bool has_interpreter_redirection(uint32_t func_index) {
DCHECK_LT(func_index, num_functions()); DCHECK_LT(func_index, num_functions());
......
...@@ -3614,7 +3614,7 @@ class ThreadImpl { ...@@ -3614,7 +3614,7 @@ class ThreadImpl {
if (native_module->is_jump_table_slot(target)) { if (native_module->is_jump_table_slot(target)) {
uint32_t func_index = uint32_t func_index =
native_module->GetFunctionIndexFromJumpTableSlot(target); native_module->GetFunctionIndexFromJumpTableSlot(target);
return native_module->code(func_index); return native_module->GetCode(func_index);
} }
WasmCode* code = native_module->Lookup(target); WasmCode* code = native_module->Lookup(target);
DCHECK_EQ(code->instruction_start(), target); DCHECK_EQ(code->instruction_start(), target);
......
...@@ -3707,7 +3707,7 @@ TEST(Liftoff_tier_up) { ...@@ -3707,7 +3707,7 @@ TEST(Liftoff_tier_up) {
r.builder().instance_object()->module_object()->native_module(); r.builder().instance_object()->module_object()->native_module();
// This test only works if we managed to compile with Liftoff. // This test only works if we managed to compile with Liftoff.
if (native_module->code(add.function_index())->is_liftoff()) { if (native_module->GetCode(add.function_index())->is_liftoff()) {
// First run should execute {add}. // First run should execute {add}.
CHECK_EQ(18, r.Call(11, 7)); CHECK_EQ(18, r.Call(11, 7));
...@@ -3715,7 +3715,7 @@ TEST(Liftoff_tier_up) { ...@@ -3715,7 +3715,7 @@ TEST(Liftoff_tier_up) {
// the index of {add}. // the index of {add}.
CodeDesc desc; CodeDesc desc;
memset(&desc, 0, sizeof(CodeDesc)); memset(&desc, 0, sizeof(CodeDesc));
WasmCode* sub_code = native_module->code(sub.function_index()); WasmCode* sub_code = native_module->GetCode(sub.function_index());
size_t sub_size = sub_code->instructions().size(); size_t sub_size = sub_code->instructions().size();
std::unique_ptr<byte[]> buffer(new byte[sub_code->instructions().size()]); std::unique_ptr<byte[]> buffer(new byte[sub_code->instructions().size()]);
memcpy(buffer.get(), sub_code->instructions().start(), sub_size); memcpy(buffer.get(), sub_code->instructions().start(), sub_size);
......
...@@ -205,7 +205,7 @@ class TestingModuleBuilder { ...@@ -205,7 +205,7 @@ class TestingModuleBuilder {
return instance_object_; return instance_object_;
} }
WasmCode* GetFunctionCode(uint32_t index) const { WasmCode* GetFunctionCode(uint32_t index) const {
return native_module_->code(index); return native_module_->GetCode(index);
} }
Address globals_start() const { Address globals_start() const {
return reinterpret_cast<Address>(globals_data_); return reinterpret_cast<Address>(globals_data_);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment