Commit 70cc29ca authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Add generated code from the background thread

This avoids the need for the finisher task (running on the foreground
thread) for Liftoff code.
This CL just makes the simple change to call {AddCode} from the
background thread. More cleanup will follow in separate CLs.

R=mstarzinger@chromium.org

Bug: v8:6600, v8:7921
Change-Id: I99ef29377efee5be36ba203aa7ed71e2471d86f3
Reviewed-on: https://chromium-review.googlesource.com/1126930
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54266}
parent f248584b
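
The overall pattern of this CL: each background compilation job registers its finished code in a shared, mutex-protected code table itself, so no foreground "finisher" pass is needed to publish Liftoff code. The following is a minimal standalone sketch of that pattern under assumed, simplified types (`Code`, `CodeTable` are hypothetical, not V8's API):

```cpp
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

struct Code { std::vector<uint8_t> instructions; };

class CodeTable {
 public:
  explicit CodeTable(size_t num_functions) : code_(num_functions, nullptr) {}

  // Thread safe w.r.t. other calls to AddCode: the lock serializes
  // publication, mirroring what this CL documents for {NativeModule::AddCode}.
  Code* AddCode(uint32_t index, Code* code) {
    std::lock_guard<std::mutex> lock(mutex_);
    code_[index] = code;
    return code;
  }

 private:
  std::mutex mutex_;
  std::vector<Code*> code_;
};

int main() {
  CodeTable table(2);
  Code c0, c1;
  // Two background "compilation" threads publish their results directly,
  // with no foreground finisher task involved.
  std::thread t0([&] { table.AddCode(0, &c0); });
  std::thread t1([&] { table.AddCode(1, &c1); });
  t0.join();
  t1.join();
}
```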
......@@ -572,12 +572,15 @@ class LiftoffAssembler : public TurboAssembler {
   }
 
   CacheState* cache_state() { return &cache_state_; }
   const CacheState* cache_state() const { return &cache_state_; }
 
   bool did_bailout() { return bailout_reason_ != nullptr; }
   const char* bailout_reason() const { return bailout_reason_; }
 
   void bailout(const char* reason) {
-    if (bailout_reason_ == nullptr) bailout_reason_ = reason;
+    if (bailout_reason_ != nullptr) return;
+    AbortCompilation();
+    bailout_reason_ = reason;
   }
 
  private:
......
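
The hunk above changes the bailout contract: the first bailout reason wins, and assembly is aborted eagerly inside {bailout} rather than by a caller later. A hedged sketch of that behavior with a simplified stand-in class (not the real LiftoffAssembler):

```cpp
#include <cassert>

class AssemblerSketch {
 public:
  void bailout(const char* reason) {
    if (bailout_reason_ != nullptr) return;  // keep the first reason only
    AbortCompilation();                      // stop emitting code immediately
    bailout_reason_ = reason;
  }
  bool did_bailout() const { return bailout_reason_ != nullptr; }
  const char* bailout_reason() const { return bailout_reason_; }

 private:
  void AbortCompilation() { aborted_ = true; }  // placeholder body
  const char* bailout_reason_ = nullptr;
  bool aborted_ = false;
};

int main() {
  AssemblerSketch asm_;
  asm_.bailout("unsupported opcode");
  asm_.bailout("later reason");  // ignored: first reason wins
  assert(asm_.did_bailout());
}
```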
......@@ -29,7 +29,7 @@ constexpr auto kStack = LiftoffAssembler::VarState::kStack;
 
 namespace {
 
-#define __ asm_->
+#define __ asm_.
 
 #define TRACE(...) \
   do { \
......@@ -131,11 +131,9 @@ class LiftoffCompiler {
     }
   };
 
-  LiftoffCompiler(LiftoffAssembler* liftoff_asm,
-                  compiler::CallDescriptor* call_descriptor, ModuleEnv* env,
+  LiftoffCompiler(compiler::CallDescriptor* call_descriptor, ModuleEnv* env,
                   Zone* compilation_zone)
-      : asm_(liftoff_asm),
-        descriptor_(
+      : descriptor_(
             GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
         env_(env),
         min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
......@@ -150,6 +148,8 @@ class LiftoffCompiler {
 
   bool ok() const { return ok_; }
 
+  void GetCode(CodeDesc* desc) { asm_.GetCode(nullptr, desc); }
+
   OwnedVector<uint8_t> GetSourcePositionTable() {
     return source_position_table_builder_.ToSourcePositionTableVector();
   }
......@@ -410,7 +410,7 @@ class LiftoffCompiler {
     source_position_table_builder_.AddPosition(
         __ pc_offset(), SourcePosition(ool.position), false);
     __ CallRuntimeStub(ool.stub);
-    safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+    safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
                                              Safepoint::kNoLazyDeopt);
     DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
     if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
......@@ -429,7 +429,7 @@ class LiftoffCompiler {
     __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
                               __ GetTotalFrameSlotCount());
     __ FinishCode();
-    safepoint_table_builder_.Emit(asm_, __ GetTotalFrameSlotCount());
+    safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCount());
     // The previous calls may have also generated a bailout.
     DidAssemblerBailout(decoder);
   }
......@@ -437,6 +437,7 @@ class LiftoffCompiler {
   void OnFirstError(Decoder* decoder) {
     ok_ = false;
     BindUnboundLabels(decoder);
+    asm_.AbortCompilation();
   }
 
   void NextInstruction(Decoder* decoder, WasmOpcode opcode) {
......@@ -553,7 +554,7 @@ class LiftoffCompiler {
                                            Register),
                     ExternalReference (*fallback_fn)()) {
     auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
-      if (emit_fn && (asm_->*emit_fn)(dst.gp(), src.gp())) return;
+      if (emit_fn && (asm_.*emit_fn)(dst.gp(), src.gp())) return;
       ExternalReference ext_ref = fallback_fn();
       ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
       FunctionSig sig_i_i(1, 1, sig_i_i_reps);
......@@ -567,7 +568,7 @@ class LiftoffCompiler {
       bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
       ExternalReference (*fallback_fn)()) {
     auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
-      if ((asm_->*emit_fn)(dst.fp(), src.fp())) return;
+      if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
       ExternalReference ext_ref = fallback_fn();
       ValueType sig_reps[] = {type};
       FunctionSig sig(0, 1, sig_reps);
......@@ -1421,7 +1422,7 @@ class LiftoffCompiler {
                  LiftoffAssembler::kWasmIntPtr);
     } else {
       DCHECK(param_loc.IsCallerFrameSlot());
-      LiftoffStackSlots stack_slots(asm_);
+      LiftoffStackSlots stack_slots(&asm_);
       stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
                                                  LiftoffRegister(args[0])));
       stack_slots.Construct();
......@@ -1432,7 +1433,7 @@ class LiftoffCompiler {
     LiftoffRegister centry(kJavaScriptCallCodeStartRegister);
     LOAD_INSTANCE_FIELD(centry, CEntryStub, kPointerLoadType);
     __ CallRuntimeWithCEntry(runtime_function, centry.gp());
-    safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+    safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
                                              Safepoint::kNoLazyDeopt);
   }
......@@ -1556,7 +1557,7 @@ class LiftoffCompiler {
     if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
 
     __ CallRuntimeStub(wasm::WasmCode::kWasmGrowMemory);
-    safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+    safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
                                              Safepoint::kNoLazyDeopt);
 
     if (kReturnRegister0 != result.gp()) {
......@@ -1608,7 +1609,7 @@ class LiftoffCompiler {
 
     __ CallIndirect(imm.sig, call_descriptor, target_reg);
 
-    safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+    safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
                                              Safepoint::kNoLazyDeopt);
 
     __ FinishCall(imm.sig, call_descriptor);
......@@ -1623,7 +1624,7 @@ class LiftoffCompiler {
       Address addr = static_cast<Address>(imm.index);
       __ CallNativeWasmCode(addr);
 
-      safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+      safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
                                                Safepoint::kNoLazyDeopt);
 
       __ FinishCall(imm.sig, call_descriptor);
......@@ -1748,7 +1749,7 @@ class LiftoffCompiler {
     __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
     __ CallIndirect(imm.sig, call_descriptor, target);
 
-    safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+    safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
                                              Safepoint::kNoLazyDeopt);
 
     __ FinishCall(imm.sig, call_descriptor);
......@@ -1789,7 +1790,7 @@ class LiftoffCompiler {
   }
 
  private:
-  LiftoffAssembler* const asm_;
+  LiftoffAssembler asm_;
   compiler::CallDescriptor* const descriptor_;
   ModuleEnv* const env_;
   // {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
......@@ -1842,22 +1843,20 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
   base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
       base::in_place, wasm_unit_->counters_->liftoff_compile_time());
   wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
-      decoder(&zone, module, wasm_unit_->func_body_, &asm_, call_descriptor,
+      decoder(&zone, module, wasm_unit_->func_body_, call_descriptor,
               wasm_unit_->env_, &zone);
   decoder.Decode();
   liftoff_compile_time_scope.reset();
   wasm::LiftoffCompiler* compiler = &decoder.interface();
-  bool liftoff_bailout = !compiler->ok();
-  bool validation_error = decoder.failed();
-  if (liftoff_bailout || validation_error) {
+  if (decoder.failed()) return false;  // validation error
+  if (!compiler->ok()) {
     // Liftoff compilation failed.
-    if (liftoff_bailout) {
-      wasm_unit_->counters_->liftoff_unsupported_functions()->Increment();
-    }
-    asm_.AbortCompilation();
+    wasm_unit_->counters_->liftoff_unsupported_functions()->Increment();
     return false;
   }
-  wasm_unit_->counters_->liftoff_compiled_functions()->Increment();
 
   if (FLAG_trace_wasm_decode_time) {
     double compile_ms = compile_timer.Elapsed().InMillisecondsF();
     PrintF(
......@@ -1868,29 +1867,28 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
         compile_ms);
   }
 
+  wasm_unit_->counters_->liftoff_compiled_functions()->Increment();
+
-  asm_.GetCode(nullptr, &desc_);
-  source_positions_ = compiler->GetSourcePositionTable();
-  protected_instructions_ = compiler->GetProtectedInstructions();
-  frame_slot_count_ = compiler->GetTotalFrameSlotCount();
-  safepoint_table_offset_ = compiler->GetSafepointTableOffset();
+  CodeDesc desc;
+  compiler->GetCode(&desc);
+  OwnedVector<byte> source_positions = compiler->GetSourcePositionTable();
+  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions =
+      compiler->GetProtectedInstructions();
+  uint32_t frame_slot_count = compiler->GetTotalFrameSlotCount();
+  int safepoint_table_offset = compiler->GetSafepointTableOffset();
+  code_ = wasm_unit_->native_module_->AddCode(
+      wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
+      0, std::move(protected_instructions), std::move(source_positions),
+      wasm::WasmCode::kLiftoff);
 
   // Record the memory cost this unit places on the system until
   // it is finalized.
-  wasm_unit_->memory_cost_ =
-      desc_.buffer_size + source_positions_.size() +
-      protected_instructions_.size() * sizeof(*protected_instructions_.start());
+  wasm_unit_->memory_cost_ = sizeof(*this);
 
   return true;
 }
 
 wasm::WasmCode* LiftoffCompilationUnit::FinishCompilation(wasm::ErrorThrower*) {
-  // TODO(clemensh): Also move this to {ExecuteCompilation}.
-  return wasm_unit_->native_module_->AddCode(
-      wasm_unit_->func_index_, desc_, frame_slot_count_,
-      safepoint_table_offset_, 0, std::move(protected_instructions_),
-      std::move(source_positions_), wasm::WasmCode::kLiftoff);
+  return code_;
 }
#undef __
......
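
The hunks above invert the old division of labor: {ExecuteCompilation} (background thread) now both compiles and registers the code, and {FinishCompilation} (foreground thread) merely returns the cached result. An illustrative sketch of that split, using simplified hypothetical types rather than V8's real classes:

```cpp
#include <cstdint>

struct WasmCode {};

struct NativeModuleStub {
  // Stand-in for {NativeModule::AddCode}; assumed thread safe, as this CL
  // documents for the real method.
  WasmCode* AddCode(uint32_t /*index*/) { return &code; }
  WasmCode code;
};

class CompilationUnitSketch {
 public:
  CompilationUnitSketch(NativeModuleStub* module, uint32_t index)
      : module_(module), index_(index) {}

  // Runs on a background thread: compile and immediately publish.
  bool ExecuteCompilation() {
    code_ = module_->AddCode(index_);
    return code_ != nullptr;
  }

  // Runs on the foreground thread: nothing left to do but return the result.
  WasmCode* FinishCompilation() { return code_; }

 private:
  NativeModuleStub* const module_;
  const uint32_t index_;
  WasmCode* code_ = nullptr;
};

int main() {
  NativeModuleStub module;
  CompilationUnitSketch unit(&module, 0);
  if (unit.ExecuteCompilation()) {
    WasmCode* code = unit.FinishCompilation();
    (void)code;
  }
}
```

This is also why the unit no longer needs to keep the assembler and intermediate results ({desc_}, {source_positions_}, ...) alive across the two phases, which the header hunk below removes.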
......@@ -5,16 +5,16 @@
 #ifndef V8_WASM_BASELINE_LIFTOFF_COMPILER_H_
 #define V8_WASM_BASELINE_LIFTOFF_COMPILER_H_
 
-#include "src/source-position-table.h"
-#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/baseline/liftoff-assembler.h"
-#include "src/wasm/function-body-decoder.h"
-#include "src/wasm/function-compiler.h"
+#include "src/base/macros.h"
 
 namespace v8 {
 namespace internal {
 namespace wasm {
 
+class ErrorThrower;
+class WasmCode;
+class WasmCompilationUnit;
+
 class LiftoffCompilationUnit final {
  public:
   explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
......@@ -25,16 +25,9 @@ class LiftoffCompilationUnit final {
  private:
   WasmCompilationUnit* const wasm_unit_;
 
-  // Must stay alive until the code is added to the {NativeModule}, because it
-  // contains the instruction buffer.
-  LiftoffAssembler asm_;
-
   // Result of compilation:
-  CodeDesc desc_;
-  OwnedVector<byte> source_positions_;
-  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
-  uint32_t frame_slot_count_;
-  int safepoint_table_offset_;
+  WasmCode* code_;
 
   DISALLOW_COPY_AND_ASSIGN(LiftoffCompilationUnit);
 };
......
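
With the heavy members gone, the header can drop its includes in favor of forward declarations, as the hunk above shows. A brief sketch of why that compiles (header-style fragment, hypothetical names): a class only used through pointers or references needs no complete definition.

```cpp
class WasmCode;            // forward declaration suffices for WasmCode*
class WasmCompilationUnit;

class UnitSketch {
 public:
  explicit UnitSketch(WasmCompilationUnit* wasm_unit) : wasm_unit_(wasm_unit) {}
  WasmCode* code() const { return code_; }  // pointer use only, no include

 private:
  WasmCompilationUnit* const wasm_unit_;
  WasmCode* code_ = nullptr;
};
```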
......@@ -8,6 +8,7 @@
 #include "src/counters.h"
 #include "src/macro-assembler-inl.h"
 #include "src/wasm/baseline/liftoff-compiler.h"
+#include "src/wasm/wasm-code-manager.h"
 
 namespace v8 {
 namespace internal {
......
......@@ -168,6 +168,9 @@ void WasmCode::LogCode(Isolate* isolate) const {
 
 void WasmCode::Validate() const {
 #ifdef DEBUG
+  // We run NativeModule::Lookup, which accesses owned_code_, thus we need to
+  // hold the mutex to avoid race conditions.
+  base::LockGuard<base::Mutex> lock(&native_module_->allocation_mutex_);
   // We expect certain relocation info modes to never appear in {WasmCode}
   // objects or to be restricted to a small set of valid values. Hence the
   // iteration below does not use a mask, but visits all relocation data.
......@@ -530,11 +533,16 @@ WasmCode* NativeModule::AddCode(
     }
   }
 
-  if (!ret->protected_instructions_.is_empty()) {
-    ret->RegisterTrapHandlerData();
-  }
-  set_code(index, ret);
-  PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache);
+  {
+    // TODO(clemensh): Remove the need for locking here. Probably requires
+    // word-aligning the jump table slots.
+    base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+    if (!ret->protected_instructions_.is_empty()) {
+      ret->RegisterTrapHandlerData();
+    }
+    set_code(index, ret);
+    PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache);
+  }
 
   // Flush the i-cache here instead of in AddOwnedCode, to include the changes
   // made while iterating over the RelocInfo above.
......
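
The key structural point in the hunk above is that trap handler registration, the code-table update, and the jump-table patch now form one critical section, so concurrent {AddCode} calls cannot interleave between those steps. A compilable sketch of that locking structure (simplified; the stub bodies are placeholders, not V8's implementations):

```cpp
#include <cstdint>
#include <mutex>
#include <vector>

struct WasmCodeStub {
  bool has_protected_instructions = false;
  void RegisterTrapHandlerData() {}        // placeholder
  uintptr_t instruction_start() { return 0; }
};

class NativeModuleSketch {
 public:
  explicit NativeModuleSketch(size_t n) : code_table_(n, nullptr) {}

  WasmCodeStub* AddCode(uint32_t index, WasmCodeStub* ret) {
    // One lock covers all publication steps, so no other thread can observe
    // a half-published function.
    std::lock_guard<std::mutex> lock(allocation_mutex_);
    if (ret->has_protected_instructions) ret->RegisterTrapHandlerData();
    set_code(index, ret);
    PatchJumpTable(index, ret->instruction_start());
    return ret;
  }

 private:
  void set_code(uint32_t index, WasmCodeStub* code) {
    code_table_[index] = code;
  }
  void PatchJumpTable(uint32_t /*index*/, uintptr_t /*target*/) {}  // placeholder

  std::mutex allocation_mutex_;
  std::vector<WasmCodeStub*> code_table_;
};

int main() {
  NativeModuleSketch module(1);
  WasmCodeStub code;
  module.AddCode(0, &code);  // safe to call from multiple threads
}
```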
......@@ -225,6 +225,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   static constexpr bool kCanAllocateMoreMemory = true;
 #endif
 
+  // {AddCode} is thread safe w.r.t. other calls to {AddCode} or {AddCodeCopy},
+  // i.e. it can be called concurrently from background threads.
   WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
                     size_t safepoint_table_offset, size_t handler_table_offset,
                     OwnedVector<trap_handler::ProtectedInstructionData>
......@@ -388,7 +390,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   std::list<VirtualMemory> owned_code_space_;
   WasmCodeManager* wasm_code_manager_;
 
-  base::Mutex allocation_mutex_;
+  // This mutex protects concurrent calls to {AddCode} and {AddCodeCopy}.
+  mutable base::Mutex allocation_mutex_;
   size_t committed_code_space_ = 0;
   int modification_scope_depth_ = 0;
   bool can_request_more_memory_;
......
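
The mutex becomes `mutable` because const methods, such as the debug-only {WasmCode::Validate() const} path above, now take the lock, and locking mutates the mutex object. A minimal sketch of why `mutable` is required (simplified, not V8's classes):

```cpp
#include <mutex>

class ModuleSketch {
 public:
  void Lookup() const {
    // Locking is only legal in a const method because the member is mutable.
    std::lock_guard<std::mutex> lock(allocation_mutex_);
    // ... read the owned-code data structures under the lock ...
  }

 private:
  mutable std::mutex allocation_mutex_;
};

int main() {
  const ModuleSketch module;
  module.Lookup();  // compiles: the const method may still lock
}
```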