Commit 8844cfd9 authored by Leszek Swirski, committed by V8 LUCI CQ

Revert "[baseline] Concurrent Sparkplug n-thread with synchronised queue"

This reverts commit 0c459ff5.

Reason for revert: breaks the build on M1 (where the W^X flag is RO): https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Mac%20-%20arm64%20-%20release%20builder/6999/overview

Original change's description:
> [baseline] Concurrent Sparkplug n-thread with synchronised queue
>
> Installation in the main thread.
> Design doc: https://docs.google.com/document/d/1GmEiEt2VDmhY_Ag0PiIcGWKtvQupKgNcMZUvgpfQksk/edit?resourcekey=0-seYa-QJsx1ZbjelluPG1iQ
>
> Change-Id: Ifc6eccd44efdf377320c64cf9957c6060334e543
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3186831
> Commit-Queue: Victor Gomes <victorgomes@chromium.org>
> Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#77431}

Change-Id: I45a952aacf0ad29ebb703a742fdc6da7b0b7c826
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3229378
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Owners-Override: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77433}
parent e8c97924
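
For context on what is being reverted: the change compiled Sparkplug batches on background threads and installed the resulting Code objects from the main thread, handing batches through a locked queue (see the JobDispatcher, LockedQueue and INSTALL_BASELINE_CODE changes in the diff below). The sketch below shows only that producer/consumer shape in plain C++; it is not V8 code, and every name in it is hypothetical.

#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <utility>

// A compile job: produced and "compiled" off-thread, installed on the main thread.
struct CompileJob {
  std::string function_name;
  bool compiled = false;
  void Compile() { compiled = true; }  // stands in for the actual baseline compile
  void Install() { std::cout << "installed " << function_name << "\n"; }
};

// Mutex-protected FIFO, loosely analogous to the LockedQueue used by the reverted code.
template <typename T>
class LockedQueue {
 public:
  void Enqueue(T item) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push(std::move(item));
  }
  bool Dequeue(T* out) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (queue_.empty()) return false;
    *out = std::move(queue_.front());
    queue_.pop();
    return true;
  }
 private:
  std::mutex mutex_;
  std::queue<T> queue_;
};

int main() {
  LockedQueue<std::unique_ptr<CompileJob>> incoming;  // main thread -> worker
  LockedQueue<std::unique_ptr<CompileJob>> outgoing;  // worker -> main thread
  for (const char* name : {"f", "g", "h"}) {
    incoming.Enqueue(std::make_unique<CompileJob>(CompileJob{name, false}));
  }
  // Background "compiler" thread: drain the incoming queue, compile, hand back.
  std::thread worker([&] {
    std::unique_ptr<CompileJob> job;
    while (incoming.Dequeue(&job)) {
      job->Compile();
      outgoing.Enqueue(std::move(job));
    }
  });
  worker.join();  // stand-in for the INSTALL_BASELINE_CODE interrupt handshake
  // Main-thread installation of everything the worker finished.
  std::unique_ptr<CompileJob> job;
  while (outgoing.Dequeue(&job)) job->Install();
  return 0;
}

In the reverted change the worker side was a v8::JobTask, installation was requested via a stack-guard interrupt rather than a join, and the whole path was gated on --concurrent_sparkplug, which also implied --no-write_protect_code_memory.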
@@ -15,219 +15,19 @@
#include "src/handles/global-handles-inl.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/parked-scope.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/utils/locked-queue-inl.h"
namespace v8 {
namespace internal {
namespace baseline {
class BaselineCompilerTask {
public:
BaselineCompilerTask(Isolate* isolate, PersistentHandles* handles,
SharedFunctionInfo sfi)
: shared_function_info_(handles->NewHandle(sfi)),
bytecode_(handles->NewHandle(sfi.GetBytecodeArray(isolate))) {
DCHECK(sfi.is_compiled());
}
BaselineCompilerTask(const BaselineCompilerTask&) V8_NOEXCEPT = delete;
BaselineCompilerTask(BaselineCompilerTask&&) V8_NOEXCEPT = default;
// Executed in the background thread.
void Compile(LocalIsolate* local_isolate) {
BaselineCompiler compiler(local_isolate, shared_function_info_, bytecode_);
compiler.GenerateCode();
maybe_code_ = local_isolate->heap()->NewPersistentMaybeHandle(
compiler.Build(local_isolate));
}
// Executed in the main thread.
void Install(Isolate* isolate) {
Handle<Code> code;
if (!maybe_code_.ToHandle(&code)) return;
if (FLAG_print_code) {
code->Print();
}
isolate->heap()->RegisterCodeObject(code);
shared_function_info_->set_baseline_code(*code, kReleaseStore);
if (V8_LIKELY(FLAG_use_osr)) {
// Arm back edges for OSR
shared_function_info_->GetBytecodeArray(isolate)
.set_osr_loop_nesting_level(AbstractCode::kMaxLoopNestingMarker);
}
if (FLAG_trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
std::stringstream ss;
ss << "[Concurrent Sparkplug Off Thread] Function ";
shared_function_info_->ShortPrint(ss);
ss << " installed\n";
OFStream os(scope.file());
os << ss.str();
}
}
private:
Handle<SharedFunctionInfo> shared_function_info_;
Handle<BytecodeArray> bytecode_;
MaybeHandle<Code> maybe_code_;
};
class BaselineBatchCompilerJob {
public:
BaselineBatchCompilerJob(Isolate* isolate, Handle<WeakFixedArray> task_queue,
int batch_size)
: isolate_for_local_isolate_(isolate) {
handles_ = isolate->NewPersistentHandles();
tasks_.reserve(batch_size);
for (int i = 0; i < batch_size; i++) {
MaybeObject maybe_sfi = task_queue->Get(i);
// TODO(victorgomes): Do I need to clear the value?
task_queue->Set(i, HeapObjectReference::ClearedValue(isolate));
HeapObject obj;
// Skip functions where weak reference is no longer valid.
if (!maybe_sfi.GetHeapObjectIfWeak(&obj)) continue;
// Skip functions where the bytecode has been flushed.
SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
if (ShouldSkipFunction(shared)) continue;
tasks_.emplace_back(isolate, handles_.get(), shared);
}
if (FLAG_trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[Concurrent Sparkplug] compiling %zu functions\n",
tasks_.size());
}
}
bool ShouldSkipFunction(SharedFunctionInfo shared) {
return !shared.is_compiled() || shared.HasBaselineCode() ||
!CanCompileWithBaseline(isolate_for_local_isolate_, shared);
}
// Executed in the background thread.
void Compile() {
#ifdef V8_RUNTIME_CALL_STATS
WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
isolate_for_local_isolate_->counters()
->worker_thread_runtime_call_stats());
LocalIsolate local_isolate(isolate_for_local_isolate_,
ThreadKind::kBackground,
runtime_call_stats_scope.Get());
#else
LocalIsolate local_isolate(isolate_for_local_isolate_,
ThreadKind::kBackground);
#endif
local_isolate.heap()->AttachPersistentHandles(std::move(handles_));
UnparkedScope unparked_scope(&local_isolate);
LocalHandleScope handle_scope(&local_isolate);
for (auto& task : tasks_) {
task.Compile(&local_isolate);
}
// Get the handle back since we'd need them to install the code later.
handles_ = local_isolate.heap()->DetachPersistentHandles();
}
// Executed in the main thread.
void Install(Isolate* isolate) {
for (auto& task : tasks_) {
task.Install(isolate);
}
}
private:
Isolate* isolate_for_local_isolate_;
std::vector<BaselineCompilerTask> tasks_;
std::unique_ptr<PersistentHandles> handles_;
};
class ConcurrentBaselineCompiler {
public:
class JobDispatcher : public v8::JobTask {
public:
JobDispatcher(
Isolate* isolate,
LockedQueue<std::unique_ptr<BaselineBatchCompilerJob>>* incoming_queue,
LockedQueue<std::unique_ptr<BaselineBatchCompilerJob>>* outcoming_queue)
: isolate_(isolate),
incoming_queue_(incoming_queue),
outgoing_queue_(outcoming_queue) {}
void Run(JobDelegate* delegate) override {
while (!incoming_queue_->IsEmpty() && !delegate->ShouldYield()) {
std::unique_ptr<BaselineBatchCompilerJob> job;
incoming_queue_->Dequeue(&job);
job->Compile();
outgoing_queue_->Enqueue(std::move(job));
}
isolate_->stack_guard()->RequestInstallBaselineCode();
}
size_t GetMaxConcurrency(size_t worker_count) const override {
return incoming_queue_->size();
}
private:
Isolate* isolate_;
LockedQueue<std::unique_ptr<BaselineBatchCompilerJob>>* incoming_queue_;
LockedQueue<std::unique_ptr<BaselineBatchCompilerJob>>* outgoing_queue_;
};
explicit ConcurrentBaselineCompiler(Isolate* isolate) : isolate_(isolate) {
if (FLAG_concurrent_sparkplug) {
job_handle_ = V8::GetCurrentPlatform()->PostJob(
TaskPriority::kUserVisible,
std::make_unique<JobDispatcher>(isolate_, &incoming_queue_,
&outgoing_queue_));
}
}
~ConcurrentBaselineCompiler() {
if (FLAG_concurrent_sparkplug && job_handle_->IsValid()) {
// Wait for the job handle to complete, so that we know the queue
// pointers are safe.
job_handle_->Cancel();
}
}
void CompileBatch(Handle<WeakFixedArray> task_queue, int batch_size) {
DCHECK(FLAG_concurrent_sparkplug);
RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileBaseline);
incoming_queue_.Enqueue(std::make_unique<BaselineBatchCompilerJob>(
isolate_, task_queue, batch_size));
job_handle_->NotifyConcurrencyIncrease();
}
void InstallBatch() {
while (!outgoing_queue_.IsEmpty()) {
std::unique_ptr<BaselineBatchCompilerJob> job;
outgoing_queue_.Dequeue(&job);
job->Install(isolate_);
}
}
private:
Isolate* isolate_;
std::unique_ptr<JobHandle> job_handle_;
LockedQueue<std::unique_ptr<BaselineBatchCompilerJob>> incoming_queue_;
LockedQueue<std::unique_ptr<BaselineBatchCompilerJob>> outgoing_queue_;
};
BaselineBatchCompiler::BaselineBatchCompiler(Isolate* isolate)
: isolate_(isolate),
compilation_queue_(Handle<WeakFixedArray>::null()),
last_index_(0),
estimated_instruction_size_(0),
enabled_(true) {
if (FLAG_concurrent_sparkplug) {
concurrent_compiler_ =
std::make_unique<ConcurrentBaselineCompiler>(isolate_);
}
}
enabled_(true) {}
BaselineBatchCompiler::~BaselineBatchCompiler() {
if (!compilation_queue_.is_null()) {
@@ -236,20 +36,19 @@ BaselineBatchCompiler::~BaselineBatchCompiler() {
}
}
void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
bool BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
// Early return if the function is compiled with baseline already or it is not
// suitable for baseline compilation.
if (shared->HasBaselineCode()) return;
if (!CanCompileWithBaseline(isolate_, *shared)) return;
if (shared->HasBaselineCode()) return true;
if (!CanCompileWithBaseline(isolate_, *shared)) return false;
// Immediately compile the function if batch compilation is disabled.
if (!is_enabled()) {
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate_));
Compiler::CompileBaseline(isolate_, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope);
return;
return Compiler::CompileBaseline(
isolate_, function, Compiler::CLEAR_EXCEPTION, &is_compiled_scope);
}
int estimated_size;
@@ -277,26 +76,12 @@ void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
"functions\n",
(last_index_ + 1));
}
if (FLAG_concurrent_sparkplug) {
Enqueue(shared);
concurrent_compiler_->CompileBatch(compilation_queue_, last_index_ + 1);
ClearBatch();
} else {
CompileBatch(function);
}
} else {
Enqueue(shared);
CompileBatch(function);
return true;
}
}
void BaselineBatchCompiler::Enqueue(Handle<SharedFunctionInfo> shared) {
EnsureQueueCapacity();
compilation_queue_->Set(last_index_++, HeapObjectReference::Weak(*shared));
}
void BaselineBatchCompiler::InstallBatch() {
DCHECK(FLAG_concurrent_sparkplug);
concurrent_compiler_->InstallBatch();
return false;
}
void BaselineBatchCompiler::EnsureQueueCapacity() {
......
@@ -5,8 +5,6 @@
#ifndef V8_BASELINE_BASELINE_BATCH_COMPILER_H_
#define V8_BASELINE_BASELINE_BATCH_COMPILER_H_
#include <atomic>
#include "src/handles/global-handles.h"
#include "src/handles/handles.h"
@@ -14,9 +12,6 @@ namespace v8 {
namespace internal {
namespace baseline {
class BaselineCompiler;
class ConcurrentBaselineCompiler;
class BaselineBatchCompiler {
public:
static const int kInitialQueueSize = 32;
@@ -24,26 +19,23 @@ class BaselineBatchCompiler {
explicit BaselineBatchCompiler(Isolate* isolate);
~BaselineBatchCompiler();
// Enqueues SharedFunctionInfo of |function| for compilation.
void EnqueueFunction(Handle<JSFunction> function);
// Returns true if the function is compiled (either it was compiled already,
// or the current batch including the function was just compiled).
bool EnqueueFunction(Handle<JSFunction> function);
void set_enabled(bool enabled) { enabled_ = enabled; }
bool is_enabled() { return enabled_; }
void InstallBatch();
private:
// Ensure there is enough space in the compilation queue to enqueue another
// function, growing the queue if necessary.
void EnsureQueueCapacity();
// Enqueues SharedFunctionInfo.
void Enqueue(Handle<SharedFunctionInfo> shared);
// Returns true if the current batch exceeds the threshold and should be
// compiled.
bool ShouldCompileBatch() const;
// Compiles the current batch.
// Compiles the current batch and returns the number of functions compiled.
void CompileBatch(Handle<JSFunction> function);
// Resets the current batch.
@@ -68,9 +60,6 @@ class BaselineBatchCompiler {
// Flag indicating whether batch compilation is enabled.
// Batch compilation can be dynamically disabled e.g. when creating snapshots.
bool enabled_;
// Handle to the background compilation jobs.
std::unique_ptr<ConcurrentBaselineCompiler> concurrent_compiler_;
};
} // namespace baseline
......
@@ -24,7 +24,6 @@
#include "src/codegen/macro-assembler-inl.h"
#include "src/common/globals.h"
#include "src/execution/frame-constants.h"
#include "src/heap/local-factory-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/logging/runtime-call-stats-scope.h"
@@ -259,18 +258,16 @@ std::unique_ptr<AssemblerBuffer> AllocateBuffer(
} // namespace
BaselineCompiler::BaselineCompiler(
LocalIsolate* local_isolate,
Handle<SharedFunctionInfo> shared_function_info,
Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
Handle<BytecodeArray> bytecode)
: local_isolate_(local_isolate),
stats_(local_isolate->runtime_call_stats()),
: local_isolate_(isolate->AsLocalIsolate()),
stats_(isolate->counters()->runtime_call_stats()),
shared_function_info_(shared_function_info),
bytecode_(bytecode),
masm_(local_isolate->GetMainThreadIsolateUnsafe(),
CodeObjectRequired::kNo, AllocateBuffer(bytecode)),
masm_(isolate, CodeObjectRequired::kNo, AllocateBuffer(bytecode)),
basm_(&masm_),
iterator_(bytecode_),
zone_(local_isolate->allocator(), ZONE_NAME),
zone_(isolate->allocator(), ZONE_NAME),
labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())) {
MemsetPointer(labels_, nullptr, bytecode_->length());
@@ -284,15 +281,9 @@ BaselineCompiler::BaselineCompiler(
#define __ basm_.
#define RCS_BASELINE_SCOPE(rcs) \
RCS_SCOPE(stats_, \
local_isolate_->is_main_thread() \
? RuntimeCallCounterId::kCompileBaseline##rcs \
: RuntimeCallCounterId::kCompileBackgroundBaseline##rcs)
void BaselineCompiler::GenerateCode() {
{
RCS_BASELINE_SCOPE(PreVisit);
RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
for (; !iterator_.done(); iterator_.Advance()) {
PreVisitSingleBytecode();
}
@@ -304,7 +295,7 @@ void BaselineCompiler::GenerateCode() {
__ CodeEntry();
{
RCS_BASELINE_SCOPE(Visit);
RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselineVisit);
Prologue();
AddPosition();
for (; !iterator_.done(); iterator_.Advance()) {
@@ -314,19 +305,18 @@ void BaselineCompiler::GenerateCode() {
}
}
MaybeHandle<Code> BaselineCompiler::Build(LocalIsolate* local_isolate) {
MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
CodeDesc desc;
__ GetCode(local_isolate->GetMainThreadIsolateUnsafe(), &desc);
__ GetCode(isolate, &desc);
// Allocate the bytecode offset table.
Handle<ByteArray> bytecode_offset_table =
bytecode_offset_table_builder_.ToBytecodeOffsetTable(local_isolate);
bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
Factory::CodeBuilder code_builder(local_isolate, desc, CodeKind::BASELINE);
Factory::CodeBuilder code_builder(isolate, desc, CodeKind::BASELINE);
code_builder.set_bytecode_offset_table(bytecode_offset_table);
if (shared_function_info_->HasInterpreterData()) {
code_builder.set_interpreter_data(
handle(shared_function_info_->interpreter_data(), local_isolate));
handle(shared_function_info_->interpreter_data(), isolate));
} else {
code_builder.set_interpreter_data(bytecode_);
}
......
@@ -14,7 +14,6 @@
#include "src/base/threaded-list.h"
#include "src/base/vlq.h"
#include "src/baseline/baseline-assembler.h"
#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-register.h"
@@ -52,12 +51,12 @@ class BytecodeOffsetTableBuilder {
class BaselineCompiler {
public:
explicit BaselineCompiler(LocalIsolate* local_isolate,
explicit BaselineCompiler(Isolate* isolate,
Handle<SharedFunctionInfo> shared_function_info,
Handle<BytecodeArray> bytecode);
void GenerateCode();
MaybeHandle<Code> Build(LocalIsolate* local_isolate);
MaybeHandle<Code> Build(Isolate* isolate);
static int EstimateInstructionSize(BytecodeArray bytecode);
private:
......
@@ -60,10 +60,9 @@ MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared) {
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline);
Handle<BytecodeArray> bytecode(shared->GetBytecodeArray(isolate), isolate);
LocalIsolate* local_isolate = isolate->main_thread_local_isolate();
baseline::BaselineCompiler compiler(local_isolate, shared, bytecode);
baseline::BaselineCompiler compiler(isolate, shared, bytecode);
compiler.GenerateCode();
MaybeHandle<Code> code = compiler.Build(local_isolate);
MaybeHandle<Code> code = compiler.Build(isolate);
if (FLAG_print_code && !code.is_null()) {
code.ToHandleChecked()->Print();
}
......
@@ -1404,11 +1404,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool IsDeferredHandle(Address* location);
#endif // DEBUG
baseline::BaselineBatchCompiler* baseline_batch_compiler() {
DCHECK_NOT_NULL(baseline_batch_compiler_);
return baseline_batch_compiler_;
}
bool concurrent_recompilation_enabled() {
// Thread is only available with flag enabled.
DCHECK(optimizing_compile_dispatcher_ == nullptr ||
......
@@ -9,7 +9,6 @@
#include "src/execution/thread-id.h"
#include "src/handles/handles-inl.h"
#include "src/logging/local-logger.h"
#include "src/logging/runtime-call-stats-scope.h"
namespace v8 {
namespace internal {
@@ -24,10 +23,7 @@ LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind,
stack_limit_(kind == ThreadKind::kMain
? isolate->stack_guard()->real_climit()
: GetCurrentStackPosition() - FLAG_stack_size * KB),
runtime_call_stats_(kind == ThreadKind::kMain &&
runtime_call_stats == nullptr
? isolate->counters()->runtime_call_stats()
: runtime_call_stats) {}
runtime_call_stats_(runtime_call_stats) {}
LocalIsolate::~LocalIsolate() {
if (bigint_processor_) bigint_processor_->Destroy();
......
@@ -74,8 +74,6 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
return (v8::internal::LocalFactory*)this;
}
AccountingAllocator* allocator() { return isolate_->allocator(); }
bool has_pending_exception() const { return false; }
void RegisterDeserializerStarted();
@@ -115,10 +113,6 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
}
LocalIsolate* AsLocalIsolate() { return this; }
// TODO(victorgomes): Remove this when/if MacroAssembler supports LocalIsolate
// only constructor.
Isolate* GetMainThreadIsolateUnsafe() const { return isolate_; }
Object* pending_message_address() {
return isolate_->pending_message_address();
}
......
@@ -4,7 +4,6 @@
#include "src/execution/stack-guard.h"
#include "src/baseline/baseline-batch-compiler.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/execution/interrupts-scope.h"
#include "src/execution/isolate.h"
@@ -320,12 +319,6 @@ Object StackGuard::HandleInterrupts() {
isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
}
if (TestAndClear(&interrupt_flags, INSTALL_BASELINE_CODE)) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeBaselineConcurrentCompilation");
isolate_->baseline_batch_compiler()->InstallBatch();
}
if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
// Callbacks must be invoked outside of ExecutionAccess lock.
......
@@ -49,12 +49,11 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final {
V(TERMINATE_EXECUTION, TerminateExecution, 0) \
V(GC_REQUEST, GC, 1) \
V(INSTALL_CODE, InstallCode, 2) \
V(INSTALL_BASELINE_CODE, InstallBaselineCode, 3) \
V(API_INTERRUPT, ApiInterrupt, 4) \
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5) \
V(GROW_SHARED_MEMORY, GrowSharedMemory, 6) \
V(LOG_WASM_CODE, LogWasmCode, 7) \
V(WASM_CODE_GC, WasmCodeGC, 8)
V(API_INTERRUPT, ApiInterrupt, 3) \
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
V(GROW_SHARED_MEMORY, GrowSharedMemory, 5) \
V(LOG_WASM_CODE, LogWasmCode, 6) \
V(WASM_CODE_GC, WasmCodeGC, 7)
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
......
@@ -694,11 +694,6 @@ DEFINE_BOOL(always_sparkplug, false, "directly tier up to Sparkplug code")
#if ENABLE_SPARKPLUG
DEFINE_IMPLICATION(always_sparkplug, sparkplug)
DEFINE_BOOL(baseline_batch_compilation, true, "batch compile Sparkplug code")
DEFINE_BOOL(concurrent_sparkplug, false,
"compile Sparkplug code in a background thread")
// TODO(victorgomes): Currently concurrent compilation only works if we assume
// no write protect in code space.
DEFINE_NEG_IMPLICATION(concurrent_sparkplug, write_protect_code_memory)
#else
DEFINE_BOOL(baseline_batch_compilation, false, "batch compile Sparkplug code")
#endif
@@ -711,8 +706,7 @@ DEFINE_INT(baseline_batch_compilation_threshold, 4 * KB,
DEFINE_BOOL(trace_baseline, false, "trace baseline compilation")
DEFINE_BOOL(trace_baseline_batch_compilation, false,
"trace baseline batch compilation")
DEFINE_BOOL(trace_baseline_concurrent_compilation, false,
"trace baseline concurrent compilation")
#undef FLAG
#define FLAG FLAG_FULL
......
@@ -9,7 +9,6 @@
#include <vector>
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
namespace v8 {
......
@@ -69,24 +69,6 @@ Handle<AccessorPair> FactoryBase<Impl>::NewAccessorPair() {
return handle(accessors, isolate());
}
template <typename Impl>
Handle<CodeDataContainer> FactoryBase<Impl>::NewCodeDataContainer(
int flags, AllocationType allocation) {
Map map = read_only_roots().code_data_container_map();
int size = map.instance_size();
CodeDataContainer data_container = CodeDataContainer::cast(
AllocateRawWithImmortalMap(size, allocation, map));
DisallowGarbageCollection no_gc;
data_container.set_next_code_link(read_only_roots().undefined_value(),
SKIP_WRITE_BARRIER);
data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
impl()->SetExternalCodeSpaceInDataContainer(data_container);
}
data_container.clear_padding();
return handle(data_container, isolate());
}
template <typename Impl>
Handle<FixedArray> FactoryBase<Impl>::NewFixedArray(int length,
AllocationType allocation) {
......
@@ -94,10 +94,6 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
// Creates a new CodeDataContainer for a Code object.
Handle<CodeDataContainer> NewCodeDataContainer(int flags,
AllocationType allocation);
// Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(
int length, AllocationType allocation = AllocationType::kYoung);
......
@@ -71,23 +71,12 @@
#include "src/wasm/wasm-value.h"
#endif
#include "src/heap/local-heap-inl.h"
namespace v8 {
namespace internal {
Factory::CodeBuilder::CodeBuilder(Isolate* isolate, const CodeDesc& desc,
CodeKind kind)
: isolate_(isolate),
local_isolate_(isolate_->main_thread_local_isolate()),
code_desc_(desc),
kind_(kind),
position_table_(isolate_->factory()->empty_byte_array()) {}
Factory::CodeBuilder::CodeBuilder(LocalIsolate* local_isolate,
const CodeDesc& desc, CodeKind kind)
: isolate_(local_isolate->GetMainThreadIsolateUnsafe()),
local_isolate_(local_isolate),
code_desc_(desc),
kind_(kind),
position_table_(isolate_->factory()->empty_byte_array()) {}
@@ -97,10 +86,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
const auto factory = isolate_->factory();
// Allocate objects needed for code initialization.
Handle<ByteArray> reloc_info =
CompiledWithConcurrentBaseline()
? local_isolate_->factory()->NewByteArray(code_desc_.reloc_size,
AllocationType::kOld)
: factory->NewByteArray(code_desc_.reloc_size, AllocationType::kOld);
factory->NewByteArray(code_desc_.reloc_size, AllocationType::kOld);
Handle<CodeDataContainer> data_container;
// Use a canonical off-heap trampoline CodeDataContainer if possible.
@@ -118,14 +104,9 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
kind_specific_flags_);
data_container = canonical_code_data_container;
} else {
if (CompiledWithConcurrentBaseline()) {
data_container = local_isolate_->factory()->NewCodeDataContainer(
0, AllocationType::kOld);
} else {
data_container = factory->NewCodeDataContainer(
0, read_only_data_container_ ? AllocationType::kReadOnly
: AllocationType::kOld);
}
data_container = factory->NewCodeDataContainer(
0, read_only_data_container_ ? AllocationType::kReadOnly
: AllocationType::kOld);
data_container->set_kind_specific_flags(kind_specific_flags_,
kRelaxedStore);
}
@@ -151,12 +132,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
CodePageCollectionMemoryModificationScope code_allocation(heap);
Handle<Code> code;
if (CompiledWithConcurrentBaseline()) {
if (!AllocateConcurrentSparkplugCode(retry_allocation_or_fail)
.ToHandle(&code)) {
return MaybeHandle<Code>();
}
} else if (!AllocateCode(retry_allocation_or_fail).ToHandle(&code)) {
if (!AllocateCode(retry_allocation_or_fail).ToHandle(&code)) {
return MaybeHandle<Code>();
}
@@ -259,7 +235,6 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
return code;
}
// TODO(victorgomes): Unify the two AllocateCodes
MaybeHandle<Code> Factory::CodeBuilder::AllocateCode(
bool retry_allocation_or_fail) {
Heap* heap = isolate_->heap();
@@ -293,27 +268,6 @@ MaybeHandle<Code> Factory::CodeBuilder::AllocateCode(
return code;
}
MaybeHandle<Code> Factory::CodeBuilder::AllocateConcurrentSparkplugCode(
bool retry_allocation_or_fail) {
LocalHeap* heap = local_isolate_->heap();
AllocationType allocation_type = V8_EXTERNAL_CODE_SPACE_BOOL || is_executable_
? AllocationType::kCode
: AllocationType::kReadOnly;
const int object_size = Code::SizeFor(code_desc_.body_size());
HeapObject result =
heap->AllocateRaw(object_size, allocation_type).ToObject();
CHECK(!result.is_null());
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*isolate_->factory()->code_map(),
SKIP_WRITE_BARRIER);
Handle<Code> code = handle(Code::cast(result), local_isolate_);
DCHECK_IMPLIES(is_executable_, IsAligned(code->address(), kCodeAlignment));
return code;
}
MaybeHandle<Code> Factory::CodeBuilder::TryBuild() {
return BuildInternal(false);
}
@@ -1343,14 +1297,6 @@ void Factory::AddToScriptList(Handle<Script> script) {
isolate()->heap()->set_script_list(*scripts);
}
void Factory::SetExternalCodeSpaceInDataContainer(
CodeDataContainer data_container) {
DCHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
data_container.AllocateExternalPointerEntries(isolate());
data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
data_container.set_code_entry_point(isolate(), kNullAddress);
}
Handle<Script> Factory::CloneScript(Handle<Script> script) {
Heap* heap = isolate()->heap();
int script_id = isolate()->GetNextScriptId();
@@ -2144,6 +2090,23 @@ Handle<JSObject> Factory::NewExternal(void* value) {
return external;
}
Handle<CodeDataContainer> Factory::NewCodeDataContainer(
int flags, AllocationType allocation) {
CodeDataContainer data_container =
CodeDataContainer::cast(New(code_data_container_map(), allocation));
DisallowGarbageCollection no_gc;
data_container.set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
data_container.AllocateExternalPointerEntries(isolate());
data_container.set_code_cage_base(isolate()->code_cage_base());
data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
data_container.set_code_entry_point(isolate(), kNullAddress);
}
data_container.clear_padding();
return handle(data_container, isolate());
}
Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Address off_heap_entry) {
CHECK_NOT_NULL(isolate()->embedded_blob_code());
......
@@ -655,6 +655,10 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
// Creates a new CodeDataContainer for a Code object.
Handle<CodeDataContainer> NewCodeDataContainer(int flags,
AllocationType allocation);
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
@@ -840,10 +844,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
public:
CodeBuilder(Isolate* isolate, const CodeDesc& desc, CodeKind kind);
// TODO(victorgomes): Remove Isolate dependency from CodeBuilder.
CodeBuilder(LocalIsolate* local_isolate, const CodeDesc& desc,
CodeKind kind);
// Builds a new code object (fully initialized). All header fields of the
// returned object are immutable and the code object is write protected.
V8_WARN_UNUSED_RESULT Handle<Code> Build();
@@ -934,18 +934,11 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
bool CompiledWithConcurrentBaseline() const {
return FLAG_concurrent_sparkplug && kind_ == CodeKind::BASELINE;
}
private:
MaybeHandle<Code> BuildInternal(bool retry_allocation_or_fail);
MaybeHandle<Code> AllocateCode(bool retry_allocation_or_fail);
MaybeHandle<Code> AllocateConcurrentSparkplugCode(
bool retry_allocation_or_fail);
Isolate* const isolate_;
LocalIsolate* local_isolate_;
const CodeDesc& code_desc_;
const CodeKind kind_;
@@ -986,7 +979,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
AllocationType AllocationTypeForInPlaceInternalizableString();
void AddToScriptList(Handle<Script> shared);
void SetExternalCodeSpaceInDataContainer(CodeDataContainer data_container);
// ------
HeapObject AllocateRawWithAllocationSite(
......
@@ -4632,15 +4632,6 @@ void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
#endif
}
void Heap::RegisterCodeObject(Handle<Code> code) {
Address addr = code->address();
if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL && code_space()->Contains(addr)) {
MemoryChunk::FromHeapObject(*code)
->GetCodeObjectRegistry()
->RegisterNewlyAllocatedCodeObject(addr);
}
}
// TODO(ishell): move builtin accessors out from Heap.
Code Heap::builtin(Builtin builtin) {
DCHECK(Builtins::IsBuiltinId(builtin));
......
@@ -1666,8 +1666,6 @@ class Heap {
return result;
}
void RegisterCodeObject(Handle<Code> code);
static const char* GarbageCollectionReasonToString(
GarbageCollectionReason gc_reason);
......
@@ -161,11 +161,6 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size) {
return AllocateRawBackground(local_heap, object_size, NOT_EXECUTABLE);
}
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size, Executability executable) {
DCHECK(!FLAG_enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
@@ -174,7 +169,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
return AllocationResult::Retry(identity());
}
LargePage* page = AllocateLargePage(object_size, executable);
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
if (page == nullptr) return AllocationResult::Retry(identity());
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
HeapObject object = page->GetObject();
@@ -553,13 +548,6 @@ AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}
AllocationResult CodeLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size) {
DCHECK(!FLAG_enable_third_party_heap);
return OldLargeObjectSpace::AllocateRawBackground(local_heap, object_size,
EXECUTABLE);
}
void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
OldLargeObjectSpace::AddPage(page, object_size);
InsertChunkMapEntries(page);
......
@@ -171,8 +171,6 @@ class OldLargeObjectSpace : public LargeObjectSpace {
explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRawBackground(
LocalHeap* local_heap, int object_size, Executability executable);
};
class NewLargeObjectSpace : public LargeObjectSpace {
@@ -202,9 +200,6 @@ class CodeLargeObjectSpace : public OldLargeObjectSpace {
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawBackground(LocalHeap* local_heap, int object_size);
// Finds a large object page containing the given address, returns nullptr if
// such a page doesn't exist.
LargePage* FindPage(Address a);
......
@@ -72,10 +72,6 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
// ------
void AddToScriptList(Handle<Script> shared);
void SetExternalCodeSpaceInDataContainer(CodeDataContainer data_container) {
UNREACHABLE();
}
// ------
ReadOnlyRoots roots_;
......
@@ -35,24 +35,8 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
Safepoint();
bool large_object = size_in_bytes > heap_->MaxRegularHeapObjectSize(type);
if (type == AllocationType::kCode) {
AllocationResult alloc;
if (large_object) {
alloc =
heap()->code_lo_space()->AllocateRawBackground(this, size_in_bytes);
} else {
alloc =
code_space_allocator()->AllocateRaw(size_in_bytes, alignment, origin);
}
HeapObject object;
if (alloc.To(&object) && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
heap()->ZapCodeObject(object.address(), size_in_bytes);
}
return alloc;
}
CHECK_EQ(type, AllocationType::kOld);
if (large_object)
return heap()->lo_space()->AllocateRawBackground(this, size_in_bytes);
else
......
@@ -53,8 +53,7 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
handles_(new LocalHandles),
persistent_handles_(std::move(persistent_handles)),
marking_barrier_(new MarkingBarrier(this)),
old_space_allocator_(this, heap->old_space()),
code_space_allocator_(this, heap->code_space()) {
old_space_allocator_(this, heap->old_space()) {
heap_->safepoint()->AddLocalHeap(this, [this] {
if (!is_main_thread()) {
WriteBarrier::SetForThread(marking_barrier_.get());
@@ -78,7 +77,6 @@ LocalHeap::~LocalHeap() {
heap_->safepoint()->RemoveLocalHeap(this, [this] {
old_space_allocator_.FreeLinearAllocationArea();
code_space_allocator_.FreeLinearAllocationArea();
if (!is_main_thread()) {
marking_barrier_->Publish();
@@ -228,22 +226,18 @@ void LocalHeap::SafepointSlowPath() {
void LocalHeap::FreeLinearAllocationArea() {
old_space_allocator_.FreeLinearAllocationArea();
code_space_allocator_.FreeLinearAllocationArea();
}
void LocalHeap::MakeLinearAllocationAreaIterable() {
old_space_allocator_.MakeLinearAllocationAreaIterable();
code_space_allocator_.MakeLinearAllocationAreaIterable();
}
void LocalHeap::MarkLinearAllocationAreaBlack() {
old_space_allocator_.MarkLinearAllocationAreaBlack();
code_space_allocator_.MarkLinearAllocationAreaBlack();
}
void LocalHeap::UnmarkLinearAllocationArea() {
old_space_allocator_.UnmarkLinearAllocationArea();
code_space_allocator_.UnmarkLinearAllocationArea();
}
bool LocalHeap::TryPerformCollection() {
......
@@ -94,7 +94,6 @@ class V8_EXPORT_PRIVATE LocalHeap {
MarkingBarrier* marking_barrier() { return marking_barrier_.get(); }
ConcurrentAllocator* old_space_allocator() { return &old_space_allocator_; }
ConcurrentAllocator* code_space_allocator() { return &code_space_allocator_; }
// Mark/Unmark linear allocation areas black. Used for black allocation.
void MarkLinearAllocationAreaBlack();
@@ -290,7 +289,6 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::vector<std::pair<GCEpilogueCallback*, void*>> gc_epilogue_callbacks_;
ConcurrentAllocator old_space_allocator_;
ConcurrentAllocator code_space_allocator_;
friend class CollectionBarrier;
friend class ConcurrentAllocator;
......
@@ -568,8 +568,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
LocalHeap* local_heap, size_t min_size_in_bytes, size_t max_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
DCHECK(!is_compaction_space());
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
identity() == MAP_SPACE);
DCHECK(identity() == OLD_SPACE || identity() == MAP_SPACE);
DCHECK(origin == AllocationOrigin::kRuntime ||
origin == AllocationOrigin::kGC);
@@ -641,8 +640,7 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
AllocationOrigin origin) {
base::MutexGuard lock(&space_mutex_);
DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
identity() == MAP_SPACE);
DCHECK(identity() == OLD_SPACE || identity() == MAP_SPACE);
size_t new_node_size = 0;
FreeSpace new_node =
......
@@ -392,9 +392,6 @@ class RuntimeCallTimer final {
V(CompileBaseline) \
V(CompileBaselinePreVisit) \
V(CompileBaselineVisit) \
V(CompileBackgroundBaselinePreVisit) \
V(CompileBackgroundBaselineVisit) \
V(CompileBaselineFinalization) \
V(CompileCollectSourcePositions) \
V(CompileDeserialize) \
V(CompileEnqueueOnDispatcher) \
......
@@ -1051,13 +1051,13 @@ void BytecodeArray::set_incoming_new_target_or_generator_register(
}
int BytecodeArray::osr_loop_nesting_level() const {
return ACQUIRE_READ_INT8_FIELD(*this, kOsrLoopNestingLevelOffset);
return ReadField<int8_t>(kOsrLoopNestingLevelOffset);
}
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
RELEASE_WRITE_INT8_FIELD(*this, kOsrLoopNestingLevelOffset, depth);
WriteField<int8_t>(kOsrLoopNestingLevelOffset, depth);
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
......
@@ -28,10 +28,6 @@ class BytecodeArray;
class CodeDataContainer;
class CodeDesc;
class LocalFactory;
template <typename Impl>
class FactoryBase;
namespace interpreter {
class Register;
} // namespace interpreter
@@ -122,8 +118,6 @@ class CodeDataContainer : public HeapObject {
inline void set_code_entry_point(Isolate* isolate, Address value);
friend Factory;
friend FactoryBase<Factory>;
friend FactoryBase<LocalFactory>;
OBJECT_CONSTRUCTORS(CodeDataContainer, HeapObject);
};
......
@@ -526,10 +526,6 @@
} while (false)
#endif
#define ACQUIRE_READ_INT8_FIELD(p, offset) \
static_cast<int8_t>(base::Acquire_Load( \
reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
#define ACQUIRE_READ_INT32_FIELD(p, offset) \
static_cast<int32_t>(base::Acquire_Load( \
reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
@@ -572,10 +568,6 @@
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic32>(value));
#define RELEASE_WRITE_INT8_FIELD(p, offset, value) \
base::Release_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
#define RELEASE_WRITE_UINT32_FIELD(p, offset, value) \
base::Release_Store( \
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
......
@@ -23,7 +23,6 @@ inline LockedQueue<Record>::LockedQueue() {
head_ = new Node();
CHECK_NOT_NULL(head_);
tail_ = head_;
size_ = 0;
}
template <typename Record>
@@ -47,7 +46,6 @@ inline void LockedQueue<Record>::Enqueue(Record record) {
base::MutexGuard guard(&tail_mutex_);
tail_->next.SetValue(n);
tail_ = n;
size_++;
}
}
@@ -61,8 +59,6 @@ inline bool LockedQueue<Record>::Dequeue(Record* record) {
if (next_node == nullptr) return false;
*record = std::move(next_node->value);
head_ = next_node;
DCHECK_GT(size_.load(), 0);
size_--;
}
delete old_head;
return true;
@@ -83,11 +79,6 @@ inline bool LockedQueue<Record>::Peek(Record* record) const {
return true;
}
template <typename Record>
inline size_t LockedQueue<Record>::size() const {
return size_;
}
} // namespace internal
} // namespace v8
......
@@ -5,8 +5,6 @@
#ifndef V8_UTILS_LOCKED_QUEUE_H_
#define V8_UTILS_LOCKED_QUEUE_H_
#include <atomic>
#include "src/base/platform/platform.h"
#include "src/utils/allocation.h"
@@ -29,7 +27,6 @@ class LockedQueue final {
inline bool Dequeue(Record* record);
inline bool IsEmpty() const;
inline bool Peek(Record* record) const;
inline size_t size() const;
private:
struct Node;
@@ -38,7 +35,6 @@ class LockedQueue final {
base::Mutex tail_mutex_;
Node* head_;
Node* tail_;
std::atomic<size_t> size_;
};
} // namespace internal
......
@@ -7,7 +7,6 @@
// Flags: --no-always-sparkplug --lazy-feedback-allocation
// Flags: --flush-baseline-code --flush-bytecode --no-opt
// Flags: --no-stress-concurrent-inlining
// Flags: --no-concurrent-sparkplug
function HasBaselineCode(f) {
let opt_status = %GetOptimizationStatus(f);
......