Commit 22269111 authored by Leszek Swirski's avatar Leszek Swirski Committed by V8 LUCI CQ

[maglev] Cache maglev code on the feedback vector

In the future we may want to cache this in a slot separate from the TurboFan
code slot, but for now cache Maglev code in the same optimized code slot on
the feedback vector.

Bug: v8:7700
Change-Id: Idd40a024cc9beb9b4da06a88a3789d822ddd4ab7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3622916
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80354}
parent e9244f37
......@@ -913,30 +913,28 @@ class OptimizedCodeCache : public AllStatic {
return handle(code, isolate);
}
static void Insert(Isolate* isolate,
OptimizedCompilationInfo* compilation_info) {
const CodeKind kind = compilation_info->code_kind();
static void Insert(Isolate* isolate, JSFunction function,
BytecodeOffset osr_offset, CodeT code,
bool is_function_context_specializing) {
const CodeKind kind = code.kind();
if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
Handle<JSFunction> function = compilation_info->closure();
Handle<CodeT> code = ToCodeT(compilation_info->code(), isolate);
const BytecodeOffset osr_offset = compilation_info->osr_offset();
FeedbackVector feedback_vector = function->feedback_vector();
FeedbackVector feedback_vector = function.feedback_vector();
if (IsOSR(osr_offset)) {
DCHECK(CodeKindCanOSR(kind));
DCHECK(!compilation_info->function_context_specializing());
SharedFunctionInfo shared = function->shared();
DCHECK(!is_function_context_specializing);
SharedFunctionInfo shared = function.shared();
Handle<BytecodeArray> bytecode(shared.GetBytecodeArray(isolate), isolate);
interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt());
DCHECK_EQ(it.current_bytecode(), interpreter::Bytecode::kJumpLoop);
feedback_vector.SetOptimizedOsrCode(it.GetSlotOperand(2), *code);
feedback_vector.SetOptimizedOsrCode(it.GetSlotOperand(2), code);
return;
}
DCHECK(!IsOSR(osr_offset));
if (compilation_info->function_context_specializing()) {
if (is_function_context_specializing) {
// Function context specialization folds-in the function context, so no
// sharing can occur. Make sure the optimized code cache is cleared.
if (feedback_vector.has_optimized_code()) {
......@@ -996,7 +994,10 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate,
// Success!
job->RecordCompilationStats(ConcurrencyMode::kSynchronous, isolate);
DCHECK(!isolate->has_pending_exception());
OptimizedCodeCache::Insert(isolate, compilation_info);
OptimizedCodeCache::Insert(isolate, *compilation_info->closure(),
compilation_info->osr_offset(),
ToCodeT(*compilation_info->code()),
compilation_info->function_context_specializing());
job->RecordFunctionCompilation(LogEventListener::LAZY_COMPILE_TAG, isolate);
return true;
}
......@@ -1175,6 +1176,9 @@ MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
}
RecordMaglevFunctionCompilation(isolate, function);
const bool kIsContextSpecializing = false;
OptimizedCodeCache::Insert(isolate, *function, osr_offset, function->code(),
kIsContextSpecializing);
return handle(function->code(), isolate);
}
......@@ -3420,7 +3424,10 @@ bool Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
isolate);
if (V8_LIKELY(use_result)) {
ResetTieringState(*function, osr_offset);
OptimizedCodeCache::Insert(isolate, compilation_info);
OptimizedCodeCache::Insert(
isolate, *compilation_info->closure(),
compilation_info->osr_offset(), ToCodeT(*compilation_info->code()),
compilation_info->function_context_specializing());
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
if (!IsOSR(osr_offset)) {
function->set_code(*compilation_info->code(), kReleaseStore);
......@@ -3446,6 +3453,9 @@ bool Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
Isolate* isolate) {
#ifdef V8_ENABLE_MAGLEV
VMState<COMPILER> state(isolate);
const bool kIsContextSpecializing = false;
OptimizedCodeCache::Insert(isolate, *job->function(), BytecodeOffset::None(),
job->function()->code(), kIsContextSpecializing);
RecordMaglevFunctionCompilation(isolate, job->function());
#endif
return CompilationJob::SUCCEEDED;
......
......@@ -100,7 +100,7 @@ inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
// TODO(jgruber): Rename or remove this predicate. Currently it means 'is this
// kind stored either in the FeedbackVector cache, or in the OSR cache?'.
inline constexpr bool CodeKindIsStoredInOptimizedCodeCache(CodeKind kind) {
  // NOTE(review): the pasted diff left both the old (TURBOFAN-only) and new
  // return statements in place, making the second one unreachable. Resolved
  // to the post-commit behavior: Maglev code is now cached in the same
  // optimized code slot on the FeedbackVector as TurboFan code.
  return kind == CodeKind::MAGLEV || kind == CodeKind::TURBOFAN;
}
inline CodeKind CodeKindForTopTier() { return CodeKind::TURBOFAN; }
......
......@@ -385,9 +385,11 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
if (ticks < Smi::kMaxValue) set_profiler_ticks(ticks + 1);
}
void FeedbackVector::SetOptimizedCode(Handle<CodeT> code) {
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
void FeedbackVector::SetOptimizedCode(CodeT code) {
DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
// We should set optimized code only when there is no valid optimized code.
// TODO(v8:7700): Update this check once optimized code can be promoted to a
// higher tier (in particular, maglev to turbofan).
DCHECK(!has_optimized_code() ||
optimized_code().marked_for_deoptimization() ||
FLAG_stress_concurrent_inlining_attach_code);
......@@ -396,8 +398,10 @@ void FeedbackVector::SetOptimizedCode(Handle<CodeT> code) {
// re-mark the function for non-concurrent optimization after an OSR. We
// should avoid these cases and also check that marker isn't
// TieringState::kRequestTurbofan*.
set_maybe_optimized_code(HeapObjectReference::Weak(*code), kReleaseStore);
set_maybe_optimized_code(HeapObjectReference::Weak(code), kReleaseStore);
int32_t state = flags();
// TODO(leszeks): Reconsider whether this could clear the tiering state vs.
// the callers doing so.
state = TieringStateBits::update(state, TieringState::kNone);
state = MaybeHasOptimizedCodeBit::update(state, true);
set_flags(state);
......
......@@ -254,7 +254,7 @@ class FeedbackVector
// the world, thus 'maybe'.
inline bool maybe_has_optimized_code() const;
inline void set_maybe_has_optimized_code(bool value);
void SetOptimizedCode(Handle<CodeT> code);
void SetOptimizedCode(CodeT code);
void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
const char* reason);
void ClearOptimizedCode();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment