Commit 3f5a3df6 authored by Jakob Gruber, committed by V8 LUCI CQ

[osr] Fall back to synchronous OSR on cache mismatches

If we've already cached OSR'd code for the current function but with a
different osr offset, fall back to synchronous compilation. This avoids
degenerate cases where we repeatedly spawn OSR jobs but then fail to
install them.
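
In sketch form (a simplified view of the runtime change below, using the
names introduced by this CL; concurrent_osr_enabled() is shorthand for the
actual flag checks, not a real function):

  // Prefer concurrent OSR, but if the cache already holds an entry for
  // this function at a *different* osr offset, compile synchronously
  // instead of spawning yet another concurrent job.
  ConcurrencyMode mode = concurrent_osr_enabled()  // hypothetical shorthand
                             ? ConcurrencyMode::kConcurrent
                             : ConcurrencyMode::kSynchronous;
  base::Optional<BytecodeOffset> cached =
      function->native_context().osr_code_cache().FirstOsrOffsetFor(
          function->shared());
  if (cached.has_value() && cached.value() != osr_offset) {
    mode = ConcurrencyMode::kSynchronous;  // cache mismatch: go synchronous.
  }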

Drive-by: More consistent --trace-osr output.
Drive-by: Rename kCompileForOnStackReplacement to kCompileOptimizedOSR
for name consistency.
Drive-by: Add JSFunction::DebugNameCStr() for more convenient PrintF's.

Bug: v8:12161
Change-Id: I2b4a65bc9e082d85d7048a3e92ef86b07d396687
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3560431
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79761}
parent 3111db91
@@ -1824,7 +1824,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   ASM_CODE_COMMENT(masm);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   // If the code object is null, just return to the caller.
...
@@ -2077,7 +2077,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   ASM_CODE_COMMENT(masm);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   // If the code object is null, just return to the caller.
...
@@ -2806,7 +2806,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   ASM_CODE_COMMENT(masm);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   Label skip;
...
@@ -1803,7 +1803,7 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   // If the code object is null, just return to the caller.
...
@@ -1795,7 +1795,7 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   // If the code object is null, just return to the caller.
...
@@ -1800,7 +1800,7 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   // If the code object is null, just return to the caller.
...
@@ -1673,7 +1673,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   // If the code object is null, just return to the caller.
...
@@ -1874,7 +1874,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   ASM_CODE_COMMENT(masm);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   // If the code object is null, just return to the caller.
...
@@ -243,7 +243,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   ASM_CODE_COMMENT(masm);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   // If the code object is null, just return to the caller.
...
@@ -2729,7 +2729,7 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
   }
   Label skip;
...
@@ -121,22 +121,25 @@ class CompilerTracer : public AllStatic {
   }

   static void TraceOptimizeOSR(Isolate* isolate, Handle<JSFunction> function,
-                               BytecodeOffset osr_offset) {
+                               BytecodeOffset osr_offset,
+                               ConcurrencyMode mode) {
     if (!FLAG_trace_osr) return;
     CodeTracer::Scope scope(isolate->GetCodeTracer());
-    PrintF(scope.file(), "[OSR - Started: ");
-    function->PrintName(scope.file());
-    PrintF(scope.file(), " at OSR bytecode offset %d]\n", osr_offset.ToInt());
+    PrintF(scope.file(),
+           "[OSR - started. function: %s, osr offset: %d, mode: %s]\n",
+           function->DebugNameCStr().get(), osr_offset.ToInt(), ToString(mode));
   }

   static void TraceOptimizeOSRUnavailable(Isolate* isolate,
                                           Handle<JSFunction> function,
-                                          BytecodeOffset osr_offset) {
+                                          BytecodeOffset osr_offset,
+                                          ConcurrencyMode mode) {
     if (!FLAG_trace_osr) return;
     CodeTracer::Scope scope(isolate->GetCodeTracer());
-    PrintF(scope.file(), "[OSR - Unavailable (failed or in progress): ");
-    function->PrintName(scope.file());
-    PrintF(scope.file(), " at OSR bytecode offset %d]\n", osr_offset.ToInt());
+    PrintF(scope.file(),
+           "[OSR - unavailable (failed or in progress). function: %s, osr "
+           "offset: %d, mode: %s]\n",
+           function->DebugNameCStr().get(), osr_offset.ToInt(), ToString(mode));
   }

   static void TraceCompilationStats(Isolate* isolate,
@@ -3405,12 +3408,13 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
   Handle<BytecodeArray> bytecode(frame->GetBytecodeArray(), isolate);
   bytecode->reset_osr_urgency();

-  CompilerTracer::TraceOptimizeOSR(isolate, function, osr_offset);
+  CompilerTracer::TraceOptimizeOSR(isolate, function, osr_offset, mode);
   MaybeHandle<CodeT> result = GetOrCompileOptimized(
       isolate, function, mode, CodeKind::TURBOFAN, osr_offset, frame);

   if (result.is_null()) {
-    CompilerTracer::TraceOptimizeOSRUnavailable(isolate, function, osr_offset);
+    CompilerTracer::TraceOptimizeOSRUnavailable(isolate, function, osr_offset,
+                                                mode);
   }

   return result;
...
@@ -100,19 +100,17 @@ namespace {

 void TraceInOptimizationQueue(JSFunction function) {
   if (FLAG_trace_opt_verbose) {
-    PrintF("[not marking function ");
-    function.PrintName();
-    PrintF(" for optimization: already queued]\n");
+    PrintF("[not marking function %s for optimization: already queued]\n",
+           function.DebugNameCStr().get());
   }
 }

 void TraceHeuristicOptimizationDisallowed(JSFunction function) {
   if (FLAG_trace_opt_verbose) {
-    PrintF("[not marking function ");
-    function.PrintName();
     PrintF(
-        " for optimization: marked with "
-        "%%PrepareFunctionForOptimization for manual optimization]\n");
+        "[not marking function %s for optimization: marked with "
+        "%%PrepareFunctionForOptimization for manual optimization]\n",
+        function.DebugNameCStr().get());
   }
 }
@@ -153,21 +151,19 @@ namespace {
 bool HaveCachedOSRCodeForCurrentBytecodeOffset(UnoptimizedFrame* frame,
                                                int* osr_urgency_out) {
   JSFunction function = frame->function();
-  BytecodeArray bytecode = frame->GetBytecodeArray();
-  const int bytecode_offset = frame->GetBytecodeOffset();
-  if (V8_UNLIKELY(function.shared().osr_code_cache_state() != kNotCached)) {
-    OSROptimizedCodeCache cache = function.native_context().osr_code_cache();
-    interpreter::BytecodeArrayIterator iterator(
-        handle(bytecode, frame->isolate()));
-    for (int jump_offset : cache.GetBytecodeOffsetsFromSFI(function.shared())) {
-      iterator.SetOffset(jump_offset);
-      if (base::IsInRange(bytecode_offset, iterator.GetJumpTargetOffset(),
-                          jump_offset)) {
-        int loop_depth = iterator.GetImmediateOperand(1);
-        // `+ 1` because osr_urgency is an exclusive upper limit on the depth.
-        *osr_urgency_out = loop_depth + 1;
-        return true;
-      }
+  const int current_offset = frame->GetBytecodeOffset();
+  OSROptimizedCodeCache cache = function.native_context().osr_code_cache();
+  interpreter::BytecodeArrayIterator iterator(
+      handle(frame->GetBytecodeArray(), frame->isolate()));
+  for (BytecodeOffset osr_offset : cache.OsrOffsetsFor(function.shared())) {
+    DCHECK(!osr_offset.IsNone());
+    iterator.SetOffset(osr_offset.ToInt());
+    if (base::IsInRange(current_offset, iterator.GetJumpTargetOffset(),
+                        osr_offset.ToInt())) {
+      int loop_depth = iterator.GetImmediateOperand(1);
+      // `+ 1` because osr_urgency is an exclusive upper limit on the depth.
+      *osr_urgency_out = loop_depth + 1;
+      return true;
     }
   }
   return false;
@@ -227,14 +223,15 @@ void TrySetOsrUrgency(Isolate* isolate, JSFunction function, int osr_urgency) {
   // We've passed all checks - bump the OSR urgency.

+  BytecodeArray bytecode = shared.GetBytecodeArray(isolate);
   if (V8_UNLIKELY(FLAG_trace_osr)) {
     CodeTracer::Scope scope(isolate->GetCodeTracer());
-    PrintF(scope.file(), "[OSR - arming back edges in ");
-    function.PrintName(scope.file());
-    PrintF(scope.file(), "]\n");
+    PrintF(scope.file(),
+           "[OSR - setting osr urgency. function: %s, old urgency: %d, new "
+           "urgency: %d]\n",
+           function.DebugNameCStr().get(), bytecode.osr_urgency(), osr_urgency);
   }

-  BytecodeArray bytecode = shared.GetBytecodeArray(isolate);
   DCHECK_GE(osr_urgency, bytecode.osr_urgency());  // Never lower urgency here.
   bytecode.set_osr_urgency(osr_urgency);
 }
@@ -352,9 +349,8 @@ OptimizationDecision TieringManager::ShouldOptimize(JSFunction function,
     // small, optimistically optimize it now.
     return OptimizationDecision::TurbofanSmallFunction();
   } else if (FLAG_trace_opt_verbose) {
-    PrintF("[not yet optimizing ");
-    function.PrintName();
-    PrintF(", not enough ticks: %d/%d and ", ticks, ticks_for_optimization);
+    PrintF("[not yet optimizing %s, not enough ticks: %d/%d and ",
+           function.DebugNameCStr().get(), ticks, ticks_for_optimization);
     if (any_ic_changed_) {
       PrintF("ICs changed]\n");
     } else {
...
@@ -1110,8 +1110,12 @@ int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
   return initial_map().instance_size();
 }

+std::unique_ptr<char[]> JSFunction::DebugNameCStr() {
+  return shared().DebugNameCStr();
+}
+
 void JSFunction::PrintName(FILE* out) {
-  PrintF(out, "%s", shared().DebugNameCStr().get());
+  PrintF(out, "%s", DebugNameCStr().get());
 }

 namespace {
...
@@ -300,7 +300,7 @@ class JSFunction : public TorqueGeneratedJSFunction<
                    : JSFunction::kSizeWithoutPrototype;
   }

-  // Prints the name of the function using PrintF.
+  std::unique_ptr<char[]> DebugNameCStr();
   void PrintName(FILE* out = stdout);

   // Calculate the instance size and in-object properties count.
...
@@ -133,16 +133,36 @@ void OSROptimizedCodeCache::EvictDeoptimizedCode(Isolate* isolate) {
   }
 }

-std::vector<int> OSROptimizedCodeCache::GetBytecodeOffsetsFromSFI(
+std::vector<BytecodeOffset> OSROptimizedCodeCache::OsrOffsetsFor(
     SharedFunctionInfo shared) {
-  std::vector<int> bytecode_offsets;
   DisallowGarbageCollection gc;
+  const OSRCodeCacheStateOfSFI state = shared.osr_code_cache_state();
+  if (state == kNotCached) return {};
+  std::vector<BytecodeOffset> offsets;
   for (int index = 0; index < length(); index += kEntryLength) {
-    if (GetSFIFromEntry(index) == shared) {
-      bytecode_offsets.push_back(GetBytecodeOffsetFromEntry(index).ToInt());
-    }
+    if (GetSFIFromEntry(index) != shared) continue;
+    offsets.emplace_back(GetBytecodeOffsetFromEntry(index));
+    if (state == kCachedOnce) return offsets;
   }
-  return bytecode_offsets;
+  return offsets;
+}
+
+base::Optional<BytecodeOffset> OSROptimizedCodeCache::FirstOsrOffsetFor(
+    SharedFunctionInfo shared) {
+  DisallowGarbageCollection gc;
+  const OSRCodeCacheStateOfSFI state = shared.osr_code_cache_state();
+  if (state == kNotCached) return {};
+  for (int index = 0; index < length(); index += kEntryLength) {
+    if (GetSFIFromEntry(index) != shared) continue;
+    return GetBytecodeOffsetFromEntry(index);
+  }
+  return {};
 }

 int OSROptimizedCodeCache::GrowOSRCache(
...
@@ -57,6 +57,9 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
   CodeT TryGet(SharedFunctionInfo shared, BytecodeOffset osr_offset,
                Isolate* isolate);

+  std::vector<BytecodeOffset> OsrOffsetsFor(SharedFunctionInfo shared);
+  base::Optional<BytecodeOffset> FirstOsrOffsetFor(SharedFunctionInfo shared);
+
   // Remove all code objects marked for deoptimization from OSR code cache.
   void EvictDeoptimizedCode(Isolate* isolate);
@@ -67,10 +70,6 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
   // Sets the OSR optimized code cache to an empty array.
   static void Clear(Isolate* isolate, NativeContext context);

-  // Returns vector of bytecode offsets corresponding to the shared function
-  // |shared|
-  std::vector<int> GetBytecodeOffsetsFromSFI(SharedFunctionInfo shared);
-
   enum OSRCodeCacheConstants {
     kSharedOffset,
     kCachedCodeOffset,
...
@@ -15,7 +15,6 @@
 #include "src/objects/function-kind.h"
 #include "src/objects/function-syntax-kind.h"
 #include "src/objects/objects.h"
-#include "src/objects/osr-optimized-code-cache.h"
 #include "src/objects/script.h"
 #include "src/objects/slots.h"
 #include "src/objects/smi.h"
@@ -42,6 +41,8 @@ class WasmCapiFunctionData;
 class WasmExportedFunctionData;
 class WasmJSFunctionData;

+enum OSRCodeCacheStateOfSFI : uint8_t;
+
 namespace wasm {
 struct WasmModule;
 class ValueType;
...
@@ -56,9 +56,7 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
 #ifdef DEBUG
   if (FLAG_trace_lazy && !sfi->is_compiled()) {
-    PrintF("[unoptimized: ");
-    function->PrintName();
-    PrintF("]\n");
+    PrintF("[unoptimized: %s]\n", function->DebugNameCStr().get());
   }
 #endif
@@ -225,7 +223,7 @@ RUNTIME_FUNCTION(Runtime_VerifyType) {
   return *obj;
 }

-RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
+RUNTIME_FUNCTION(Runtime_CompileOptimizedOSR) {
   HandleScope handle_scope(isolate);
   DCHECK_EQ(0, args.length());
   DCHECK(FLAG_use_osr);
@@ -244,21 +242,40 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
   BytecodeOffset osr_offset = BytecodeOffset(frame->GetBytecodeOffset());
   DCHECK(!osr_offset.IsNone());

-  // TODO(v8:12161): If cache exists with different offset: kSynchronous.
   ConcurrencyMode mode =
-      isolate->concurrent_recompilation_enabled() && FLAG_concurrent_osr
+      V8_LIKELY(isolate->concurrent_recompilation_enabled() &&
+                FLAG_concurrent_osr)
           ? ConcurrencyMode::kConcurrent
           : ConcurrencyMode::kSynchronous;

+  // The synchronous fallback mechanism triggers if we've already got OSR'd
+  // code for the current function but at a different OSR offset - that may
+  // indicate we're having trouble hitting the correct JumpLoop for code
+  // installation. In this case, fall back to synchronous OSR.
   Handle<JSFunction> function(frame->function(), isolate);
-  MaybeHandle<CodeT> maybe_result =
-      Compiler::CompileOptimizedOSR(isolate, function, osr_offset, frame, mode);
+  base::Optional<BytecodeOffset> cached_osr_offset =
+      function->native_context().osr_code_cache().FirstOsrOffsetFor(
+          function->shared());
+  if (cached_osr_offset.has_value() &&
+      cached_osr_offset.value() != osr_offset) {
+    if (V8_UNLIKELY(FLAG_trace_osr)) {
+      CodeTracer::Scope scope(isolate->GetCodeTracer());
+      PrintF(scope.file(),
+             "[OSR - falling back to synchronous compilation due to mismatched "
+             "cached entry. function: %s, requested: %d, cached: %d]\n",
+             function->DebugNameCStr().get(), osr_offset.ToInt(),
+             cached_osr_offset.value().ToInt());
+    }
+    mode = ConcurrencyMode::kSynchronous;
+  }

   Handle<CodeT> result;
-  if (!maybe_result.ToHandle(&result)) {
-    // No OSR'd code available.
-    // TODO(v8:12161): Distinguish between actual failure and scheduling a
-    // concurrent job.
+  if (!Compiler::CompileOptimizedOSR(isolate, function, osr_offset, frame, mode)
+           .ToHandle(&result)) {
+    // An empty result can mean one of two things:
+    // 1) we've started a concurrent compilation job - everything is fine.
+    // 2) synchronous compilation failed for some reason.
     if (!function->HasAttachedOptimizedCode()) {
       function->set_code(function->shared().GetCode(), kReleaseStore);
     }
@@ -278,9 +295,9 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
   if (FLAG_trace_osr) {
     CodeTracer::Scope scope(isolate->GetCodeTracer());
     PrintF(scope.file(),
-           "[OSR - Entry at OSR bytecode offset %d, offset %d in optimized "
-           "code]\n",
-           osr_offset.ToInt(), data.OsrPcOffset().value());
+           "[OSR - entry. function: %s, osr offset: %d, pc offset: %d]\n",
+           function->DebugNameCStr().get(), osr_offset.ToInt(),
+           data.OsrPcOffset().value());
   }

   if (function->feedback_vector().invocation_count() <= 1 &&
@@ -313,9 +330,10 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
     // compile for OSR again.
     if (FLAG_trace_osr) {
       CodeTracer::Scope scope(isolate->GetCodeTracer());
-      PrintF(scope.file(), "[OSR - Re-marking ");
-      function->PrintName(scope.file());
-      PrintF(scope.file(), " for non-concurrent optimization]\n");
+      PrintF(scope.file(),
+             "[OSR - forcing synchronous optimization on next entry. function: "
+             "%s]\n",
+             function->DebugNameCStr().get());
     }
     function->set_tiering_state(TieringState::kRequestTurbofan_Synchronous);
   }
...
@@ -661,7 +661,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
     USE(unused_result);

     // Finalize again to finish the queued job. The next call into
-    // CompileForOnStackReplacement will pick up the cached Code object.
+    // Runtime::kCompileOptimizedOSR will pick up the cached Code object.
     FinalizeOptimization(isolate);
   }
...
@@ -107,7 +107,7 @@ namespace internal {
   F(WeakCollectionSet, 4, 1)

 #define FOR_EACH_INTRINSIC_COMPILER(F, I) \
-  F(CompileForOnStackReplacement, 0, 1)   \
+  F(CompileOptimizedOSR, 0, 1)            \
   F(CompileLazy, 1, 1)                    \
   F(CompileBaseline, 1, 1)                \
   F(CompileMaglev_Concurrent, 1, 1)       \
...