Commit dc9b48e4 authored by Jakob Gruber, committed by V8 LUCI CQ

Address comments from `[osr] Basic support for concurrent OSR`

- Unhandlify OSROptimizedCodeCache::GetOptimizedCode.
- Unstatic-fy FeedbackVector::SetOptimizedCode.
- Remove frame-walking logic during the OSR tierup decision.

Bug: v8:12161
Change-Id: I4fa8c972cb50d369b17898ba57e1909c86e933df
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3560478
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79686}
parent 3ce690ee
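For readers skimming the diff, the first two bullets amount to this: OSROptimizedCodeCache::GetOptimizedCode and FindEntry now take a raw SharedFunctionInfo instead of a Handle<SharedFunctionInfo>, and FeedbackVector::SetOptimizedCode becomes an instance method, so callers no longer build a Handle<FeedbackVector> first. The standalone sketch below (not part of the commit) mirrors only that call-site shape; Handle, CodeT and FeedbackVector here are simplified stand-ins, not the real V8 classes, and the real signatures are the ones in the hunks that follow.

// Illustrative sketch only (hypothetical types, not V8 internals).
#include <cassert>

template <typename T>
class Handle {
 public:
  explicit Handle(T* object) : object_(object) {}
  T* operator->() const { return object_; }
  T& operator*() const { return *object_; }

 private:
  T* object_;
};

struct CodeT {};

struct FeedbackVector {
  // After the change: an ordinary member function taking only the code,
  // rather than a static helper taking Handle<FeedbackVector> plus the code.
  void SetOptimizedCode(Handle<CodeT> code) {
    // The real method also updates tiering-state bits with a release store;
    // recording the pointer is enough for this sketch.
    optimized_code_ = &*code;
  }

  CodeT* optimized_code_ = nullptr;
};

int main() {
  CodeT code;
  FeedbackVector vector;
  // New call shape: invoked directly on the vector, so no temporary
  // Handle<FeedbackVector> is materialized at the call site.
  vector.SetOptimizedCode(Handle<CodeT>(&code));
  assert(vector.optimized_code_ == &code);
  return 0;
}

In the actual change the same reshaping lets the OSR cache lookup in the compiler pass `shared` directly instead of wrapping it in handle(shared, isolate), as the first hunk below shows.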
@@ -903,10 +903,9 @@ class OptimizedCodeCache : public AllStatic {
     CodeT code;
     if (IsOSR(osr_offset)) {
       // For OSR, check the OSR optimized code cache.
-      code =
-          function->native_context()
-              .GetOSROptimizedCodeCache()
-              .GetOptimizedCode(handle(shared, isolate), osr_offset, isolate);
+      code = function->native_context()
+                 .GetOSROptimizedCodeCache()
+                 .GetOptimizedCode(shared, osr_offset, isolate);
     } else {
       // Non-OSR code may be cached on the feedback vector.
       if (function->has_feedback_vector()) {
@@ -960,9 +959,7 @@ class OptimizedCodeCache : public AllStatic {
       return;
     }
-    Handle<FeedbackVector> vector =
-        handle(function->feedback_vector(), isolate);
-    FeedbackVector::SetOptimizedCode(vector, code);
+    function->feedback_vector().SetOptimizedCode(code);
   }
 };
@@ -3397,15 +3394,6 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
     return {};
   }
-  // If we are trying to do OSR when there are already optimized activations of
-  // the function, it means (a) the function is directly or indirectly
-  // recursive and (b) an optimized invocation has been deoptimized so that we
-  // are currently in an unoptimized activation.
-  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
-    JavaScriptFrame* frame = it.frame();
-    if (frame->is_optimized() && frame->function() == *function) return {};
-  }
   // -- Alright, decided to proceed. --
   // Disarm all back edges, i.e. reset the OSR urgency.
...
@@ -386,25 +386,22 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
   if (ticks < Smi::kMaxValue) set_profiler_ticks(ticks + 1);
 }
 
-// static
-void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
-                                      Handle<CodeT> code) {
+void FeedbackVector::SetOptimizedCode(Handle<CodeT> code) {
   DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
   // We should set optimized code only when there is no valid optimized code.
-  DCHECK(!vector->has_optimized_code() ||
-         vector->optimized_code().marked_for_deoptimization() ||
+  DCHECK(!has_optimized_code() ||
+         optimized_code().marked_for_deoptimization() ||
          FLAG_stress_concurrent_inlining_attach_code);
   // TODO(mythria): We could see a CompileOptimized state here either from
   // tests that use %OptimizeFunctionOnNextCall, --always-opt or because we
   // re-mark the function for non-concurrent optimization after an OSR. We
-  // should avoid these cases and also check that state isn't
+  // should avoid these cases and also check that marker isn't
   // TieringState::kRequestTurbofan*.
-  vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code),
-                                   kReleaseStore);
-  int32_t state = vector->flags();
+  set_maybe_optimized_code(HeapObjectReference::Weak(*code), kReleaseStore);
+  int32_t state = flags();
   state = TieringStateBits::update(state, TieringState::kNone);
   state = MaybeHasOptimizedCodeBit::update(state, true);
-  vector->set_flags(state);
+  set_flags(state);
 }
 
 void FeedbackVector::ClearOptimizedCode() {
...
@@ -228,8 +228,7 @@ class FeedbackVector
   // the world, thus 'maybe'.
   inline bool maybe_has_optimized_code() const;
   inline void set_maybe_has_optimized_code(bool value);
-  static void SetOptimizedCode(Handle<FeedbackVector> vector,
-                               Handle<CodeT> code);
+  void SetOptimizedCode(Handle<CodeT> code);
   void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
                                                  const char* reason);
   void ClearOptimizedCode();
...
@@ -27,7 +27,7 @@ void OSROptimizedCodeCache::AddOptimizedCode(
   Handle<OSROptimizedCodeCache> osr_cache(
       native_context->GetOSROptimizedCodeCache(), isolate);
-  DCHECK_EQ(osr_cache->FindEntry(shared, osr_offset), -1);
+  DCHECK_EQ(osr_cache->FindEntry(*shared, osr_offset), -1);
   int entry = -1;
   for (int index = 0; index < osr_cache->length(); index += kEntryLength) {
     if (osr_cache->Get(index + kSharedOffset)->IsCleared() ||
@@ -90,7 +90,7 @@ void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
   native_context->set_osr_code_cache(*new_osr_cache);
 }
 
-CodeT OSROptimizedCodeCache::GetOptimizedCode(Handle<SharedFunctionInfo> shared,
+CodeT OSROptimizedCodeCache::GetOptimizedCode(SharedFunctionInfo shared,
                                               BytecodeOffset osr_offset,
                                               Isolate* isolate) {
   DisallowGarbageCollection no_gc;
@@ -178,12 +178,12 @@ BytecodeOffset OSROptimizedCodeCache::GetBytecodeOffsetFromEntry(int index) {
   return BytecodeOffset(osr_offset_entry.value());
 }
 
-int OSROptimizedCodeCache::FindEntry(Handle<SharedFunctionInfo> shared,
+int OSROptimizedCodeCache::FindEntry(SharedFunctionInfo shared,
                                      BytecodeOffset osr_offset) {
   DisallowGarbageCollection no_gc;
   DCHECK(!osr_offset.IsNone());
   for (int index = 0; index < length(); index += kEntryLength) {
-    if (GetSFIFromEntry(index) != *shared) continue;
+    if (GetSFIFromEntry(index) != shared) continue;
     if (GetBytecodeOffsetFromEntry(index) != osr_offset) continue;
     return index;
   }
...
@@ -51,8 +51,8 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
   // Returns the code corresponding to the shared function |shared| and
   // BytecodeOffset |offset| if an entry exists in the cache. Returns an empty
   // object otherwise.
-  CodeT GetOptimizedCode(Handle<SharedFunctionInfo> shared,
-                         BytecodeOffset osr_offset, Isolate* isolate);
+  CodeT GetOptimizedCode(SharedFunctionInfo shared, BytecodeOffset osr_offset,
+                         Isolate* isolate);
 
   // Remove all code objects marked for deoptimization from OSR code cache.
   void EvictMarkedCode(Isolate* isolate);
@@ -73,8 +73,7 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
   SharedFunctionInfo GetSFIFromEntry(int index);
   BytecodeOffset GetBytecodeOffsetFromEntry(int index);
 
-  inline int FindEntry(Handle<SharedFunctionInfo> shared,
-                       BytecodeOffset osr_offset);
+  inline int FindEntry(SharedFunctionInfo shared, BytecodeOffset osr_offset);
   inline void ClearEntry(int src, Isolate* isolate);
   inline void InitializeEntry(int entry, SharedFunctionInfo shared, CodeT code,
                               BytecodeOffset osr_offset);
...
@@ -144,20 +144,20 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {
   Handle<OSROptimizedCodeCache> osr_cache(
       native_context->GetOSROptimizedCodeCache(), isolate);
-  EXPECT_EQ(osr_cache->GetOptimizedCode(shared, BytecodeOffset(0), isolate),
+  EXPECT_EQ(osr_cache->GetOptimizedCode(*shared, BytecodeOffset(0), isolate),
             *code);
-  EXPECT_EQ(
-      osr_cache->GetOptimizedCode(shared1, BytecodeOffset(bailout_id), isolate),
-      *code1);
+  EXPECT_EQ(osr_cache->GetOptimizedCode(*shared1, BytecodeOffset(bailout_id),
+                                        isolate),
+            *code1);
   RunJS("%DeoptimizeFunction(f1)");
   EXPECT_TRUE(
-      osr_cache->GetOptimizedCode(shared1, BytecodeOffset(bailout_id), isolate)
+      osr_cache->GetOptimizedCode(*shared1, BytecodeOffset(bailout_id), isolate)
           .is_null());
   osr_cache->Set(OSROptimizedCodeCache::kCachedCodeOffset,
                  HeapObjectReference::ClearedValue(isolate));
-  EXPECT_TRUE(osr_cache->GetOptimizedCode(shared, BytecodeOffset(0), isolate)
+  EXPECT_TRUE(osr_cache->GetOptimizedCode(*shared, BytecodeOffset(0), isolate)
                   .is_null());
 }
...