Commit d368dcf4 authored by Jakob Gruber, committed by V8 LUCI CQ

Refactor OSROptimizedCodeCache

Tweak a few names, remove a few GetIsolate calls, other minor
usability refactors.

It may be worth taking a closer look at the impl in the future,
currently the design choices don't seem ideal (see the added TODO
on top of the class).

Bug: v8:12161
Change-Id: Ib34e372aa58a30c68c9c5cdd0d1da0ec3e86717c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3560447
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79687}
parent dc9b48e4
...@@ -903,9 +903,8 @@ class OptimizedCodeCache : public AllStatic { ...@@ -903,9 +903,8 @@ class OptimizedCodeCache : public AllStatic {
CodeT code; CodeT code;
if (IsOSR(osr_offset)) { if (IsOSR(osr_offset)) {
// For OSR, check the OSR optimized code cache. // For OSR, check the OSR optimized code cache.
code = function->native_context() code = function->native_context().osr_code_cache().TryGet(
.GetOSROptimizedCodeCache() shared, osr_offset, isolate);
.GetOptimizedCode(shared, osr_offset, isolate);
} else { } else {
// Non-OSR code may be cached on the feedback vector. // Non-OSR code may be cached on the feedback vector.
if (function->has_feedback_vector()) { if (function->has_feedback_vector()) {
...@@ -943,8 +942,8 @@ class OptimizedCodeCache : public AllStatic { ...@@ -943,8 +942,8 @@ class OptimizedCodeCache : public AllStatic {
DCHECK(CodeKindCanOSR(kind)); DCHECK(CodeKindCanOSR(kind));
Handle<SharedFunctionInfo> shared(function->shared(), isolate); Handle<SharedFunctionInfo> shared(function->shared(), isolate);
Handle<NativeContext> native_context(function->native_context(), isolate); Handle<NativeContext> native_context(function->native_context(), isolate);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code, OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
osr_offset); osr_offset);
return; return;
} }
......
...@@ -377,8 +377,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) { ...@@ -377,8 +377,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
isolate->heap()->InvalidateCodeDeoptimizationData(code); isolate->heap()->InvalidateCodeDeoptimizationData(code);
} }
native_context.GetOSROptimizedCodeCache().EvictMarkedCode( native_context.osr_code_cache().EvictDeoptimizedCode(isolate);
native_context.GetIsolate());
} }
void Deoptimizer::DeoptimizeAll(Isolate* isolate) { void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
...@@ -393,7 +392,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) { ...@@ -393,7 +392,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
while (!context.IsUndefined(isolate)) { while (!context.IsUndefined(isolate)) {
NativeContext native_context = NativeContext::cast(context); NativeContext native_context = NativeContext::cast(context);
MarkAllCodeForContext(native_context); MarkAllCodeForContext(native_context);
OSROptimizedCodeCache::Clear(native_context); OSROptimizedCodeCache::Clear(isolate, native_context);
DeoptimizeMarkedCodeForContext(native_context); DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link(); context = native_context.next_context_link();
} }
...@@ -452,7 +451,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) { ...@@ -452,7 +451,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
// pointers. Update DeoptimizeMarkedCodeForContext to use handles and remove // pointers. Update DeoptimizeMarkedCodeForContext to use handles and remove
// this call from here. // this call from here.
OSROptimizedCodeCache::Compact( OSROptimizedCodeCache::Compact(
Handle<NativeContext>(function.native_context(), isolate)); isolate, Handle<NativeContext>(function.native_context(), isolate));
} }
} }
......
...@@ -156,8 +156,7 @@ bool HaveCachedOSRCodeForCurrentBytecodeOffset(UnoptimizedFrame* frame, ...@@ -156,8 +156,7 @@ bool HaveCachedOSRCodeForCurrentBytecodeOffset(UnoptimizedFrame* frame,
BytecodeArray bytecode = frame->GetBytecodeArray(); BytecodeArray bytecode = frame->GetBytecodeArray();
const int bytecode_offset = frame->GetBytecodeOffset(); const int bytecode_offset = frame->GetBytecodeOffset();
if (V8_UNLIKELY(function.shared().osr_code_cache_state() != kNotCached)) { if (V8_UNLIKELY(function.shared().osr_code_cache_state() != kNotCached)) {
OSROptimizedCodeCache cache = OSROptimizedCodeCache cache = function.native_context().osr_code_cache();
function.native_context().GetOSROptimizedCodeCache();
interpreter::BytecodeArrayIterator iterator( interpreter::BytecodeArrayIterator iterator(
handle(bytecode, frame->isolate())); handle(bytecode, frame->isolate()));
for (int jump_offset : cache.GetBytecodeOffsetsFromSFI(function.shared())) { for (int jump_offset : cache.GetBytecodeOffsetsFromSFI(function.shared())) {
......
...@@ -1184,7 +1184,7 @@ Handle<NativeContext> Factory::NewNativeContext() { ...@@ -1184,7 +1184,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
context.set_math_random_index(Smi::zero()); context.set_math_random_index(Smi::zero());
context.set_serialized_objects(*empty_fixed_array()); context.set_serialized_objects(*empty_fixed_array());
context.set_microtask_queue(isolate(), nullptr); context.set_microtask_queue(isolate(), nullptr);
context.set_osr_code_cache(*empty_weak_fixed_array()); context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate()));
context.set_retained_maps(*empty_weak_array_list()); context.set_retained_maps(*empty_weak_array_list());
return handle(context, isolate()); return handle(context, isolate());
} }
......
...@@ -296,10 +296,6 @@ ScriptContextTable NativeContext::synchronized_script_context_table() const { ...@@ -296,10 +296,6 @@ ScriptContextTable NativeContext::synchronized_script_context_table() const {
get(SCRIPT_CONTEXT_TABLE_INDEX, kAcquireLoad)); get(SCRIPT_CONTEXT_TABLE_INDEX, kAcquireLoad));
} }
OSROptimizedCodeCache NativeContext::GetOSROptimizedCodeCache() {
return OSROptimizedCodeCache::cast(osr_code_cache());
}
void NativeContext::SetOptimizedCodeListHead(Object head) { void NativeContext::SetOptimizedCodeListHead(Object head) {
set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER, kReleaseStore); set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER, kReleaseStore);
} }
......
...@@ -369,7 +369,7 @@ enum ContextLookupFlags { ...@@ -369,7 +369,7 @@ enum ContextLookupFlags {
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \ V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
V(WRAPPED_FUNCTION_MAP_INDEX, Map, wrapped_function_map) \ V(WRAPPED_FUNCTION_MAP_INDEX, Map, wrapped_function_map) \
V(RETAINED_MAPS, Object, retained_maps) \ V(RETAINED_MAPS, Object, retained_maps) \
V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache) V(OSR_CODE_CACHE_INDEX, OSROptimizedCodeCache, osr_code_cache)
#include "torque-generated/src/objects/contexts-tq.inc" #include "torque-generated/src/objects/contexts-tq.inc"
...@@ -777,8 +777,6 @@ class NativeContext : public Context { ...@@ -777,8 +777,6 @@ class NativeContext : public Context {
inline void SetDeoptimizedCodeListHead(Object head); inline void SetDeoptimizedCodeListHead(Object head);
inline Object DeoptimizedCodeListHead(); inline Object DeoptimizedCodeListHead();
inline OSROptimizedCodeCache GetOSROptimizedCodeCache();
void ResetErrorsThrown(); void ResetErrorsThrown();
void IncrementErrorsThrown(); void IncrementErrorsThrown();
int GetErrorsThrown(); int GetErrorsThrown();
......
...@@ -12,22 +12,28 @@ ...@@ -12,22 +12,28 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
const int OSROptimizedCodeCache::kInitialLength; // static
const int OSROptimizedCodeCache::kMaxLength; Handle<OSROptimizedCodeCache> OSROptimizedCodeCache::Empty(Isolate* isolate) {
return Handle<OSROptimizedCodeCache>::cast(
isolate->factory()->empty_weak_fixed_array());
}
void OSROptimizedCodeCache::AddOptimizedCode( // static
Handle<NativeContext> native_context, Handle<SharedFunctionInfo> shared, void OSROptimizedCodeCache::Insert(Isolate* isolate,
Handle<CodeT> code, BytecodeOffset osr_offset) { Handle<NativeContext> native_context,
Handle<SharedFunctionInfo> shared,
Handle<CodeT> code,
BytecodeOffset osr_offset) {
DCHECK(!osr_offset.IsNone()); DCHECK(!osr_offset.IsNone());
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
STATIC_ASSERT(kEntryLength == 3);
Isolate* isolate = native_context->GetIsolate();
DCHECK(!isolate->serializer_enabled()); DCHECK(!isolate->serializer_enabled());
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
Handle<OSROptimizedCodeCache> osr_cache( Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
native_context->GetOSROptimizedCodeCache(), isolate); isolate);
DCHECK_EQ(osr_cache->FindEntry(*shared, osr_offset), -1); DCHECK_EQ(osr_cache->FindEntry(*shared, osr_offset), -1);
STATIC_ASSERT(kEntryLength == 3);
int entry = -1; int entry = -1;
for (int index = 0; index < osr_cache->length(); index += kEntryLength) { for (int index = 0; index < osr_cache->length(); index += kEntryLength) {
if (osr_cache->Get(index + kSharedOffset)->IsCleared() || if (osr_cache->Get(index + kSharedOffset)->IsCleared() ||
...@@ -37,28 +43,31 @@ void OSROptimizedCodeCache::AddOptimizedCode( ...@@ -37,28 +43,31 @@ void OSROptimizedCodeCache::AddOptimizedCode(
} }
} }
if (entry == -1 && osr_cache->length() + kEntryLength <= kMaxLength) { if (entry == -1) {
entry = GrowOSRCache(native_context, &osr_cache); if (osr_cache->length() + kEntryLength <= kMaxLength) {
} else if (entry == -1) { entry = GrowOSRCache(isolate, native_context, &osr_cache);
// We reached max capacity and cannot grow further. Reuse an existing entry. } else {
// TODO(mythria): We could use better mechanisms (like lru) to replace // We reached max capacity and cannot grow further. Reuse an existing
// existing entries. Though we don't expect this to be a common case, so // entry.
// for now choosing to replace the first entry. // TODO(mythria): We could use better mechanisms (like lru) to replace
entry = 0; // existing entries. Though we don't expect this to be a common case, so
// for now choosing to replace the first entry.
entry = 0;
}
} }
osr_cache->InitializeEntry(entry, *shared, *code, osr_offset); osr_cache->InitializeEntry(entry, *shared, *code, osr_offset);
} }
void OSROptimizedCodeCache::Clear(NativeContext native_context) { void OSROptimizedCodeCache::Clear(Isolate* isolate,
native_context.set_osr_code_cache( NativeContext native_context) {
*native_context.GetIsolate()->factory()->empty_weak_fixed_array()); native_context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate));
} }
void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) { void OSROptimizedCodeCache::Compact(Isolate* isolate,
Handle<OSROptimizedCodeCache> osr_cache( Handle<NativeContext> native_context) {
native_context->GetOSROptimizedCodeCache(), native_context->GetIsolate()); Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
Isolate* isolate = native_context->GetIsolate(); isolate);
// Re-adjust the cache so all the valid entries are on one side. This will // Re-adjust the cache so all the valid entries are on one side. This will
// enable us to compress the cache if needed. // enable us to compress the cache if needed.
...@@ -83,29 +92,31 @@ void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) { ...@@ -83,29 +92,31 @@ void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
DCHECK_LT(new_osr_cache->length(), osr_cache->length()); DCHECK_LT(new_osr_cache->length(), osr_cache->length());
{ {
DisallowGarbageCollection no_gc; DisallowGarbageCollection no_gc;
new_osr_cache->CopyElements(native_context->GetIsolate(), 0, *osr_cache, 0, new_osr_cache->CopyElements(isolate, 0, *osr_cache, 0,
new_osr_cache->length(), new_osr_cache->length(),
new_osr_cache->GetWriteBarrierMode(no_gc)); new_osr_cache->GetWriteBarrierMode(no_gc));
} }
native_context->set_osr_code_cache(*new_osr_cache); native_context->set_osr_code_cache(*new_osr_cache);
} }
CodeT OSROptimizedCodeCache::GetOptimizedCode(SharedFunctionInfo shared, CodeT OSROptimizedCodeCache::TryGet(SharedFunctionInfo shared,
BytecodeOffset osr_offset, BytecodeOffset osr_offset,
Isolate* isolate) { Isolate* isolate) {
DisallowGarbageCollection no_gc; DisallowGarbageCollection no_gc;
int index = FindEntry(shared, osr_offset); int index = FindEntry(shared, osr_offset);
if (index == -1) return CodeT(); if (index == -1) return {};
CodeT code = GetCodeFromEntry(index); CodeT code = GetCodeFromEntry(index);
if (code.is_null()) { if (code.is_null()) {
ClearEntry(index, isolate); ClearEntry(index, isolate);
return CodeT(); return {};
} }
DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization()); DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization());
return code; return code;
} }
void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) { void OSROptimizedCodeCache::EvictDeoptimizedCode(Isolate* isolate) {
// This is called from DeoptimizeMarkedCodeForContext that uses raw pointers // This is called from DeoptimizeMarkedCodeForContext that uses raw pointers
// and hence the DisallowGarbageCollection scope here. // and hence the DisallowGarbageCollection scope here.
DisallowGarbageCollection no_gc; DisallowGarbageCollection no_gc;
...@@ -135,9 +146,8 @@ std::vector<int> OSROptimizedCodeCache::GetBytecodeOffsetsFromSFI( ...@@ -135,9 +146,8 @@ std::vector<int> OSROptimizedCodeCache::GetBytecodeOffsetsFromSFI(
} }
int OSROptimizedCodeCache::GrowOSRCache( int OSROptimizedCodeCache::GrowOSRCache(
Handle<NativeContext> native_context, Isolate* isolate, Handle<NativeContext> native_context,
Handle<OSROptimizedCodeCache>* osr_cache) { Handle<OSROptimizedCodeCache>* osr_cache) {
Isolate* isolate = native_context->GetIsolate();
int old_length = (*osr_cache)->length(); int old_length = (*osr_cache)->length();
int grow_by = CapacityForLength(old_length) - old_length; int grow_by = CapacityForLength(old_length) - old_length;
DCHECK_GT(grow_by, kEntryLength); DCHECK_GT(grow_by, kEntryLength);
...@@ -256,5 +266,13 @@ bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries, ...@@ -256,5 +266,13 @@ bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries,
return curr_length > kInitialLength && curr_length > num_valid_entries * 3; return curr_length > kInitialLength && curr_length > num_valid_entries * 3;
} }
MaybeObject OSROptimizedCodeCache::RawGetForTesting(int index) const {
return WeakFixedArray::Get(index);
}
void OSROptimizedCodeCache::RawSetForTesting(int index, MaybeObject value) {
WeakFixedArray::Set(index, value);
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_ #define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_
#include "src/objects/fixed-array.h" #include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards): // Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h" #include "src/objects/object-macros.h"
...@@ -21,51 +22,79 @@ enum OSRCodeCacheStateOfSFI : uint8_t { ...@@ -21,51 +22,79 @@ enum OSRCodeCacheStateOfSFI : uint8_t {
kCachedMultiple, // Very unlikely state, multiple entries. kCachedMultiple, // Very unlikely state, multiple entries.
}; };
// TODO(jgruber): There are a few issues with the current implementation:
//
// - The cache is a flat list, thus any search operation is O(N). This resulted
// in optimization attempts, see OSRCodeCacheStateOfSFI.
// - We always iterate up to `length` (== capacity).
// - We essentially reimplement WeakArrayList, i.e. growth and shrink logic.
// - On overflow, new entries always pick slot 0.
//
// There are a few alternatives:
//
// 1) we could reuse WeakArrayList logic (but then we'd still have to
// implement custom compaction due to our entry tuple structure).
// 2) we could reuse CompilationCacheTable (but then we lose weakness and have
// to deal with aging).
// 3) we could try to base on a weak HashTable variant (EphemeronHashTable?).
class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray { class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
public: public:
DECL_CAST(OSROptimizedCodeCache) DECL_CAST(OSROptimizedCodeCache)
enum OSRCodeCacheConstants { static Handle<OSROptimizedCodeCache> Empty(Isolate* isolate);
kSharedOffset,
kCachedCodeOffset,
kOsrIdOffset,
kEntryLength
};
static const int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4;
static const int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024;
// Caches the optimized code |code| corresponding to the shared function // Caches the optimized code |code| corresponding to the shared function
// |shared| and bailout id |osr_offset| in the OSROptimized code cache. // |shared| and bailout id |osr_offset| in the OSROptimized code cache.
// If the OSR code cache wasn't created before it creates a code cache with // If the OSR code cache wasn't created before it creates a code cache with
// kOSRCodeCacheInitialLength entries. // kOSRCodeCacheInitialLength entries.
static void AddOptimizedCode(Handle<NativeContext> context, static void Insert(Isolate* isolate, Handle<NativeContext> context,
Handle<SharedFunctionInfo> shared, Handle<SharedFunctionInfo> shared, Handle<CodeT> code,
Handle<CodeT> code, BytecodeOffset osr_offset); BytecodeOffset osr_offset);
// Reduces the size of the OSR code cache if the number of valid entries are
// less than the current capacity of the cache.
static void Compact(Handle<NativeContext> context);
// Sets the OSR optimized code cache to an empty array.
static void Clear(NativeContext context);
// Returns the code corresponding to the shared function |shared| and // Returns the code corresponding to the shared function |shared| and
// BytecodeOffset |offset| if an entry exists in the cache. Returns an empty // BytecodeOffset |offset| if an entry exists in the cache. Returns an empty
// object otherwise. // object otherwise.
CodeT GetOptimizedCode(SharedFunctionInfo shared, BytecodeOffset osr_offset, CodeT TryGet(SharedFunctionInfo shared, BytecodeOffset osr_offset,
Isolate* isolate); Isolate* isolate);
// Remove all code objects marked for deoptimization from OSR code cache. // Remove all code objects marked for deoptimization from OSR code cache.
void EvictMarkedCode(Isolate* isolate); void EvictDeoptimizedCode(Isolate* isolate);
// Reduces the size of the OSR code cache if the number of valid entries are
// less than the current capacity of the cache.
static void Compact(Isolate* isolate, Handle<NativeContext> context);
// Sets the OSR optimized code cache to an empty array.
static void Clear(Isolate* isolate, NativeContext context);
// Returns vector of bytecode offsets corresponding to the shared function // Returns vector of bytecode offsets corresponding to the shared function
// |shared| // |shared|
std::vector<int> GetBytecodeOffsetsFromSFI(SharedFunctionInfo shared); std::vector<int> GetBytecodeOffsetsFromSFI(SharedFunctionInfo shared);
enum OSRCodeCacheConstants {
kSharedOffset,
kCachedCodeOffset,
kOsrIdOffset,
kEntryLength
};
static constexpr int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4;
static constexpr int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024;
// For osr-code-cache-unittest.cc.
MaybeObject RawGetForTesting(int index) const;
void RawSetForTesting(int index, MaybeObject value);
private: private:
// Hide raw accessors to avoid terminology confusion.
using WeakFixedArray::Get;
using WeakFixedArray::Set;
// Functions that implement heuristics on when to grow / shrink the cache. // Functions that implement heuristics on when to grow / shrink the cache.
static int CapacityForLength(int curr_capacity); static int CapacityForLength(int curr_capacity);
static bool NeedsTrimming(int num_valid_entries, int curr_capacity); static bool NeedsTrimming(int num_valid_entries, int curr_capacity);
static int GrowOSRCache(Handle<NativeContext> native_context, static int GrowOSRCache(Isolate* isolate,
Handle<NativeContext> native_context,
Handle<OSROptimizedCodeCache>* osr_cache); Handle<OSROptimizedCodeCache>* osr_cache);
// Helper functions to get individual items from an entry in the cache. // Helper functions to get individual items from an entry in the cache.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment