Commit 9845f01a authored by Leszek Swirski, committed by Commit Bot

[compiler] Remove OSR code cache

There are very few cases where OSR code can be re-used, and where the
function won't be non-concurrently optimized after OSR has happened.
Maintaining the OSR code cache is unnecessary complexity, and caching
OSR prevents us from e.g. seeding the optimizer with the actual OSR
values.

So, this patch removes it.

Change-Id: Ib9223de590f35ffc1dc2ab593b7cc9fe97dde4a6
Reviewed-on: https://chromium-review.googlesource.com/552637
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46306}
parent aec28626
......@@ -1239,8 +1239,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Isolate* isolate = global_object->GetIsolate();
Factory* factory = isolate->factory();
native_context()->set_osr_code_table(*factory->empty_fixed_array());
Handle<ScriptContextTable> script_context_table =
factory->NewScriptContextTable();
native_context()->set_script_context_table(*script_context_table);
......
......@@ -646,23 +646,20 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
&RuntimeCallStats::CompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
Code* code = nullptr;
if (osr_ast_id.IsNone()) {
if (function->feedback_vector_cell()->value()->IsFeedbackVector()) {
FeedbackVector* feedback_vector = function->feedback_vector();
feedback_vector->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "GetCodeFromOptimizedCodeCache");
code = feedback_vector->optimized_code();
Code* code = feedback_vector->optimized_code();
if (code != nullptr) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code->marked_for_deoptimization());
DCHECK(function->shared()->is_compiled());
return Handle<Code>(code);
}
}
} else {
code = function->context()->native_context()->SearchOSROptimizedCodeCache(
function->shared(), osr_ast_id);
}
if (code != nullptr) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code->marked_for_deoptimization());
DCHECK(function->shared()->is_compiled());
return Handle<Code>(code);
}
return MaybeHandle<Code>();
}
......@@ -699,9 +696,6 @@ void InsertCodeIntoOptimizedCodeCache(CompilationInfo* info) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
FeedbackVector::SetOptimizedCode(vector, code);
} else {
Context::AddToOSROptimizedCodeCache(native_context, shared, code,
info->osr_ast_id());
}
}
......
......@@ -133,10 +133,6 @@ bool Context::IsScriptContext() {
return map == map->GetHeap()->script_context_map();
}
bool Context::OSROptimizedCodeCacheIsCleared() {
return osr_code_table() == GetHeap()->empty_fixed_array();
}
bool Context::HasSameSecurityTokenAs(Context* that) {
return this->native_context()->security_token() ==
that->native_context()->security_token();
......
......@@ -434,167 +434,6 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
return Handle<Object>::null();
}
// Layout of one record in the native context's OSR optimized code cache.
// The cache is a flat FixedArray of consecutive kEntryLength-slot records:
//   [kSharedOffset]     WeakCell holding the SharedFunctionInfo
//   [kCachedCodeOffset] WeakCell holding the optimized Code object
//   [kOsrAstIdOffset]   Smi-encoded bailout id of the OSR entry point
static const int kSharedOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kOsrAstIdOffset = 2;
static const int kEntryLength = 3;
// A freshly allocated table has room for exactly one record.
static const int kInitialLength = kEntryLength;
// Returns the start index of the cache record matching |shared| and
// |osr_ast_id|, or -1 when no such record exists. Must only be called on
// a native context with a non-None OSR bailout id.
int Context::SearchOSROptimizedCodeCacheEntry(SharedFunctionInfo* shared,
                                              BailoutId osr_ast_id) {
  DisallowHeapAllocation no_gc;
  DCHECK(this->IsNativeContext());
  DCHECK(!osr_ast_id.IsNone());
  // An empty cache trivially has no matching record.
  if (OSROptimizedCodeCacheIsCleared()) return -1;
  FixedArray* table = this->osr_code_table();
  Smi* id_as_smi = Smi::FromInt(osr_ast_id.ToInt());
  for (int entry = 0; entry < table->length(); entry += kEntryLength) {
    WeakCell* shared_cell =
        WeakCell::cast(table->get(entry + kSharedOffset));
    if (shared_cell->value() == shared &&
        table->get(entry + kOsrAstIdOffset) == id_as_smi) {
      return entry;
    }
  }
  return -1;
}
// Looks up the optimized code cached for |shared| at OSR entry |osr_ast_id|.
// Returns nullptr when there is no record, or when the cached code's weak
// cell has already been cleared by the garbage collector.
Code* Context::SearchOSROptimizedCodeCache(SharedFunctionInfo* shared,
                                           BailoutId osr_ast_id) {
  DCHECK(this->IsNativeContext());
  int entry = SearchOSROptimizedCodeCacheEntry(shared, osr_ast_id);
  if (entry == -1) return nullptr;
  FixedArray* table = osr_code_table();
  DCHECK_LE(entry + kEntryLength, table->length());
  WeakCell* code_cell = WeakCell::cast(table->get(entry + kCachedCodeOffset));
  if (code_cell->cleared()) return nullptr;
  return Code::cast(code_cell->value());
}
// Inserts |code| into |native_context|'s OSR optimized code cache, keyed by
// (|shared|, |osr_ast_id|). Reuses an existing record for the same key, then
// a record whose SharedFunctionInfo weak cell has been cleared, and only
// grows the table when neither is available. Allocation order matters here:
// NewFixedArray/CopyFixedArrayAndGrow/NewWeakCell may trigger GC, so the
// table is only written back into the context at the very end.
void Context::AddToOSROptimizedCodeCache(Handle<Context> native_context,
                                         Handle<SharedFunctionInfo> shared,
                                         Handle<Code> code,
                                         BailoutId osr_ast_id) {
  DCHECK(native_context->IsNativeContext());
  DCHECK(!osr_ast_id.IsNone());
  DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
  Isolate* isolate = native_context->GetIsolate();
  // Cached optimized code must not be captured by the serializer.
  if (isolate->serializer_enabled()) return;
  STATIC_ASSERT(kEntryLength == 3);
  Handle<FixedArray> new_code_map;
  int entry;
  if (native_context->OSROptimizedCodeCacheIsCleared()) {
    // First insertion: allocate a table with room for one record.
    new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
    entry = 0;
  } else {
    Handle<FixedArray> old_code_map(native_context->osr_code_table(), isolate);
    entry =
        native_context->SearchOSROptimizedCodeCacheEntry(*shared, osr_ast_id);
    if (entry >= 0) {
      // Just set the code of the entry.
      Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
      old_code_map->set(entry + kCachedCodeOffset, *code_cell);
      return;
    }
    // Can we reuse an entry?
    DCHECK(entry < 0);
    int length = old_code_map->length();
    // Scan for a record whose SharedFunctionInfo has been collected.
    for (int i = 0; i < length; i += kEntryLength) {
      if (WeakCell::cast(old_code_map->get(i + kSharedOffset))->cleared()) {
        new_code_map = old_code_map;
        entry = i;
        break;
      }
    }
    if (entry < 0) {
      // Copy old optimized code map and append one new entry.
      new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
          old_code_map, kEntryLength, TENURED);
      entry = old_code_map->length();
    }
  }
  // Populate the chosen record; the weak cells are allocated after the
  // table so the record is written in one uninterrupted sequence.
  Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
  Handle<WeakCell> shared_cell = isolate->factory()->NewWeakCell(shared);
  new_code_map->set(entry + kSharedOffset, *shared_cell);
  new_code_map->set(entry + kCachedCodeOffset, *code_cell);
  new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
#ifdef DEBUG
  // Verify every record still has the expected shape.
  for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
    WeakCell* cell = WeakCell::cast(new_code_map->get(i + kSharedOffset));
    DCHECK(cell->cleared() || cell->value()->IsSharedFunctionInfo());
    cell = WeakCell::cast(new_code_map->get(i + kCachedCodeOffset));
    DCHECK(cell->cleared() ||
           (cell->value()->IsCode() &&
            Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
    DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
  }
#endif
  // Install the (possibly new) table only if it actually changed.
  FixedArray* old_code_map = native_context->osr_code_table();
  if (old_code_map != *new_code_map) {
    native_context->set_osr_code_table(*new_code_map);
  }
}
// Removes every record whose cached code object is |optimized_code| from
// this native context's OSR code cache. Surviving records are compacted to
// the front of the table in place, and the table is then right-trimmed to
// the new length (or reset to the empty array if nothing survives).
// |reason| is used only for --trace-opt output.
void Context::EvictFromOSROptimizedCodeCache(Code* optimized_code,
                                             const char* reason) {
  DCHECK(IsNativeContext());
  DisallowHeapAllocation no_gc;
  if (OSROptimizedCodeCacheIsCleared()) return;
  Heap* heap = GetHeap();
  FixedArray* code_map = osr_code_table();
  // Two-finger compaction: |src| scans records, |dst| tracks where the next
  // surviving record is written.
  int dst = 0;
  int length = code_map->length();
  for (int src = 0; src < length; src += kEntryLength) {
    if (WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
        optimized_code) {
      BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
      if (FLAG_trace_opt) {
        PrintF(
            "[evicting entry from native context optimizing code map (%s) for ",
            reason);
        ShortPrint();
        DCHECK(!osr.IsNone());
        PrintF(" (osr ast id %d)]\n", osr.ToInt());
      }
      // Evict the src entry by not copying it to the dst entry.
      continue;
    }
    // Keep the src entry by copying it to the dst entry.
    if (dst != src) {
      code_map->set(dst + kSharedOffset, code_map->get(src + kSharedOffset));
      code_map->set(dst + kCachedCodeOffset,
                    code_map->get(src + kCachedCodeOffset));
      code_map->set(dst + kOsrAstIdOffset,
                    code_map->get(src + kOsrAstIdOffset));
    }
    dst += kEntryLength;
  }
  if (dst != length) {
    // Always trim even when array is cleared because of heap verifier.
    heap->RightTrimFixedArray(code_map, length - dst);
    if (code_map->length() == 0) {
      ClearOSROptimizedCodeCache();
    }
  }
}
// Resets the OSR optimized code cache to its empty state by installing the
// heap's canonical empty FixedArray as the table.
void Context::ClearOSROptimizedCodeCache() {
  DCHECK(IsNativeContext());
  set_osr_code_table(GetHeap()->empty_fixed_array());
}
void Context::AddOptimizedFunction(JSFunction* function) {
DCHECK(IsNativeContext());
#ifdef ENABLE_SLOW_DCHECKS
......
......@@ -300,7 +300,6 @@ enum ContextLookupFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(OSR_CODE_TABLE_INDEX, FixedArray, osr_code_table) \
V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
......@@ -598,26 +597,6 @@ class Context: public FixedArray {
inline bool HasSameSecurityTokenAs(Context* that);
// Removes a specific optimized code object from the optimized code map.
// In case of non-OSR the code reference is cleared from the cache entry but
// the entry itself is left in the map in order to proceed sharing literals.
void EvictFromOSROptimizedCodeCache(Code* optimized_code, const char* reason);
// Clear optimized code map.
void ClearOSROptimizedCodeCache();
// A native context keeps track of all OSR'd optimized functions.
inline bool OSROptimizedCodeCacheIsCleared();
Code* SearchOSROptimizedCodeCache(SharedFunctionInfo* shared,
BailoutId osr_ast_id);
int SearchOSROptimizedCodeCacheEntry(SharedFunctionInfo* shared,
BailoutId osr_ast_id);
static void AddToOSROptimizedCodeCache(Handle<Context> native_context,
Handle<SharedFunctionInfo> shared,
Handle<Code> code,
BailoutId osr_ast_id);
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
......
......@@ -1270,9 +1270,6 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
OptimizingCompileDispatcher::BlockingBehavior::kBlock);
}
// The native context has a list of OSR'd optimized code. Clear it.
isolate_->ClearOSROptimizedCode();
// Make sure we abort incremental marking.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
GarbageCollectionReason::kDebugger);
......
......@@ -351,13 +351,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
#endif
// It is finally time to die, code object.
// Remove the code from the osr optimized code cache.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(codes[i]->deoptimization_data());
if (deopt_data->OsrAstId()->value() != BailoutId::None().ToInt()) {
isolate->EvictOSROptimizedCode(codes[i], "deoptimized code");
}
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
......
......@@ -3029,26 +3029,6 @@ bool Isolate::IsArrayOrObjectPrototype(Object* object) {
return false;
}
void Isolate::ClearOSROptimizedCode() {
DisallowHeapAllocation no_gc;
Object* context = heap()->native_contexts_list();
while (!context->IsUndefined(this)) {
Context* current_context = Context::cast(context);
current_context->ClearOSROptimizedCodeCache();
context = current_context->next_context_link();
}
}
// Evicts |code| from the OSR optimized code cache of every native context
// on the heap's context list. |reason| is forwarded for tracing output.
void Isolate::EvictOSROptimizedCode(Code* code, const char* reason) {
  DisallowHeapAllocation no_gc;
  for (Object* current = heap()->native_contexts_list();
       !current->IsUndefined(this);
       current = Context::cast(current)->next_context_link()) {
    Context::cast(current)->EvictFromOSROptimizedCodeCache(code, reason);
  }
}
bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
DisallowHeapAllocation no_gc;
Object* context = heap()->native_contexts_list();
......
......@@ -1208,12 +1208,6 @@ class Isolate {
return compiler_dispatcher_;
}
// Clear all optimized code stored in native contexts.
void ClearOSROptimizedCode();
// Ensure that a particular optimized code is evicted.
void EvictOSROptimizedCode(Code* code, const char* reason);
bool IsInAnyContext(Object* object, uint32_t index);
void SetHostImportModuleDynamicallyCallback(
......
......@@ -214,12 +214,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
if (function->feedback_vector()->optimized_code() == *optimized_code) {
function->ClearOptimizedCodeSlot("notify deoptimized");
}
// Remove the code from the osr optimized code cache.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(optimized_code->deoptimization_data());
if (deopt_data->OsrAstId()->value() == BailoutId::None().ToInt()) {
isolate->EvictOSROptimizedCode(*optimized_code, "notify deoptimized");
}
} else {
// TODO(titzer): we should probably do DeoptimizeCodeList(code)
// unconditionally if the code is not already marked for deoptimization.
......
......@@ -3745,57 +3745,6 @@ TEST(Regress169928) {
}
#ifdef DEBUG
// Regression test for chromium:513507: repeatedly adding entries to the
// native context's OSR optimized code cache while an allocation timeout
// forces a GC during table growth must not crash or corrupt the cache.
TEST(Regress513507) {
  FLAG_allow_natives_syntax = true;
  FLAG_gc_global = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  LocalContext env;
  Heap* heap = isolate->heap();
  HandleScope scope(isolate);

  // Prepare function whose optimized code map we can use.
  Handle<SharedFunctionInfo> shared;
  {
    HandleScope inner_scope(isolate);
    CompileRun("function f() { return 1 }"
               "f(); %OptimizeFunctionOnNextCall(f); f();");
    Handle<JSFunction> f = Handle<JSFunction>::cast(
        v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
            CcTest::global()->Get(env.local(), v8_str("f")).ToLocalChecked())));
    shared = inner_scope.CloseAndEscape(handle(f->shared(), isolate));
    // Drop the only strong JS reference so only |shared| keeps it alive.
    CompileRun("f = null");
  }

  // Prepare optimized code that we can use.
  Handle<Code> code;
  {
    HandleScope inner_scope(isolate);
    CompileRun("function g() { return 2 }"
               "g(); %OptimizeFunctionOnNextCall(g); g();");
    Handle<JSFunction> g = Handle<JSFunction>::cast(
        v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
            CcTest::global()->Get(env.local(), v8_str("g")).ToLocalChecked())));
    code = inner_scope.CloseAndEscape(handle(g->code(), isolate));
    // Bail out when optimization was disabled (e.g. --no-opt bots).
    if (!code->is_optimized_code()) return;
  }

  Handle<Context> context(isolate->context());

  // Add the new code several times to the optimized code map and also set an
  // allocation timeout so that expanding the code map will trigger a GC.
  heap->set_allocation_timeout(5);
  FLAG_gc_interval = 1000;
  // Distinct bailout ids ensure each insertion creates a fresh cache record.
  for (int i = 0; i < 10; ++i) {
    BailoutId id = BailoutId(i + 1);
    Context::AddToOSROptimizedCodeCache(context, shared, code, id);
  }
}
#endif // DEBUG
TEST(Regress513496) {
FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment