Commit e56fe846 authored by mvstanton, committed by Commit bot

Use WeakCells in the optimized code map rather than traversing in pause.

It's expensive to walk all SharedFunctionInfos during the GC atomic
pause. Instead, use WeakCells to implement this structure, removing the
need for a manual clearing pass.

This is a reland; the previous attempt was reverted due to a bug when reusing
entries in the optimized code map.

BUG=

Review URL: https://codereview.chromium.org/1508703002

Cr-Commit-Position: refs/heads/master@{#32696}
parent 5dffa353
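The idea behind the change, in isolation: each optimized code map entry holds its context, code, and literals through weak cells that the collector clears on its own, so no pass over every SharedFunctionInfo is needed during the pause, and a cleared slot can simply be reused later. Below is a minimal standalone sketch of that shape; it is not V8 code, and ToyWeakCell/ToyCodeMap with std::weak_ptr are illustrative stand-ins only.

#include <cstddef>
#include <memory>
#include <vector>

struct Code {};  // stand-in for an optimized code object

// Stand-in for v8::internal::WeakCell: holds a value until the GC drops it.
struct ToyWeakCell {
  std::weak_ptr<Code> value;
  bool cleared() const { return value.expired(); }
};

// Stand-in for the optimized code map: a flat array of weak entries.
struct ToyCodeMap {
  std::vector<ToyWeakCell> entries;

  // Lookup skips cleared cells; no GC-time pruning pass is required.
  std::shared_ptr<Code> Lookup(size_t i) const {
    if (i >= entries.size() || entries[i].cleared()) return nullptr;
    return entries[i].value.lock();
  }

  // A cleared slot is reused for a new context's entry instead of growing
  // the array -- the behaviour the new OptimizedCodeMapReuseEntries test
  // below exercises.
  bool ReuseClearedSlot(const std::shared_ptr<Code>& code) {
    for (ToyWeakCell& cell : entries) {
      if (cell.cleared()) {
        cell.value = code;
        return true;
      }
    }
    return false;
  }
};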
......@@ -1870,19 +1870,29 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
HValue* context_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kContextOffset);
context_slot = Add<HLoadNamedField>(context_slot, nullptr,
HObjectAccess::ForWeakCellValue());
HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
HValue* code_object = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
code_object = Add<HLoadNamedField>(code_object, nullptr,
HObjectAccess::ForWeakCellValue());
builder->If<HCompareObjectEqAndBranch>(native_context,
context_slot);
builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
builder->And();
builder->IfNot<HCompareObjectEqAndBranch>(code_object,
graph()->GetConstantUndefined());
graph()->GetConstant0());
builder->Then();
HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
map_index, SharedFunctionInfo::kLiteralsOffset);
literals = Add<HLoadNamedField>(literals, nullptr,
HObjectAccess::ForWeakCellValue());
IfBuilder maybe_deopt(this);
maybe_deopt.If<HCompareObjectEqAndBranch>(literals, graph()->GetConstant0());
maybe_deopt.ThenDeopt(Deoptimizer::kLiteralsWereDisposed);
maybe_deopt.End();
BuildInstallOptimizedCode(js_function, native_context, code_object, literals);
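A rough plain-C++ analogue of the branch structure built in the hunk above, using illustrative stand-in types rather than the real Hydrogen/V8 ones: each cached field is read through its weak cell, a cleared cell reads back as zero, and a cache hit whose literals cell was cleared deoptimizes with kLiteralsWereDisposed instead of installing the code.

// Illustrative stand-ins; the real code reads WeakCell values out of the
// optimized code map inside the Hydrogen graph built above.
struct ToyObject {};
struct WeakRef {
  ToyObject* value = nullptr;  // nullptr plays the role of the cleared cell's Smi 0
  bool cleared() const { return value == nullptr; }
};
struct Entry {
  WeakRef context, code, literals;
  int osr_ast_id = 0;
};

enum class CacheResult { kMiss, kDeoptLiteralsWereDisposed, kInstall };

// Match the native context and OSR id, require live code, then deopt if the
// literals cell has been cleared; otherwise install the cached code.
CacheResult Probe(const Entry& e, const ToyObject* native_context,
                  int osr_ast_id_none) {
  if (e.context.cleared() || e.context.value != native_context)
    return CacheResult::kMiss;
  if (e.osr_ast_id != osr_ast_id_none) return CacheResult::kMiss;
  if (e.code.cleared()) return CacheResult::kMiss;
  if (e.literals.cleared()) return CacheResult::kDeoptLiteralsWereDisposed;
  return CacheResult::kInstall;
}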
......@@ -2006,8 +2016,10 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* shared_code =
Add<HLoadNamedField>(optimized_map, nullptr,
HObjectAccess::ForOptimizedCodeMapSharedCode());
shared_code = Add<HLoadNamedField>(shared_code, nullptr,
HObjectAccess::ForWeakCellValue());
shared_code_check.IfNot<HCompareObjectEqAndBranch>(
shared_code, graph()->GetConstantUndefined());
shared_code, graph()->GetConstant0());
shared_code_check.Then();
{
// Store the context-independent optimized code.
......
......@@ -229,6 +229,7 @@ enum BindingFlags {
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
......
......@@ -335,6 +335,7 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kInsufficientTypeFeedbackForRHSOfBinaryOperation, \
"Insufficient type feedback for RHS of binary operation") \
V(kKeyIsNegative, "key is negative") \
V(kLiteralsWereDisposed, "literals have been disposed") \
V(kLostPrecision, "lost precision") \
V(kLostPrecisionOrNaN, "lost precision or NaN") \
V(kMementoFound, "memento found") \
......
......@@ -728,6 +728,8 @@ Handle<Context> Factory::NewNativeContext() {
context->set_native_context(*context);
context->set_js_array_maps(*undefined_value());
context->set_errors_thrown(Smi::FromInt(0));
Handle<WeakCell> weak_cell = NewWeakCell(context);
context->set_self_weak_cell(*weak_cell);
DCHECK(context->IsNativeContext());
return context;
}
......
......@@ -520,7 +520,6 @@ void GCTracer::PrintNVP() const {
"mark_weakrefs=%.1f "
"mark_globalhandles=%.1f "
"mark_codeflush=%.1f "
"mark_optimizedcodemaps=%.1f "
"store_buffer_clear=%.1f "
"slots_buffer_clear=%.1f "
"sweep=%.2f "
......@@ -592,7 +591,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_MARK_WEAK_REFERENCES],
current_.scopes[Scope::MC_MARK_GLOBAL_HANDLES],
current_.scopes[Scope::MC_MARK_CODE_FLUSH],
current_.scopes[Scope::MC_MARK_OPTIMIZED_CODE_MAPS],
current_.scopes[Scope::MC_STORE_BUFFER_CLEAR],
current_.scopes[Scope::MC_SLOTS_BUFFER_CLEAR],
current_.scopes[Scope::MC_SWEEP],
......
......@@ -109,7 +109,6 @@ class GCTracer {
MC_MARK_WEAK_REFERENCES,
MC_MARK_GLOBAL_HANDLES,
MC_MARK_CODE_FLUSH,
MC_MARK_OPTIMIZED_CODE_MAPS,
MC_STORE_BUFFER_CLEAR,
MC_SLOTS_BUFFER_CLEAR,
MC_SWEEP,
......
......@@ -2777,8 +2777,14 @@ void Heap::CreateInitialObjects() {
}
{
Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
set_empty_weak_cell(*cell);
cell->clear();
Handle<FixedArray> cleared_optimized_code_map =
factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
*cell);
STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
SharedFunctionInfo::kSharedCodeIndex == 0);
set_cleared_optimized_code_map(*cleared_optimized_code_map);
......
......@@ -189,6 +189,7 @@ namespace internal {
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, interpreter_table, InterpreterTable) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
......@@ -449,6 +450,7 @@ namespace internal {
V(JSMessageObjectMap) \
V(ForeignMap) \
V(NeanderMap) \
V(EmptyWeakCell) \
V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
......
......@@ -2163,13 +2163,6 @@ void MarkCompactCollector::AfterMarking() {
code_flusher_->ProcessCandidates();
}
// Process and clear all optimized code maps.
if (!FLAG_flush_optimized_code_cache) {
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_MARK_OPTIMIZED_CODE_MAPS);
ProcessAndClearOptimizedCodeMaps();
}
if (FLAG_track_gc_object_stats) {
if (FLAG_trace_gc_object_stats) {
heap()->object_stats_->TraceObjectStats();
......@@ -2179,72 +2172,6 @@ void MarkCompactCollector::AfterMarking() {
}
void MarkCompactCollector::ProcessAndClearOptimizedCodeMaps() {
SharedFunctionInfo::Iterator iterator(isolate());
while (SharedFunctionInfo* shared = iterator.Next()) {
if (shared->OptimizedCodeMapIsCleared()) continue;
// Process context-dependent entries in the optimized code map.
FixedArray* code_map = shared->optimized_code_map();
int new_length = SharedFunctionInfo::kEntriesStart;
int old_length = code_map->length();
for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
i += SharedFunctionInfo::kEntryLength) {
// Each entry contains [ context, code, literals, ast-id ] as fields.
STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
Context* context =
Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
HeapObject* code = HeapObject::cast(
code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
FixedArray* literals = FixedArray::cast(
code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
Smi* ast_id =
Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
// Move every slot in the entry and record slots when needed.
code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
Object** code_slot = code_map->RawFieldOfElementAt(
new_length + SharedFunctionInfo::kCachedCodeOffset);
RecordSlot(code_map, code_slot, *code_slot);
Object** context_slot = code_map->RawFieldOfElementAt(
new_length + SharedFunctionInfo::kContextOffset);
RecordSlot(code_map, context_slot, *context_slot);
Object** literals_slot = code_map->RawFieldOfElementAt(
new_length + SharedFunctionInfo::kLiteralsOffset);
RecordSlot(code_map, literals_slot, *literals_slot);
new_length += SharedFunctionInfo::kEntryLength;
}
// Process context-independent entry in the optimized code map.
Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
if (shared_object->IsCode()) {
Code* shared_code = Code::cast(shared_object);
if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
} else {
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
Object** slot =
code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
RecordSlot(code_map, slot, *slot);
}
}
// Trim the optimized code map if entries have been removed.
if (new_length < old_length) {
shared->TrimOptimizedCodeMap(old_length - new_length);
}
}
}
void MarkCompactCollector::ClearNonLiveReferences() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
......
......@@ -685,11 +685,6 @@ class MarkCompactCollector {
void AbortTransitionArrays();
// After all reachable objects have been marked, those entries within
// optimized code maps that became unreachable are removed, potentially
// trimming or clearing out the entire optimized code map.
void ProcessAndClearOptimizedCodeMaps();
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
// a non-compacting collection.
......
......@@ -454,14 +454,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
// Always flush the optimized code map if requested by flag.
shared->ClearOptimizedCodeMap();
}
} else {
if (!shared->OptimizedCodeMapIsCleared()) {
// Treat some references within the code map weakly by marking the
// code map itself but not pushing it onto the marking deque. The
// map will be processed after marking.
FixedArray* code_map = shared->optimized_code_map();
MarkOptimizedCodeMap(heap, code_map);
}
}
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
......@@ -578,23 +570,6 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkOptimizedCodeMap(
Heap* heap, FixedArray* code_map) {
if (!StaticVisitor::MarkObjectWithoutPush(heap, code_map)) return;
// Mark the context-independent entry in the optimized code map. Depending on
// the age of the code object, we treat it as a strong or a weak reference.
Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
if (FLAG_turbo_preserve_shared_code && shared_object->IsCode() &&
FLAG_age_code && !Code::cast(shared_object)->IsOld()) {
StaticVisitor::VisitPointer(
heap, code_map,
code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex));
}
}
inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
Object* undefined = heap->undefined_value();
return (info->script() != undefined) &&
......
......@@ -375,10 +375,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Mark pointers in a Map treating some elements of the descriptor array weak.
static void MarkMapContents(Heap* heap, Map* map);
// Mark pointers in the optimized code map that should act as strong
// references, possibly treating some entries weak.
static void MarkOptimizedCodeMap(Heap* heap, FixedArray* code_map);
// Code flushing support.
INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
......
......@@ -2029,7 +2029,10 @@ Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
void WeakCell::clear() {
DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT);
// Either the garbage collector is clearing the cell or we are simply
// initializing the root empty weak cell.
DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
this == GetHeap()->empty_weak_cell());
WRITE_FIELD(this, kValueOffset, Smi::FromInt(0));
}
......
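For reference, the convention that both the WeakCell::clear() change above and the GetConstant0() comparisons in the earlier hunks rely on: a cleared cell holds the Smi zero, so readers test a cached field against 0 rather than against undefined. A standalone illustration (not V8 code; ToyCell is a stand-in):

#include <cassert>
#include <cstdint>

struct ToyCell {
  std::intptr_t value = 0;  // 0 plays the role of Smi::FromInt(0)

  void clear() { value = 0; }
  bool cleared() const { return value == 0; }
};

int main() {
  ToyCell cell;
  cell.value = 0x1234;     // "set" the cell to some live payload
  assert(!cell.cleared());
  cell.clear();            // what the GC (or the empty-weak-cell setup) does
  assert(cell.cleared());  // a probe now sees 0 and treats it as a miss
  return 0;
}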
......@@ -4492,6 +4492,115 @@ TEST(Regress514122) {
}
TEST(OptimizedCodeMapReuseEntries) {
i::FLAG_flush_optimized_code_cache = false;
i::FLAG_allow_natives_syntax = true;
// BUG(v8:4598): Since TurboFan doesn't treat maps in code weakly, we can't
// run this test.
if (i::FLAG_turbo) return;
CcTest::InitializeVM();
v8::Isolate* v8_isolate = CcTest::isolate();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
// Create 3 contexts, allow the 2nd one to be disposed, and verify that
// a 4th context will re-use the weak slots in the optimized code map
// to hold data, rather than expanding the map.
v8::Local<v8::Context> c1 = v8::Context::New(v8_isolate);
const char* source = "function foo(x) { var l = [1]; return x+l[0]; }";
v8::ScriptCompiler::Source script_source(
v8::String::NewFromUtf8(v8_isolate, source, v8::NewStringType::kNormal)
.ToLocalChecked());
v8::Local<v8::UnboundScript> indep =
v8::ScriptCompiler::CompileUnboundScript(v8_isolate, &script_source)
.ToLocalChecked();
const char* toplevel = "foo(3); %OptimizeFunctionOnNextCall(foo); foo(3);";
// Perform one initial GC to enable code flushing.
heap->CollectAllGarbage();
c1->Enter();
indep->BindToCurrentContext()->Run(c1).ToLocalChecked();
CompileRun(toplevel);
Handle<SharedFunctionInfo> shared;
Handle<JSFunction> foo = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(c1, v8_str("foo")).ToLocalChecked())));
CHECK(foo->shared()->is_compiled());
shared = handle(foo->shared());
c1->Exit();
{
HandleScope scope(isolate);
v8::Local<v8::Context> c2 = v8::Context::New(v8_isolate);
c2->Enter();
indep->BindToCurrentContext()->Run(c2).ToLocalChecked();
CompileRun(toplevel);
c2->Exit();
}
{
HandleScope scope(isolate);
v8::Local<v8::Context> c3 = v8::Context::New(v8_isolate);
c3->Enter();
indep->BindToCurrentContext()->Run(c3).ToLocalChecked();
CompileRun(toplevel);
c3->Exit();
// Now, collect garbage. Context c2 should have no roots keeping it alive, and
// its entry in the optimized code map should be free for a new context.
for (int i = 0; i < 4; i++) {
heap->CollectAllGarbage();
}
Handle<FixedArray> optimized_code_map =
handle(shared->optimized_code_map());
// There should be 3 entries in the map.
CHECK_EQ(
3, ((optimized_code_map->length() - SharedFunctionInfo::kEntriesStart) /
SharedFunctionInfo::kEntryLength));
// But one of them (formerly for c2) should be cleared.
int cleared_count = 0;
for (int i = SharedFunctionInfo::kEntriesStart;
i < optimized_code_map->length();
i += SharedFunctionInfo::kEntryLength) {
cleared_count +=
WeakCell::cast(
optimized_code_map->get(i + SharedFunctionInfo::kContextOffset))
->cleared()
? 1
: 0;
}
CHECK_EQ(1, cleared_count);
// Verify that a new context uses the cleared entry rather than creating a
// new optimized code map array.
v8::Local<v8::Context> c4 = v8::Context::New(v8_isolate);
c4->Enter();
indep->BindToCurrentContext()->Run(c4).ToLocalChecked();
CompileRun(toplevel);
c4->Exit();
CHECK_EQ(*optimized_code_map, shared->optimized_code_map());
// Now each entry is in use.
cleared_count = 0;
for (int i = SharedFunctionInfo::kEntriesStart;
i < optimized_code_map->length();
i += SharedFunctionInfo::kEntryLength) {
cleared_count +=
WeakCell::cast(
optimized_code_map->get(i + SharedFunctionInfo::kContextOffset))
->cleared()
? 1
: 0;
}
CHECK_EQ(0, cleared_count);
}
}
TEST(Regress513496) {
i::FLAG_flush_optimized_code_cache = false;
i::FLAG_allow_natives_syntax = true;
......