Commit f5d0df35 authored by Ross McIlroy, committed by Commit Bot

[Heap] Remove code flushing.

Only FullCodegen code ever gets flushed by code flushing. Since we are
deprecating the old pipeline, the complexity introduced by code
flushing is no longer worth it. This CL removes it (but keeps code aging,
which is used to unlink SFIs from the compilation cache).

BUG=v8:6389,v8:6379,v8:6409

Change-Id: I90de113a101f86dbeaaf0511c61a090ef12aa365
Reviewed-on: https://chromium-review.googlesource.com/507388
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45446}
parent 93769449
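
The code aging that the commit message keeps is the compilation cache's generational scheme touched in the compilation-cache.cc hunks below: CompilationCache::MarkCompactPrologue() calls Age() on each sub-cache, and entries that age out simply drop from the tables. A minimal, stand-alone C++ sketch of that idea (a hypothetical GenerationalCache type, not the actual V8 API) could look like this:

  #include <array>
  #include <string>
  #include <unordered_map>
  #include <utility>

  // Sketch only: each full GC "ages" the cache by shifting generations down
  // one slot and dropping the oldest, so entries that are never looked up
  // again eventually fall out of the cache.
  class GenerationalCache {
   public:
    static constexpr int kGenerations = 2;  // mirrors kRegExpGenerations below

    void Put(const std::string& key, int value) { generations_[0][key] = value; }

    bool Lookup(const std::string& key, int* value) {
      for (auto& table : generations_) {
        auto it = table.find(key);
        if (it != table.end()) {
          *value = it->second;
          generations_[0][key] = it->second;  // promote hits to the young generation
          return true;
        }
      }
      return false;
    }

    // Rough analogue of MarkCompactPrologue()/Age(): rotate generations,
    // discarding the oldest, and start a fresh young generation.
    void Age() {
      for (int i = kGenerations - 1; i > 0; i--) {
        generations_[i] = std::move(generations_[i - 1]);
      }
      generations_[0].clear();
    }

   private:
    std::array<std::unordered_map<std::string, int>, kGenerations> generations_;
  };

Under such a scheme, an entry that is not refreshed for kGenerations full GCs drops out of the cache; in V8 the actual eviction criterion additionally involves the code-age tracking described in the objects.h hunks further down.
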
......@@ -14,7 +14,6 @@
namespace v8 {
namespace internal {
// The number of generations for each sub cache.
static const int kRegExpGenerations = 2;
......@@ -35,10 +34,8 @@ CompilationCache::CompilationCache(Isolate* isolate)
}
}
CompilationCache::~CompilationCache() {}
Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
DCHECK(generation < generations_);
Handle<CompilationCacheTable> result;
......@@ -53,7 +50,6 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
return result;
}
void CompilationSubCache::Age() {
// Don't directly age single-generation caches.
if (generations_ == 1) {
......@@ -72,27 +68,15 @@ void CompilationSubCache::Age() {
tables_[0] = isolate()->heap()->undefined_value();
}
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
Object* undefined = isolate()->heap()->undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
}
}
}
void CompilationSubCache::Iterate(RootVisitor* v) {
v->VisitRootPointers(Root::kCompilationCache, &tables_[0],
&tables_[generations_]);
}
void CompilationSubCache::Clear() {
MemsetPointer(tables_, isolate()->heap()->undefined_value(), generations_);
}
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
......@@ -134,7 +118,6 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<String>(String::cast(script->name())));
}
// TODO(245): Need to allow identical code from different contexts to
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
......@@ -236,7 +219,6 @@ void CompilationCacheEval::Put(Handle<String> source,
SetFirstTable(table);
}
MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup(
Handle<String> source,
JSRegExp::Flags flags) {
......@@ -264,7 +246,6 @@ MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup(
}
}
void CompilationCacheRegExp::Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
......@@ -273,7 +254,6 @@ void CompilationCacheRegExp::Put(Handle<String> source,
SetFirstTable(CompilationCacheTable::PutRegExp(table, source, flags, data));
}
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
......@@ -312,7 +292,6 @@ InfoVectorPair CompilationCache::LookupEval(
return result;
}
MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
JSRegExp::Flags flags) {
if (!IsEnabled()) return MaybeHandle<FixedArray>();
......@@ -348,8 +327,6 @@ void CompilationCache::PutEval(Handle<String> source,
}
}
void CompilationCache::PutRegExp(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
......@@ -360,7 +337,6 @@ void CompilationCache::PutRegExp(Handle<String> source,
reg_exp_.Put(source, flags, data);
}
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();
......@@ -373,31 +349,20 @@ void CompilationCache::Iterate(RootVisitor* v) {
}
}
void CompilationCache::IterateFunctions(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->IterateFunctions(v);
}
}
void CompilationCache::MarkCompactPrologue() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Age();
}
}
void CompilationCache::Enable() {
enabled_ = true;
}
void CompilationCache::Disable() {
enabled_ = false;
Clear();
}
} // namespace internal
} // namespace v8
......@@ -52,7 +52,6 @@ class CompilationSubCache {
// GC support.
void Iterate(RootVisitor* v);
void IterateFunctions(ObjectVisitor* v);
// Clear this sub-cache evicting all its content.
void Clear();
......@@ -200,7 +199,6 @@ class CompilationCache {
// GC support.
void Iterate(RootVisitor* v);
void IterateFunctions(ObjectVisitor* v);
// Notify the cache that a mark-sweep garbage collection is about to
// take place. This is used to retire entries from the cache to
......
......@@ -181,8 +181,8 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
bool GeneratePreagedPrologue() const {
// Generate a pre-aged prologue if we are optimizing for size, which
// will make code flushing more aggressive. Only apply to Code::FUNCTION,
// since StaticMarkingVisitor::IsFlushable only flushes proper functions.
// will make code old more aggressive. Only apply to Code::FUNCTION,
// since only functions are aged in the compilation cache.
return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
output_code_kind() == Code::FUNCTION;
}
......
......@@ -573,8 +573,8 @@ void Context::ClearOSROptimizedCodeCache() {
void Context::AddOptimizedFunction(JSFunction* function) {
DCHECK(IsNativeContext());
Isolate* isolate = GetIsolate();
#ifdef ENABLE_SLOW_DCHECKS
Isolate* isolate = GetIsolate();
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined(isolate)) {
......@@ -596,15 +596,7 @@ void Context::AddOptimizedFunction(JSFunction* function) {
CHECK(found);
#endif
// If the function link field is already used then the function was
// enqueued as a code flushing candidate and we remove it now.
if (!function->next_function_link()->IsUndefined(isolate)) {
CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
flusher->EvictCandidate(function);
}
DCHECK(function->next_function_link()->IsUndefined(isolate));
DCHECK(function->next_function_link()->IsUndefined(GetIsolate()));
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST),
UPDATE_WEAK_WRITE_BARRIER);
set(OPTIMIZED_FUNCTIONS_LIST, function, UPDATE_WEAK_WRITE_BARRIER);
......
......@@ -1710,7 +1710,6 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowHeapAllocation no_gc;
code->set_gc_metadata(Smi::kZero);
code->set_ic_age(isolate()->heap()->global_ic_age());
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(*reloc_info);
......
......@@ -662,11 +662,9 @@ DEFINE_BOOL(trace_fragmentation_verbose, false,
DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
DEFINE_BOOL(trace_mutator_utilization, false,
"print mutator utilization, allocation speed, gc speed")
DEFINE_BOOL(flush_code, false, "flush code that we expect not to use again")
DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
"old code (required for code flushing)")
DEFINE_BOOL(flush_regexp_code, true,
"flush regexp code that we expect not to use again")
DEFINE_BOOL(age_code, true, "track un-executed functions to age code")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
......@@ -688,7 +686,6 @@ DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
DEFINE_NEG_IMPLICATION(minor_mc, flush_code)
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
"use concurrent store buffer processing")
......
......@@ -464,7 +464,6 @@ void GCTracer::PrintNVP() const {
"old_new=%.2f "
"weak=%.2f "
"roots=%.2f "
"code=%.2f "
"semispace=%.2f "
"steps_count=%d "
"steps_took=%.1f "
......@@ -504,7 +503,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
current_.scopes[Scope::SCAVENGER_WEAK],
current_.scopes[Scope::SCAVENGER_ROOTS],
current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
current_.scopes[Scope::SCAVENGER_SEMISPACE],
current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
.steps,
......@@ -585,7 +583,6 @@ void GCTracer::PrintNVP() const {
"heap.external.epilogue=%.1f "
"heap.external.weak_global_handles=%.1f "
"clear=%1.f "
"clear.code_flush=%.1f "
"clear.dependent_code=%.1f "
"clear.maps=%.1f "
"clear.slots_buffer=%.1f "
......@@ -609,7 +606,6 @@ void GCTracer::PrintNVP() const {
"finish=%.1f "
"mark=%.1f "
"mark.finish_incremental=%.1f "
"mark.prepare_code_flush=%.1f "
"mark.roots=%.1f "
"mark.weak_closure=%.1f "
"mark.weak_closure.ephemeral=%.1f "
......@@ -671,7 +667,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::HEAP_EXTERNAL_EPILOGUE],
current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.scopes[Scope::MC_CLEAR],
current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
current_.scopes[Scope::MC_CLEAR_MAPS],
current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
......@@ -694,7 +689,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
current_.scopes[Scope::MC_MARK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL],
......
......@@ -43,7 +43,6 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
F(MC_CLEAR) \
F(MC_CLEAR_CODE_FLUSH) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
......@@ -67,7 +66,6 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_PREPARE_CODE_FLUSH) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
......@@ -107,7 +105,6 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MINOR_MC_MARKING_DEQUE) \
F(MINOR_MC_RESET_LIVENESS) \
F(MINOR_MC_SWEEPING) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
F(SCAVENGER_EVACUATE) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
......
......@@ -1762,17 +1762,6 @@ void Heap::Scavenge() {
IterateEncounteredWeakCollections(&root_scavenge_visitor);
}
{
// Copy objects reachable from the code flushing candidates list.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
MarkCompactCollector* collector = mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
collector->code_flusher()->VisitListHeads(&root_scavenge_visitor);
collector->code_flusher()
->IteratePointersToFromSpace<StaticScavengeVisitor>();
}
}
{
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
new_space_front = DoScavenge(new_space_front);
......@@ -3423,7 +3412,6 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
object_size <= code_space()->AreaSize());
code->set_gc_metadata(Smi::kZero);
code->set_ic_age(global_ic_age_);
return code;
}
......
......@@ -48,88 +48,6 @@ void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
}
}
void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
if (GetNextCandidate(shared_info) == nullptr) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
shared_function_info_candidates_head_ = shared_info;
}
}
void CodeFlusher::AddCandidate(JSFunction* function) {
DCHECK(function->code() == function->shared()->code());
if (function->next_function_link()->IsUndefined(isolate_)) {
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
}
}
JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
return reinterpret_cast<JSFunction**>(
HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
}
JSFunction* CodeFlusher::GetNextCandidate(JSFunction* candidate) {
Object* next_candidate = candidate->next_function_link();
return reinterpret_cast<JSFunction*>(next_candidate);
}
void CodeFlusher::SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate) {
candidate->set_next_function_link(next_candidate, UPDATE_WEAK_WRITE_BARRIER);
}
void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
DCHECK(undefined->IsUndefined(candidate->GetIsolate()));
candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
}
SharedFunctionInfo* CodeFlusher::GetNextCandidate(
SharedFunctionInfo* candidate) {
Object* next_candidate = candidate->code()->gc_metadata();
return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
}
void CodeFlusher::SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate) {
candidate->code()->set_gc_metadata(next_candidate);
}
void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
void CodeFlusher::VisitListHeads(RootVisitor* visitor) {
visitor->VisitRootPointer(
Root::kCodeFlusher,
reinterpret_cast<Object**>(&jsfunction_candidates_head_));
visitor->VisitRootPointer(
Root::kCodeFlusher,
reinterpret_cast<Object**>(&shared_function_info_candidates_head_));
}
template <typename StaticVisitor>
void CodeFlusher::IteratePointersToFromSpace() {
Heap* heap = isolate_->heap();
JSFunction* candidate = jsfunction_candidates_head_;
while (candidate != nullptr) {
JSFunction** slot = GetNextCandidateSlot(candidate);
if (heap->InFromSpace(*slot)) {
StaticVisitor::VisitPointer(heap, candidate,
reinterpret_cast<Object**>(slot));
}
candidate = GetNextCandidate(candidate);
}
}
template <LiveObjectIterationMode T>
HeapObject* LiveObjectIterator<T>::Next() {
Map* one_word_filler = heap()->one_pointer_filler_map();
......
This diff is collapsed.
......@@ -20,7 +20,6 @@ namespace v8 {
namespace internal {
// Forward declarations.
class CodeFlusher;
class EvacuationJobTraits;
class HeapObjectVisitor;
class LocalWorkStealingMarkingDeque;
......@@ -117,62 +116,6 @@ class ObjectMarking : public AllStatic {
DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bailout
// into the unoptimized code again during deoptimization.
class CodeFlusher {
public:
explicit CodeFlusher(Isolate* isolate)
: isolate_(isolate),
jsfunction_candidates_head_(nullptr),
shared_function_info_candidates_head_(nullptr) {}
inline void AddCandidate(SharedFunctionInfo* shared_info);
inline void AddCandidate(JSFunction* function);
void EvictCandidate(SharedFunctionInfo* shared_info);
void EvictCandidate(JSFunction* function);
void ProcessCandidates() {
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
inline void VisitListHeads(RootVisitor* v);
template <typename StaticVisitor>
inline void IteratePointersToFromSpace();
private:
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
static inline JSFunction* GetNextCandidate(JSFunction* candidate);
static inline void SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate);
static inline void ClearNextCandidate(JSFunction* candidate,
Object* undefined);
static inline SharedFunctionInfo* GetNextCandidate(
SharedFunctionInfo* candidate);
static inline void SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate);
static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
class MarkBitCellIterator BASE_EMBEDDED {
public:
MarkBitCellIterator(MemoryChunk* chunk, MarkingState state) : chunk_(chunk) {
......@@ -513,9 +456,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void AbortCompaction();
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
return Page::FromAddress(reinterpret_cast<Address>(host))
->ShouldSkipEvacuationSlotRecording();
......@@ -596,12 +536,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Finishes GC, performs heap verification if enabled.
void Finish();
// Mark code objects that are active on the stack to prevent them
// from being flushed.
void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
void PrepareForCodeFlushing();
void MarkLiveObjects() override;
// Pushes a black object onto the marking stack and accounts for live bytes.
......@@ -743,8 +677,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
// Candidates for pages that should be evacuated.
List<Page*> evacuation_candidates_;
// Pages that are actually processed during evacuation.
......
......@@ -422,49 +422,16 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, shared)) {
// This function's code looks flushable. But we have to postpone
// the decision until we see all functions that point to the same
// SharedFunctionInfo because some of them might be optimized.
// That would also make the non-optimized version of the code
// non-flushable, because it is required for bailing out from
// optimized code.
collector->code_flusher()->AddCandidate(shared);
// Treat the reference to the code object weakly.
VisitSharedFunctionInfoWeakCode(map, object);
return;
}
}
VisitSharedFunctionInfoStrongCode(map, object);
FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
void>::Visit(map, object);
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
HeapObject* object) {
Heap* heap = map->GetHeap();
JSFunction* function = JSFunction::cast(object);
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, function)) {
// This function's code looks flushable. But we have to postpone
// the decision until we see all functions that point to the same
// SharedFunctionInfo because some of them might be optimized.
// That would also make the non-optimized version of the code
// non-flushable, because it is required for bailing out from
// optimized code.
collector->code_flusher()->AddCandidate(function);
// Treat the reference to the code object weakly.
VisitJSFunctionWeakCode(map, object);
return;
} else {
// Visit all unoptimized code objects to prevent flushing them.
StaticVisitor::MarkObject(heap, function->shared()->code());
}
}
VisitJSFunctionStrongCode(map, object);
FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorStrongCode,
void>::Visit(map, object);
}
template <typename StaticVisitor>
......@@ -508,134 +475,6 @@ inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
(reinterpret_cast<Script*>(info->script())->source() != undefined);
}
template <typename StaticVisitor>
bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
JSFunction* function) {
SharedFunctionInfo* shared_info = function->shared();
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
if (ObjectMarking::IsBlackOrGrey(function->code(),
MarkingState::Internal(function->code()))) {
return false;
}
// We do not (yet) flush code for optimized functions.
if (function->code() != shared_info->code()) {
return false;
}
// Check age of optimized code.
if (FLAG_age_code && !function->code()->IsOld()) {
return false;
}
return IsFlushable(heap, shared_info);
}
template <typename StaticVisitor>
bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
Heap* heap, SharedFunctionInfo* shared_info) {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
if (ObjectMarking::IsBlackOrGrey(
shared_info->code(), MarkingState::Internal(shared_info->code()))) {
return false;
}
// The function must be compiled and have the source code available,
// to be able to recompile it in case we need the function again.
if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
return false;
}
// We never flush code for API functions.
if (shared_info->IsApiFunction()) {
return false;
}
// Only flush code for functions.
if (shared_info->code()->kind() != Code::FUNCTION) {
return false;
}
// Function must be lazy compilable.
if (!shared_info->allows_lazy_compilation()) {
return false;
}
// We do not (yet?) flush code for generator functions, or async functions,
// because we don't know if there are still live activations
// (generator objects) on the heap.
if (IsResumableFunction(shared_info->kind())) {
return false;
}
// If this is a full script wrapped in a function we do not flush the code.
if (shared_info->is_toplevel()) {
return false;
}
// The function must be user code.
if (!shared_info->IsUserJavaScript()) {
return false;
}
// Maintain debug break slots in the code.
if (shared_info->HasDebugCode()) {
return false;
}
// If this is a function initialized with %SetCode then the one-to-one
// relation between SharedFunctionInfo and Code is broken.
if (shared_info->dont_flush()) {
return false;
}
// Check age of code. If code aging is disabled we never flush.
if (!FLAG_age_code || !shared_info->code()->IsOld()) {
return false;
}
return true;
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
Map* map, HeapObject* object) {
FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
void>::Visit(map, object);
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
Map* map, HeapObject* object) {
// Skip visiting kCodeOffset as it is treated weakly here.
STATIC_ASSERT(SharedFunctionInfo::kCodeOffset <
SharedFunctionInfo::BodyDescriptorWeakCode::kStartOffset);
FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptorWeakCode,
void>::Visit(map, object);
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
Map* map, HeapObject* object) {
typedef FlexibleBodyVisitor<StaticVisitor,
JSFunction::BodyDescriptorStrongCode,
void> JSFunctionStrongCodeBodyVisitor;
JSFunctionStrongCodeBodyVisitor::Visit(map, object);
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
Map* map, HeapObject* object) {
typedef FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
void> JSFunctionWeakCodeBodyVisitor;
JSFunctionWeakCodeBodyVisitor::Visit(map, object);
}
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
Map* map = object->map();
......
......@@ -292,17 +292,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Mark pointers in a Map treating some elements of the descriptor array weak.
static void MarkMapContents(Heap* heap, Map* map);
// Code flushing support.
INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
// Helpers used by code flushing support that visit pointer fields and treat
// references to code objects either strongly or weakly.
static void VisitSharedFunctionInfoStrongCode(Map* map, HeapObject* object);
static void VisitSharedFunctionInfoWeakCode(Map* map, HeapObject* object);
static void VisitJSFunctionStrongCode(Map* map, HeapObject* object);
static void VisitJSFunctionWeakCode(Map* map, HeapObject* object);
class DataObjectVisitor {
public:
template <int size>
......
......@@ -6043,7 +6043,6 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, must_use_ignition_turbo,
kMustUseIgnitionTurbo)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
kIsAsmWasmBroken)
......@@ -6147,14 +6146,6 @@ void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
void SharedFunctionInfo::ReplaceCode(Code* value) {
// If the GC metadata field is already used then the function was
// enqueued as a code flushing candidate and we remove it now.
if (code()->gc_metadata() != NULL) {
CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
flusher->EvictCandidate(this);
}
DCHECK(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
#ifdef DEBUG
Code::VerifyRecompiledCode(code(), value);
#endif // DEBUG
......@@ -6755,7 +6746,6 @@ void Code::WipeOutHeader() {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
}
WRITE_FIELD(this, kNextCodeLinkOffset, NULL);
WRITE_FIELD(this, kGCMetadataOffset, NULL);
}
......@@ -6793,7 +6783,6 @@ void Code::set_stub_key(uint32_t key) {
}
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
INT_ACCESSORS(Code, ic_age, kICAgeOffset)
......
......@@ -3751,11 +3751,6 @@ class Code: public HeapObject {
// Note that storage for this field is overlapped with typefeedback_info.
DECL_ACCESSORS(next_code_link, Object)
// [gc_metadata]: Field used to hold GC related metadata. The contents of this
// field does not have to be traced during garbage collection since
// it is only used by the garbage collector itself.
DECL_ACCESSORS(gc_metadata, Object)
// [ic_age]: Inline caching age: the value of the Heap::global_ic_age
// at the moment when this object was created.
inline void set_ic_age(int count);
......@@ -4065,9 +4060,8 @@ class Code: public HeapObject {
#undef DECLARE_CODE_AGE_ENUM
// Code aging. Indicates how many full GCs this code has survived without
// being entered through the prologue. Used to determine when it is
// relatively safe to flush this code object and replace it with the lazy
// compilation stub.
// being entered through the prologue. Used to determine when to flush code
// held in the compilation cache.
static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
void MakeYoung(Isolate* isolate);
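
The updated comment above describes code age as a count of full GCs survived without the code being entered. The real age is encoded in the generated prologue sequence (see MakeCodeAgeSequenceYoung / MarkCodeAsExecuted), but conceptually it behaves like this small, hypothetical sketch:

  // Hypothetical illustration only: V8 keeps the age inside the code object's
  // prologue sequence rather than in a plain field, and uses its own threshold.
  struct CodeAgeSketch {
    static constexpr int kIsOldThreshold = 5;  // assumed value, not V8's constant

    int age = 0;  // full GCs survived since the code was last entered

    void MakeOlder() { ++age; }       // bumped once per full (mark-compact) GC
    void MarkExecuted() { age = 0; }  // entering the prologue resets the age
    bool IsOld() const { return age >= kIsOldThreshold; }
  };

With flushing removed, IsOld()-style checks now only feed the compilation cache aging earlier in this diff, rather than deciding whether to discard the compiled code itself.
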
......@@ -4117,8 +4111,7 @@ class Code: public HeapObject {
static const int kTypeFeedbackInfoOffset =
kSourcePositionTableOffset + kPointerSize;
static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
static const int kInstructionSizeOffset = kGCMetadataOffset + kPointerSize;
static const int kInstructionSizeOffset = kNextCodeLinkOffset + kPointerSize;
static const int kICAgeOffset = kInstructionSizeOffset + kIntSize;
static const int kFlagsOffset = kICAgeOffset + kIntSize;
static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
......@@ -5273,8 +5266,6 @@ class SharedFunctionInfo: public HeapObject {
inline void set_ic_age(int age);
// Indicates if this function can be lazy compiled.
// This is used to determine if we can safely flush code from a function
// when doing GC if we expect that the function will no longer be used.
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
// Indicates whether optimizations have been disabled for this
......@@ -5312,9 +5303,6 @@ class SharedFunctionInfo: public HeapObject {
// FullCodegen / Crankshaft.
DECL_BOOLEAN_ACCESSORS(must_use_ignition_turbo)
// Indicates that code for this function cannot be flushed.
DECL_BOOLEAN_ACCESSORS(dont_flush)
// Indicates that this function is an asm function.
DECL_BOOLEAN_ACCESSORS(asm_function)
......@@ -5608,12 +5596,12 @@ class SharedFunctionInfo: public HeapObject {
kForceInline,
kIsAsmFunction,
kMustUseIgnitionTurbo,
kDontFlush,
kIsDeclaration,
kIsAsmWasmBroken,
kHasConcurrentOptimizationJob,
kUnused1, // Unused fields.
kUnused2,
// byte 2
kFunctionKind,
......@@ -6146,8 +6134,7 @@ class JSFunction: public JSObject {
inline bool is_compiled();
// [next_function_link]: Links functions into various lists, e.g. the list
// of optimized functions hanging off the native_context. The CodeFlusher
// uses this link to chain together flushing candidates. Treated weakly
// of optimized functions hanging off the native_context. Treated weakly
// by the garbage collector.
DECL_ACCESSORS(next_function_link, Object)
......@@ -6587,9 +6574,9 @@ class JSPromise : public JSObject {
// - a reference to a literal string to search for
// If it is an irregexp regexp:
// - a reference to code for Latin1 inputs (bytecode or compiled), or a smi
// used for tracking the last usage (used for code flushing).
// used for tracking the last usage (used for regexp code flushing).
// - a reference to code for UC16 inputs (bytecode or compiled), or a smi
// used for tracking the last usage (used for code flushing)..
// used for tracking the last usage (used for regexp code flushing).
// - max number of registers used by irregexp implementations.
// - number of capture registers (output values) of the regexp.
class JSRegExp: public JSObject {
......
......@@ -1446,8 +1446,6 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
code->type_feedback_info(),
Code::kTypeFeedbackInfoOffset);
}
SetInternalReference(code, entry, "gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
}
void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
......
......@@ -172,13 +172,6 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
return isolate->heap()->exception();
}
// Mark both, the source and the target, as un-flushable because the
// shared unoptimized code makes them impossible to enqueue in a list.
DCHECK(target_shared->code()->gc_metadata() == NULL);
DCHECK(source_shared->code()->gc_metadata() == NULL);
target_shared->set_dont_flush(true);
source_shared->set_dont_flush(true);
// Set the code, scope info, formal parameter count, and the length
// of the target shared function info.
target_shared->ReplaceCode(source_shared->code());
......
This diff is collapsed.
......@@ -84,12 +84,10 @@ INTERESTING_NEW_GEN_KEYS="\
weak \
roots \
old_new \
code \
semispace \
"
INTERESTING_OLD_GEN_KEYS="\
clear.code_flush \
clear.dependent_code \
clear.global_handles \
clear.maps \
......@@ -112,7 +110,6 @@ INTERESTING_OLD_GEN_KEYS="\
external.mc_incremental_epilogue \
external.weak_global_handles \
mark.finish_incremental \
mark.prepare_code_flush \
mark.roots \
mark.weak_closure \
mark.weak_closure.ephemeral \
......