Commit f7b6e381 authored by mvstanton, committed by Commit bot

Maintain a FixedArray for the optimized code map.

This simplifies follow-on changes to the FastNewClosureStub.

BUG=

Review URL: https://codereview.chromium.org/1433923002

Cr-Commit-Position: refs/heads/master@{#32123}
parent 58686b97
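The change below retires the Smi(0) sentinel that SharedFunctionInfo::optimized_code_map used to hold when empty; a dedicated, always-present FixedArray root (cleared_optimized_code_map) takes its place, so the accessor can be typed as FixedArray and callers test SharedFunctionInfo::OptimizedCodeMapIsCleared() instead of IsSmi(). A minimal standalone sketch of that sentinel-object idiom, in plain C++ with illustrative names rather than real V8 types, assuming only what the diff itself shows:

#include <cassert>
#include <vector>

// Stand-in for a FixedArray of tagged slots (illustrative only).
using FixedArray = std::vector<void*>;

// Plays the role of the cleared_optimized_code_map heap root: one shared,
// well-typed "empty" array used by every cleared function.
FixedArray* ClearedOptimizedCodeMap() {
  static FixedArray cleared(1, nullptr);  // length kEntriesStart == 1
  return &cleared;
}

struct SharedFunctionInfo {
  // After the patch the field is always a FixedArray, never a Smi sentinel.
  FixedArray* optimized_code_map = ClearedOptimizedCodeMap();

  // Identity comparison against the root replaces the old IsSmi() check.
  bool OptimizedCodeMapIsCleared() const {
    return optimized_code_map == ClearedOptimizedCodeMap();
  }

  void ClearOptimizedCodeMap() { optimized_code_map = ClearedOptimizedCodeMap(); }
};

int main() {
  SharedFunctionInfo sfi;
  assert(sfi.OptimizedCodeMapIsCleared());

  FixedArray real_map(1 + 4);  // a map with one real entry appended
  sfi.optimized_code_map = &real_map;
  assert(!sfi.OptimizedCodeMapIsCleared());

  sfi.ClearOptimizedCodeMap();
  assert(sfi.OptimizedCodeMapIsCleared());
  return 0;
}

The point of the identity comparison is that every cleared function shares the same array, so the field needs no per-function storage and no runtime type check.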
@@ -1307,7 +1307,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
   {
     SharedFunctionInfo::Iterator iterator(isolate_);
     while (SharedFunctionInfo* shared = iterator.Next()) {
-      if (!shared->optimized_code_map()->IsSmi()) {
+      if (!shared->OptimizedCodeMapIsCleared()) {
         shared->ClearOptimizedCodeMap();
       }
     }
...
@@ -2067,7 +2067,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     code = handle(isolate()->builtins()->builtin(Builtins::kIllegal));
   }
   share->set_code(*code);
-  share->set_optimized_code_map(Smi::FromInt(0));
+  share->set_optimized_code_map(*cleared_optimized_code_map());
   share->set_scope_info(ScopeInfo::Empty(isolate()));
   Code* construct_stub =
       isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
...
@@ -2764,6 +2764,14 @@ void Heap::CreateInitialObjects() {
     set_dummy_vector(*dummy_vector);
   }
 
+  {
+    Handle<FixedArray> cleared_optimized_code_map =
+        factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
+    STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
+                  SharedFunctionInfo::kSharedCodeIndex == 0);
+    set_cleared_optimized_code_map(*cleared_optimized_code_map);
+  }
+
   set_detached_contexts(empty_fixed_array());
   set_retained_maps(ArrayList::cast(empty_fixed_array()));
...
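One detail worth noting in the hunk above: the cleared root is allocated with length SharedFunctionInfo::kEntriesStart (asserted to be 1, with kSharedCodeIndex == 0), and the entry scans elsewhere in this patch start at kEntriesStart, so on a map of exactly that length they fall through without iterating. A rough standalone model of that invariant (illustrative, not V8 code; the {context, code, literals, osr ast id} entry shape is assumed from the offsets used later in the patch):

#include <cassert>
#include <vector>

// Values as asserted in the patch.
constexpr int kSharedCodeIndex = 0;  // slot for context-independent code
constexpr int kEntriesStart = 1;     // context-specific entries begin here
constexpr int kEntryLength = 4;      // assumed entry shape, see lead-in

// Count context-specific entries in a code-map-shaped array.
int CountEntries(const std::vector<int>& code_map) {
  int entries = 0;
  for (int i = kEntriesStart; i < static_cast<int>(code_map.size());
       i += kEntryLength) {
    ++entries;
  }
  return entries;
}

int main() {
  // The cleared shape: exactly kEntriesStart slots, so the scan above never
  // runs and slot 0 (kSharedCodeIndex) simply holds the default filler.
  std::vector<int> cleared(kEntriesStart, 0);
  // A map with one real entry appended after the shared-code slot.
  std::vector<int> populated(kEntriesStart + kEntryLength, 0);

  assert(CountEntries(cleared) == 0);
  assert(CountEntries(populated) == 1);
  return 0;
}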
@@ -179,6 +179,7 @@ namespace internal {
   V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)      \
   V(FixedArray, microtask_queue, MicrotaskQueue)                             \
   V(TypeFeedbackVector, dummy_vector, DummyVector)                           \
+  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)         \
   V(FixedArray, detached_contexts, DetachedContexts)                         \
   V(ArrayList, retained_maps, RetainedMaps)                                  \
   V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)         \
...
@@ -922,7 +922,7 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
         PrintF(" - age: %d]\n", code->GetAge());
       }
       // Always flush the optimized code map if there is one.
-      if (!shared->optimized_code_map()->IsSmi()) {
+      if (!shared->OptimizedCodeMapIsCleared()) {
         shared->ClearOptimizedCodeMap();
       }
       shared->set_code(lazy_compile);
...
@@ -969,7 +969,7 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
         PrintF(" - age: %d]\n", code->GetAge());
       }
       // Always flush the optimized code map if there is one.
-      if (!candidate->optimized_code_map()->IsSmi()) {
+      if (!candidate->OptimizedCodeMapIsCleared()) {
         candidate->ClearOptimizedCodeMap();
       }
       candidate->set_code(lazy_compile);
...
@@ -2129,10 +2129,10 @@ void MarkCompactCollector::AfterMarking() {
 void MarkCompactCollector::ProcessAndClearOptimizedCodeMaps() {
   SharedFunctionInfo::Iterator iterator(isolate());
   while (SharedFunctionInfo* shared = iterator.Next()) {
-    if (shared->optimized_code_map()->IsSmi()) continue;
+    if (shared->OptimizedCodeMapIsCleared()) continue;
 
     // Process context-dependent entries in the optimized code map.
-    FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+    FixedArray* code_map = shared->optimized_code_map();
     int new_length = SharedFunctionInfo::kEntriesStart;
     int old_length = code_map->length();
     for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
...
@@ -451,16 +451,16 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
     shared->ClearTypeFeedbackInfoAtGCTime();
   }
   if (FLAG_flush_optimized_code_cache) {
-    if (!shared->optimized_code_map()->IsSmi()) {
+    if (!shared->OptimizedCodeMapIsCleared()) {
       // Always flush the optimized code map if requested by flag.
       shared->ClearOptimizedCodeMap();
     }
   } else {
-    if (!shared->optimized_code_map()->IsSmi()) {
+    if (!shared->OptimizedCodeMapIsCleared()) {
       // Treat some references within the code map weakly by marking the
       // code map itself but not pushing it onto the marking deque. The
       // map will be processed after marking.
-      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+      FixedArray* code_map = shared->optimized_code_map();
       MarkOptimizedCodeMap(heap, code_map);
     }
   }
...
@@ -5720,8 +5720,8 @@ SMI_ACCESSORS(BreakPointInfo, statement_position, kStatementPositionIndex)
 ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
 
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
+ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
           kOptimizedCodeMapOffset)
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
 ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
           kFeedbackVectorOffset)
...
@@ -6168,6 +6168,11 @@ bool SharedFunctionInfo::IsBuiltin() {
 
 bool SharedFunctionInfo::IsSubjectToDebugging() { return !IsBuiltin(); }
 
+
+bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
+  return optimized_code_map() == GetHeap()->cleared_optimized_code_map();
+}
+
 bool JSFunction::IsOptimized() {
   return code()->kind() == Code::OPTIMIZED_FUNCTION;
 }
...
@@ -11312,10 +11312,9 @@ void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
   Isolate* isolate = shared->GetIsolate();
   if (isolate->serializer_enabled()) return;
   DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
-  Handle<Object> value(shared->optimized_code_map(), isolate);
-  if (value->IsSmi()) return;  // Empty code maps are unsupported.
-  Handle<FixedArray> code_map = Handle<FixedArray>::cast(value);
-  code_map->set(kSharedCodeIndex, *code);
+  // Empty code maps are unsupported.
+  if (shared->OptimizedCodeMapIsCleared()) return;
+  shared->optimized_code_map()->set(kSharedCodeIndex, *code);
 }
@@ -11332,15 +11331,12 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
   DCHECK(native_context->IsNativeContext());
   STATIC_ASSERT(kEntryLength == 4);
   Handle<FixedArray> new_code_map;
-  Handle<Object> value(shared->optimized_code_map(), isolate);
   int entry;
-  if (value->IsSmi()) {
-    // No optimized code map.
-    DCHECK_EQ(0, Smi::cast(*value)->value());
+  if (shared->OptimizedCodeMapIsCleared()) {
     new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
     entry = kEntriesStart;
   } else {
-    Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
+    Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
     entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
     if (entry > kSharedCodeIndex) {
       // Found an existing context-specific entry, it must not contain any code.
@@ -11358,7 +11354,7 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
     // TODO(mstarzinger): Temporary workaround. The allocation above might have
     // flushed the optimized code map and the copy we created is full of holes.
     // For now we just give up on adding the entry and pretend it got flushed.
-    if (shared->optimized_code_map()->IsSmi()) return;
+    if (shared->OptimizedCodeMapIsCleared()) return;
     entry = old_code_map->length();
   }
   new_code_map->set(entry + kContextOffset, *native_context);
@@ -11380,8 +11376,8 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
 #endif
 
   // Zap any old optimized code map.
-  if (!shared->optimized_code_map()->IsSmi()) {
-    FixedArray* old_code_map = FixedArray::cast(shared->optimized_code_map());
+  if (!shared->OptimizedCodeMapIsCleared()) {
+    FixedArray* old_code_map = shared->optimized_code_map();
     old_code_map->FillWithHoles(0, old_code_map->length());
   }
@@ -11391,22 +11387,23 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
 
 void SharedFunctionInfo::ClearOptimizedCodeMap() {
   // Zap any old optimized code map.
-  if (!optimized_code_map()->IsSmi()) {
-    FixedArray* old_code_map = FixedArray::cast(optimized_code_map());
+  if (!OptimizedCodeMapIsCleared()) {
+    FixedArray* old_code_map = optimized_code_map();
     old_code_map->FillWithHoles(0, old_code_map->length());
   }
-  set_optimized_code_map(Smi::FromInt(0));
+  FixedArray* cleared_map = GetHeap()->cleared_optimized_code_map();
+  set_optimized_code_map(cleared_map, SKIP_WRITE_BARRIER);
 }
 
 
 void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
                                                    const char* reason) {
   DisallowHeapAllocation no_gc;
-  if (optimized_code_map()->IsSmi()) return;
+  if (OptimizedCodeMapIsCleared()) return;
   Heap* heap = GetHeap();
-  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+  FixedArray* code_map = optimized_code_map();
   int dst = kEntriesStart;
   int length = code_map->length();
   for (int src = kEntriesStart; src < length; src += kEntryLength) {
@@ -11465,7 +11462,7 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
 
 void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
-  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+  FixedArray* code_map = optimized_code_map();
   DCHECK(shrink_by % kEntryLength == 0);
   DCHECK(shrink_by <= code_map->length() - kEntriesStart);
   // Always trim even when array is cleared because of heap verifier.
@@ -12607,9 +12604,8 @@ int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
                                                     BailoutId osr_ast_id) {
   DisallowHeapAllocation no_gc;
   DCHECK(native_context->IsNativeContext());
-  Object* value = optimized_code_map();
-  if (!value->IsSmi()) {
-    FixedArray* optimized_code_map = FixedArray::cast(value);
+  if (!OptimizedCodeMapIsCleared()) {
+    FixedArray* optimized_code_map = this->optimized_code_map();
     int length = optimized_code_map->length();
     Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
     for (int i = kEntriesStart; i < length; i += kEntryLength) {
@@ -12632,7 +12628,7 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
   CodeAndLiterals result = {nullptr, nullptr};
   int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
   if (entry != kNotFound) {
-    FixedArray* code_map = FixedArray::cast(optimized_code_map());
+    FixedArray* code_map = optimized_code_map();
     if (entry == kSharedCodeIndex) {
       result = {Code::cast(code_map->get(kSharedCodeIndex)), nullptr};
...
@@ -12643,7 +12639,7 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
                 LiteralsArray::cast(code_map->get(entry + kLiteralsOffset))};
     }
   }
-  if (FLAG_trace_opt && !optimized_code_map()->IsSmi() &&
+  if (FLAG_trace_opt && !OptimizedCodeMapIsCleared() &&
       result.code == nullptr) {
     PrintF("[didn't find optimized code in optimized code map for ");
     ShortPrint();
...
@@ -6483,8 +6483,8 @@ class SharedFunctionInfo: public HeapObject {
   inline void ReplaceCode(Code* code);
 
   // [optimized_code_map]: Map from native context to optimized code
-  // and a shared literals array or Smi(0) if none.
-  DECL_ACCESSORS(optimized_code_map, Object)
+  // and a shared literals array.
+  DECL_ACCESSORS(optimized_code_map, FixedArray)
 
   // Returns entry from optimized code map for specified context and OSR entry.
   // Note that {code == nullptr, literals == nullptr} indicates no matching
...
@@ -6496,6 +6496,11 @@ class SharedFunctionInfo: public HeapObject {
   // Clear optimized code map.
   void ClearOptimizedCodeMap();
 
+  // We have a special root FixedArray with the right shape and values
+  // to represent the cleared optimized code map. This predicate checks
+  // if that root is installed.
+  inline bool OptimizedCodeMapIsCleared() const;
+
   // Removes a specific optimized code object from the optimized code map.
   // In case of non-OSR the code reference is cleared from the cache entry but
   // the entry itself is left in the map in order to proceed sharing literals.
...
@@ -392,7 +392,8 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
   // TODO(lrn): Create bound function in C++ code from premade shared info.
   bound_function->shared()->set_bound(true);
-  bound_function->shared()->set_optimized_code_map(Smi::FromInt(0));
+  bound_function->shared()->set_optimized_code_map(
+      isolate->heap()->cleared_optimized_code_map());
   bound_function->shared()->set_inferred_name(isolate->heap()->empty_string());
   // Get all arguments of calling function (Function.prototype.bind).
   int argc = 0;
...