Commit 4dd45e11 authored by ishell, committed by Commit bot

Share literals arrays per <NativeContext, SharedFunctionInfo> pair.

This CL also renames a wrongly named test for v8:4173.

BUG=v8:4121
LOG=Y

Review URL: https://codereview.chromium.org/1353363002

Cr-Commit-Position: refs/heads/master@{#30879}
parent c602a91c
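For orientation: a SharedFunctionInfo's optimized code map is a flat FixedArray of 4-slot entries (context, cached code, literals, OSR AST id), plus one leading slot for context-independent shared code. The point of this CL is that an entry's code slot may now hold undefined while its literals slot is populated, so a <native context, OSR id> key can map to a literals array alone. A minimal standalone C++ model of that layout — the slot offsets 0..3 are assumptions for illustration; only kEntryLength == 4 is asserted by the real code below:

#include <cassert>
#include <cstdio>
#include <vector>

// Standalone model of SharedFunctionInfo's optimized code map after this CL.
// Each entry is kEntryLength slots: context, cached code, literals, OSR id.
// Slot values are modeled as opaque ints; 0 plays the role of 'undefined'.
struct CodeMapModel {
  static const int kEntryLength = 4;
  static const int kContextOffset = 0;
  static const int kCachedCodeOffset = 1;  // may now be 'undefined'
  static const int kLiteralsOffset = 2;    // shared literals array
  static const int kOsrAstIdOffset = 3;
  std::vector<int> slots;

  // Append an entry; code == 0 means "literals only, no code yet".
  void Add(int context, int code, int literals, int osr_id) {
    slots.push_back(context);
    slots.push_back(code);
    slots.push_back(literals);
    slots.push_back(osr_id);
  }
};

int main() {
  CodeMapModel map;
  map.Add(/*context=*/1, /*code=*/0, /*literals=*/7, /*osr_id=*/-1);
  assert(map.slots[CodeMapModel::kCachedCodeOffset] == 0);  // no code cached
  assert(map.slots[CodeMapModel::kLiteralsOffset] == 7);    // literals shared
  std::printf("entry holds literals without code\n");
  return 0;
}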
@@ -1731,13 +1731,15 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
       optimized_map, map_index, SharedFunctionInfo::kContextOffset);
   HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
       optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
+  HValue* code_object = LoadFromOptimizedCodeMap(
+      optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
   builder->If<HCompareObjectEqAndBranch>(native_context,
                                          context_slot);
   builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
+  builder->And();
+  builder->IfNot<HCompareObjectEqAndBranch>(code_object,
+                                            graph()->GetConstantUndefined());
   builder->Then();
-  HValue* code_object = LoadFromOptimizedCodeMap(optimized_map,
-      map_index, SharedFunctionInfo::kCachedCodeOffset);
   // and the literals
   HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
       map_index, SharedFunctionInfo::kLiteralsOffset);
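The hunk above changes the fast path built by the code-stub graph builder: the cached code object is now loaded before the branch, and an entry is only installable when, in addition to the context and OSR id matching, its code slot is not undefined — a literals-only entry has nothing to install. A scalar sketch of the new condition; Entry and the sentinel are illustrative stand-ins, not V8 types:

#include <cstdio>

// Scalar sketch of the condition the stub graph now encodes: install only if
// the entry's context and OSR id match AND its code slot is not undefined
// (it is undefined when only the literals array has been cached so far).
struct Entry {
  int context;
  int osr_ast_id;
  const void* code;  // nullptr models the undefined sentinel
};

bool CanInstallOptimizedCode(const Entry& e, int native_context,
                             int osr_ast_id_none) {
  return e.context == native_context && e.osr_ast_id == osr_ast_id_none &&
         e.code != nullptr;
}

int main() {
  Entry literals_only = {/*context=*/1, /*osr_ast_id=*/-1, /*code=*/nullptr};
  // Matching context and OSR id is no longer sufficient:
  std::printf("install? %d\n", CanInstallOptimizedCode(literals_only, 1, -1));
  return 0;
}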
@@ -1314,17 +1314,24 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
       context->native_context(), BailoutId::None());
   if (cached.code != nullptr) {
     // Caching of optimized code enabled and optimized code found.
-    if (cached.literals != nullptr) result->set_literals(cached.literals);
     DCHECK(!cached.code->marked_for_deoptimization());
     DCHECK(result->shared()->is_compiled());
     result->ReplaceCode(cached.code);
   }
-  if (cached.literals == nullptr && !info->bound()) {
+  if (cached.literals != nullptr) {
+    result->set_literals(cached.literals);
+  } else if (!info->bound()) {
     int number_of_literals = info->num_literals();
-    // TODO(mstarzinger): Consider sharing the newly created literals array.
     Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
     result->set_literals(*literals);
+    // Cache context-specific literals.
+    if (FLAG_cache_optimized_code) {
+      Handle<Context> native_context(context->native_context());
+      SharedFunctionInfo::AddToOptimizedCodeMap(
+          info, native_context, undefined_value(), literals, BailoutId::None());
+    }
   }
   return result;
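With the factory change above, the first closure created for a given <SharedFunctionInfo, native context> pair allocates a fresh literals array and publishes it in the optimized code map with undefined in the code slot; later closures for the same pair pick the array up from the cache. A rough standalone model of that get-or-create flow — CacheKey and g_literals_cache are names invented for this sketch:

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

// Rough model of the new factory behavior: the first closure created for a
// <shared function, native context> pair allocates the literals array and
// publishes it; later closures for the same pair reuse the same array.
using Literals = std::vector<int>;
using CacheKey = std::pair<int /*shared_id*/, int /*context_id*/>;
static std::map<CacheKey, Literals> g_literals_cache;

Literals* GetOrCreateLiterals(int shared_id, int context_id, int num_literals) {
  CacheKey key{shared_id, context_id};
  auto it = g_literals_cache.find(key);
  if (it != g_literals_cache.end()) return &it->second;  // shared hit
  // Miss: allocate and cache (the real code stores it with undefined code).
  auto inserted = g_literals_cache.emplace(key, Literals(num_literals, 0));
  return &inserted.first->second;
}

int main() {
  Literals* a = GetOrCreateLiterals(/*shared_id=*/42, /*context_id=*/1, 3);
  Literals* b = GetOrCreateLiterals(42, 1, 3);
  std::printf("same array shared: %d\n", a == b);  // prints 1
  return 0;
}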
@@ -1013,8 +1013,8 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
       STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
       Context* context =
           Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
-      Code* code =
-          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+      HeapObject* code = HeapObject::cast(
+          code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
       FixedArray* literals = FixedArray::cast(
           code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
       Smi* ast_id =
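The CodeFlusher tweak above exists because the cached-code slot may now legitimately hold the undefined sentinel, so an unconditional Code::cast would no longer be safe. A tiny model of the defensive read — Kind and Object here are stand-ins, not V8 classes:

#include <cstdio>

// The cached-code slot may now hold either optimized code or the undefined
// sentinel, so the flusher reads it as a generic heap object first.
enum class Kind { kUndefined, kCode };
struct Object { Kind kind; };

void ProcessCachedCodeSlot(const Object* slot) {
  // The old equivalent of Code::cast(slot) assumed kCode unconditionally;
  // that assumption no longer holds for literals-only entries.
  if (slot->kind == Kind::kCode) {
    std::printf("code present: flushing candidate\n");
  } else {
    std::printf("literals-only entry: nothing to flush\n");
  }
}

int main() {
  Object undefined_sentinel{Kind::kUndefined};
  Object code{Kind::kCode};
  ProcessCachedCodeSlot(&undefined_sentinel);
  ProcessCachedCodeSlot(&code);
  return 0;
}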
@@ -10137,47 +10137,60 @@ void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
 void SharedFunctionInfo::AddToOptimizedCodeMap(
-    Handle<SharedFunctionInfo> shared,
-    Handle<Context> native_context,
-    Handle<Code> code,
-    Handle<FixedArray> literals,
+    Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
+    Handle<HeapObject> code, Handle<FixedArray> literals,
     BailoutId osr_ast_id) {
   Isolate* isolate = shared->GetIsolate();
-  DCHECK(!shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
-  DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
+  DCHECK(*code == isolate->heap()->undefined_value() ||
+         !shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
+  DCHECK(*code == isolate->heap()->undefined_value() ||
+         Code::cast(*code)->kind() == Code::OPTIMIZED_FUNCTION);
   DCHECK(native_context->IsNativeContext());
   STATIC_ASSERT(kEntryLength == 4);
   Handle<FixedArray> new_code_map;
   Handle<Object> value(shared->optimized_code_map(), isolate);
-  int old_length;
+  int entry;
   if (value->IsSmi()) {
     // No optimized code map.
     DCHECK_EQ(0, Smi::cast(*value)->value());
     new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
-    old_length = kEntriesStart;
+    entry = kEntriesStart;
   } else {
-    // Copy old optimized code map and append one new entry.
     Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
+    entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
+    if (entry > kSharedCodeIndex) {
+      // Found an existing context-specific entry, it must not contain any code.
+      DCHECK_EQ(isolate->heap()->undefined_value(),
+                old_code_map->get(entry + kCachedCodeOffset));
+      // Just set the code and literals to the entry.
+      old_code_map->set(entry + kCachedCodeOffset, *code);
+      old_code_map->set(entry + kLiteralsOffset, *literals);
+      return;
+    }
+    // Copy old optimized code map and append one new entry.
     new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
         old_code_map, kEntryLength, TENURED);
-    old_length = old_code_map->length();
+    int old_length = old_code_map->length();
     // Zap the old map to avoid any stale entries. Note that this is required
     // for correctness because entries are being treated weakly by the GC.
     MemsetPointer(old_code_map->data_start(), isolate->heap()->the_hole_value(),
                   old_length);
+    entry = old_length;
   }
-  new_code_map->set(old_length + kContextOffset, *native_context);
-  new_code_map->set(old_length + kCachedCodeOffset, *code);
-  new_code_map->set(old_length + kLiteralsOffset, *literals);
-  new_code_map->set(old_length + kOsrAstIdOffset,
-                    Smi::FromInt(osr_ast_id.ToInt()));
+  new_code_map->set(entry + kContextOffset, *native_context);
+  new_code_map->set(entry + kCachedCodeOffset, *code);
+  new_code_map->set(entry + kLiteralsOffset, *literals);
+  new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
 #ifdef DEBUG
   for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
     DCHECK(new_code_map->get(i + kContextOffset)->IsNativeContext());
-    DCHECK(new_code_map->get(i + kCachedCodeOffset)->IsCode());
-    DCHECK(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
-           Code::OPTIMIZED_FUNCTION);
+    Object* code = new_code_map->get(i + kCachedCodeOffset);
+    if (code != isolate->heap()->undefined_value()) {
+      DCHECK(code->IsCode());
+      DCHECK(Code::cast(code)->kind() == Code::OPTIMIZED_FUNCTION);
+    }
     DCHECK(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
     DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
   }
@@ -10206,37 +10219,43 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
   DisallowHeapAllocation no_gc;
   if (optimized_code_map()->IsSmi()) return;
+  Heap* heap = GetHeap();
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
   int dst = kEntriesStart;
   int length = code_map->length();
   for (int src = kEntriesStart; src < length; src += kEntryLength) {
     DCHECK(code_map->get(src)->IsNativeContext());
-    if (Code::cast(code_map->get(src + kCachedCodeOffset)) == optimized_code) {
-      // Evict the src entry by not copying it to the dst entry.
+    if (code_map->get(src + kCachedCodeOffset) == optimized_code) {
+      BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
       if (FLAG_trace_opt) {
         PrintF("[evicting entry from optimizing code map (%s) for ", reason);
         ShortPrint();
-        BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
         if (osr.IsNone()) {
           PrintF("]\n");
         } else {
           PrintF(" (osr ast id %d)]\n", osr.ToInt());
         }
       }
-    } else {
-      // Keep the src entry by copying it to the dst entry.
-      if (dst != src) {
-        code_map->set(dst + kContextOffset,
-                      code_map->get(src + kContextOffset));
-        code_map->set(dst + kCachedCodeOffset,
-                      code_map->get(src + kCachedCodeOffset));
-        code_map->set(dst + kLiteralsOffset,
-                      code_map->get(src + kLiteralsOffset));
-        code_map->set(dst + kOsrAstIdOffset,
-                      code_map->get(src + kOsrAstIdOffset));
-      }
-      dst += kEntryLength;
+      if (!osr.IsNone()) {
+        // Evict the src entry by not copying it to the dst entry.
+        continue;
+      }
+      // In case of non-OSR entry just clear the code in order to proceed
+      // sharing literals.
+      code_map->set_undefined(src + kCachedCodeOffset);
     }
+    // Keep the src entry by copying it to the dst entry.
+    if (dst != src) {
+      code_map->set(dst + kContextOffset, code_map->get(src + kContextOffset));
+      code_map->set(dst + kCachedCodeOffset,
+                    code_map->get(src + kCachedCodeOffset));
+      code_map->set(dst + kLiteralsOffset,
+                    code_map->get(src + kLiteralsOffset));
+      code_map->set(dst + kOsrAstIdOffset,
+                    code_map->get(src + kOsrAstIdOffset));
+    }
+    dst += kEntryLength;
   }
   if (code_map->get(kSharedCodeIndex) == optimized_code) {
     // Evict context-independent code as well.
@@ -10249,8 +10268,8 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
   }
   if (dst != length) {
     // Always trim even when array is cleared because of heap verifier.
-    GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
-                                                                length - dst);
+    heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
+                                                           length - dst);
     if (code_map->length() == kEntriesStart &&
         code_map->get(kSharedCodeIndex)->IsUndefined()) {
       ClearOptimizedCodeMap();
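The eviction rule above distinguishes the two entry flavors: OSR entries are dropped wholesale, while non-OSR entries only have their code slot cleared so the literals array remains shared with future closures. A standalone model of that rule over a simple entry list:

#include <cassert>
#include <vector>

// Sketch of the new eviction rule: erase OSR entries outright; for non-OSR
// entries clear only the code slot and keep the entry (and its literals).
struct Entry { int context, code, literals, osr_id; };  // code == 0: undefined

void Evict(std::vector<Entry>& map, int dead_code) {
  for (auto it = map.begin(); it != map.end();) {
    if (it->code == dead_code) {
      if (it->osr_id != -1) {  // OSR entry: evict the whole entry
        it = map.erase(it);
        continue;
      }
      it->code = 0;            // non-OSR: clear code, keep shared literals
    }
    ++it;
  }
}

int main() {
  std::vector<Entry> map = {{1, 9, 7, -1}, {1, 9, 7, 5}};
  Evict(map, /*dead_code=*/9);
  assert(map.size() == 1);       // the OSR entry is gone
  assert(map[0].code == 0);      // the non-OSR entry kept, code cleared
  assert(map[0].literals == 7);  // literals still available for sharing
  return 0;
}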
@@ -11272,8 +11291,8 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
 }
 
-CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
-    Context* native_context, BailoutId osr_ast_id) {
+int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
+                                                    BailoutId osr_ast_id) {
   DisallowHeapAllocation no_gc;
   DCHECK(native_context->IsNativeContext());
   Object* value = optimized_code_map();
@@ -11284,21 +11303,41 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
     for (int i = kEntriesStart; i < length; i += kEntryLength) {
       if (optimized_code_map->get(i + kContextOffset) == native_context &&
           optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
-        return {Code::cast(optimized_code_map->get(i + kCachedCodeOffset)),
-                FixedArray::cast(optimized_code_map->get(i + kLiteralsOffset))};
+        return i;
       }
     }
     Object* shared_code = optimized_code_map->get(kSharedCodeIndex);
     if (shared_code->IsCode() && osr_ast_id.IsNone()) {
-      return {Code::cast(shared_code), nullptr};
+      return kSharedCodeIndex;
    }
-    if (FLAG_trace_opt) {
-      PrintF("[didn't find optimized code in optimized code map for ");
-      ShortPrint();
-      PrintF("]\n");
-    }
   }
-  return {nullptr, nullptr};
+  return -1;
+}
+
+CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
+    Context* native_context, BailoutId osr_ast_id) {
+  CodeAndLiterals result = {nullptr, nullptr};
+  int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
+  if (entry != kNotFound) {
+    FixedArray* code_map = FixedArray::cast(optimized_code_map());
+    if (entry == kSharedCodeIndex) {
+      result = {Code::cast(code_map->get(kSharedCodeIndex)), nullptr};
+    } else {
+      DCHECK_LE(entry + kEntryLength, code_map->length());
+      Object* code = code_map->get(entry + kCachedCodeOffset);
+      result = {code->IsUndefined() ? nullptr : Code::cast(code),
+                FixedArray::cast(code_map->get(entry + kLiteralsOffset))};
+    }
+  }
+  if (FLAG_trace_opt && !optimized_code_map()->IsSmi() &&
+      result.code == nullptr) {
+    PrintF("[didn't find optimized code in optimized code map for ");
+    ShortPrint();
+    PrintF("]\n");
+  }
+  return result;
+}
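As the updated header comment in the following objects.h hunk spells out, SearchOptimizedCodeMap's result can now take three meaningful shapes: both fields null (miss), code without literals (context-independent), and — new with this CL — literals without code. A small sketch enumerating them, with const char* standing in for the tagged pointers:

#include <cstdio>

// The three outcome shapes of the optimized-code-map lookup after this CL.
struct CodeAndLiterals { const char* code; const char* literals; };

void Describe(const CodeAndLiterals& r) {
  if (!r.code && !r.literals) std::printf("no matching entry\n");
  else if (r.code && !r.literals) std::printf("context-independent code\n");
  else if (!r.code) std::printf("literals only; compile, then reuse them\n");
  else std::printf("code and literals\n");
}

int main() {
  Describe({nullptr, nullptr});
  Describe({"code", nullptr});
  Describe({nullptr, "literals"});  // the new literals-only case
  Describe({"code", "literals"});
  return 0;
}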
@@ -6286,15 +6286,18 @@ class SharedFunctionInfo: public HeapObject {
   DECL_ACCESSORS(optimized_code_map, Object)
 
   // Returns entry from optimized code map for specified context and OSR entry.
-  // Note that {code == nullptr} indicates no matching entry has been found,
-  // whereas {literals == nullptr} indicates the code is context-independent.
+  // Note that {code == nullptr, literals == nullptr} indicates no matching
+  // entry has been found, whereas {code, literals == nullptr} indicates that
+  // code is context-independent.
   CodeAndLiterals SearchOptimizedCodeMap(Context* native_context,
                                          BailoutId osr_ast_id);
 
   // Clear optimized code map.
   void ClearOptimizedCodeMap();
 
-  // Removed a specific optimized code object from the optimized code map.
+  // Removes a specific optimized code object from the optimized code map.
+  // In case of non-OSR the code reference is cleared from the cache entry but
+  // the entry itself is left in the map in order to proceed sharing literals.
   void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
 
   // Trims the optimized code map after entries have been removed.
@@ -6305,9 +6308,11 @@ class SharedFunctionInfo: public HeapObject {
                                        Handle<Code> code);
 
   // Add a new entry to the optimized code map for context-dependent code.
+  // |code| is either a code object or an undefined value. In the latter case
+  // the entry just maps |native_context, osr_ast_id| pair to |literals| array.
   static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                     Handle<Context> native_context,
-                                    Handle<Code> code,
+                                    Handle<HeapObject> code,
                                     Handle<FixedArray> literals,
                                     BailoutId osr_ast_id);
@@ -6327,6 +6332,8 @@ class SharedFunctionInfo: public HeapObject {
   static const int kEntryLength = 4;
   static const int kInitialLength = kEntriesStart + kEntryLength;
 
+  static const int kNotFound = -1;
+
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -6895,6 +6902,12 @@ class SharedFunctionInfo: public HeapObject {
 #endif
 
  private:
+  // Returns entry from optimized code map for specified context and OSR entry.
+  // The result is either kNotFound, kSharedCodeIndex for context-independent
+  // entry or a start index of the context-dependent entry.
+  int SearchOptimizedCodeMapEntry(Context* native_context,
+                                  BailoutId osr_ast_id);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
 };
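The new private SearchOptimizedCodeMapEntry in the hunk above encodes its result as an index: kNotFound (-1), kSharedCodeIndex for the context-independent slot, or the start index of a context-dependent entry. A minimal sketch of that encoding — kNotFound = -1 comes from the hunk; the concrete values of kSharedCodeIndex and kEntriesStart are assumptions of this sketch:

#include <cassert>

// Encoding of SearchOptimizedCodeMapEntry's return value: kNotFound, the
// shared-code slot index, or the start index of a context-dependent entry.
static const int kNotFound = -1;
static const int kSharedCodeIndex = 0;  // assumed value for illustration
static const int kEntriesStart = 1;     // assumed value for illustration

// Mirrors the `entry > kSharedCodeIndex` test used by AddToOptimizedCodeMap.
bool IsContextDependent(int entry) { return entry > kSharedCodeIndex; }

int main() {
  assert(!IsContextDependent(kNotFound));
  assert(!IsContextDependent(kSharedCodeIndex));
  assert(IsContextDependent(kEntriesStart));  // first context-dependent entry
  return 0;
}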
@@ -30,6 +30,7 @@
 // IC and Crankshaft support for smi-only elements in dynamic array literals.
 
 function get(foo) { return foo; } // Used to generate dynamic values.
+var __sequence = 0;
 
 function array_natives_test() {
   // Ensure small array literals start in specific element kind mode.
@@ -41,7 +42,6 @@ function array_natives_test() {
   // This code exists to eliminate the learning influence of AllocationSites
   // on the following tests.
-  var __sequence = 0;
   function make_array_string(literal) {
     this.__sequence = this.__sequence + 1;
     return "/* " + this.__sequence + " */ " + literal;
@@ -4,55 +4,45 @@
 // Flags: --allow-natives-syntax
 
-function Migrator(o) {
-  return o.foo;
+function literals_sharing_test(warmup, optimize) {
+  function closure() {
+    // Ensure small array literals start in specific element kind mode.
+    assertTrue(%HasFastSmiElements([]));
+    assertTrue(%HasFastSmiElements([1]));
+    assertTrue(%HasFastSmiElements([1,2]));
+    assertTrue(%HasFastDoubleElements([1.1]));
+    assertTrue(%HasFastDoubleElements([1.1,2]));
+    var a = [1, 2, 3];
+    if (warmup) {
+      // Transition elements kind during warmup...
+      assertTrue(%HasFastSmiElements(a));
+      assertEquals(4, a.push(1.3));
+    }
+    // ... and ensure that the information about transitioning is
+    // propagated to the next closure.
+    assertTrue(%HasFastDoubleElements(a));
+  };
+  if (optimize) %OptimizeFunctionOnNextCall(closure);
+  closure();
 }
-function Loader(o) {
-  return o[0];
-}
-var first_smi_array = [1];
-var second_smi_array = [2];
-var first_object_array = ["first"];
-var second_object_array = ["string"];
-assertTrue(%HasFastSmiElements(first_smi_array));
-assertTrue(%HasFastSmiElements(second_smi_array));
-assertTrue(%HasFastObjectElements(first_object_array));
-assertTrue(%HasFastObjectElements(second_object_array));
-// Prepare identical transition chains for smi and object arrays.
-first_smi_array.foo = 0;
-second_smi_array.foo = 0;
-first_object_array.foo = 0;
-second_object_array.foo = 0;
-// Collect type feedback for not-yet-deprecated original object array map.
-for (var i = 0; i < 3; i++) Migrator(second_object_array);
-// Blaze a migration trail for smi array maps.
-// This marks the migrated smi array map as a migration target.
-first_smi_array.foo = 0.5;
-print(second_smi_array.foo);
-// Deprecate original object array map.
-// Use TryMigrate from deferred optimized code to migrate second object array.
-first_object_array.foo = 0.5;
-%OptimizeFunctionOnNextCall(Migrator);
-Migrator(second_object_array);
-// |second_object_array| now erroneously has a smi map.
-// Optimized code assuming smi elements will expose this.
-for (var i = 0; i < 3; i++) Loader(second_smi_array);
-%OptimizeFunctionOnNextCall(Loader);
-assertEquals("string", Loader(second_object_array));
 
-// Any of the following checks will also fail:
-assertTrue(%HasFastObjectElements(second_object_array));
-assertFalse(%HasFastSmiElements(second_object_array));
-assertTrue(%HaveSameMap(first_object_array, second_object_array));
-assertFalse(%HaveSameMap(first_smi_array, second_object_array));
-%ClearFunctionTypeFeedback(Loader);
-%ClearFunctionTypeFeedback(Migrator);
+function test() {
+  var warmup = true;
+  for (var i = 0; i < 3; i++) {
+    print("iter: " + i + ", warmup: "+ warmup);
+    literals_sharing_test(warmup, false);
+    warmup = false;
+  }
+  print("iter: " + i + ", opt: true");
+  literals_sharing_test(warmup, true);
+}
+
+function stress_opt_test() {}
+stress_opt_test();
+if (%GetOptimizationStatus(stress_opt_test) == 2) {
+  // This test is not suitable for --always-opt mode.
+  test();
+}
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
function Migrator(o) {
return o.foo;
}
function Loader(o) {
return o[0];
}
var first_smi_array = [1];
var second_smi_array = [2];
var first_object_array = ["first"];
var second_object_array = ["string"];
assertTrue(%HasFastSmiElements(first_smi_array));
assertTrue(%HasFastSmiElements(second_smi_array));
assertTrue(%HasFastObjectElements(first_object_array));
assertTrue(%HasFastObjectElements(second_object_array));
// Prepare identical transition chains for smi and object arrays.
first_smi_array.foo = 0;
second_smi_array.foo = 0;
first_object_array.foo = 0;
second_object_array.foo = 0;
// Collect type feedback for not-yet-deprecated original object array map.
for (var i = 0; i < 3; i++) Migrator(second_object_array);
// Blaze a migration trail for smi array maps.
// This marks the migrated smi array map as a migration target.
first_smi_array.foo = 0.5;
print(second_smi_array.foo);
// Deprecate original object array map.
// Use TryMigrate from deferred optimized code to migrate second object array.
first_object_array.foo = 0.5;
%OptimizeFunctionOnNextCall(Migrator);
Migrator(second_object_array);
// |second_object_array| now erroneously has a smi map.
// Optimized code assuming smi elements will expose this.
for (var i = 0; i < 3; i++) Loader(second_smi_array);
%OptimizeFunctionOnNextCall(Loader);
assertEquals("string", Loader(second_object_array));
// Any of the following checks will also fail:
assertTrue(%HasFastObjectElements(second_object_array));
assertFalse(%HasFastSmiElements(second_object_array));
assertTrue(%HaveSameMap(first_object_array, second_object_array));
assertFalse(%HaveSameMap(first_smi_array, second_object_array));
%ClearFunctionTypeFeedback(Loader);
%ClearFunctionTypeFeedback(Migrator);