Commit 317cb653 authored by mstarzinger, committed by Commit bot

[turbofan] Implement sharing of context-independent code.

This allows context-independent code generated by TurboFan to be cached
in the optimized code map and reused across native contexts. Note that
currently this cache is still flushed at GC time.

R=bmeurer@chromium.org,mvstanton@chromium.org
TEST=cctest/test-compiler/OptimizedCodeSharing

Review URL: https://codereview.chromium.org/1208013002

Cr-Commit-Position: refs/heads/master@{#29313}
parent d350ab47
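
For orientation before the diff: after this change, the optimized code map on a SharedFunctionInfo is a FixedArray with one dedicated slot for context-independent code in front of the per-context entries. A rough sketch of the layout, pieced together from the SharedFunctionInfo constants in the hunks below (the diagram itself is illustrative, not part of the patch):

    // Optimized code map layout after this change (illustrative):
    //   [0] kNextMapIndex    -- next-map link (unchanged by this patch)
    //   [1] kSharedCodeIndex -- context-independent code, or undefined (new)
    //   [2] kEntriesStart    -- first per-context entry; each entry is a
    //                           {context, code, literals} triple, per the
    //                           kContextOffset/kCachedCodeOffset/
    //                           kLiteralsOffset constants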
@@ -93,6 +93,8 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
IfBuilder* builder,
HValue* optimized_map,
HValue* map_index);
void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
HValue* code_object, HValue* literals);
void BuildInstallCode(HValue* js_function, HValue* shared_info);
HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
@@ -1651,6 +1653,16 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
map_index, SharedFunctionInfo::kLiteralsOffset);
BuildInstallOptimizedCode(js_function, native_context, code_object, literals);
// The builder continues in the "then" after this function.
}
void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(HValue* js_function,
HValue* native_context,
HValue* code_object,
HValue* literals) {
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized());
@@ -1673,8 +1685,6 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
Add<HStoreNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
js_function);
// The builder continues in the "then" after this function.
}
@@ -1712,6 +1722,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* shared_info,
HValue* native_context) {
Counters* counters = isolate()->counters();
Factory* factory = isolate()->factory();
IfBuilder is_optimized(this);
HInstruction* optimized_map = Add<HLoadNamedField>(
shared_info, nullptr, HObjectAccess::ForOptimizedCodeMap());
@@ -1764,18 +1775,34 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
loop_builder.EndBody();
// If slot_iterator equals first entry index, then we failed to find and
// install optimized code
// If slot_iterator equals the first entry index, then we failed to find
// context-dependent code; try context-independent code next.
IfBuilder no_optimized_code_check(this);
no_optimized_code_check.If<HCompareNumericAndBranch>(
slot_iterator, first_entry_index, Token::EQ);
no_optimized_code_check.Then();
{
// Store the unoptimized code
IfBuilder shared_code_check(this);
HValue* shared_code = Add<HLoadNamedField>(
optimized_map, nullptr,
HObjectAccess::ForOptimizedCodeMapSharedCode());
shared_code_check.IfNot<HCompareObjectEqAndBranch>(
shared_code, graph()->GetConstantUndefined());
shared_code_check.Then();
{
// Store the context-independent optimized code.
HValue* literals = Add<HConstant>(factory->empty_fixed_array());
BuildInstallOptimizedCode(js_function, native_context, shared_code,
literals);
}
shared_code_check.Else();
{
// Store the unoptimized code.
BuildInstallCode(js_function, shared_info);
}
}
}
}
}
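
Stripped of the Hydrogen builder plumbing, the lookup order the stub now implements is: per-context entries first, then the new shared-code slot, then the unoptimized code. A self-contained model of that order (all types below are invented for illustration; this is not V8 code):

    #include <cstdio>
    #include <vector>

    struct Entry { int context; int code; };  // stand-in for a {context, code, literals} triple

    struct OptimizedCodeMap {
      int shared_code = -1;        // stand-in for the kSharedCodeIndex slot; -1 ~ undefined
      std::vector<Entry> entries;  // stand-in for the per-context entries
    };

    // Returns the code to install for `context`: a context-dependent entry if
    // one exists, else the shared slot, else -1 meaning "install unoptimized".
    int Lookup(const OptimizedCodeMap& map, int context) {
      for (const Entry& e : map.entries) {  // step 1: context-dependent scan
        if (e.context == context) return e.code;
      }
      if (map.shared_code != -1) {          // step 2: context-independent slot
        return map.shared_code;
      }
      return -1;                            // step 3: fall back to unoptimized
    }

    int main() {
      OptimizedCodeMap map;
      map.shared_code = 42;
      map.entries.push_back({1, 7});
      std::printf("%d %d\n", Lookup(map, 1), Lookup(map, 2));  // prints "7 42"
      return 0;
    }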
@@ -714,17 +714,28 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
if (code->is_turbofanned() && info->is_context_specializing()) return;
// Do not cache bound functions.
if (info->closure()->shared()->bound()) return;
Handle<JSFunction> function = info->closure();
if (function->shared()->bound()) return;
// Cache optimized code.
// Cache optimized context-specific code.
if (FLAG_cache_optimized_code) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
literals, info->osr_ast_id());
}
// Do not cache context-independent code compiled for OSR.
if (code->is_turbofanned() && info->is_osr()) return;
// Cache optimized context-independent code.
if (FLAG_turbo_cache_shared_code && code->is_turbofanned()) {
DCHECK(!info->is_context_specializing());
DCHECK(info->osr_ast_id().IsNone());
Handle<SharedFunctionInfo> shared(function->shared());
SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(shared, code);
}
}
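
Taken together, the policy above reads (a paraphrase, not text from the patch): context-specializing TurboFan code and bound functions are never cached; --cache-optimized-code adds a per-context {context, code, literals} entry; TurboFan OSR code is kept out of the shared slot; and --turbo-cache-shared-code adds TurboFan code to the context-independent slot. The ordering also matters: the context-specific branch runs first and creates the code map if needed, and AddSharedCodeToOptimizedCodeMap (below) silently bails out while the map is still empty.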
@@ -1390,6 +1390,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
if (cached.literals == nullptr && !info->bound()) {
int number_of_literals = info->num_literals();
// TODO(mstarzinger): Consider sharing the newly created literals array.
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
result->set_literals(*literals);
}
@@ -430,6 +430,7 @@ DEFINE_BOOL(turbo_stress_loop_peeling, false,
"stress loop peeling optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_cache_shared_code, false, "cache context-independent code")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -5980,6 +5980,11 @@ class HObjectAccess final {
SharedFunctionInfo::kOptimizedCodeMapOffset);
}
static HObjectAccess ForOptimizedCodeMapSharedCode() {
return HObjectAccess(kInobject, FixedArray::OffsetOfElementAt(
SharedFunctionInfo::kSharedCodeIndex));
}
static HObjectAccess ForFunctionContextPointer() {
return HObjectAccess(kInobject, JSFunction::kContextOffset);
}
@@ -9568,6 +9568,17 @@ Handle<JSFunction> JSFunction::CloneClosure(Handle<JSFunction> function) {
}
void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Code> code) {
Isolate* isolate = shared->GetIsolate();
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
Handle<Object> value(shared->optimized_code_map(), isolate);
if (value->IsSmi()) return; // Empty code maps are unsupported.
Handle<FixedArray> code_map = Handle<FixedArray>::cast(value);
code_map->set(kSharedCodeIndex, *code);
}
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
@@ -9584,7 +9595,6 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
if (value->IsSmi()) {
// No optimized code map.
DCHECK_EQ(0, Smi::cast(*value)->value());
// Create 3 entries per context {context, code, literals}.
new_code_map = isolate->factory()->NewFixedArray(kInitialLength);
old_length = kEntriesStart;
} else {
@@ -9672,6 +9682,15 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
dst += kEntryLength;
}
}
if (code_map->get(kSharedCodeIndex) == optimized_code) {
// Evict context-independent code as well.
code_map->set_undefined(kSharedCodeIndex);
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
ShortPrint();
PrintF(" (context-independent code)]\n");
}
}
if (dst != length) {
// Always trim even when array is cleared because of heap verifier.
GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
@@ -10659,6 +10678,10 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
FixedArray::cast(optimized_code_map->get(i + kLiteralsOffset))};
}
}
Object* shared_code = optimized_code_map->get(kSharedCodeIndex);
if (shared_code->IsCode() && osr_ast_id.IsNone()) {
return {Code::cast(shared_code), nullptr};
}
if (FLAG_trace_opt) {
PrintF("[didn't find optimized code in optimized code map for ");
ShortPrint();
@@ -6555,7 +6555,7 @@ enum BuiltinFunctionId {
// Result of searching in an optimized code map of a SharedFunctionInfo. Note
// that {code == nullptr} indicates that no entry has been found.
// that both {code} and {literals} can be NULL to signal the search result.
struct CodeAndLiterals {
Code* code; // Cached optimized code.
FixedArray* literals; // Cached literals array.
@@ -6578,7 +6578,8 @@ class SharedFunctionInfo: public HeapObject {
DECL_ACCESSORS(optimized_code_map, Object)
// Returns entry from optimized code map for specified context and OSR entry.
// Note that {code == nullptr} indicates no matching entry has been found.
// Note that {code == nullptr} indicates no matching entry has been found,
// whereas {literals == nullptr} indicates the code is context-independent.
CodeAndLiterals SearchOptimizedCodeMap(Context* native_context,
BailoutId osr_ast_id);
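
For callers, the result now distinguishes three cases. A sketch of how a caller might branch on the struct (illustrative only; variable names are invented and this snippet is not from the patch):

    CodeAndLiterals result = shared->SearchOptimizedCodeMap(context, osr_ast_id);
    if (result.code == nullptr) {
      // No cached entry: compile, or keep running unoptimized code.
    } else if (result.literals == nullptr) {
      // Context-independent hit: install the code, but supply a literals
      // array separately (the stub above uses an empty fixed array).
    } else {
      // Context-dependent hit: install both the code and the cached literals.
    }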
@@ -6588,20 +6589,14 @@ class SharedFunctionInfo: public HeapObject {
// Removes a specific optimized code object from the optimized code map.
void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
// Unconditionally clear the type feedback vector (including vector ICs).
void ClearTypeFeedbackInfo();
// Clear the type feedback vector with a more subtle policy at GC time.
void ClearTypeFeedbackInfoAtGCTime();
// Trims the optimized code map after entries have been removed.
void TrimOptimizedCodeMap(int shrink_by);
// Initialize a SharedFunctionInfo from a parsed function literal.
static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
FunctionLiteral* lit);
// Add a new entry to the optimized code map for context-independent code.
static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Code> code);
// Add a new entry to the optimized code map.
// Add a new entry to the optimized code map for context-dependent code.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
@@ -6615,7 +6610,8 @@ class SharedFunctionInfo: public HeapObject {
// Layout description of the optimized code map.
static const int kNextMapIndex = 0;
static const int kEntriesStart = 1;
static const int kSharedCodeIndex = 1;
static const int kEntriesStart = 2;
static const int kContextOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kLiteralsOffset = 2;
@@ -6656,6 +6652,12 @@ class SharedFunctionInfo: public HeapObject {
// available.
DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)
// Unconditionally clear the type feedback vector (including vector ICs).
void ClearTypeFeedbackInfo();
// Clear the type feedback vector with a more subtle policy at GC time.
void ClearTypeFeedbackInfoAtGCTime();
#if TRACE_MAPS
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
// even if the GC moves this SharedFunctionInfo.
@@ -6898,6 +6900,10 @@ class SharedFunctionInfo: public HeapObject {
inline bool is_simple_parameter_list();
// Initialize a SharedFunctionInfo from a parsed function literal.
static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
FunctionLiteral* lit);
// Dispatched behavior.
DECLARE_PRINTER(SharedFunctionInfo)
DECLARE_VERIFIER(SharedFunctionInfo)
@@ -370,7 +370,7 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {
// Test that optimized code for different closures is actually shared
// immediately by the FastNewClosureStub when run in the same context.
TEST(OptimizedCodeSharing) {
TEST(OptimizedCodeSharing1) {
FLAG_stress_compaction = false;
FLAG_allow_natives_syntax = true;
FLAG_cache_optimized_code = true;
@@ -380,7 +380,8 @@ TEST(OptimizedCodeSharing) {
LocalContext env;
env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
v8::Integer::New(CcTest::isolate(), i));
CompileRun("function MakeClosure() {"
CompileRun(
"function MakeClosure() {"
" return function() { return x; };"
"}"
"var closure0 = MakeClosure();"
@@ -400,6 +401,61 @@ TEST(OptimizedCodeSharing) {
}
// Test that optimized code for different closures is actually shared
// immediately by the FastNewClosureStub when run in different contexts.
TEST(OptimizedCodeSharing2) {
if (FLAG_stress_compaction) return;
FLAG_allow_natives_syntax = true;
FLAG_cache_optimized_code = true;
FLAG_turbo_cache_shared_code = true;
const char* flag = "--turbo-filter=*";
FlagList::SetFlagsFromString(flag, StrLength(flag));
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Script> script = v8_compile(
"function MakeClosure() {"
" return function() { return x; };"
"}");
Handle<Code> reference_code;
{
LocalContext env;
env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
v8::Integer::New(CcTest::isolate(), 23));
script->GetUnboundScript()->BindToCurrentContext()->Run();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());");
Handle<JSFunction> fun0 = v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure0"))));
CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
reference_code = handle(fun0->code());
}
for (int i = 0; i < 10; i++) {
LocalContext env;
env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
v8::Integer::New(CcTest::isolate(), i));
script->GetUnboundScript()->BindToCurrentContext()->Run();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
"var closure1 = MakeClosure();"
"var closure2 = MakeClosure();");
Handle<JSFunction> fun1 = v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure1"))));
Handle<JSFunction> fun2 = v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure2"))));
CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK_EQ(*reference_code, fun1->code());
CHECK_EQ(*reference_code, fun2->code());
}
}
TEST(CompileFunctionInContext) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());