Commit 62ac593d authored by yangguo@chromium.org

Pass PC offset into runtime when compiling for OSR.

R=titzer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/23842004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16649 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 0e7f6a29
......@@ -966,13 +966,22 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Lookup and calculate pc offset.
__ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
__ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sub(r1, r1, r2);
__ SmiTag(r1);
// Pass both function and pc offset as arguments.
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
__ push(r1);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
// If the code object is null, just return to the unoptimized code.
......
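All four architecture variants of Generate_OnStackReplacement (ARM above; ia32, MIPS, and x64 below) now compute the same value before calling into the runtime: the caller's return address minus the instruction start of the unoptimized code object, smi-tagged so it can be pushed as a second argument. A minimal sketch of that arithmetic in plain C++, with addresses modeled as integers and an illustrative placeholder for Code::kHeaderSize:

```cpp
// Sketch of the pc-offset computation the builtins perform. The constants
// are illustrative stand-ins, not V8's actual values.
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;    // V8 tags heap pointers with 1
constexpr intptr_t kCodeHeaderSize = 64;  // placeholder for Code::kHeaderSize

intptr_t PcOffsetAsSmi(intptr_t caller_pc, intptr_t tagged_code_pointer) {
  // instruction_start = (tagged pointer - tag) + header size, so
  // offset = caller_pc - tagged_code - (kHeaderSize - kHeapObjectTag),
  // matching the two subtractions in the assembly above.
  intptr_t offset =
      caller_pc - tagged_code_pointer - (kCodeHeaderSize - kHeapObjectTag);
  return offset << 1;  // SmiTag on 32-bit targets: shift left by one
}
```

On x64 the final step is Integer32ToSmi rather than a one-bit shift, but the offset computation is identical.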
......@@ -1120,166 +1120,6 @@ Handle<Code> Compiler::InstallOptimizedCode(
}
static uint32_t CurrentPcOffset(Isolate* isolate,
Handle<JSFunction> function,
Handle<Code> unoptimized) {
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
ASSERT(frame->function() == *function);
ASSERT(frame->LookupCode() == *unoptimized);
ASSERT(unoptimized->contains(frame->pc()));
// Use linear search of the unoptimized code's back edge table to find
// the AST id matching the PC.
return static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start());
}
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function,
Handle<Code> unoptimized) {
// Keep track of whether we've succeeded in optimizing.
if (!unoptimized->optimizable()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
// deoptimized so that we are currently in an unoptimized activation.
// Check for optimized activations of this function.
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized() && frame->function() == *function) return false;
}
return true;
}
Handle<Code> Compiler::CompileForOnStackReplacement(
Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<Code> unoptimized(function->shared()->code(), isolate);
Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
if (FLAG_trace_osr) {
PrintF("[OSR - restored original interrupt calls in ");
function->PrintName();
PrintF("]\n");
}
if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
// Find the PC offset in unoptimized code and translate to an AST id.
uint32_t pc_offset = CurrentPcOffset(isolate, function, unoptimized);
BailoutId ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
ASSERT(!ast_id.IsNone());
if (FLAG_trace_osr) {
PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
function->PrintName();
PrintF("]\n");
}
// Attempt OSR compilation.
Handle<Code> result = JSFunction::CompileOsr(
function, ast_id, CLEAR_EXCEPTION);
if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
// OSR compilation succeeded.
DeoptimizationInputData* data =
DeoptimizationInputData::cast(result->deoptimization_data());
if (FLAG_trace_osr) {
PrintF("[OSR - entry, offset %d in optimized code]\n",
data->OsrPcOffset()->value());
}
ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
return result;
}
}
if (FLAG_trace_osr) {
PrintF("[OSR - attempt failed for ");
function->PrintName();
PrintF("]\n");
}
return Handle<Code>::null();
}
Handle<Code> Compiler::CompileForConcurrentOSR(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<Code> unoptimized(function->shared()->code(), isolate);
uint32_t pc_offset = CurrentPcOffset(isolate, function, unoptimized);
if (isolate->optimizing_compiler_thread()->
IsQueuedForOSR(function, pc_offset)) {
// Still waiting for the optimizing compiler thread to finish. Carry on.
if (FLAG_trace_osr) {
PrintF("[COSR - polling recompile tasks for ");
function->PrintName();
PrintF("]\n");
}
return Handle<Code>::null();
}
OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
FindReadyOSRCandidate(function, pc_offset);
if (compiler != NULL) {
BailoutId ast_id = compiler->info()->osr_ast_id();
if (FLAG_trace_osr) {
PrintF("[COSR - optimization complete for ");
function->PrintName();
PrintF(", restoring interrupt calls]\n");
}
Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
// TODO(titzer): don't install the OSR code into the function.
Handle<Code> result = InstallOptimizedCode(compiler);
isolate->optimizing_compiler_thread()->RemoveStaleOSRCandidates();
if (result.is_null()) {
if (FLAG_trace_osr) {
PrintF("[COSR - optimization failed for ");
function->PrintName();
PrintF("]\n");
}
return Handle<Code>::null();
}
// Check the result matches our expectations, and don't use it otherwise.
if (result->kind() == Code::OPTIMIZED_FUNCTION) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(result->deoptimization_data());
if (data->OsrPcOffset()->value() >= 0) {
ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
if (FLAG_trace_osr) {
PrintF("[COSR - entry at AST id %d, offset %d in optimized code]\n",
ast_id.ToInt(), data->OsrPcOffset()->value());
}
return result;
}
}
return Handle<Code>::null();
}
if (!IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
if (FLAG_trace_osr) {
PrintF("[COSR - ");
function->PrintName();
PrintF(" is unsuitable, restoring interrupt calls]\n");
}
Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
return Handle<Code>::null();
}
if (!RecompileConcurrent(function, pc_offset)) {
Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
}
return Handle<Code>::null();
}
Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
Handle<Script> script) {
// Precondition: code has been parsed and scopes have been analyzed.
......
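Before this commit, the runtime side recomputed the pc offset itself (CurrentPcOffset above) and then mapped it to a bailout point via unoptimized->TranslatePcOffsetToAstId(pc_offset), which linearly scans the unoptimized code's back edge table. A minimal sketch of that translation, assuming a simple table of (pc offset, AST id) pairs; BackEdgeEntry and the table layout are hypothetical stand-ins, since the real table lives inside the Code object:

```cpp
// Sketch of translating a pc offset to an AST id by scanning a back edge
// table. Returns -1 (BailoutId::None in V8) when no entry matches.
#include <cstdint>
#include <vector>

struct BackEdgeEntry {
  uint32_t pc_offset;  // offset of the patched back-edge interrupt
  int ast_id;          // bailout id of the loop owning this back edge
};

int TranslatePcOffsetToAstId(const std::vector<BackEdgeEntry>& table,
                             uint32_t pc_offset) {
  for (const BackEdgeEntry& entry : table) {
    if (entry.pc_offset == pc_offset) return entry.ast_id;
  }
  return -1;
}
```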
......@@ -627,10 +627,6 @@ class Compiler : public AllStatic {
static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
static Handle<Code> CompileForOnStackReplacement(Handle<JSFunction> function);
static Handle<Code> CompileForConcurrentOSR(Handle<JSFunction> function);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
#endif
......
......@@ -1327,14 +1327,21 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Lookup and calculate pc offset.
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
__ mov(ebx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
__ sub(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ sub(edx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
__ SmiTag(edx);
// Pass both function and pc offset as arguments.
__ push(eax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
__ push(edx);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
Label skip;
......
......@@ -1000,13 +1000,22 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
// Lookup the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Lookup and calculate pc offset.
__ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
__ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Subu(a1, a1, a2);
__ SmiTag(a1);
// Pass both function and pc offset as arguments.
__ push(a0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
__ push(a1);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
// If the code object is null, just return to the unoptimized code.
......
......@@ -234,14 +234,18 @@ void OptimizingCompilerThread::QueueForOptimization(
OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
Handle<JSFunction> function, uint32_t osr_pc_offset) {
ASSERT(!IsOptimizerThread());
LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
for (int i = 0; i < ready_for_osr_.length(); i++) {
if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
osr_hits_++;
return ready_for_osr_.Remove(i);
OptimizingCompiler* result = NULL;
{ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
for (int i = 0; i < ready_for_osr_.length(); i++) {
if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
osr_hits_++;
result = ready_for_osr_.Remove(i);
break;
}
}
}
return NULL;
RemoveStaleOSRCandidates();
return result;
}
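The rewritten FindReadyOSRCandidate narrows the LockGuard to an inner block: the list scan runs under osr_list_mutex_, while the trailing RemoveStaleOSRCandidates() call runs only after the guard is released. That ordering matters because, as the next hunk shows, RemoveStaleOSRCandidates acquires the same mutex itself, and re-acquiring a non-recursive mutex on the same thread would deadlock. A minimal sketch of the pattern, with std::mutex standing in for V8's Mutex:

```cpp
// Sketch of the lock-scoping pattern, assuming a non-recursive mutex
// shared by both operations (as osr_list_mutex_ appears to be).
#include <mutex>
#include <vector>

std::mutex list_mutex;
std::vector<int> ready;  // stand-in for ready_for_osr_

void RemoveStale() {
  std::lock_guard<std::mutex> guard(list_mutex);  // takes the same mutex
  // ... drop outdated entries ...
}

int FindReady(int wanted) {
  int result = -1;
  {  // inner scope: hold the lock only while scanning the list
    std::lock_guard<std::mutex> guard(list_mutex);
    for (size_t i = 0; i < ready.size(); i++) {
      if (ready[i] == wanted) {
        result = ready[i];
        ready.erase(ready.begin() + i);
        break;
      }
    }
  }  // guard released here...
  RemoveStale();  // ...so this re-acquisition cannot self-deadlock
  return result;
}
```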
......@@ -258,6 +262,18 @@ bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
}
bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
ASSERT(!IsOptimizerThread());
LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
for (int i = 0; i < osr_candidates_.length(); i++) {
if (*osr_candidates_[i]->info()->closure() == function) {
return true;
}
}
return false;
}
void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
ASSERT(!IsOptimizerThread());
LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
......
......@@ -71,9 +71,7 @@ class OptimizingCompilerThread : public Thread {
uint32_t osr_pc_offset);
bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
// Remove the oldest OSR candidates that are ready so that we
// only have |limit| left waiting.
void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
// We don't need a barrier since we have a data dependency right
......@@ -96,6 +94,10 @@ class OptimizingCompilerThread : public Thread {
private:
enum StopFlag { CONTINUE, STOP, FLUSH };
// Remove the oldest OSR candidates that are ready so that we
// only have |limit| left waiting.
void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
void FlushInputQueue(bool restore_function_code);
void FlushOutputQueue(bool restore_function_code);
void CompileNext();
......
......@@ -139,7 +139,16 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
PrintF("]\n");
}
if (FLAG_concurrent_recompilation && !isolate_->bootstrapper()->IsActive()) {
if (FLAG_concurrent_osr &&
isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
// Do not attempt regular recompilation if we already queued this for OSR.
// TODO(yangguo): This is necessary so that we don't install optimized
// code on a function that is already optimized, since OSR and regular
// recompilation race. This goes away as soon as OSR becomes one-shot.
return;
}
ASSERT(!function->IsMarkedForInstallingRecompiledCode());
ASSERT(!function->IsInRecompileQueue());
function->MarkForConcurrentRecompilation();
......@@ -224,6 +233,8 @@ void RuntimeProfiler::OptimizeNow() {
isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
DisallowHeapAllocation no_gc;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
// (eagerly or lazily).
......
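The guard added above exists because a concurrent OSR job and a regular concurrent recompilation job can be in flight for the same function at once, each wanting to install code. A hedged timeline, reconstructed from the TODO comments in this commit rather than stated anywhere in the source:

```cpp
// Sketch of the race the IsQueuedForOSR check avoids (comments only):
//
//   main thread:                         compiler thread:
//   1. back edge fires; an OSR compile
//      for (f, pc_offset) is queued      starts compiling OSR code for f
//   2. profiler ticks; without the
//      guard it would also mark f for
//      regular concurrent recompilation  ...
//   3. the regular job installs          the OSR job later also tries to
//      optimized code on f               deliver code for f
//
// With both jobs in flight, f can already be optimized when the OSR
// result arrives. The check suppresses the regular job; per the TODO,
// this workaround disappears once OSR becomes one-shot.
```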
......@@ -8579,38 +8579,125 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
}
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function,
Handle<Code> unoptimized) {
// Keep track of whether we've succeeded in optimizing.
if (!unoptimized->optimizable()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
// deoptimized so that we are currently in an unoptimized activation.
// Check for optimized activations of this function.
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized() && frame->function() == *function) return false;
}
return true;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
Handle<Code> unoptimized(function->shared()->code(), isolate);
#ifdef DEBUG
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
ASSERT_EQ(frame->function(), *function);
ASSERT_EQ(frame->LookupCode(), *unoptimized);
ASSERT(unoptimized->contains(frame->pc()));
ASSERT(pc_offset ==
static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start()));
#endif // DEBUG
// We're not prepared to handle a function with arguments object.
ASSERT(!function->shared()->uses_arguments());
// If the optimization attempt succeeds, return the code object which
// the unoptimized code can jump into.
Handle<Code> code =
(FLAG_concurrent_recompilation && FLAG_concurrent_osr)
? Compiler::CompileForConcurrentOSR(function)
: Compiler::CompileForOnStackReplacement(function);
if (!code.is_null()) {
#if DEBUG
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
Handle<Code> result = Handle<Code>::null();
BailoutId ast_id = BailoutId::None();
if (FLAG_concurrent_recompilation && FLAG_concurrent_osr) {
if (isolate->optimizing_compiler_thread()->
IsQueuedForOSR(function, pc_offset)) {
// Still waiting for the optimizing compiler thread to finish. Carry on.
if (FLAG_trace_osr) {
PrintF("[COSR - polling recompile tasks for ");
function->PrintName();
PrintF("]\n");
}
return NULL;
}
OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
FindReadyOSRCandidate(function, pc_offset);
if (compiler == NULL) {
if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
Compiler::RecompileConcurrent(function, pc_offset)) {
if (function->IsMarkedForLazyRecompilation() ||
function->IsMarkedForConcurrentRecompilation()) {
// Prevent regular recompilation if we queue this for OSR.
// TODO(yangguo): remove this as soon as OSR becomes one-shot.
function->ReplaceCode(function->shared()->code());
}
return NULL;
}
// Fall through to the end in case of failure.
} else {
// TODO(titzer): don't install the OSR code into the function.
ast_id = compiler->info()->osr_ast_id();
result = Compiler::InstallOptimizedCode(compiler);
}
} else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
ASSERT(!ast_id.IsNone());
if (FLAG_trace_osr) {
PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
function->PrintName();
PrintF("]\n");
}
// Attempt OSR compilation.
result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
}
// Revert the patched interrupt now, regardless of whether OSR succeeds.
Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
// Check whether we ended up with usable optimized code.
if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
ASSERT(!BailoutId(data->OsrAstId()->value()).IsNone());
#endif
// TODO(titzer): this is a massive hack to make the deopt counts
// match. Fix heuristics for reenabling optimizations!
function->shared()->increment_deopt_count();
return *code;
} else {
if (function->IsMarkedForLazyRecompilation() ||
function->IsMarkedForConcurrentRecompilation()) {
function->ReplaceCode(function->shared()->code());
DeoptimizationInputData::cast(result->deoptimization_data());
if (data->OsrPcOffset()->value() >= 0) {
ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
if (FLAG_trace_osr) {
PrintF("[OSR - entry at AST id %d, offset %d in optimized code]\n",
ast_id.ToInt(), data->OsrPcOffset()->value());
}
// TODO(titzer): this is a massive hack to make the deopt counts
// match. Fix heuristics for reenabling optimizations!
function->shared()->increment_deopt_count();
return *result;
}
return NULL;
}
if (FLAG_trace_osr) {
PrintF("[OSR - optimization failed for ");
function->PrintName();
PrintF("]\n");
}
if (function->IsMarkedForLazyRecompilation() ||
function->IsMarkedForConcurrentRecompilation()) {
function->ReplaceCode(function->shared()->code());
}
return NULL;
}
......
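The rewritten Runtime_CompileForOnStackReplacement absorbs both of the Compiler entry points deleted above. A paraphrased outline of its control flow, as comments only, not compilable V8 code:

```cpp
// Outline of the new Runtime_CompileForOnStackReplacement:
//
// 1. Read the (function, pc_offset) arguments; in debug builds, check
//    that pc_offset matches the topmost unoptimized frame.
// 2. Concurrent path (FLAG_concurrent_recompilation && FLAG_concurrent_osr):
//    a. Still queued for this (function, pc_offset)? Return NULL and keep
//       running unoptimized code; poll again on the next back edge.
//    b. A finished candidate exists? Install its code, fall through to 4.
//    c. Otherwise, if suitable, queue a concurrent OSR compile and
//       return NULL (unmarking any pending regular recompilation).
// 3. Synchronous path: translate pc_offset to an AST id and compile the
//    OSR code directly.
// 4. Revert the patched interrupts unconditionally, then return the code
//    only if it is usable (OsrPcOffset >= 0 and the OSR AST id matches);
//    otherwise return NULL.
```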
......@@ -100,7 +100,7 @@ namespace internal {
F(NeverOptimizeFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(CompileForOnStackReplacement, 2, 1) \
F(SetAllocationTimeout, 2, 1) \
F(AllocateInNewSpace, 1, 1) \
F(AllocateInOldPointerSpace, 1, 1) \
......
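For reference, the fields of a runtime function list entry follow V8's usual convention of F(name, number of arguments, result size), so this hunk simply declares the new second argument:

```cpp
// F(name, nargs, result_size):
//   F(CompileForOnStackReplacement, 2, 1)
// raises the argument count from 1 to 2 so that runtime.cc can read the
// smi-tagged pc offset pushed by the builtins:
//   CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
```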
......@@ -1408,14 +1408,21 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Lookup and calculate pc offset.
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
__ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
__ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
__ Integer32ToSmi(rdx, rdx);
// Pass both function and pc offset as arguments.
__ push(rax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
__ push(rdx);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
Label skip;
......