Commit 0648103e authored by ricow@chromium.org

x64: Port OSR to the x64 platform.


Review URL: http://codereview.chromium.org/6515012

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6791 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e25f3baf
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -601,7 +601,16 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
 
 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  __ int3();
+  // For now, we are relying on the fact that Runtime::NotifyOSR
+  // doesn't do any garbage collection which allows us to save/restore
+  // the registers without worrying about which of them contain
+  // pointers. This seems a bit fragile.
+  __ Pushad();
+  __ EnterInternalFrame();
+  __ CallRuntime(Runtime::kNotifyOSR, 0);
+  __ LeaveInternalFrame();
+  __ Popad();
+  __ ret(0);
 }
@@ -1406,7 +1415,58 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  __ int3();
+  // Get the loop depth of the stack guard check. This is recorded in
+  // a test(rax, depth) instruction right after the call.
+  Label stack_check;
+  __ movq(rbx, Operand(rsp, 0));  // return address
+  __ movzxbq(rbx, Operand(rbx, 1));  // depth
+
+  // Get the loop nesting level at which we allow OSR from the
+  // unoptimized code and check if we want to do OSR yet. If not we
+  // should perform a stack guard check so we can get interrupts while
+  // waiting for on-stack replacement.
+  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+  __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+  __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
+  __ j(greater, &stack_check);
+
+  // Pass the function to optimize as the argument to the on-stack
+  // replacement runtime function.
+  __ EnterInternalFrame();
+  __ push(rax);
+  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  __ LeaveInternalFrame();
+
+  // If the result was -1 it means that we couldn't optimize the
+  // function. Just return and continue in the unoptimized version.
+  NearLabel skip;
+  __ SmiCompare(rax, Smi::FromInt(-1));
+  __ j(not_equal, &skip);
+  __ ret(0);
+
+  // If we decide not to perform on-stack replacement we perform a
+  // stack guard check to enable interrupts.
+  __ bind(&stack_check);
+  NearLabel ok;
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  __ j(above_equal, &ok);
+
+  StackCheckStub stub;
+  __ TailCallStub(&stub);
+  __ Abort("Unreachable code: returned from tail call.");
+  __ bind(&ok);
+  __ ret(0);
+
+  __ bind(&skip);
+  // Untag the AST id and push it on the stack.
+  __ SmiToInteger32(rax, rax);
+  __ push(rax);
+
+  // Generate the code for doing the frame-to-frame translation using
+  // the deoptimizer infrastructure.
+  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+  generator.Generate();
 }
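
The depth read at the top of Generate_OnStackReplacement only makes sense against the byte layout of the unoptimized code's stack-guard sequence: the return address on the stack points at the test instruction emitted right after the guard call, and the loop depth sits in the byte immediately after the test opcode. A minimal standalone sketch of that read, assuming the one-byte 0xa9 "test eax, imm32" encoding used by the matching ia32 sequence; this models raw bytes, not V8 code:

    // Model of the depth read: the return address points at the test
    // instruction recorded right after the call in the unoptimized code.
    #include <cassert>
    #include <cstdint>

    int main() {
      // Assumed bytes at the return address: test eax, 2 (loop depth == 2).
      const std::uint8_t code_after_call[] = {0xa9, 0x02, 0x00, 0x00, 0x00};
      const std::uint8_t* return_address = code_after_call;

      // Mirrors: __ movq(rbx, Operand(rsp, 0));     // return address
      //          __ movzxbq(rbx, Operand(rbx, 1));  // depth
      std::uint8_t depth = return_address[1];
      assert(depth == 2);
      return 0;
    }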
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -203,14 +203,51 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
 
 void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
-  UNIMPLEMENTED();
+  Address call_target_address = pc_after - kIntSize;
+  ASSERT(check_code->entry() ==
+         Assembler::target_address_at(call_target_address));
+  // The stack check code matches the pattern:
+  //
+  //     cmp rsp, <limit>
+  //     jae ok
+  //     call <stack guard>
+  //     test rax, <loop nesting depth>
+  // ok: ...
+  //
+  // We will patch away the branch so the code is:
+  //
+  //     cmp rsp, <limit>  ;; Not changed
+  //     nop
+  //     nop
+  //     call <on-stack replacement>
+  //     test rax, <loop nesting depth>
+  // ok:
+  //
+  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
+         *(call_target_address - 2) == 0x05 &&  // offset
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x90;  // nop
+  *(call_target_address - 2) = 0x90;  // nop
+  Assembler::set_target_address_at(call_target_address,
+                                   replacement_code->entry());
 }
 
 
 void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
-  UNIMPLEMENTED();
+  Address call_target_address = pc_after - kIntSize;
+  ASSERT(replacement_code->entry() ==
+         Assembler::target_address_at(call_target_address));
+  // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
+  // restore the conditional branch.
+  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
+         *(call_target_address - 2) == 0x90 &&  // nop
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x73;  // jae
+  *(call_target_address - 2) = 0x05;  // offset
+  Assembler::set_target_address_at(call_target_address,
+                                   check_code->entry());
 }
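
Both functions edit the same five bytes around the guard call: pc_after points just past the call instruction, so call_target_address = pc_after - kIntSize lands on the call's 32-bit operand, and the three bytes before it are the jae opcode, its 8-bit displacement, and the 0xe8 call opcode. A standalone round-trip sketch of the patch and revert on a plain byte buffer; the memcpy stands in for Assembler::set_target_address_at and the rel32 values are illustrative placeholders:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // The three bytes before the call's rel32 operand are the jae opcode,
    // its displacement, and the call opcode, exactly as asserted above.
    void Patch(std::uint8_t* call_target_address, std::int32_t osr_target) {
      assert(call_target_address[-3] == 0x73 &&  // jae
             call_target_address[-2] == 0x05 &&  // offset
             call_target_address[-1] == 0xe8);   // call
      call_target_address[-3] = 0x90;            // nop
      call_target_address[-2] = 0x90;            // nop
      std::memcpy(call_target_address, &osr_target, 4);  // retarget the call
    }

    void Revert(std::uint8_t* call_target_address, std::int32_t check_target) {
      assert(call_target_address[-3] == 0x90 &&  // nop
             call_target_address[-2] == 0x90 &&  // nop
             call_target_address[-1] == 0xe8);   // call
      call_target_address[-3] = 0x73;            // jae
      call_target_address[-2] = 0x05;            // offset
      std::memcpy(call_target_address, &check_target, 4);
    }

    int main() {
      // jae +5; call <stack guard>; 0x11... is a placeholder rel32.
      std::uint8_t code[] = {0x73, 0x05, 0xe8, 0x11, 0x11, 0x11, 0x11};
      std::uint8_t* call_target_address = code + 3;  // pc_after - kIntSize

      Patch(call_target_address, 0x22222222);
      assert(code[0] == 0x90 && code[1] == 0x90);

      Revert(call_target_address, 0x11111111);
      assert(code[0] == 0x73 && code[1] == 0x05);
      return 0;
    }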
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -2109,7 +2109,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  Abort("Unimplemented: %s", "DoCallRuntime");
+  CallRuntime(instr->function(), instr->arity(), instr);
 }
@@ -2741,7 +2741,19 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
 
 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
-  Abort("Unimplemented: %s", "DoOsrEntry");
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+                                   instr->SpilledDoubleRegisterArray());
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  ASSERT(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(osr_pc_offset_ == -1);
+  osr_pc_offset_ = masm()->pc_offset();
 }
 
 
 #undef __
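
The ordering in DoOsrEntry is load-bearing: the spill operands have to be attached before the environment is registered, and the OSR entry offset is recorded exactly once per function. A minimal standalone model of that bookkeeping; all types and names below are illustrative stand-ins, not V8's:

    #include <cassert>
    #include <utility>
    #include <vector>

    struct Environment {
      std::vector<int> spilled;  // stand-in for the spill slot operands
      bool registered = false;
    };

    struct CodeGenModel {
      int osr_pc_offset = -1;  // -1 means no OSR entry recorded yet
      int pc_offset = 0;       // stand-in for masm()->pc_offset()

      void DoOsrEntry(Environment* env, std::vector<int> spills) {
        // Spill operands must be attached before registration; once the
        // environment is registered it can no longer be backpatched.
        env->spilled = std::move(spills);
        assert(!env->registered);
        env->registered = true;

        // A function has exactly one OSR entry, recorded exactly once.
        assert(osr_pc_offset == -1);
        osr_pc_offset = pc_offset;
      }
    };

    int main() {
      CodeGenModel cg;
      cg.pc_offset = 42;
      Environment env;
      cg.DoOsrEntry(&env, {1, 2, 3});
      assert(cg.osr_pc_offset == 42 && env.registered);
      return 0;
    }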
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -119,9 +119,6 @@ regress/regress-deopt-gc: SKIP
 ##############################################################################
 [ $arch == x64 && $crankshaft ]
 
-# BUG (1026) This test is currently flaky.
-compiler/simple-osr: SKIP
-
 # BUG (1094)
 regress/regress-deopt-gc: SKIP