Commit b3cbe51f authored by yangguo@chromium.org

Refactor interrupt check patching for OSR.

This prepares for speculative concurrent OSR. I'm planning to add
another builtin as a patch target, to indicate a concurrent OSR.

R=titzer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/23608004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16425 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 95c7ae81
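
Note (editorial, not part of the commit): the refactoring moves the lookup of the two code objects (the InterruptStub and the OnStackReplacement builtin) out of the call sites and into the Deoptimizer, so the public entry points take an Isolate instead. A condensed before/after of the affected declarations, assembled from the src/deoptimizer.h hunk below:

  // Before: every caller looked up both code objects itself.
  static void PatchInterruptCode(Code* unoptimized_code,
                                 Code* interrupt_code,
                                 Code* replacement_code);
  static void RevertInterruptCode(Code* unoptimized_code,
                                  Code* interrupt_code,
                                  Code* replacement_code);
  static bool InterruptCodeIsPatched(Code* unoptimized_code,
                                     Address pc_after,
                                     Code* interrupt_code,
                                     Code* replacement_code);  // DEBUG only

  // After: callers pass the Isolate and the Deoptimizer fetches the stub
  // and the builtin itself; the bool predicate becomes an enum, presumably
  // so a third state for the planned concurrent OSR can be added later.
  static void PatchInterruptCode(Isolate* isolate, Code* unoptimized_code);
  static void RevertInterruptCode(Isolate* isolate, Code* unoptimized_code);
  static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
                                                    Code* unoptimized_code,
                                                    Address pc_after);  // DEBUG only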
src/arm/deoptimizer-arm.cc
@@ -101,12 +101,7 @@ static const int32_t kBranchBeforeInterrupt = 0x5a000004;
 
 void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
                                        Address pc_after,
-                                       Code* interrupt_code,
                                        Code* replacement_code) {
-  ASSERT(!InterruptCodeIsPatched(unoptimized_code,
-                                 pc_after,
-                                 interrupt_code,
-                                 replacement_code));
   static const int kInstrSize = Assembler::kInstrSize;
   // Turn the jump into nops.
   CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -125,12 +120,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
 
 void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
                                         Address pc_after,
-                                        Code* interrupt_code,
-                                        Code* replacement_code) {
-  ASSERT(InterruptCodeIsPatched(unoptimized_code,
-                                pc_after,
-                                interrupt_code,
-                                replacement_code));
+                                        Code* interrupt_code) {
   static const int kInstrSize = Assembler::kInstrSize;
   // Restore the original jump.
   CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -150,10 +140,10 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
 
 
 #ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
-                                         Address pc_after,
-                                         Code* interrupt_code,
-                                         Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc_after) {
   static const int kInstrSize = Assembler::kInstrSize;
   ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
 
@@ -164,17 +154,23 @@ bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
   if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
     ASSERT(Assembler::IsLdrPcImmediateOffset(
         Assembler::instr_at(pc_after - 2 * kInstrSize)));
-    ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) ==
+    Code* osr_builtin =
+        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+    ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
            Memory::uint32_at(interrupt_address_pointer));
-    return true;
+    return PATCHED_FOR_OSR;
   } else {
+    // Get the interrupt stub code object to match against from cache.
+    Code* interrupt_code = NULL;
+    InterruptStub stub;
+    if (!stub.FindCodeInCache(&interrupt_code, isolate)) UNREACHABLE();
     ASSERT(Assembler::IsLdrPcImmediateOffset(
         Assembler::instr_at(pc_after - 2 * kInstrSize)));
     ASSERT_EQ(kBranchBeforeInterrupt,
               Memory::int32_at(pc_after - 3 * kInstrSize));
     ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) ==
            Memory::uint32_at(interrupt_address_pointer));
-    return false;
+    return NOT_PATCHED;
   }
 }
 #endif  // DEBUG
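
The three ARM hunks above toggle the back-edge interrupt check in place. A sketch of the instruction sequence involved, reconstructed from the constants and offsets asserted in this file (kBranchBeforeInterrupt, kBlxIp, the ldr/blx pair); illustrative only, not part of the commit:

  // Unpatched back edge (interrupt check):
  //   <compare sp against the stack limit>
  //   bpl <ok>                 // kBranchBeforeInterrupt: skips the call
  //   ldr ip, [pc, #<offset>]  // constant pool slot holds the
  //                            // InterruptStub entry
  //   blx ip                   // kBlxIp; pc_after points just past this
  // Patched for OSR: the bpl is overwritten with a nop and the constant
  // pool slot is rewritten to the Builtins::kOnStackReplacement entry, so
  // every back edge now calls the OSR builtin unconditionally.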
src/compiler.cc
@@ -1024,13 +1024,7 @@ void Compiler::RecompileConcurrent(Handle<JSFunction> closure) {
     // aborted optimization. In either case we want to continue executing
     // the unoptimized code without running into OSR. If the unoptimized
     // code has been patched for OSR, unpatch it.
-    InterruptStub interrupt_stub;
-    Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
-    Handle<Code> replacement_code =
-        isolate->builtins()->OnStackReplacement();
-    Deoptimizer::RevertInterruptCode(shared->code(),
-                                     *interrupt_code,
-                                     *replacement_code);
+    Deoptimizer::RevertInterruptCode(isolate, shared->code());
   }
 
   if (isolate->has_pending_exception()) isolate->clear_pending_exception();
src/deoptimizer.cc
@@ -2578,9 +2578,17 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
 }
 
 
-void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
-                                     Code* interrupt_code,
-                                     Code* replacement_code) {
+void Deoptimizer::PatchInterruptCode(Isolate* isolate,
+                                     Code* unoptimized_code) {
+  DisallowHeapAllocation no_gc;
+  // Get the interrupt stub code object to match against. We aren't
+  // prepared to generate it, but we don't expect to have to.
+  Code* interrupt_code = NULL;
+  InterruptStub interrupt_stub;
+  CHECK(interrupt_stub.FindCodeInCache(&interrupt_code, isolate));
+  Code* replacement_code =
+      isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
   // Iterate over the back edge table and patch every interrupt
   // call to an unconditional call to the replacement code.
   int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
@@ -2589,9 +2597,11 @@ void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
        !back_edges.Done();
        back_edges.Next()) {
     if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
+      ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
+                                                    unoptimized_code,
+                                                    back_edges.pc()));
       PatchInterruptCodeAt(unoptimized_code,
                            back_edges.pc(),
-                           interrupt_code,
                            replacement_code);
     }
   }
@@ -2599,14 +2609,17 @@ void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
   unoptimized_code->set_back_edges_patched_for_osr(true);
 #ifdef DEBUG
   Deoptimizer::VerifyInterruptCode(
-      unoptimized_code, interrupt_code, replacement_code, loop_nesting_level);
+      isolate, unoptimized_code, loop_nesting_level);
 #endif  // DEBUG
 }
 
 
-void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
-                                      Code* interrupt_code,
-                                      Code* replacement_code) {
+void Deoptimizer::RevertInterruptCode(Isolate* isolate,
+                                      Code* unoptimized_code) {
+  InterruptStub interrupt_stub;
+  Code* interrupt_code = *interrupt_stub.GetCode(isolate);
+  DisallowHeapAllocation no_gc;
+
   // Iterate over the back edge table and revert the patched interrupt calls.
   ASSERT(unoptimized_code->back_edges_patched_for_osr());
   int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
@@ -2615,10 +2628,10 @@ void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
        !back_edges.Done();
        back_edges.Next()) {
     if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
-      RevertInterruptCodeAt(unoptimized_code,
-                            back_edges.pc(),
-                            interrupt_code,
-                            replacement_code);
+      ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
+                                                        unoptimized_code,
+                                                        back_edges.pc()));
+      RevertInterruptCodeAt(unoptimized_code, back_edges.pc(), interrupt_code);
     }
   }
 
@@ -2626,16 +2639,14 @@ void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
   unoptimized_code->set_allow_osr_at_loop_nesting_level(0);
 #ifdef DEBUG
   // Assert that none of the back edges are patched anymore.
-  Deoptimizer::VerifyInterruptCode(
-      unoptimized_code, interrupt_code, replacement_code, -1);
+  Deoptimizer::VerifyInterruptCode(isolate, unoptimized_code, -1);
 #endif  // DEBUG
 }
 
 
 #ifdef DEBUG
-void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
-                                      Code* interrupt_code,
-                                      Code* replacement_code,
+void Deoptimizer::VerifyInterruptCode(Isolate* isolate,
+                                      Code* unoptimized_code,
                                       int loop_nesting_level) {
   for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized_code);
        !back_edges.Done();
@@ -2645,10 +2656,9 @@ void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
     // Assert that all back edges for shallower loops (and only those)
     // have already been patched.
     CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
-             InterruptCodeIsPatched(unoptimized_code,
-                                    back_edges.pc(),
-                                    interrupt_code,
-                                    replacement_code));
+             GetInterruptPatchState(isolate,
+                                    unoptimized_code,
+                                    back_edges.pc()) != NOT_PATCHED);
   }
 }
 #endif  // DEBUG
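
The platform-independent driver above now owns the whole patch/revert cycle. A minimal usage sketch using only names from this diff (the wrapper function and variable names are hypothetical, for illustration):

  void OsrRoundTrip(Isolate* isolate, Code* unoptimized) {
    // Patches every back edge at exactly allow_osr_at_loop_nesting_level()
    // to call Builtins::kOnStackReplacement, then marks the code object
    // via set_back_edges_patched_for_osr(true).
    Deoptimizer::PatchInterruptCode(isolate, unoptimized);

    // ... a patched back edge fires and OSR compilation is attempted ...

    // Restores every back edge at depth <= the patched level to the cached
    // InterruptStub call and resets the allowed nesting level to 0.
    Deoptimizer::RevertInterruptCode(isolate, unoptimized);
  }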
src/deoptimizer.h
@@ -144,6 +144,11 @@ class Deoptimizer : public Malloced {
     DEBUGGER
   };
 
+  enum InterruptPatchState {
+    NOT_PATCHED,
+    PATCHED_FOR_OSR
+  };
+
   static const int kBailoutTypesWithCodeEntry = SOFT + 1;
 
   struct JumpTableEntry {
@@ -231,40 +236,34 @@ class Deoptimizer : public Malloced {
 
   // Patch all interrupts with allowed loop depth in the unoptimized code to
   // unconditionally call replacement_code.
-  static void PatchInterruptCode(Code* unoptimized_code,
-                                 Code* interrupt_code,
-                                 Code* replacement_code);
+  static void PatchInterruptCode(Isolate* isolate,
+                                 Code* unoptimized_code);
 
   // Patch the interrupt at the instruction before pc_after in
   // the unoptimized code to unconditionally call replacement_code.
   static void PatchInterruptCodeAt(Code* unoptimized_code,
                                    Address pc_after,
-                                   Code* interrupt_code,
                                    Code* replacement_code);
 
   // Change all patched interrupts patched in the unoptimized code
   // back to normal interrupts.
-  static void RevertInterruptCode(Code* unoptimized_code,
-                                  Code* interrupt_code,
-                                  Code* replacement_code);
+  static void RevertInterruptCode(Isolate* isolate,
+                                  Code* unoptimized_code);
 
   // Change patched interrupt in the unoptimized code
   // back to a normal interrupt.
   static void RevertInterruptCodeAt(Code* unoptimized_code,
                                     Address pc_after,
-                                    Code* interrupt_code,
-                                    Code* replacement_code);
+                                    Code* interrupt_code);
 
 #ifdef DEBUG
-  static bool InterruptCodeIsPatched(Code* unoptimized_code,
-                                     Address pc_after,
-                                     Code* interrupt_code,
-                                     Code* replacement_code);
+  static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
+                                                    Code* unoptimized_code,
+                                                    Address pc_after);
 
   // Verify that all back edges of a certain loop depth are patched.
-  static void VerifyInterruptCode(Code* unoptimized_code,
-                                  Code* interrupt_code,
-                                  Code* replacement_code,
+  static void VerifyInterruptCode(Isolate* isolate,
+                                  Code* unoptimized_code,
                                   int loop_nesting_level);
 #endif  // DEBUG
src/ia32/deoptimizer-ia32.cc
@@ -200,12 +200,7 @@ static const byte kNopByteTwo = 0x90;
 
 void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
                                        Address pc_after,
-                                       Code* interrupt_code,
                                        Code* replacement_code) {
-  ASSERT(!InterruptCodeIsPatched(unoptimized_code,
-                                 pc_after,
-                                 interrupt_code,
-                                 replacement_code));
   // Turn the jump into nops.
   Address call_target_address = pc_after - kIntSize;
   *(call_target_address - 3) = kNopByteOne;
@@ -221,12 +216,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
 
 void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
                                         Address pc_after,
-                                        Code* interrupt_code,
-                                        Code* replacement_code) {
-  ASSERT(InterruptCodeIsPatched(unoptimized_code,
-                                pc_after,
-                                interrupt_code,
-                                replacement_code));
+                                        Code* interrupt_code) {
   // Restore the original jump.
   Address call_target_address = pc_after - kIntSize;
   *(call_target_address - 3) = kJnsInstruction;
@@ -241,23 +231,29 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
 
 
 #ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
-                                         Address pc_after,
-                                         Code* interrupt_code,
-                                         Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc_after) {
   Address call_target_address = pc_after - kIntSize;
   ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
   if (*(call_target_address - 3) == kNopByteOne) {
-    ASSERT_EQ(replacement_code->entry(),
-              Assembler::target_address_at(call_target_address));
     ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
-    return true;
+    Code* osr_builtin =
+        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+    ASSERT_EQ(osr_builtin->entry(),
+              Assembler::target_address_at(call_target_address));
+    return PATCHED_FOR_OSR;
   } else {
+    // Get the interrupt stub code object to match against from cache.
+    Code* interrupt_code = NULL;
+    InterruptStub stub;
+    if (!stub.FindCodeInCache(&interrupt_code, isolate)) UNREACHABLE();
     ASSERT_EQ(interrupt_code->entry(),
              Assembler::target_address_at(call_target_address));
     ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
     ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
-    return false;
+    return NOT_PATCHED;
   }
 }
 #endif  // DEBUG
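
On ia32 the patch operates on the raw bytes of a jns/call pair. A sketch of the two states, reconstructed from the constants asserted above (kJnsInstruction, kJnsOffset, kNopByteOne/kNopByteTwo, kCallInstruction); illustrative only:

  // Unpatched back edge:              Patched for OSR:
  //   jns <ok>     (2 bytes)            nop; nop     (0x90 0x90)
  //   call <InterruptStub entry>        call <OnStackReplacement entry>
  // GetInterruptPatchState inspects the two bytes immediately before the
  // call opcode (call_target_address - 3 and - 2) to tell the states
  // apart, and the call's 32-bit operand to check the expected target.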
src/mips/deoptimizer-mips.cc
@@ -101,12 +101,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 
 void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
                                        Address pc_after,
-                                       Code* interrupt_code,
                                        Code* replacement_code) {
-  ASSERT(!InterruptCodeIsPatched(unoptimized_code,
-                                 pc_after,
-                                 interrupt_code,
-                                 replacement_code));
   static const int kInstrSize = Assembler::kInstrSize;
   // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
   CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -123,12 +118,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
 
 void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
                                         Address pc_after,
-                                        Code* interrupt_code,
-                                        Code* replacement_code) {
-  ASSERT(InterruptCodeIsPatched(unoptimized_code,
-                                pc_after,
-                                interrupt_code,
-                                replacement_code));
+                                        Code* interrupt_code) {
   static const int kInstrSize = Assembler::kInstrSize;
   // Restore the sltu instruction so beq can be taken again.
   CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -143,23 +133,29 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
 
 
 #ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
-                                         Address pc_after,
-                                         Code* interrupt_code,
-                                         Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc_after) {
   static const int kInstrSize = Assembler::kInstrSize;
   ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
   if (Assembler::IsAddImmediate(
       Assembler::instr_at(pc_after - 6 * kInstrSize))) {
+    Code* osr_builtin =
+        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
     ASSERT(reinterpret_cast<uint32_t>(
         Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-           reinterpret_cast<uint32_t>(replacement_code->entry()));
-    return true;
+           reinterpret_cast<uint32_t>(osr_builtin->entry()));
+    return PATCHED_FOR_OSR;
  } else {
+    // Get the interrupt stub code object to match against from cache.
+    Code* interrupt_code = NULL;
+    InterruptStub stub;
+    if (!stub.FindCodeInCache(&interrupt_code, isolate)) UNREACHABLE();
     ASSERT(reinterpret_cast<uint32_t>(
         Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
            reinterpret_cast<uint32_t>(interrupt_code->entry()));
-    return false;
+    return NOT_PATCHED;
   }
 }
 #endif  // DEBUG
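
On MIPS the conditional branch itself is left alone; the instruction that computes its condition is swapped. Reconstructed from the comments and asserts above; illustrative only:

  // Unpatched back edge:
  //   sltu at, sp, <stack limit>  // at = 1 iff sp is below the limit
  //   beq  at, zero_reg, <ok>     // skip the call while no interrupt is due
  //   <load entry>; call          // InterruptStub
  // Patched for OSR: the sltu is replaced by a load-immediate of 1 into at
  // (an addiu, which Assembler::IsAddImmediate() recognizes), so the beq
  // falls through and Builtins::kOnStackReplacement is called on every
  // back edge.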
src/runtime-profiler.cc
@@ -177,18 +177,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
     PrintF(" for on-stack replacement]\n");
   }
 
-  // Get the interrupt stub code object to match against. We aren't
-  // prepared to generate it, but we don't expect to have to.
-  Code* interrupt_code = NULL;
-  InterruptStub interrupt_stub;
-  bool found_code = interrupt_stub.FindCodeInCache(&interrupt_code, isolate_);
-  if (found_code) {
-    Code* replacement_code =
-        isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
-    Code* unoptimized_code = shared->code();
-    Deoptimizer::PatchInterruptCode(
-        unoptimized_code, interrupt_code, replacement_code);
-  }
+  Deoptimizer::PatchInterruptCode(isolate_, shared->code());
 }
 
src/runtime.cc
@@ -8662,12 +8662,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
     function->PrintName();
     PrintF("]\n");
   }
-  InterruptStub interrupt_stub;
-  Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
-  Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
-  Deoptimizer::RevertInterruptCode(*unoptimized,
-                                   *interrupt_code,
-                                   *replacement_code);
+  Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
 
   // If the optimization attempt succeeded, return the AST id tagged as a
   // smi. This tells the builtin that we need to translate the unoptimized
src/x64/deoptimizer-x64.cc
@@ -105,12 +105,7 @@ static const byte kNopByteTwo = 0x90;
 
 void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
                                        Address pc_after,
-                                       Code* interrupt_code,
                                        Code* replacement_code) {
-  ASSERT(!InterruptCodeIsPatched(unoptimized_code,
-                                 pc_after,
-                                 interrupt_code,
-                                 replacement_code));
   // Turn the jump into nops.
   Address call_target_address = pc_after - kIntSize;
   *(call_target_address - 3) = kNopByteOne;
@@ -126,12 +121,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
 
 void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
                                         Address pc_after,
-                                        Code* interrupt_code,
-                                        Code* replacement_code) {
-  ASSERT(InterruptCodeIsPatched(unoptimized_code,
-                                pc_after,
-                                interrupt_code,
-                                replacement_code));
+                                        Code* interrupt_code) {
   // Restore the original jump.
   Address call_target_address = pc_after - kIntSize;
   *(call_target_address - 3) = kJnsInstruction;
@@ -146,23 +136,29 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
 
 
 #ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
-                                         Address pc_after,
-                                         Code* interrupt_code,
-                                         Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc_after) {
   Address call_target_address = pc_after - kIntSize;
   ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
   if (*(call_target_address - 3) == kNopByteOne) {
-    ASSERT(replacement_code->entry() ==
-           Assembler::target_address_at(call_target_address));
     ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
-    return true;
+    Code* osr_builtin =
+        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+    ASSERT_EQ(osr_builtin->entry(),
+              Assembler::target_address_at(call_target_address));
+    return PATCHED_FOR_OSR;
   } else {
+    // Get the interrupt stub code object to match against from cache.
+    Code* interrupt_code = NULL;
+    InterruptStub stub;
+    if (!stub.FindCodeInCache(&interrupt_code, isolate)) UNREACHABLE();
     ASSERT_EQ(interrupt_code->entry(),
              Assembler::target_address_at(call_target_address));
     ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
     ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
-    return false;
+    return NOT_PATCHED;
   }
 }
 #endif  // DEBUG