Fix lazy deoptimization at HInvokeFunction and enable target-recording call-function stub.

Changes the way we do lazy deoptimization:

1. For side-effect instructions, we insert the lazy-deopt call at
the following LLazyBailout instruction.

     CALL
     GAP
     LAZY-BAILOUT ==> lazy-deopt-call

2. For other instructions (StackCheck) we insert it right after the
instruction since the deopt targets an earlier deoptimization environment.

   STACK-CHECK
   GAP ==> lazy-deopt-call

The pc of the lazy-deopt call that will be patched in is recorded in the
deoptimization input data. Each Lithium instruction can have 0..n safepoints.
All safepoints get the deoptimization index of the associated LAZY-BAILOUT
instruction. On lazy deoptimization we use the return-pc to find the safepoint.
The safepoint tells us the deoptimization index, which in turn finds us the
PC where to insert the lazy-deopt-call.

Additional changes:
 * RegExpLiteral is marked as having side-effects so that it
   gets an explicit lazy-bailout instruction (instead of
   treating it specially like stack-checks)
 * Enable target recording CallFunctionStub to achieve
   more inlining on optimized code.

BUG=v8:1789
TEST=jslint and uglify run without crashing, mjsunit/compiler/regress-lazy-deopt.js
Review URL: http://codereview.chromium.org/8492004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10006 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ee2cd428
...@@ -44,12 +44,6 @@ int Deoptimizer::patch_size() { ...@@ -44,12 +44,6 @@ int Deoptimizer::patch_size() {
} }
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Nothing to do. No new relocation information is written for lazy
// deoptimization on ARM.
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) { void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope; HandleScope scope;
AssertNoAllocation no_allocation; AssertNoAllocation no_allocation;
...@@ -58,58 +52,37 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -58,58 +52,37 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Get the optimized code. // Get the optimized code.
Code* code = function->code(); Code* code = function->code();
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the // Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more. // code patching below, and is not needed any more.
code->InvalidateRelocation(); code->InvalidateRelocation();
// For each return after a safepoint insert an absolute call to the // For each LLazyBailout instruction insert a call to the corresponding
// corresponding deoptimization entry. // deoptimization entry.
unsigned last_pc_offset = 0; DeoptimizationInputData* deopt_data =
SafepointTable table(function->code()); DeoptimizationInputData::cast(code->deoptimization_data());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint.
CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG #ifdef DEBUG
// Destroy the code which is not supposed to be run again. Address prev_call_address = NULL;
int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
destroyer.masm()->bkpt(0);
}
#endif #endif
last_pc_offset = pc_offset; for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { if (deopt_data->Pc(i)->value() == -1) continue;
Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry( Address call_address = code_start_address + deopt_data->Pc(i)->value();
deoptimization_index, Deoptimizer::LAZY); Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
last_pc_offset += gap_code_size; int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
RelocInfo::NONE); RelocInfo::NONE);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size()); ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(code->instruction_start() + last_pc_offset, CodePatcher patcher(call_address, call_size_in_words);
call_size_in_words); patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE); ASSERT(prev_call_address == NULL ||
last_pc_offset += call_size_in_bytes; call_address >= prev_call_address + patch_size());
} ASSERT(call_address + patch_size() <= code->instruction_end());
}
#ifdef DEBUG #ifdef DEBUG
// Destroy the code which is not supposed to be run again. prev_call_address = call_address;
int instructions =
(code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
destroyer.masm()->bkpt(0);
}
#endif #endif
}
Isolate* isolate = code->GetIsolate(); Isolate* isolate = code->GetIsolate();
...@@ -131,11 +104,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -131,11 +104,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: "); PrintF("[forced deoptimization: ");
function->PrintName(); function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
} }
} }
......
This diff is collapsed.
...@@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED), status_(UNUSED),
deferred_(8), deferred_(8),
osr_pc_offset_(-1), osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
resolver_(this), resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) { expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions(); PopulateDeoptimizationLiteralsWithInlinedFunctions();
...@@ -112,7 +113,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -112,7 +113,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr); void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check); Label* map_check);
// Parallel move support. // Parallel move support.
...@@ -215,10 +216,11 @@ class LCodeGen BASE_EMBEDDED { ...@@ -215,10 +216,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object); void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr, void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode); SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment); void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment); void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation, void AddToTranslation(Translation* translation,
...@@ -247,19 +249,16 @@ class LCodeGen BASE_EMBEDDED { ...@@ -247,19 +249,16 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers, void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind, Safepoint::Kind kind,
int arguments, int arguments,
int deoptimization_index); Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(int deoptimization_index); void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers, void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments, int arguments,
int deoptimization_index); Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers, void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments, int arguments,
int deoptimization_index); Safepoint::DeoptMode mode);
void RecordPosition(int position); void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned); static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block); void EmitGoto(int block);
...@@ -302,6 +301,8 @@ class LCodeGen BASE_EMBEDDED { ...@@ -302,6 +301,8 @@ class LCodeGen BASE_EMBEDDED {
Address address; Address address;
}; };
void EnsureSpaceForLazyDeopt();
LChunk* const chunk_; LChunk* const chunk_;
MacroAssembler* const masm_; MacroAssembler* const masm_;
CompilationInfo* const info_; CompilationInfo* const info_;
...@@ -318,6 +319,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -318,6 +319,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_; TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_; ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_; int osr_pc_offset_;
int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table // Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code. // itself is emitted at the end of the generated code.
......
...@@ -116,25 +116,11 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( ...@@ -116,25 +116,11 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Get the function and code from the frame. // Get the function and code from the frame.
JSFunction* function = JSFunction::cast(frame->function()); JSFunction* function = JSFunction::cast(frame->function());
Code* code = frame->LookupCode(); Code* code = frame->LookupCode();
Address code_start_address = code->instruction_start();
// Locate the deoptimization point in the code. As we are at a call the // Locate the deoptimization point in the code. As we are at a call the
// return address must be at a place in the code with deoptimization support. // return address must be at a place in the code with deoptimization support.
int deoptimization_index = Safepoint::kNoDeoptimizationIndex; SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
// Scope this as the safe point constructor will disallow allocation. int deoptimization_index = safepoint_entry.deoptimization_index();
{
SafepointTable table(code);
for (unsigned i = 0; i < table.length(); ++i) {
Address address = code_start_address + table.GetPcOffset(i);
if (address == frame->pc()) {
SafepointEntry safepoint_entry = table.GetEntry(i);
ASSERT(safepoint_entry.deoptimization_index() !=
Safepoint::kNoDeoptimizationIndex);
deoptimization_index = safepoint_entry.deoptimization_index();
break;
}
}
}
ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex); ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
// Always use the actual stack slots when calculating the fp to sp // Always use the actual stack slots when calculating the fp to sp
......
...@@ -4138,6 +4138,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> { ...@@ -4138,6 +4138,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
pattern_(pattern), pattern_(pattern),
flags_(flags) { flags_(flags) {
SetOperandAt(0, context); SetOperandAt(0, context);
SetAllSideEffects();
} }
HValue* context() { return OperandAt(0); } HValue* context() { return OperandAt(0); }
......
...@@ -45,16 +45,6 @@ int Deoptimizer::patch_size() { ...@@ -45,16 +45,6 @@ int Deoptimizer::patch_size() {
} }
static void ZapCodeRange(Address start, Address end) {
#ifdef DEBUG
ASSERT(start <= end);
int size = end - start;
CodePatcher destroyer(start, size);
while (size-- > 0) destroyer.masm()->int3();
#endif
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
Isolate* isolate = code->GetIsolate(); Isolate* isolate = code->GetIsolate();
HandleScope scope(isolate); HandleScope scope(isolate);
...@@ -62,20 +52,14 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { ...@@ -62,20 +52,14 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Compute the size of relocation information needed for the code // Compute the size of relocation information needed for the code
// patching in Deoptimizer::DeoptimizeFunction. // patching in Deoptimizer::DeoptimizeFunction.
int min_reloc_size = 0; int min_reloc_size = 0;
Address prev_reloc_address = code->instruction_start(); int prev_pc_offset = 0;
Address code_start_address = code->instruction_start(); DeoptimizationInputData* deopt_data =
SafepointTable table(*code); DeoptimizationInputData::cast(code->deoptimization_data());
for (unsigned i = 0; i < table.length(); ++i) { for (int i = 0; i < deopt_data->DeoptCount(); i++) {
Address curr_reloc_address = code_start_address + table.GetPcOffset(i); int pc_offset = deopt_data->Pc(i)->value();
ASSERT_GE(curr_reloc_address, prev_reloc_address); if (pc_offset == -1) continue;
SafepointEntry safepoint_entry = table.GetEntry(i); ASSERT_GE(pc_offset, prev_pc_offset);
int deoptimization_index = safepoint_entry.deoptimization_index(); int pc_delta = pc_offset - prev_pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
// The gap code is needed to get to the state expected at the
// bailout and we need to skip the call opcode to get to the
// address that needs reloc.
curr_reloc_address += safepoint_entry.gap_code_size() + 1;
int pc_delta = curr_reloc_address - prev_reloc_address;
// We use RUNTIME_ENTRY reloc info which has a size of 2 bytes // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
// if encodable with small pc delta encoding and up to 6 bytes // if encodable with small pc delta encoding and up to 6 bytes
// otherwise. // otherwise.
...@@ -84,8 +68,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { ...@@ -84,8 +68,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
} else { } else {
min_reloc_size += 6; min_reloc_size += 6;
} }
prev_reloc_address = curr_reloc_address; prev_pc_offset = pc_offset;
}
} }
// If the relocation information is not big enough we create a new // If the relocation information is not big enough we create a new
...@@ -150,41 +133,41 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -150,41 +133,41 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Address reloc_end_address = reloc_info->address() + reloc_info->Size(); Address reloc_end_address = reloc_info->address() + reloc_info->Size();
RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address); RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
// For each return after a safepoint insert a call to the corresponding // For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry. Since the call is a relative encoding, write new // deoptimization entry.
// Since the call is a relative encoding, write new
// reloc info. We do not need any of the existing reloc info because the // reloc info. We do not need any of the existing reloc info because the
// existing code will not be used again (we zap it in debug builds). // existing code will not be used again (we zap it in debug builds).
SafepointTable table(code); //
Address prev_address = code_start_address; // Emit call to lazy deoptimization at all lazy deopt points.
for (unsigned i = 0; i < table.length(); ++i) { DeoptimizationInputData* deopt_data =
Address curr_address = code_start_address + table.GetPcOffset(i); DeoptimizationInputData::cast(code->deoptimization_data());
ASSERT_GE(curr_address, prev_address); #ifdef DEBUG
ZapCodeRange(prev_address, curr_address); Address prev_call_address = NULL;
#endif
SafepointEntry safepoint_entry = table.GetEntry(i); for (int i = 0; i < deopt_data->DeoptCount(); i++) {
int deoptimization_index = safepoint_entry.deoptimization_index(); if (deopt_data->Pc(i)->value() == -1) continue;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { // Patch lazy deoptimization entry.
// The gap code is needed to get to the state expected at the bailout. Address call_address = code_start_address + deopt_data->Pc(i)->value();
curr_address += safepoint_entry.gap_code_size(); CodePatcher patcher(call_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
CodePatcher patcher(curr_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
patcher.masm()->call(deopt_entry, RelocInfo::NONE); patcher.masm()->call(deopt_entry, RelocInfo::NONE);
// We use RUNTIME_ENTRY for deoptimization bailouts. // We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(curr_address + 1, // 1 after the call opcode. RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY, RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry), reinterpret_cast<intptr_t>(deopt_entry),
NULL); NULL);
reloc_info_writer.Write(&rinfo); reloc_info_writer.Write(&rinfo);
ASSERT_GE(reloc_info_writer.pos(), ASSERT_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize); reloc_info->address() + ByteArray::kHeaderSize);
curr_address += patch_size(); ASSERT(prev_call_address == NULL ||
} call_address >= prev_call_address + patch_size());
prev_address = curr_address; ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
} }
ZapCodeRange(prev_address,
code_start_address + code->safepoint_table_offset());
// Move the relocation info to the beginning of the byte array. // Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos(); int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
...@@ -218,11 +201,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -218,11 +201,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: "); PrintF("[forced deoptimization: ");
function->PrintName(); function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
} }
} }
......
...@@ -2104,9 +2104,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { ...@@ -2104,9 +2104,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
// Record call targets in unoptimized code, but not in the snapshot. // Record call targets in unoptimized code, but not in the snapshot.
// TODO(1789): Reenable temporarily disabled recording CallFunctionStub bool record_call_target = !Serializer::enabled();
// when the issue is fixed.
bool record_call_target = false && !Serializer::enabled();
if (record_call_target) { if (record_call_target) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET); flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
} }
......
This diff is collapsed.
...@@ -61,7 +61,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -61,7 +61,7 @@ class LCodeGen BASE_EMBEDDED {
dynamic_frame_alignment_(false), dynamic_frame_alignment_(false),
deferred_(8), deferred_(8),
osr_pc_offset_(-1), osr_pc_offset_(-1),
deoptimization_reloc_size(), last_lazy_deopt_pc_(0),
resolver_(this), resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) { expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions(); PopulateDeoptimizationLiteralsWithInlinedFunctions();
...@@ -101,7 +101,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -101,7 +101,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr); void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check); Label* map_check);
// Parallel move support. // Parallel move support.
...@@ -144,7 +144,6 @@ class LCodeGen BASE_EMBEDDED { ...@@ -144,7 +144,6 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); } HGraph* graph() const { return chunk_->graph(); }
int GetNextEmittedBlock(int block); int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true, void EmitClassOfTest(Label* if_true,
Label* if_false, Label* if_false,
...@@ -210,10 +209,11 @@ class LCodeGen BASE_EMBEDDED { ...@@ -210,10 +209,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object); void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr, void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode); SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment); void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment); void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation, void AddToTranslation(Translation* translation,
...@@ -248,16 +248,13 @@ class LCodeGen BASE_EMBEDDED { ...@@ -248,16 +248,13 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers, void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind, Safepoint::Kind kind,
int arguments, int arguments,
int deoptimization_index); Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(int deoptimization_index); void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers, void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments, int arguments,
int deoptimization_index); Safepoint::DeoptMode mode);
void RecordPosition(int position); void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned); static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block); void EmitGoto(int block);
...@@ -291,6 +288,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -291,6 +288,7 @@ class LCodeGen BASE_EMBEDDED {
Register object, Register object,
Handle<Map> type, Handle<Map> type,
Handle<String> name); Handle<String> name);
void EnsureSpaceForLazyDeopt();
LChunk* const chunk_; LChunk* const chunk_;
MacroAssembler* const masm_; MacroAssembler* const masm_;
...@@ -308,13 +306,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -308,13 +306,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_; TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_; ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_; int osr_pc_offset_;
int last_lazy_deopt_pc_;
struct DeoptimizationRelocSize {
int min_size;
int last_pc_offset;
};
DeoptimizationRelocSize deoptimization_reloc_size;
// Builder that keeps track of safepoints in the code. The table // Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code. // itself is emitted at the end of the generated code.
......
...@@ -366,17 +366,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> { ...@@ -366,17 +366,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
class LLazyBailout: public LTemplateInstruction<0, 0, 0> { class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
public: public:
LLazyBailout() : gap_instructions_size_(0) { }
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
void set_gap_instructions_size(int gap_instructions_size) {
gap_instructions_size_ = gap_instructions_size;
}
int gap_instructions_size() { return gap_instructions_size_; }
private:
int gap_instructions_size_;
}; };
......
...@@ -454,6 +454,7 @@ class LEnvironment: public ZoneObject { ...@@ -454,6 +454,7 @@ class LEnvironment: public ZoneObject {
translation_index_(-1), translation_index_(-1),
ast_id_(ast_id), ast_id_(ast_id),
parameter_count_(parameter_count), parameter_count_(parameter_count),
pc_offset_(-1),
values_(value_count), values_(value_count),
representations_(value_count), representations_(value_count),
spilled_registers_(NULL), spilled_registers_(NULL),
...@@ -467,6 +468,7 @@ class LEnvironment: public ZoneObject { ...@@ -467,6 +468,7 @@ class LEnvironment: public ZoneObject {
int translation_index() const { return translation_index_; } int translation_index() const { return translation_index_; }
int ast_id() const { return ast_id_; } int ast_id() const { return ast_id_; }
int parameter_count() const { return parameter_count_; } int parameter_count() const { return parameter_count_; }
int pc_offset() const { return pc_offset_; }
LOperand** spilled_registers() const { return spilled_registers_; } LOperand** spilled_registers() const { return spilled_registers_; }
LOperand** spilled_double_registers() const { LOperand** spilled_double_registers() const {
return spilled_double_registers_; return spilled_double_registers_;
...@@ -483,10 +485,13 @@ class LEnvironment: public ZoneObject { ...@@ -483,10 +485,13 @@ class LEnvironment: public ZoneObject {
return representations_[index].IsTagged(); return representations_[index].IsTagged();
} }
void Register(int deoptimization_index, int translation_index) { void Register(int deoptimization_index,
int translation_index,
int pc_offset) {
ASSERT(!HasBeenRegistered()); ASSERT(!HasBeenRegistered());
deoptimization_index_ = deoptimization_index; deoptimization_index_ = deoptimization_index;
translation_index_ = translation_index; translation_index_ = translation_index;
pc_offset_ = pc_offset;
} }
bool HasBeenRegistered() const { bool HasBeenRegistered() const {
return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex; return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
...@@ -507,6 +512,7 @@ class LEnvironment: public ZoneObject { ...@@ -507,6 +512,7 @@ class LEnvironment: public ZoneObject {
int translation_index_; int translation_index_;
int ast_id_; int ast_id_;
int parameter_count_; int parameter_count_;
int pc_offset_;
ZoneList<LOperand*> values_; ZoneList<LOperand*> values_;
ZoneList<Representation> representations_; ZoneList<Representation> representations_;
......
...@@ -7866,11 +7866,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) { ...@@ -7866,11 +7866,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count); PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
if (0 == deopt_count) return; if (0 == deopt_count) return;
PrintF(out, "%6s %6s %6s %12s\n", "index", "ast id", "argc", PrintF(out, "%6s %6s %6s %6s %12s\n", "index", "ast id", "argc", "pc",
FLAG_print_code_verbose ? "commands" : ""); FLAG_print_code_verbose ? "commands" : "");
for (int i = 0; i < deopt_count; i++) { for (int i = 0; i < deopt_count; i++) {
PrintF(out, "%6d %6d %6d", PrintF(out, "%6d %6d %6d %6d",
i, AstId(i)->value(), ArgumentsStackHeight(i)->value()); i,
AstId(i)->value(),
ArgumentsStackHeight(i)->value(),
Pc(i)->value());
if (!FLAG_print_code_verbose) { if (!FLAG_print_code_verbose) {
PrintF(out, "\n"); PrintF(out, "\n");
......
...@@ -3675,7 +3675,8 @@ class DeoptimizationInputData: public FixedArray { ...@@ -3675,7 +3675,8 @@ class DeoptimizationInputData: public FixedArray {
static const int kAstIdOffset = 0; static const int kAstIdOffset = 0;
static const int kTranslationIndexOffset = 1; static const int kTranslationIndexOffset = 1;
static const int kArgumentsStackHeightOffset = 2; static const int kArgumentsStackHeightOffset = 2;
static const int kDeoptEntrySize = 3; static const int kPcOffset = 3;
static const int kDeoptEntrySize = 4;
// Simple element accessors. // Simple element accessors.
#define DEFINE_ELEMENT_ACCESSORS(name, type) \ #define DEFINE_ELEMENT_ACCESSORS(name, type) \
...@@ -3711,6 +3712,7 @@ class DeoptimizationInputData: public FixedArray { ...@@ -3711,6 +3712,7 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ENTRY_ACCESSORS(AstId, Smi) DEFINE_ENTRY_ACCESSORS(AstId, Smi)
DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi) DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi) DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
DEFINE_ENTRY_ACCESSORS(Pc, Smi)
#undef DEFINE_ENTRY_ACCESSORS #undef DEFINE_ENTRY_ACCESSORS
......
...@@ -122,17 +122,20 @@ void Safepoint::DefinePointerRegister(Register reg) { ...@@ -122,17 +122,20 @@ void Safepoint::DefinePointerRegister(Register reg) {
Safepoint SafepointTableBuilder::DefineSafepoint( Safepoint SafepointTableBuilder::DefineSafepoint(
Assembler* assembler, Safepoint::Kind kind, int arguments, Assembler* assembler,
int deoptimization_index) { Safepoint::Kind kind,
ASSERT(deoptimization_index != -1); int arguments,
Safepoint::DeoptMode deopt_mode) {
ASSERT(arguments >= 0); ASSERT(arguments >= 0);
DeoptimizationInfo pc_and_deoptimization_index; DeoptimizationInfo info;
pc_and_deoptimization_index.pc = assembler->pc_offset(); info.pc = assembler->pc_offset();
pc_and_deoptimization_index.deoptimization_index = deoptimization_index; info.arguments = arguments;
pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset(); info.has_doubles = (kind & Safepoint::kWithDoubles);
pc_and_deoptimization_index.arguments = arguments; deoptimization_info_.Add(info);
pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles); deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex);
deoptimization_info_.Add(pc_and_deoptimization_index); if (deopt_mode == Safepoint::kNoLazyDeopt) {
last_lazy_safepoint_ = deopt_index_list_.length();
}
indexes_.Add(new ZoneList<int>(8)); indexes_.Add(new ZoneList<int>(8));
registers_.Add((kind & Safepoint::kWithRegisters) registers_.Add((kind & Safepoint::kWithRegisters)
? new ZoneList<int>(4) ? new ZoneList<int>(4)
...@@ -141,6 +144,12 @@ Safepoint SafepointTableBuilder::DefineSafepoint( ...@@ -141,6 +144,12 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
} }
void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
while (last_lazy_safepoint_ < deopt_index_list_.length()) {
deopt_index_list_[last_lazy_safepoint_++] = index;
}
}
unsigned SafepointTableBuilder::GetCodeOffset() const { unsigned SafepointTableBuilder::GetCodeOffset() const {
ASSERT(emitted_); ASSERT(emitted_);
return offset_; return offset_;
...@@ -173,11 +182,11 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) { ...@@ -173,11 +182,11 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
assembler->dd(length); assembler->dd(length);
assembler->dd(bytes_per_entry); assembler->dd(bytes_per_entry);
// Emit sorted table of pc offsets together with deoptimization indexes and // Emit sorted table of pc offsets together with deoptimization indexes.
// pc after gap information.
for (int i = 0; i < length; i++) { for (int i = 0; i < length; i++) {
assembler->dd(deoptimization_info_[i].pc); assembler->dd(deoptimization_info_[i].pc);
assembler->dd(EncodeExceptPC(deoptimization_info_[i])); assembler->dd(EncodeExceptPC(deoptimization_info_[i],
deopt_index_list_[i]));
} }
// Emit table of bitmaps. // Emit table of bitmaps.
...@@ -222,35 +231,14 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) { ...@@ -222,35 +231,14 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
} }
uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) { uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
unsigned index = info.deoptimization_index; unsigned index) {
unsigned gap_size = info.pc_after_gap - info.pc;
uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index); uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
encoding |= SafepointEntry::GapCodeSizeField::encode(gap_size);
encoding |= SafepointEntry::ArgumentsField::encode(info.arguments); encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles); encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
return encoding; return encoding;
} }
int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
int result = 0;
if (!deoptimization_info_.is_empty()) {
unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
DeoptimizationInfo info = deoptimization_info_[i];
if (static_cast<int>(info.deoptimization_index) !=
Safepoint::kNoDeoptimizationIndex) {
if (previous_gap_end + limit > info.pc) {
result++;
}
previous_gap_end = info.pc_after_gap;
}
}
}
return result;
}
} } // namespace v8::internal } } // namespace v8::internal
...@@ -62,10 +62,20 @@ class SafepointEntry BASE_EMBEDDED { ...@@ -62,10 +62,20 @@ class SafepointEntry BASE_EMBEDDED {
return DeoptimizationIndexField::decode(info_); return DeoptimizationIndexField::decode(info_);
} }
int gap_code_size() const { static const int kArgumentsFieldBits = 3;
ASSERT(is_valid()); static const int kSaveDoublesFieldBits = 1;
return GapCodeSizeField::decode(info_); static const int kDeoptIndexBits =
} 32 - kArgumentsFieldBits - kSaveDoublesFieldBits;
class DeoptimizationIndexField:
public BitField<int, 0, kDeoptIndexBits> {}; // NOLINT
class ArgumentsField:
public BitField<unsigned,
kDeoptIndexBits,
kArgumentsFieldBits> {}; // NOLINT
class SaveDoublesField:
public BitField<bool,
kDeoptIndexBits + kArgumentsFieldBits,
kSaveDoublesFieldBits> { }; // NOLINT
int argument_count() const { int argument_count() const {
ASSERT(is_valid()); ASSERT(is_valid());
...@@ -85,27 +95,6 @@ class SafepointEntry BASE_EMBEDDED { ...@@ -85,27 +95,6 @@ class SafepointEntry BASE_EMBEDDED {
bool HasRegisters() const; bool HasRegisters() const;
bool HasRegisterAt(int reg_index) const; bool HasRegisterAt(int reg_index) const;
// Reserve 13 bits for the gap code size. On ARM a constant pool can be
// emitted when generating the gap code. The size of the const pool is less
// than what can be represented in 12 bits, so 13 bits gives room for having
// instructions before potentially emitting a constant pool.
static const int kGapCodeSizeBits = 13;
static const int kArgumentsFieldBits = 3;
static const int kSaveDoublesFieldBits = 1;
static const int kDeoptIndexBits =
32 - kGapCodeSizeBits - kArgumentsFieldBits - kSaveDoublesFieldBits;
class GapCodeSizeField: public BitField<unsigned, 0, kGapCodeSizeBits> {};
class DeoptimizationIndexField: public BitField<int,
kGapCodeSizeBits,
kDeoptIndexBits> {}; // NOLINT
class ArgumentsField: public BitField<unsigned,
kGapCodeSizeBits + kDeoptIndexBits,
kArgumentsFieldBits> {}; // NOLINT
class SaveDoublesField: public BitField<bool,
kGapCodeSizeBits + kDeoptIndexBits +
kArgumentsFieldBits,
kSaveDoublesFieldBits> { }; // NOLINT
private: private:
unsigned info_; unsigned info_;
uint8_t* bits_; uint8_t* bits_;
...@@ -186,6 +175,11 @@ class Safepoint BASE_EMBEDDED { ...@@ -186,6 +175,11 @@ class Safepoint BASE_EMBEDDED {
kWithRegistersAndDoubles = kWithRegisters | kWithDoubles kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
} Kind; } Kind;
enum DeoptMode {
kNoLazyDeopt,
kLazyDeopt
};
static const int kNoDeoptimizationIndex = static const int kNoDeoptimizationIndex =
(1 << (SafepointEntry::kDeoptIndexBits)) - 1; (1 << (SafepointEntry::kDeoptIndexBits)) - 1;
...@@ -206,9 +200,11 @@ class SafepointTableBuilder BASE_EMBEDDED { ...@@ -206,9 +200,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
public: public:
SafepointTableBuilder() SafepointTableBuilder()
: deoptimization_info_(32), : deoptimization_info_(32),
deopt_index_list_(32),
indexes_(32), indexes_(32),
registers_(32), registers_(32),
emitted_(false) { } emitted_(false),
last_lazy_safepoint_(0) { }
// Get the offset of the emitted safepoint table in the code. // Get the offset of the emitted safepoint table in the code.
unsigned GetCodeOffset() const; unsigned GetCodeOffset() const;
...@@ -217,50 +213,34 @@ class SafepointTableBuilder BASE_EMBEDDED { ...@@ -217,50 +213,34 @@ class SafepointTableBuilder BASE_EMBEDDED {
Safepoint DefineSafepoint(Assembler* assembler, Safepoint DefineSafepoint(Assembler* assembler,
Safepoint::Kind kind, Safepoint::Kind kind,
int arguments, int arguments,
int deoptimization_index); Safepoint::DeoptMode mode);
// Update the last safepoint with the size of the code generated until the
// end of the gap following it.
void SetPcAfterGap(int pc) {
ASSERT(!deoptimization_info_.is_empty());
int index = deoptimization_info_.length() - 1;
deoptimization_info_[index].pc_after_gap = pc;
}
// Get the end pc offset of the last safepoint, including the code generated // Record deoptimization index for lazy deoptimization for the last
// until the end of the gap following it. // outstanding safepoints.
unsigned GetPcAfterGap() { void RecordLazyDeoptimizationIndex(int index);
int index = deoptimization_info_.length();
if (index == 0) return 0;
return deoptimization_info_[index - 1].pc_after_gap;
}
// Emit the safepoint table after the body. The number of bits per // Emit the safepoint table after the body. The number of bits per
// entry must be enough to hold all the pointer indexes. // entry must be enough to hold all the pointer indexes.
void Emit(Assembler* assembler, int bits_per_entry); void Emit(Assembler* assembler, int bits_per_entry);
// Count the number of deoptimization points where the next
// following deoptimization point comes less than limit bytes
// after the end of this point's gap.
int CountShortDeoptimizationIntervals(unsigned limit);
private: private:
struct DeoptimizationInfo { struct DeoptimizationInfo {
unsigned pc; unsigned pc;
unsigned deoptimization_index;
unsigned pc_after_gap;
unsigned arguments; unsigned arguments;
bool has_doubles; bool has_doubles;
}; };
uint32_t EncodeExceptPC(const DeoptimizationInfo& info); uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index);
ZoneList<DeoptimizationInfo> deoptimization_info_; ZoneList<DeoptimizationInfo> deoptimization_info_;
ZoneList<unsigned> deopt_index_list_;
ZoneList<ZoneList<int>*> indexes_; ZoneList<ZoneList<int>*> indexes_;
ZoneList<ZoneList<int>*> registers_; ZoneList<ZoneList<int>*> registers_;
unsigned offset_; unsigned offset_;
bool emitted_; bool emitted_;
int last_lazy_safepoint_;
DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder); DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
}; };
......
...@@ -42,67 +42,7 @@ const int Deoptimizer::table_entry_size_ = 10; ...@@ -42,67 +42,7 @@ const int Deoptimizer::table_entry_size_ = 10;
int Deoptimizer::patch_size() { int Deoptimizer::patch_size() {
return MacroAssembler::kCallInstructionLength; return Assembler::kCallInstructionLength;
}
#ifdef DEBUG
// Overwrites code with int3 instructions.
static void ZapCodeRange(Address from, Address to) {
CHECK(from <= to);
int length = static_cast<int>(to - from);
CodePatcher destroyer(from, length);
while (length-- > 0) {
destroyer.masm()->int3();
}
}
#endif
// Iterate through the entries of a SafepointTable that corresponds to
// deoptimization points.
class SafepointTableDeoptimiztionEntryIterator {
public:
explicit SafepointTableDeoptimiztionEntryIterator(Code* code)
: code_(code), table_(code), index_(-1), limit_(table_.length()) {
FindNextIndex();
}
SafepointEntry Next(Address* pc) {
if (index_ >= limit_) {
*pc = NULL;
return SafepointEntry(); // Invalid entry.
}
*pc = code_->instruction_start() + table_.GetPcOffset(index_);
SafepointEntry entry = table_.GetEntry(index_);
FindNextIndex();
return entry;
}
private:
void FindNextIndex() {
ASSERT(index_ < limit_);
while (++index_ < limit_) {
if (table_.GetEntry(index_).deoptimization_index() !=
Safepoint::kNoDeoptimizationIndex) {
return;
}
}
}
Code* code_;
SafepointTable table_;
// Index of next deoptimization entry. If negative after calling
// FindNextIndex, there are no more, and Next will return an invalid
// SafepointEntry.
int index_;
// Table length.
int limit_;
};
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// TODO(1276): Implement.
} }
...@@ -119,84 +59,34 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -119,84 +59,34 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// code patching below, and is not needed any more. // code patching below, and is not needed any more.
code->InvalidateRelocation(); code->InvalidateRelocation();
// For each return after a safepoint insert a absolute call to the // For each LLazyBailout instruction insert a absolute call to the
// corresponding deoptimization entry, or a short call to an absolute // corresponding deoptimization entry, or a short call to an absolute
// jump if space is short. The absolute jumps are put in a table just // jump if space is short. The absolute jumps are put in a table just
// before the safepoint table (space was allocated there when the Code // before the safepoint table (space was allocated there when the Code
// object was created, if necessary). // object was created, if necessary).
Address instruction_start = function->code()->instruction_start(); Address instruction_start = function->code()->instruction_start();
Address jump_table_address =
instruction_start + function->code()->safepoint_table_offset();
#ifdef DEBUG #ifdef DEBUG
Address previous_pc = instruction_start; Address prev_call_address = NULL;
#endif
SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
Address entry_pc = NULL;
SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
while (current_entry.is_valid()) {
int gap_code_size = current_entry.gap_code_size();
unsigned deoptimization_index = current_entry.deoptimization_index();
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
ZapCodeRange(previous_pc, entry_pc);
#endif #endif
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Position where Call will be patched in. // Position where Call will be patched in.
Address call_address = entry_pc + gap_code_size; Address call_address = instruction_start + deopt_data->Pc(i)->value();
// End of call instruction, if using a direct call to a 64-bit address. // There is room enough to write a long call instruction because we pad
Address call_end_address = // LLazyBailout instructions with nops if necessary.
call_address + MacroAssembler::kCallInstructionLength;
// Find next deoptimization entry, if any.
Address next_pc = NULL;
SafepointEntry next_entry = deoptimizations.Next(&next_pc);
if (!next_entry.is_valid() || next_pc >= call_end_address) {
// Room enough to write a long call instruction.
CodePatcher patcher(call_address, Assembler::kCallInstructionLength); CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE);
RelocInfo::NONE); ASSERT(prev_call_address == NULL ||
#ifdef DEBUG call_address >= prev_call_address + patch_size());
previous_pc = call_end_address; ASSERT(call_address + patch_size() <= code->instruction_end());
#endif
} else {
// Not room enough for a long Call instruction. Write a short call
// instruction to a long jump placed elsewhere in the code.
#ifdef DEBUG #ifdef DEBUG
Address short_call_end_address = prev_call_address = call_address;
call_address + MacroAssembler::kShortCallInstructionLength;
#endif
ASSERT(next_pc >= short_call_end_address);
// Write jump in jump-table.
jump_table_address -= MacroAssembler::kJumpInstructionLength;
CodePatcher jump_patcher(jump_table_address,
MacroAssembler::kJumpInstructionLength);
jump_patcher.masm()->Jump(
GetDeoptimizationEntry(deoptimization_index, LAZY),
RelocInfo::NONE);
// Write call to jump at call_offset.
CodePatcher call_patcher(call_address,
MacroAssembler::kShortCallInstructionLength);
call_patcher.masm()->call(jump_table_address);
#ifdef DEBUG
previous_pc = short_call_end_address;
#endif #endif
} }
// Continue with next deoptimization entry.
current_entry = next_entry;
entry_pc = next_pc;
}
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
ZapCodeRange(previous_pc, jump_table_address);
#endif
Isolate* isolate = code->GetIsolate(); Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list. // Add the deoptimizing code to the list.
...@@ -217,11 +107,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -217,11 +107,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: "); PrintF("[forced deoptimization: ");
function->PrintName(); function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
} }
} }
......
This diff is collapsed.
...@@ -60,6 +60,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -60,6 +60,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED), status_(UNUSED),
deferred_(8), deferred_(8),
osr_pc_offset_(-1), osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
resolver_(this), resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) { expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions(); PopulateDeoptimizationLiteralsWithInlinedFunctions();
...@@ -98,7 +99,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -98,7 +99,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr); void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check); Label* map_check);
// Parallel move support. // Parallel move support.
...@@ -135,7 +136,6 @@ class LCodeGen BASE_EMBEDDED { ...@@ -135,7 +136,6 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); } HGraph* graph() const { return chunk_->graph(); }
int GetNextEmittedBlock(int block); int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true, void EmitClassOfTest(Label* if_true,
Label* if_false, Label* if_false,
...@@ -200,10 +200,11 @@ class LCodeGen BASE_EMBEDDED { ...@@ -200,10 +200,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object); void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr, void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode, SafepointMode safepoint_mode,
int argc); int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment); void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment); void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation, void AddToTranslation(Translation* translation,
...@@ -237,16 +238,13 @@ class LCodeGen BASE_EMBEDDED { ...@@ -237,16 +238,13 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers, void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind, Safepoint::Kind kind,
int arguments, int arguments,
int deoptimization_index); Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(int deoptimization_index); void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers, void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments, int arguments,
int deoptimization_index); Safepoint::DeoptMode mode);
void RecordPosition(int position); void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned); static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block); void EmitGoto(int block);
...@@ -292,6 +290,8 @@ class LCodeGen BASE_EMBEDDED { ...@@ -292,6 +290,8 @@ class LCodeGen BASE_EMBEDDED {
Address address; Address address;
}; };
void EnsureSpaceForLazyDeopt();
LChunk* const chunk_; LChunk* const chunk_;
MacroAssembler* const masm_; MacroAssembler* const masm_;
CompilationInfo* const info_; CompilationInfo* const info_;
...@@ -308,6 +308,7 @@ class LCodeGen BASE_EMBEDDED { ...@@ -308,6 +308,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_; TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_; ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_; int osr_pc_offset_;
int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table // Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code. // itself is emitted at the end of the generated code.
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
// Test function.caller. // Test function.caller.
function A() {} function A() {}
...@@ -40,9 +42,10 @@ A.prototype.g = gee; ...@@ -40,9 +42,10 @@ A.prototype.g = gee;
var o = new A(); var o = new A();
for (var i=0; i<5000000; i++) { for (var i=0; i<5; i++) {
o.g(i); o.g(i);
} }
%OptimizeFunctionOnNextCall(o.g);
assertEquals(gee, o.g(0)); assertEquals(gee, o.g(0));
assertEquals(null, o.g(1)); assertEquals(null, o.g(1));
...@@ -53,9 +56,10 @@ function hej(x) { ...@@ -53,9 +56,10 @@ function hej(x) {
return o.g(x); return o.g(x);
} }
for (var j=0; j<5000000; j++) { for (var j=0; j<5; j++) {
hej(j); hej(j);
} }
%OptimizeFunctionOnNextCall(hej);
assertEquals(gee, hej(0)); assertEquals(gee, hej(0));
assertEquals(hej, hej(1)); assertEquals(hej, hej(1));
...@@ -66,8 +70,9 @@ function from_eval(x) { ...@@ -66,8 +70,9 @@ function from_eval(x) {
return o.g(x); return o.g(x);
} }
for (var j=0; j<5000000; j++) { for (var j=0; j<5; j++) {
from_eval(j); from_eval(j);
} }
%OptimizeFunctionOnNextCall(from_eval);
assertEquals(gee, from_eval(0)); assertEquals(gee, from_eval(0));
assertEquals(from_eval, from_eval(1)); assertEquals(from_eval, from_eval(1));
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
// Test lazy deoptimization after CallFunctionStub.
function foo() { return 1; }
function f(x, y) {
var a = [0];
if (x == 0) {
%DeoptimizeFunction(f);
return 1;
}
a[0] = %_CallFunction(null, x - 1, f);
return x >> a[0];
}
f(42);
f(42);
assertEquals(42, f(42));
%OptimizeFunctionOnNextCall(f);
assertEquals(42, f(42));
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment