Fix lazy deoptimization at HInvokeFunction and enable target-recording call-function stub.

Changes the way we do lazy deoptimization:

1. For side-effect instructions, we insert the lazy-deopt call at
the following LLazyBailout instruction.

     CALL
     GAP
     LAZY-BAILOUT ==> lazy-deopt-call

2. For other instructions (e.g., StackCheck) we insert it right after the
instruction, since the deopt targets an earlier deoptimization environment.

     STACK-CHECK
     GAP ==> lazy-deopt-call
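
In both cases the code generator only records where the call will go; the
call itself is patched in later by Deoptimizer::DeoptimizeFunction. A
minimal sketch of the lowering of an LLazyBailout instruction, assuming
simplified names for the per-architecture LCodeGen variants:

     void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
       // Pad with nops so that a call of patch_size() bytes can be
       // patched in at this point later.
       EnsureSpaceForLazyDeopt();
       LEnvironment* env = instr->environment();
       // Registers the environment; for Safepoint::kLazyDeopt the current
       // pc offset is stored in the deoptimization input data (it stays
       // -1 for eager-only deopt points).
       RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
       // Backfill this deopt index into all safepoints recorded since
       // the previous lazy-deopt point.
       safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
     }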

The pc of the lazy-deopt call that will be patched in is recorded in the
deoptimization input data. Each Lithium instruction can have 0..n safepoints.
All safepoints get the deoptimization index of the associated LAZY-BAILOUT
instruction. On lazy deoptimization we use the return-pc to find the safepoint.
The safepoint gives us the deoptimization index, which in turn gives us the
PC at which the lazy-deopt call is patched in.
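
Put together, the deopt-time lookup is roughly the following (a sketch
built from the accessors touched in this change, not verbatim V8 code):

     // frame->pc() is the return address of the call that lazily deopted.
     SafepointEntry entry = code->GetSafepointEntry(frame->pc());
     int deopt_index = entry.deoptimization_index();
     ASSERT(deopt_index != Safepoint::kNoDeoptimizationIndex);
     DeoptimizationInputData* data =
         DeoptimizationInputData::cast(code->deoptimization_data());
     // Pc(i) is the offset of the LAZY-BAILOUT site where the call to the
     // lazy deoptimization entry gets patched in (-1 if the entry is only
     // reachable by eager deoptimization).
     Address call_site =
         code->instruction_start() + data->Pc(deopt_index)->value();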

Additional changes:
 * Marked RegExpLiteral as having side effects so that it
   gets an explicit lazy-bailout instruction (instead of
   being treated specially like stack checks).
 * Enabled the target-recording CallFunctionStub to achieve
   more inlining in optimized code.

BUG=v8:1789
TEST=jslint and uglify run without crashing, mjsunit/compiler/regress-lazy-deopt.js
Review URL: http://codereview.chromium.org/8492004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10006 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ee2cd428
@@ -44,12 +44,6 @@ int Deoptimizer::patch_size() {
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Nothing to do. No new relocation information is written for lazy
// deoptimization on ARM.
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
@@ -58,58 +52,37 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// For each return after a safepoint insert an absolute call to the
// corresponding deoptimization entry.
unsigned last_pc_offset = 0;
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint.
CHECK(pc_offset >= last_pc_offset);
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
destroyer.masm()->bkpt(0);
}
Address prev_call_address = NULL;
#endif
last_pc_offset = pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
deoptimization_index, Deoptimizer::LAZY);
last_pc_offset += gap_code_size;
int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
RelocInfo::NONE);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(code->instruction_start() + last_pc_offset,
call_size_in_words);
patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
last_pc_offset += call_size_in_bytes;
}
}
CodePatcher patcher(call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions =
(code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
destroyer.masm()->bkpt(0);
}
prev_call_address = call_address;
#endif
}
Isolate* isolate = code->GetIsolate();
@@ -131,11 +104,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
}
}
This diff is collapsed.
@@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -112,7 +113,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
// Parallel move support.
@@ -215,10 +216,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr,
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation,
@@ -247,19 +249,16 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepoint(int deoptimization_index);
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
int deoptimization_index);
Safepoint::DeoptMode mode);
void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -302,6 +301,8 @@ class LCodeGen BASE_EMBEDDED {
Address address;
};
void EnsureSpaceForLazyDeopt();
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -318,6 +319,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -116,25 +116,11 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Get the function and code from the frame.
JSFunction* function = JSFunction::cast(frame->function());
Code* code = frame->LookupCode();
Address code_start_address = code->instruction_start();
// Locate the deoptimization point in the code. As we are at a call the
// return address must be at a place in the code with deoptimization support.
int deoptimization_index = Safepoint::kNoDeoptimizationIndex;
// Scope this as the safe point constructor will disallow allocation.
{
SafepointTable table(code);
for (unsigned i = 0; i < table.length(); ++i) {
Address address = code_start_address + table.GetPcOffset(i);
if (address == frame->pc()) {
SafepointEntry safepoint_entry = table.GetEntry(i);
ASSERT(safepoint_entry.deoptimization_index() !=
Safepoint::kNoDeoptimizationIndex);
deoptimization_index = safepoint_entry.deoptimization_index();
break;
}
}
}
SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
int deoptimization_index = safepoint_entry.deoptimization_index();
ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
// Always use the actual stack slots when calculating the fp to sp
@@ -4138,6 +4138,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
pattern_(pattern),
flags_(flags) {
SetOperandAt(0, context);
SetAllSideEffects();
}
HValue* context() { return OperandAt(0); }
@@ -45,16 +45,6 @@ int Deoptimizer::patch_size() {
}
static void ZapCodeRange(Address start, Address end) {
#ifdef DEBUG
ASSERT(start <= end);
int size = end - start;
CodePatcher destroyer(start, size);
while (size-- > 0) destroyer.masm()->int3();
#endif
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
Isolate* isolate = code->GetIsolate();
HandleScope scope(isolate);
@@ -62,20 +52,14 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Compute the size of relocation information needed for the code
// patching in Deoptimizer::DeoptimizeFunction.
int min_reloc_size = 0;
Address prev_reloc_address = code->instruction_start();
Address code_start_address = code->instruction_start();
SafepointTable table(*code);
for (unsigned i = 0; i < table.length(); ++i) {
Address curr_reloc_address = code_start_address + table.GetPcOffset(i);
ASSERT_GE(curr_reloc_address, prev_reloc_address);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
// The gap code is needed to get to the state expected at the
// bailout and we need to skip the call opcode to get to the
// address that needs reloc.
curr_reloc_address += safepoint_entry.gap_code_size() + 1;
int pc_delta = curr_reloc_address - prev_reloc_address;
int prev_pc_offset = 0;
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
int pc_offset = deopt_data->Pc(i)->value();
if (pc_offset == -1) continue;
ASSERT_GE(pc_offset, prev_pc_offset);
int pc_delta = pc_offset - prev_pc_offset;
// We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
// if encodable with small pc delta encoding and up to 6 bytes
// otherwise.
@@ -84,8 +68,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
} else {
min_reloc_size += 6;
}
prev_reloc_address = curr_reloc_address;
}
prev_pc_offset = pc_offset;
}
// If the relocation information is not big enough we create a new
@@ -150,41 +133,41 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Address reloc_end_address = reloc_info->address() + reloc_info->Size();
RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
// For each return after a safepoint insert a call to the corresponding
// deoptimization entry. Since the call is a relative encoding, write new
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
// Since the call is a relative encoding, write new
// reloc info. We do not need any of the existing reloc info because the
// existing code will not be used again (we zap it in debug builds).
SafepointTable table(code);
Address prev_address = code_start_address;
for (unsigned i = 0; i < table.length(); ++i) {
Address curr_address = code_start_address + table.GetPcOffset(i);
ASSERT_GE(curr_address, prev_address);
ZapCodeRange(prev_address, curr_address);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
// The gap code is needed to get to the state expected at the bailout.
curr_address += safepoint_entry.gap_code_size();
CodePatcher patcher(curr_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
//
// Emit call to lazy deoptimization at all lazy deopt points.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
Address call_address = code_start_address + deopt_data->Pc(i)->value();
CodePatcher patcher(call_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
patcher.masm()->call(deopt_entry, RelocInfo::NONE);
// We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry),
NULL);
reloc_info_writer.Write(&rinfo);
ASSERT_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
curr_address += patch_size();
}
prev_address = curr_address;
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
ZapCodeRange(prev_address,
code_start_address + code->safepoint_table_offset());
// Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
@@ -218,11 +201,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
}
}
@@ -2104,9 +2104,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
SetSourcePosition(expr->position());
// Record call targets in unoptimized code, but not in the snapshot.
// TODO(1789): Reenable temporarily disabled recording CallFunctionStub
// when the issue is fixed.
bool record_call_target = false && !Serializer::enabled();
bool record_call_target = !Serializer::enabled();
if (record_call_target) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
}
This diff is collapsed.
@@ -61,7 +61,7 @@ class LCodeGen BASE_EMBEDDED {
dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
deoptimization_reloc_size(),
last_lazy_deopt_pc_(0),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -101,7 +101,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
// Parallel move support.
@@ -144,7 +144,6 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -210,10 +209,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr,
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation,
@@ -248,16 +248,13 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepoint(int deoptimization_index);
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
Safepoint::DeoptMode mode);
void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -291,6 +288,7 @@ class LCodeGen BASE_EMBEDDED {
Register object,
Handle<Map> type,
Handle<String> name);
void EnsureSpaceForLazyDeopt();
LChunk* const chunk_;
MacroAssembler* const masm_;
@@ -308,13 +306,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
struct DeoptimizationRelocSize {
int min_size;
int last_pc_offset;
};
DeoptimizationRelocSize deoptimization_reloc_size;
int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -366,17 +366,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
void set_gap_instructions_size(int gap_instructions_size) {
gap_instructions_size_ = gap_instructions_size;
}
int gap_instructions_size() { return gap_instructions_size_; }
private:
int gap_instructions_size_;
};
@@ -454,6 +454,7 @@ class LEnvironment: public ZoneObject {
translation_index_(-1),
ast_id_(ast_id),
parameter_count_(parameter_count),
pc_offset_(-1),
values_(value_count),
representations_(value_count),
spilled_registers_(NULL),
@@ -467,6 +468,7 @@ class LEnvironment: public ZoneObject {
int translation_index() const { return translation_index_; }
int ast_id() const { return ast_id_; }
int parameter_count() const { return parameter_count_; }
int pc_offset() const { return pc_offset_; }
LOperand** spilled_registers() const { return spilled_registers_; }
LOperand** spilled_double_registers() const {
return spilled_double_registers_;
@@ -483,10 +485,13 @@ class LEnvironment: public ZoneObject {
return representations_[index].IsTagged();
}
void Register(int deoptimization_index, int translation_index) {
void Register(int deoptimization_index,
int translation_index,
int pc_offset) {
ASSERT(!HasBeenRegistered());
deoptimization_index_ = deoptimization_index;
translation_index_ = translation_index;
pc_offset_ = pc_offset;
}
bool HasBeenRegistered() const {
return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
@@ -507,6 +512,7 @@ class LEnvironment: public ZoneObject {
int translation_index_;
int ast_id_;
int parameter_count_;
int pc_offset_;
ZoneList<LOperand*> values_;
ZoneList<Representation> representations_;
@@ -7866,11 +7866,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
if (0 == deopt_count) return;
PrintF(out, "%6s %6s %6s %12s\n", "index", "ast id", "argc",
PrintF(out, "%6s %6s %6s %6s %12s\n", "index", "ast id", "argc", "pc",
FLAG_print_code_verbose ? "commands" : "");
for (int i = 0; i < deopt_count; i++) {
PrintF(out, "%6d %6d %6d",
i, AstId(i)->value(), ArgumentsStackHeight(i)->value());
PrintF(out, "%6d %6d %6d %6d",
i,
AstId(i)->value(),
ArgumentsStackHeight(i)->value(),
Pc(i)->value());
if (!FLAG_print_code_verbose) {
PrintF(out, "\n");
@@ -3675,7 +3675,8 @@ class DeoptimizationInputData: public FixedArray {
static const int kAstIdOffset = 0;
static const int kTranslationIndexOffset = 1;
static const int kArgumentsStackHeightOffset = 2;
static const int kDeoptEntrySize = 3;
static const int kPcOffset = 3;
static const int kDeoptEntrySize = 4;
// Simple element accessors.
#define DEFINE_ELEMENT_ACCESSORS(name, type) \
@@ -3711,6 +3712,7 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ENTRY_ACCESSORS(AstId, Smi)
DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
DEFINE_ENTRY_ACCESSORS(Pc, Smi)
#undef DEFINE_ENTRY_ACCESSORS
@@ -122,17 +122,20 @@ void Safepoint::DefinePointerRegister(Register reg) {
Safepoint SafepointTableBuilder::DefineSafepoint(
Assembler* assembler, Safepoint::Kind kind, int arguments,
int deoptimization_index) {
ASSERT(deoptimization_index != -1);
Assembler* assembler,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode deopt_mode) {
ASSERT(arguments >= 0);
DeoptimizationInfo pc_and_deoptimization_index;
pc_and_deoptimization_index.pc = assembler->pc_offset();
pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
pc_and_deoptimization_index.arguments = arguments;
pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles);
deoptimization_info_.Add(pc_and_deoptimization_index);
DeoptimizationInfo info;
info.pc = assembler->pc_offset();
info.arguments = arguments;
info.has_doubles = (kind & Safepoint::kWithDoubles);
deoptimization_info_.Add(info);
deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex);
if (deopt_mode == Safepoint::kNoLazyDeopt) {
last_lazy_safepoint_ = deopt_index_list_.length();
}
indexes_.Add(new ZoneList<int>(8));
registers_.Add((kind & Safepoint::kWithRegisters)
? new ZoneList<int>(4)
@@ -141,6 +144,12 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
}
void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
while (last_lazy_safepoint_ < deopt_index_list_.length()) {
deopt_index_list_[last_lazy_safepoint_++] = index;
}
}
unsigned SafepointTableBuilder::GetCodeOffset() const {
ASSERT(emitted_);
return offset_;
@@ -173,11 +182,11 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
assembler->dd(length);
assembler->dd(bytes_per_entry);
// Emit sorted table of pc offsets together with deoptimization indexes and
// pc after gap information.
// Emit sorted table of pc offsets together with deoptimization indexes.
for (int i = 0; i < length; i++) {
assembler->dd(deoptimization_info_[i].pc);
assembler->dd(EncodeExceptPC(deoptimization_info_[i]));
assembler->dd(EncodeExceptPC(deoptimization_info_[i],
deopt_index_list_[i]));
}
// Emit table of bitmaps.
@@ -222,35 +231,14 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
}
uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
unsigned index = info.deoptimization_index;
unsigned gap_size = info.pc_after_gap - info.pc;
uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
unsigned index) {
uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
encoding |= SafepointEntry::GapCodeSizeField::encode(gap_size);
encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
return encoding;
}
int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
int result = 0;
if (!deoptimization_info_.is_empty()) {
unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
DeoptimizationInfo info = deoptimization_info_[i];
if (static_cast<int>(info.deoptimization_index) !=
Safepoint::kNoDeoptimizationIndex) {
if (previous_gap_end + limit > info.pc) {
result++;
}
previous_gap_end = info.pc_after_gap;
}
}
}
return result;
}
} } // namespace v8::internal
@@ -62,10 +62,20 @@ class SafepointEntry BASE_EMBEDDED {
return DeoptimizationIndexField::decode(info_);
}
int gap_code_size() const {
ASSERT(is_valid());
return GapCodeSizeField::decode(info_);
}
static const int kArgumentsFieldBits = 3;
static const int kSaveDoublesFieldBits = 1;
static const int kDeoptIndexBits =
32 - kArgumentsFieldBits - kSaveDoublesFieldBits;
class DeoptimizationIndexField:
public BitField<int, 0, kDeoptIndexBits> {}; // NOLINT
class ArgumentsField:
public BitField<unsigned,
kDeoptIndexBits,
kArgumentsFieldBits> {}; // NOLINT
class SaveDoublesField:
public BitField<bool,
kDeoptIndexBits + kArgumentsFieldBits,
kSaveDoublesFieldBits> { }; // NOLINT
int argument_count() const {
ASSERT(is_valid());
@@ -85,27 +95,6 @@ class SafepointEntry BASE_EMBEDDED {
bool HasRegisters() const;
bool HasRegisterAt(int reg_index) const;
// Reserve 13 bits for the gap code size. On ARM a constant pool can be
// emitted when generating the gap code. The size of the const pool is less
// than what can be represented in 12 bits, so 13 bits gives room for having
// instructions before potentially emitting a constant pool.
static const int kGapCodeSizeBits = 13;
static const int kArgumentsFieldBits = 3;
static const int kSaveDoublesFieldBits = 1;
static const int kDeoptIndexBits =
32 - kGapCodeSizeBits - kArgumentsFieldBits - kSaveDoublesFieldBits;
class GapCodeSizeField: public BitField<unsigned, 0, kGapCodeSizeBits> {};
class DeoptimizationIndexField: public BitField<int,
kGapCodeSizeBits,
kDeoptIndexBits> {}; // NOLINT
class ArgumentsField: public BitField<unsigned,
kGapCodeSizeBits + kDeoptIndexBits,
kArgumentsFieldBits> {}; // NOLINT
class SaveDoublesField: public BitField<bool,
kGapCodeSizeBits + kDeoptIndexBits +
kArgumentsFieldBits,
kSaveDoublesFieldBits> { }; // NOLINT
private:
unsigned info_;
uint8_t* bits_;
@@ -186,6 +175,11 @@ class Safepoint BASE_EMBEDDED {
kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
} Kind;
enum DeoptMode {
kNoLazyDeopt,
kLazyDeopt
};
static const int kNoDeoptimizationIndex =
(1 << (SafepointEntry::kDeoptIndexBits)) - 1;
@@ -206,9 +200,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
public:
SafepointTableBuilder()
: deoptimization_info_(32),
deopt_index_list_(32),
indexes_(32),
registers_(32),
emitted_(false) { }
emitted_(false),
last_lazy_safepoint_(0) { }
// Get the offset of the emitted safepoint table in the code.
unsigned GetCodeOffset() const;
@@ -217,50 +213,34 @@ class SafepointTableBuilder BASE_EMBEDDED {
Safepoint DefineSafepoint(Assembler* assembler,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
// Update the last safepoint with the size of the code generated until the
// end of the gap following it.
void SetPcAfterGap(int pc) {
ASSERT(!deoptimization_info_.is_empty());
int index = deoptimization_info_.length() - 1;
deoptimization_info_[index].pc_after_gap = pc;
}
Safepoint::DeoptMode mode);
// Get the end pc offset of the last safepoint, including the code generated
// until the end of the gap following it.
unsigned GetPcAfterGap() {
int index = deoptimization_info_.length();
if (index == 0) return 0;
return deoptimization_info_[index - 1].pc_after_gap;
}
// Record deoptimization index for lazy deoptimization for the last
// outstanding safepoints.
void RecordLazyDeoptimizationIndex(int index);
// Emit the safepoint table after the body. The number of bits per
// entry must be enough to hold all the pointer indexes.
void Emit(Assembler* assembler, int bits_per_entry);
// Count the number of deoptimization points where the next
// following deoptimization point comes less than limit bytes
// after the end of this point's gap.
int CountShortDeoptimizationIntervals(unsigned limit);
private:
struct DeoptimizationInfo {
unsigned pc;
unsigned deoptimization_index;
unsigned pc_after_gap;
unsigned arguments;
bool has_doubles;
};
uint32_t EncodeExceptPC(const DeoptimizationInfo& info);
uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index);
ZoneList<DeoptimizationInfo> deoptimization_info_;
ZoneList<unsigned> deopt_index_list_;
ZoneList<ZoneList<int>*> indexes_;
ZoneList<ZoneList<int>*> registers_;
unsigned offset_;
bool emitted_;
int last_lazy_safepoint_;
DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
};
@@ -42,67 +42,7 @@ const int Deoptimizer::table_entry_size_ = 10;
int Deoptimizer::patch_size() {
return MacroAssembler::kCallInstructionLength;
}
#ifdef DEBUG
// Overwrites code with int3 instructions.
static void ZapCodeRange(Address from, Address to) {
CHECK(from <= to);
int length = static_cast<int>(to - from);
CodePatcher destroyer(from, length);
while (length-- > 0) {
destroyer.masm()->int3();
}
}
#endif
// Iterate through the entries of a SafepointTable that corresponds to
// deoptimization points.
class SafepointTableDeoptimiztionEntryIterator {
public:
explicit SafepointTableDeoptimiztionEntryIterator(Code* code)
: code_(code), table_(code), index_(-1), limit_(table_.length()) {
FindNextIndex();
}
SafepointEntry Next(Address* pc) {
if (index_ >= limit_) {
*pc = NULL;
return SafepointEntry(); // Invalid entry.
}
*pc = code_->instruction_start() + table_.GetPcOffset(index_);
SafepointEntry entry = table_.GetEntry(index_);
FindNextIndex();
return entry;
}
private:
void FindNextIndex() {
ASSERT(index_ < limit_);
while (++index_ < limit_) {
if (table_.GetEntry(index_).deoptimization_index() !=
Safepoint::kNoDeoptimizationIndex) {
return;
}
}
}
Code* code_;
SafepointTable table_;
// Index of next deoptimization entry. If negative after calling
// FindNextIndex, there are no more, and Next will return an invalid
// SafepointEntry.
int index_;
// Table length.
int limit_;
};
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// TODO(1276): Implement.
return Assembler::kCallInstructionLength;
}
@@ -119,84 +59,34 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// For each return after a safepoint insert a absolute call to the
// For each LLazyBailout instruction insert a absolute call to the
// corresponding deoptimization entry, or a short call to an absolute
// jump if space is short. The absolute jumps are put in a table just
// before the safepoint table (space was allocated there when the Code
// object was created, if necessary).
Address instruction_start = function->code()->instruction_start();
Address jump_table_address =
instruction_start + function->code()->safepoint_table_offset();
#ifdef DEBUG
Address previous_pc = instruction_start;
#endif
SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
Address entry_pc = NULL;
SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
while (current_entry.is_valid()) {
int gap_code_size = current_entry.gap_code_size();
unsigned deoptimization_index = current_entry.deoptimization_index();
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
ZapCodeRange(previous_pc, entry_pc);
Address prev_call_address = NULL;
#endif
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Position where Call will be patched in.
Address call_address = entry_pc + gap_code_size;
// End of call instruction, if using a direct call to a 64-bit address.
Address call_end_address =
call_address + MacroAssembler::kCallInstructionLength;
// Find next deoptimization entry, if any.
Address next_pc = NULL;
SafepointEntry next_entry = deoptimizations.Next(&next_pc);
if (!next_entry.is_valid() || next_pc >= call_end_address) {
// Room enough to write a long call instruction.
Address call_address = instruction_start + deopt_data->Pc(i)->value();
// There is room enough to write a long call instruction because we pad
// LLazyBailout instructions with nops if necessary.
CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
RelocInfo::NONE);
#ifdef DEBUG
previous_pc = call_end_address;
#endif
} else {
// Not room enough for a long Call instruction. Write a short call
// instruction to a long jump placed elsewhere in the code.
patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
Address short_call_end_address =
call_address + MacroAssembler::kShortCallInstructionLength;
#endif
ASSERT(next_pc >= short_call_end_address);
// Write jump in jump-table.
jump_table_address -= MacroAssembler::kJumpInstructionLength;
CodePatcher jump_patcher(jump_table_address,
MacroAssembler::kJumpInstructionLength);
jump_patcher.masm()->Jump(
GetDeoptimizationEntry(deoptimization_index, LAZY),
RelocInfo::NONE);
// Write call to jump at call_offset.
CodePatcher call_patcher(call_address,
MacroAssembler::kShortCallInstructionLength);
call_patcher.masm()->call(jump_table_address);
#ifdef DEBUG
previous_pc = short_call_end_address;
prev_call_address = call_address;
#endif
}
// Continue with next deoptimization entry.
current_entry = next_entry;
entry_pc = next_pc;
}
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
ZapCodeRange(previous_pc, jump_table_address);
#endif
Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
@@ -217,11 +107,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
}
}
This diff is collapsed.
@@ -60,6 +60,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -98,7 +99,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
// Parallel move support.
@@ -135,7 +136,6 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -200,10 +200,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr,
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation,
@@ -237,16 +238,13 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepoint(int deoptimization_index);
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
Safepoint::DeoptMode mode);
void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -292,6 +290,8 @@ class LCodeGen BASE_EMBEDDED {
Address address;
};
void EnsureSpaceForLazyDeopt();
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -308,6 +308,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
// Test function.caller.
function A() {}
@@ -40,9 +42,10 @@ A.prototype.g = gee;
var o = new A();
for (var i=0; i<5000000; i++) {
for (var i=0; i<5; i++) {
o.g(i);
}
%OptimizeFunctionOnNextCall(o.g);
assertEquals(gee, o.g(0));
assertEquals(null, o.g(1));
@@ -53,9 +56,10 @@ function hej(x) {
return o.g(x);
}
for (var j=0; j<5000000; j++) {
for (var j=0; j<5; j++) {
hej(j);
}
%OptimizeFunctionOnNextCall(hej);
assertEquals(gee, hej(0));
assertEquals(hej, hej(1));
@@ -66,8 +70,9 @@ function from_eval(x) {
return o.g(x);
}
for (var j=0; j<5000000; j++) {
for (var j=0; j<5; j++) {
from_eval(j);
}
%OptimizeFunctionOnNextCall(from_eval);
assertEquals(gee, from_eval(0));
assertEquals(from_eval, from_eval(1));
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
// Test lazy deoptimization after CallFunctionStub.
function foo() { return 1; }
function f(x, y) {
var a = [0];
if (x == 0) {
%DeoptimizeFunction(f);
return 1;
}
a[0] = %_CallFunction(null, x - 1, f);
return x >> a[0];
}
f(42);
f(42);
assertEquals(42, f(42));
%OptimizeFunctionOnNextCall(f);
assertEquals(42, f(42));