Commit 0c8cd463 authored by Jacob.Bramley@arm.com

ARM64: Restructure the L1 deopt jump table.

This restructures the L1 deopt jump table so that the base address of
the L2 table is only loaded once. This significantly reduces the size of
the generated code because only one big immediate needs to be loaded.

The total size of all L1 deopt tables generated during Octane is almost
halved, from about 1105kB to 584kB.

BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/305963010

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21608 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 19c71f9e
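Before the diff, here is a small standalone C++ sketch of the idea described in the commit message. It is not V8 code and all names in it (Bailout0, second_level, and so on) are hypothetical: each first-level entry keeps only a small offset from a common base, and the base address is materialized once in shared code, mirroring the single call_deopt_entry tail in the change below.

// Minimal sketch of the restructuring: one base address, small per-entry
// offsets, and a shared tail that reconstructs the target and calls it.
// Casting function pointers through uintptr_t is used here purely for
// illustration; the generated ARM64 code does the equivalent arithmetic
// on raw addresses.
#include <cstdint>
#include <cstdio>

void Bailout0() { std::puts("bailout 0"); }
void Bailout1() { std::puts("bailout 1"); }

int main() {
  using Entry = void (*)();
  // Stands in for the contiguous second-level (L2) deopt entries.
  Entry second_level[] = {Bailout0, Bailout1};

  // Old shape: every first-level entry would load a full 64-bit pointer.
  // New shape: one base address plus a small per-entry offset.
  const std::uintptr_t base = reinterpret_cast<std::uintptr_t>(second_level[0]);
  std::uintptr_t offsets[2];
  for (int i = 0; i < 2; i++) {
    offsets[i] = reinterpret_cast<std::uintptr_t>(second_level[i]) - base;
  }

  // Shared tail (the analogue of call_deopt_entry): load the base once,
  // add the per-entry offset, then call through the reconstructed pointer.
  for (std::uintptr_t off : offsets) {
    Entry target = reinterpret_cast<Entry>(base + off);
    target();
  }
  return 0;
}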
@@ -832,51 +832,82 @@ bool LCodeGen::GenerateDeferredCode() {
 bool LCodeGen::GenerateDeoptJumpTable() {
+  Label needs_frame, restore_caller_doubles, call_deopt_entry;
+
   if (deopt_jump_table_.length() > 0) {
     Comment(";;; -------------------- Jump table --------------------");
-  }
-  Label table_start;
-  __ bind(&table_start);
-  Label needs_frame;
-  for (int i = 0; i < deopt_jump_table_.length(); i++) {
-    __ Bind(&deopt_jump_table_[i]->label);
-    Address entry = deopt_jump_table_[i]->address;
-    Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    if (id == Deoptimizer::kNotDeoptimizationEntry) {
-      Comment(";;; jump table entry %d.", i);
-    } else {
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    }
-    if (deopt_jump_table_[i]->needs_frame) {
-      ASSERT(!info()->saves_caller_doubles());
-      UseScratchRegisterScope temps(masm());
-      Register stub_deopt_entry = temps.AcquireX();
-      Register stub_marker = temps.AcquireX();
-
-      __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
-      if (needs_frame.is_bound()) {
-        __ B(&needs_frame);
-      } else {
-        __ Bind(&needs_frame);
-        // This variant of deopt can only be used with stubs. Since we don't
-        // have a function pointer to install in the stack frame that we're
-        // building, install a special marker there instead.
-        ASSERT(info()->IsStub());
-        __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
-        __ Push(lr, fp, cp, stub_marker);
-        __ Add(fp, __ StackPointer(), 2 * kPointerSize);
-        __ Call(stub_deopt_entry);
-      }
-    } else {
-      if (info()->saves_caller_doubles()) {
-        ASSERT(info()->IsStub());
-        RestoreCallerDoubles();
-      }
-      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-    }
-    masm()->CheckConstPool(false, false);
+    Address base = deopt_jump_table_[0]->address;
+
+    UseScratchRegisterScope temps(masm());
+    Register entry_offset = temps.AcquireX();
+
+    int length = deopt_jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      __ Bind(&deopt_jump_table_[i]->label);
+
+      Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
+      Address entry = deopt_jump_table_[i]->address;
+      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+      if (id == Deoptimizer::kNotDeoptimizationEntry) {
+        Comment(";;; jump table entry %d.", i);
+      } else {
+        Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+      }
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load the base
+      // address and add an immediate offset.
+      __ Mov(entry_offset, entry - base);
+
+      // The last entry can fall through into `call_deopt_entry`, avoiding a
+      // branch.
+      bool last_entry = (i + 1) == length;
+
+      if (deopt_jump_table_[i]->needs_frame) {
+        ASSERT(!info()->saves_caller_doubles());
+        if (!needs_frame.is_bound()) {
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+
+          UseScratchRegisterScope temps(masm());
+          Register stub_marker = temps.AcquireX();
+          __ Bind(&needs_frame);
+          __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+          __ Push(lr, fp, cp, stub_marker);
+          __ Add(fp, __ StackPointer(), 2 * kPointerSize);
+          if (!last_entry) __ B(&call_deopt_entry);
+        } else {
+          // Reuse the existing needs_frame code.
+          __ B(&needs_frame);
+        }
+      } else if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        if (!restore_caller_doubles.is_bound()) {
+          __ Bind(&restore_caller_doubles);
+          RestoreCallerDoubles();
+          if (!last_entry) __ B(&call_deopt_entry);
+        } else {
+          // Reuse the existing restore_caller_doubles code.
+          __ B(&restore_caller_doubles);
+        }
+      } else {
+        // There is nothing special to do, so just continue to the second-level
+        // table.
+        if (!last_entry) __ B(&call_deopt_entry);
+      }
+
+      masm()->CheckConstPool(false, last_entry);
+    }
+
+    // Generate common code for calling the second-level deopt table.
+    Register deopt_entry = temps.AcquireX();
+    __ Bind(&call_deopt_entry);
+    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
+                                RelocInfo::RUNTIME_ENTRY));
+    __ Add(deopt_entry, deopt_entry, entry_offset);
+    __ Call(deopt_entry);
   }
 
   // Force constant pool emission at the end of the deopt jump table to make
...