Commit 5d5ed19f authored by Junliang Yan, committed by Commit Bot

PPC/s390: [deoptimizer] Change deopt entries into builtins

Port 7f58ced7

Original Commit Message:

    While the overall goal of this commit is to change deoptimization
    entries into builtins, there are multiple related things happening:

    - Deoptimization entries, formerly stubs (i.e. Code objects generated
      at runtime, guaranteed to be immovable), have been converted into
      builtins. The major restriction is that we now need to preserve the
      kRootRegister, which was formerly used on most architectures to pass
      the deoptimization id. The solution differs based on platform.
    - Renamed DEOPT_ENTRIES_OR_FOR_TESTING code kind to FOR_TESTING.
    - Removed heap/ support for immovable Code generation.
    - Removed the DeoptimizerData class (no longer needed).
    - arm64: to preserve 4-byte deopt exits, introduced a new optimization
      in which the final jump to the deoptimization entry is generated
      once per Code object, and deopt exits can continue to emit a
      near-call.
    - arm,ia32,x64: change to fixed-size deopt exits. This reduces exit
      sizes by 4/8, 5, and 5 bytes, respectively.

    On arm the deopt exit size is reduced from 12 (or 16) bytes to 8 bytes
    by using the same strategy as on arm64 (recalc deopt id from return
    address). Before:

     e300a002       movw r10, <id>
     e59fc024       ldr ip, [pc, <entry offset>]
     e12fff3c       blx ip

    After:

     e59acb35       ldr ip, [r10, <entry offset>]
     e12fff3c       blx ip

    On arm64 the deopt exit size remains 4 bytes (or 8 bytes in some cases
    with CFI). Additionally, up to 4 builtin jumps are emitted per Code
    object (max 32 bytes added overhead per Code object). Before:

     9401cdae       bl <entry offset>

    After:

     # eager deoptimization entry jump.
     f95b1f50       ldr x16, [x26, <eager entry offset>]
     d61f0200       br x16
     # lazy deoptimization entry jump.
     f95b2b50       ldr x16, [x26, <lazy entry offset>]
     d61f0200       br x16
     # the deopt exit.
     97fffffc       bl <eager deoptimization entry jump offset>

    On ia32 the deopt exit size is reduced from 10 to 5 bytes. Before:

     bb00000000     mov ebx,<id>
     e825f5372b     call <entry>

    After:

     e8ea2256ba     call <entry>

    On x64 the deopt exit size is reduced from 12 to 7 bytes. Before:

     49c7c511000000 REX.W movq r13,<id>
     e8ea2f0700     call <entry>

    After:

     41ff9560360000 call [r13+<entry offset>]
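
    In each of these fixed-size-exit schemes the deoptimization id is no longer
    materialized at the exit site; it can be recovered from the return address
    alone. A minimal sketch of the idea (illustrative names, not the exact V8
    code):

     #include <cstdint>
     // The exit index is the offset of the return address from the start of
     // the deopt-exit section, divided by the fixed per-exit size.
     int DeoptExitIndex(uintptr_t return_pc, uintptr_t exits_start, int exit_size) {
       return static_cast<int>((return_pc - exits_start) / exit_size);
     }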

R=jgruber@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com, miladfar@ca.ibm.com
BUG=
LOG=N

Change-Id: I49e4c92759043e46beb3c76c97823285b16feeef
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2486225
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#70637}
parent 89d9eb73
@@ -3351,6 +3351,252 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ blr();
}
namespace {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Generate_DeoptimizationEntry(MacroAssembler* masm,
DeoptimizeKind deopt_kind) {
Isolate* isolate = masm->isolate();
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit();
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
// Save all double registers before messing with them.
__ subi(sp, sp, Operand(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ stfd(dreg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
// Leave gaps for other registers.
__ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
__ StoreP(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
}
}
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ Move(scratch, ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate));
__ StoreP(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
// The bailout id is passed as r29 by the caller.
__ mr(r5, r29);
__ mov(r5, Operand(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object (r6) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r7.
__ mflr(r6);
__ addi(r7, sp, Operand(kSavedRegistersAreaSize));
__ sub(r7, fp, r7);
// Allocate a new deoptimizer object.
// Pass six arguments in r3 to r8.
__ PrepareCallCFunction(6, r8);
__ li(r3, Operand::Zero());
Label context_check;
__ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r4, &context_check);
__ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(r4, Operand(static_cast<int>(deopt_kind)));
// r5: bailout id already loaded.
// r6: code address or 0 already loaded.
// r7: Fp-to-sp delta.
__ Move(r8, ExternalReference::isolate_address(isolate));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve "deoptimizer" object in register r3 and get the input
// frame descriptor pointer to r4 (deoptimizer->input_);
__ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r5, MemOperand(r4, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy double registers to
// double_registers_[DoubleRegister::kNumRegisters]
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
__ lfd(d0, MemOperand(sp, src_offset));
__ stfd(d0, MemOperand(r4, dst_offset));
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register zero = r7;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ li(zero, Operand(0));
__ stb(zero, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
__ addi(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r5; that is
// the first stack slot not part of the input frame.
__ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ add(r5, r5, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r7);
__ StoreP(r7, MemOperand(r6, 0));
__ addi(r6, r6, Operand(kSystemPointerSize));
__ bind(&pop_loop_header);
__ cmp(r5, sp);
__ bne(&pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r3); // Preserve deoptimizer object across call.
// r3: deoptimizer object; r4: scratch.
__ PrepareCallCFunction(1, r4);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r3); // Restore deoptimizer object (class Deoptimizer).
__ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r7 = current "FrameDescription** output_",
// r4 = one past the last FrameDescription**.
__ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
__ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
__ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
__ add(r4, r7, r4);
__ b(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r5 = current FrameDescription*, r6 = loop index.
__ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
__ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header);
__ bind(&inner_push_loop);
__ addi(r6, r6, Operand(-sizeof(intptr_t)));
__ add(r9, r5, r6);
__ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
__ push(r9);
__ bind(&inner_loop_header);
__ cmpi(r6, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
__ addi(r7, r7, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmp(r7, r4);
__ blt(&outer_push_loop);
__ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ lfd(dreg, MemOperand(r4, src_offset));
}
// Push pc, and continuation from the last output frame.
__ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
__ push(r9);
__ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
__ push(r9);
// Restore the registers from the last output frame.
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
DCHECK(!(scratch.bit() & restored_regs));
__ mr(scratch, r5);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(scratch, offset));
}
}
}
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register one = r7;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ li(one, Operand(1));
__ stb(one, MemOperand(is_iterable));
}
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(r0);
__ mtlr(r0);
__ Jump(scratch);
}
__ stop();
}
} // namespace
void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
#undef __
} // namespace internal
} // namespace v8
@@ -3385,6 +3385,242 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ stop();
}
namespace {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Generate_DeoptimizationEntry(MacroAssembler* masm,
DeoptimizeKind deopt_kind) {
Isolate* isolate = masm->isolate();
// Save all the registers onto the stack
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
// Save all double registers before messing with them.
__ lay(sp, MemOperand(sp, -kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ StoreDouble(dreg, MemOperand(sp, offset));
}
// Push all GPRs onto the stack
__ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
__ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate));
__ StoreP(fp, MemOperand(r1));
static constexpr int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
__ lgfi(r4, Operand(Deoptimizer::kFixedExitSizeMarker));
// Cleanse the Return address for 31-bit
__ CleanseP(r14);
// Get the address of the location in the code object (r5) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r6.
__ LoadRR(r5, r14);
__ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
__ SubP(r6, fp, r6);
// Allocate a new deoptimizer object.
// Pass six arguments in r2 to r7.
__ PrepareCallCFunction(6, r7);
__ LoadImmP(r2, Operand::Zero());
Label context_check;
__ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r3, &context_check);
__ LoadP(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
// r4: bailout id already loaded.
// r5: code address or 0 already loaded.
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
__ Move(r7, ExternalReference::isolate_address(isolate));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve "deoptimizer" object in register r2 and get the input
// frame descriptor pointer to r3 (deoptimizer->input_);
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
// __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
// MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// TODO(john.yan): optimize the following code by using mvc instruction
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r4, MemOperand(r3, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy double registers to
// double_registers_[DoubleRegister::kNumRegisters]
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
// TODO(joransiu): MVC opportunity
__ LoadDouble(d0, MemOperand(sp, src_offset));
__ StoreDouble(d0, MemOperand(r3, dst_offset));
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register zero = r6;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ lhi(zero, Operand(0));
__ StoreByte(zero, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
__ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r4; that is
// the first stack slot not part of the input frame.
__ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
__ AddP(r4, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header, Label::kNear);
__ bind(&pop_loop);
__ pop(r6);
__ StoreP(r6, MemOperand(r5, 0));
__ la(r5, MemOperand(r5, kSystemPointerSize));
__ bind(&pop_loop_header);
__ CmpP(r4, sp);
__ bne(&pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r2); // Preserve deoptimizer object across call.
// r2: deoptimizer object; r3: scratch.
__ PrepareCallCFunction(1, r3);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r2); // Restore deoptimizer object (class Deoptimizer).
__ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r6 = current "FrameDescription** output_",
// r3 = one past the last FrameDescription**.
__ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
__ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
__ AddP(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
__ bind(&outer_push_loop);
// Inner loop state: r4 = current FrameDescription*, r5 = loop index.
__ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
__ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header, Label::kNear);
__ bind(&inner_push_loop);
__ SubP(r5, Operand(sizeof(intptr_t)));
__ AddP(r8, r4, r5);
__ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
__ push(r8);
__ bind(&inner_loop_header);
__ CmpP(r5, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
__ AddP(r6, r6, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ CmpP(r6, r3);
__ blt(&outer_push_loop);
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ ld(dreg, MemOperand(r3, src_offset));
}
// Push pc and continuation from the last output frame.
__ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
__ push(r8);
__ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
__ push(r8);
// Restore the registers from the last output frame.
__ LoadRR(r1, r4);
for (int i = kNumberOfRegisters - 1; i > 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(r1, offset));
}
}
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register one = r6;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ lhi(one, Operand(1));
__ StoreByte(one, MemOperand(is_iterable));
}
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
__ stop();
}
} // namespace
void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
#undef __
} // namespace internal
@@ -3247,16 +3247,17 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
SizeOfCodeGeneratedSince(&start_call));
}
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
Label* exit, DeoptimizeKind kind) {
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label*) {
LoadP(ip, MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in r29 (we don't need the roots array from now on).
DCHECK_LE(deopt_id, 0xFFFF);
mov(r29, Operand(deopt_id));
Call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::ZeroExtByte(Register dst, Register src) {
@@ -441,8 +441,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void JumpCodeObject(Register code_object) override;
void CallBuiltinByIndex(Register builtin_index) override;
void CallForDeoptimization(Address target, int deopt_id, Label* exit,
DeoptimizeKind kind);
void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
DeoptimizeKind kind,
Label* jump_deoptimization_entry_label);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -178,18 +178,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (options().inline_offheap_trampolines && target_is_builtin) {
Label skip;
if (cond != al) {
b(NegateCondition(cond), &skip, Label::kNear);
}
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
b(ip);
bind(&skip);
b(cond, ip);
return;
}
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
@@ -243,7 +238,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
bool target_is_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (options().inline_offheap_trampolines && target_is_builtin) {
if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -4536,15 +4531,17 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
bind(&return_label);
}
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
Label* exit, DeoptimizeKind kind) {
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label*) {
LoadP(ip, MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in r10 (we don't need the roots array from now on).
DCHECK_LE(deopt_id, 0xFFFF);
lghi(r10, Operand(deopt_id));
Call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { stop(); }
@@ -153,8 +153,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
void CallForDeoptimization(Address target, int deopt_id, Label* exit,
DeoptimizeKind kind);
void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
DeoptimizeKind kind,
Label* jump_deoptimization_entry_label);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -1131,8 +1131,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
CodeGenResult result = AssembleDeoptimizerCall(exit);
if (result != kSuccess) return result;
__ b(exit->label());
break;
}
case kArchRet:
@@ -3915,7 +3914,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {
// __ EmitConstantPool();
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -1588,8 +1588,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
CodeGenResult result = AssembleDeoptimizerCall(exit);
if (result != kSuccess) return result;
__ b(exit->label());
break;
}
case kArchRet:
@@ -4748,7 +4747,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -11,238 +11,9 @@
namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
const int Deoptimizer::kNonLazyDeoptExitSize = 0;
const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit();
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
// Save all double registers before messing with them.
__ subi(sp, sp, Operand(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ stfd(dreg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
// Leave gaps for other registers.
__ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
__ StoreP(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
}
}
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate)));
__ StoreP(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
// The bailout id is passed as r29 by the caller.
__ mr(r5, r29);
// Get the address of the location in the code object (r6) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r7.
__ mflr(r6);
__ addi(r7, sp, Operand(kSavedRegistersAreaSize));
__ sub(r7, fp, r7);
// Allocate a new deoptimizer object.
// Pass six arguments in r3 to r8.
__ PrepareCallCFunction(6, r8);
__ li(r3, Operand::Zero());
Label context_check;
__ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r4, &context_check);
__ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(r4, Operand(static_cast<int>(deopt_kind)));
// r5: bailout id already loaded.
// r6: code address or 0 already loaded.
// r7: Fp-to-sp delta.
__ mov(r8, Operand(ExternalReference::isolate_address(isolate)));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve "deoptimizer" object in register r3 and get the input
// frame descriptor pointer to r4 (deoptimizer->input_);
__ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r5, MemOperand(r4, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy double registers to
// double_registers_[DoubleRegister::kNumRegisters]
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
__ lfd(d0, MemOperand(sp, src_offset));
__ stfd(d0, MemOperand(r4, dst_offset));
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register zero = r7;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ li(zero, Operand(0));
__ stb(zero, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
__ addi(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r5; that is
// the first stack slot not part of the input frame.
__ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ add(r5, r5, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r7);
__ StoreP(r7, MemOperand(r6, 0));
__ addi(r6, r6, Operand(kSystemPointerSize));
__ bind(&pop_loop_header);
__ cmp(r5, sp);
__ bne(&pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r3); // Preserve deoptimizer object across call.
// r3: deoptimizer object; r4: scratch.
__ PrepareCallCFunction(1, r4);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r3); // Restore deoptimizer object (class Deoptimizer).
__ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r7 = current "FrameDescription** output_",
// r4 = one past the last FrameDescription**.
__ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
__ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
__ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
__ add(r4, r7, r4);
__ b(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r5 = current FrameDescription*, r6 = loop index.
__ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
__ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header);
__ bind(&inner_push_loop);
__ addi(r6, r6, Operand(-sizeof(intptr_t)));
__ add(r9, r5, r6);
__ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
__ push(r9);
__ bind(&inner_loop_header);
__ cmpi(r6, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
__ addi(r7, r7, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmp(r7, r4);
__ blt(&outer_push_loop);
__ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ lfd(dreg, MemOperand(r4, src_offset));
}
// Push pc, and continuation from the last output frame.
__ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
__ push(r9);
__ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
__ push(r9);
// Restore the registers from the last output frame.
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
DCHECK(!(scratch.bit() & restored_regs));
__ mr(scratch, r5);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(scratch, offset));
}
}
}
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register one = r7;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ li(one, Operand(1));
__ stb(one, MemOperand(is_iterable));
}
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(r0);
__ mtlr(r0);
__ Jump(scratch);
}
__ stop();
}
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
float float_val = static_cast<float>(double_registers_[n].get_scalar());
@@ -264,6 +35,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
#undef __
} // namespace internal
} // namespace v8
@@ -2,240 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
const int Deoptimizer::kNonLazyDeoptExitSize = 0;
const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
// Save all the registers onto the stack
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
// Save all double registers before messing with them.
__ lay(sp, MemOperand(sp, -kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ StoreDouble(dreg, MemOperand(sp, offset));
}
// Push all GPRs onto the stack
__ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
__ mov(r1, Operand(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate)));
__ StoreP(fp, MemOperand(r1));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
// The bailout id is passed using r10
__ LoadRR(r4, r10);
// Cleanse the Return address for 31-bit
__ CleanseP(r14);
// Get the address of the location in the code object (r5) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r6.
__ LoadRR(r5, r14);
__ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
__ SubP(r6, fp, r6);
// Allocate a new deoptimizer object.
// Pass six arguments in r2 to r7.
__ PrepareCallCFunction(6, r7);
__ LoadImmP(r2, Operand::Zero());
Label context_check;
__ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r3, &context_check);
__ LoadP(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
// r4: bailout id already loaded.
// r5: code address or 0 already loaded.
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
__ mov(r7, Operand(ExternalReference::isolate_address(isolate)));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve "deoptimizer" object in register r2 and get the input
// frame descriptor pointer to r3 (deoptimizer->input_);
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
// __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
// MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// TODO(john.yan): optimize the following code by using mvc instruction
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r4, MemOperand(r3, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy double registers to
// double_registers_[DoubleRegister::kNumRegisters]
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
// TODO(joransiu): MVC opportunity
__ LoadDouble(d0, MemOperand(sp, src_offset));
__ StoreDouble(d0, MemOperand(r3, dst_offset));
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register zero = r6;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ lhi(zero, Operand(0));
__ StoreByte(zero, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
__ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r4; that is
// the first stack slot not part of the input frame.
__ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
__ AddP(r4, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header, Label::kNear);
__ bind(&pop_loop);
__ pop(r6);
__ StoreP(r6, MemOperand(r5, 0));
__ la(r5, MemOperand(r5, kSystemPointerSize));
__ bind(&pop_loop_header);
__ CmpP(r4, sp);
__ bne(&pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r2); // Preserve deoptimizer object across call.
// r2: deoptimizer object; r3: scratch.
__ PrepareCallCFunction(1, r3);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r2); // Restore deoptimizer object (class Deoptimizer).
__ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r6 = current "FrameDescription** output_",
// r3 = one past the last FrameDescription**.
__ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
__ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
__ AddP(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
__ bind(&outer_push_loop);
// Inner loop state: r4 = current FrameDescription*, r5 = loop index.
__ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
__ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header, Label::kNear);
__ bind(&inner_push_loop);
__ SubP(r5, Operand(sizeof(intptr_t)));
__ AddP(r8, r4, r5);
__ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
__ push(r8);
__ bind(&inner_loop_header);
__ CmpP(r5, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
__ AddP(r6, r6, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ CmpP(r6, r3);
__ blt(&outer_push_loop);
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ ld(dreg, MemOperand(r3, src_offset));
}
// Push pc and continuation from the last output frame.
__ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
__ push(r8);
__ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
__ push(r8);
// Restore the registers from the last output frame.
__ LoadRR(r1, r4);
for (int i = kNumberOfRegisters - 1; i > 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(r1, offset));
}
}
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register one = r6;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ lhi(one, Operand(1));
__ StoreByte(one, MemOperand(is_iterable));
}
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
__ stop();
}
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 6 + 2;
const int Deoptimizer::kLazyDeoptExitSize = 6 + 2;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@@ -257,7 +31,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
#undef __
} // namespace internal
} // namespace v8