Commit e5f1b968 authored by Junliang Yan, committed by Commit Bot

PPC/s390: Reland "[builtins] Introduce further constant & external reference indirections"

Port 3f99a376

Original Commit Message:

    This is a reland of f5d30851

    Original change's description:
    > [builtins] Introduce further constant & external reference indirections
    >
    > This introduces further indirections for embedded constants and
    > external references for builtins generated by the macro-assembler.
    > The used mechanisms (LookupConstant and LookupExternalReference) are
    > identical to what we already use in CSA.
    >
    > Almost all builtins are now isolate-independent in both release and
    > debug modes. snapshot_blob.bin is roughly 670K smaller in embedded
    > builds vs. non-embedded builds, while libv8.so is roughly 280K larger.
    >
    > Bug: v8:6666
    > Change-Id: I7a6c2193ef5a763e6cf7543dd51597d6fff6c110
    > Reviewed-on: https://chromium-review.googlesource.com/1006581
    > Commit-Queue: Jakob Gruber <jgruber@chromium.org>
    > Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
    > Cr-Commit-Position: refs/heads/master@{#52810}

R=jgruber@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: I037faebce37a866091dc35e04500790591292622
Reviewed-on: https://chromium-review.googlesource.com/1031397
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#52855}
parent 6379e2a4
...@@ -19,7 +19,7 @@ namespace internal { ...@@ -19,7 +19,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address, void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) { ExitFrameType exit_frame_type) {
__ mov(r15, Operand(ExternalReference::Create(address))); __ Move(r15, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) { if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET); RelocInfo::CODE_TARGET);
...@@ -412,10 +412,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, ...@@ -412,10 +412,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
} }
__ bind(&do_throw); __ bind(&do_throw);
{ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
}
// Throw away the result of the constructor invocation and use the // Throw away the result of the constructor invocation and use the
// on-stack receiver as the result. // on-stack receiver as the result.
...@@ -630,7 +627,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ...@@ -630,7 +627,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r6: argc // r6: argc
// r7: argv // r7: argv
// r0,r8-r9, cp may be clobbered // r0,r8-r9, cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm); {
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ InitializeRootRegister();
}
// Enter an internal frame. // Enter an internal frame.
{ {
...@@ -639,7 +640,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ...@@ -639,7 +640,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate). // Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create( ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate()); IsolateAddressId::kContextAddress, masm->isolate());
__ mov(cp, Operand(context_address)); __ Move(cp, context_address);
__ LoadP(cp, MemOperand(cp)); __ LoadP(cp, MemOperand(cp));
// Push the function and the receiver onto the stack. // Push the function and the receiver onto the stack.
...@@ -851,8 +852,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -851,8 +852,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register scratch2 = bytecode; Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table, DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode)); bytecode));
__ mov(bytecode_size_table, __ Move(bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address())); ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode. // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide; Label process_bytecode, extra_wide;
...@@ -1027,9 +1028,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ...@@ -1027,9 +1028,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// handler at the current bytecode offset. // handler at the current bytecode offset.
Label do_dispatch; Label do_dispatch;
__ bind(&do_dispatch); __ bind(&do_dispatch);
__ mov(kInterpreterDispatchTableRegister, __ Move(
Operand(ExternalReference::interpreter_dispatch_table_address( kInterpreterDispatchTableRegister,
masm->isolate()))); ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister, __ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister)); kInterpreterBytecodeOffsetRegister));
__ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2)); __ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2));
...@@ -1257,9 +1258,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { ...@@ -1257,9 +1258,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ mtlr(r0); __ mtlr(r0);
// Initialize the dispatch table register. // Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister, __ Move(
Operand(ExternalReference::interpreter_dispatch_table_address( kInterpreterDispatchTableRegister,
masm->isolate()))); ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame. // Get the bytecode array pointer from the frame.
__ LoadP(kInterpreterBytecodeArrayRegister, __ LoadP(kInterpreterBytecodeArrayRegister,
......
...@@ -19,7 +19,7 @@ namespace internal { ...@@ -19,7 +19,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address, void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) { ExitFrameType exit_frame_type) {
__ mov(r7, Operand(ExternalReference::Create(address))); __ Move(r7, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) { if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET); RelocInfo::CODE_TARGET);
...@@ -411,10 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, ...@@ -411,10 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ b(&use_receiver); __ b(&use_receiver);
} }
__ bind(&do_throw); __ bind(&do_throw);
{ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
}
// Throw away the result of the constructor invocation and use the // Throw away the result of the constructor invocation and use the
// on-stack receiver as the result. // on-stack receiver as the result.
...@@ -632,7 +629,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ...@@ -632,7 +629,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5: argc // r5: argc
// r6: argv // r6: argv
// r0,r7-r9, cp may be clobbered // r0,r7-r9, cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm); {
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ InitializeRootRegister();
}
// Enter an internal frame. // Enter an internal frame.
{ {
...@@ -642,7 +643,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ...@@ -642,7 +643,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate). // Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create( ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate()); IsolateAddressId::kContextAddress, masm->isolate());
__ mov(cp, Operand(context_address)); __ Move(cp, context_address);
__ LoadP(cp, MemOperand(cp)); __ LoadP(cp, MemOperand(cp));
// Push the function and the receiver onto the stack. // Push the function and the receiver onto the stack.
...@@ -861,8 +862,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -861,8 +862,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register scratch2 = bytecode; Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table, DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode)); bytecode));
__ mov(bytecode_size_table, __ Move(bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address())); ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode. // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide; Label process_bytecode, extra_wide;
...@@ -1263,9 +1264,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { ...@@ -1263,9 +1264,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Code::kHeaderSize - kHeapObjectTag)); Code::kHeaderSize - kHeapObjectTag));
// Initialize the dispatch table register. // Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister, __ Move(
Operand(ExternalReference::interpreter_dispatch_table_address( kInterpreterDispatchTableRegister,
masm->isolate()))); ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame. // Get the bytecode array pointer from the frame.
__ LoadP(kInterpreterBytecodeArrayRegister, __ LoadP(kInterpreterBytecodeArrayRegister,
......
...@@ -258,22 +258,25 @@ void JSEntryStub::Generate(MacroAssembler* masm) { ...@@ -258,22 +258,25 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Called from C // Called from C
__ function_descriptor(); __ function_descriptor();
ProfileEntryHookStub::MaybeCallEntryHook(masm); {
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// PPC LINUX ABI: // PPC LINUX ABI:
// preserve LR in pre-reserved slot in caller's frame // preserve LR in pre-reserved slot in caller's frame
__ mflr(r0); __ mflr(r0);
__ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize)); __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
// Save callee saved registers on the stack. // Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved); __ MultiPush(kCalleeSaved);
// Save callee-saved double registers. // Save callee-saved double registers.
__ MultiPushDoubles(kCalleeSavedDoubles); __ MultiPushDoubles(kCalleeSavedDoubles);
// Set up the reserved register for 0.0. // Set up the reserved register for 0.0.
__ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0); __ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
__ InitializeRootRegister(); __ InitializeRootRegister();
}
// Push a frame with special values setup to mark it as an entry frame. // Push a frame with special values setup to mark it as an entry frame.
// r3: code entry // r3: code entry
......
This diff is collapsed.
...@@ -220,6 +220,9 @@ class TurboAssembler : public Assembler { ...@@ -220,6 +220,9 @@ class TurboAssembler : public Assembler {
void LoadPC(Register dst); void LoadPC(Register dst);
void ComputeCodeStartAddress(Register dst); void ComputeCodeStartAddress(Register dst);
bool root_array_available() const { return root_array_available_; }
void set_root_array_available(bool v) { root_array_available_ = v; }
void StoreDouble(DoubleRegister src, const MemOperand& mem, void StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg); Register scratch = no_reg);
void StoreDoubleU(DoubleRegister src, const MemOperand& mem, void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
...@@ -432,6 +435,13 @@ class TurboAssembler : public Assembler { ...@@ -432,6 +435,13 @@ class TurboAssembler : public Assembler {
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low, void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift); Register src_high, uint32_t shift);
#endif #endif
#ifdef V8_EMBEDDED_BUILTINS
void LookupConstant(Register destination, Handle<Object> object);
void LookupExternalReference(Register destination,
ExternalReference reference);
#endif // V8_EMBEDDED_BUILTINS
// Returns the size of a call in instructions. Note, the value returned is // Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between // only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call. // checking the call size and emitting the actual call.
...@@ -504,6 +514,7 @@ class TurboAssembler : public Assembler { ...@@ -504,6 +514,7 @@ class TurboAssembler : public Assembler {
// Register move. May do nothing if the registers are identical. // Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); } void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value); void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al); void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src); void Move(DoubleRegister dst, DoubleRegister src);
...@@ -669,6 +680,7 @@ class TurboAssembler : public Assembler { ...@@ -669,6 +680,7 @@ class TurboAssembler : public Assembler {
static const int kSmiShift = kSmiTagSize + kSmiShiftSize; static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool has_frame_ = false; bool has_frame_ = false;
bool root_array_available_ = true;
Isolate* const isolate_; Isolate* const isolate_;
// This handle will be patched with the code object on installation. // This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_; Handle<HeapObject> code_object_;
......
...@@ -241,70 +241,73 @@ void JSEntryStub::Generate(MacroAssembler* masm) { ...@@ -241,70 +241,73 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit; Label invoke, handler_entry, exit;
ProfileEntryHookStub::MaybeCallEntryHook(masm); {
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// saving floating point registers // saving floating point registers
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
// 64bit ABI requires f8 to f15 be saved // 64bit ABI requires f8 to f15 be saved
__ lay(sp, MemOperand(sp, -8 * kDoubleSize)); __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
__ std(d8, MemOperand(sp)); __ std(d8, MemOperand(sp));
__ std(d9, MemOperand(sp, 1 * kDoubleSize)); __ std(d9, MemOperand(sp, 1 * kDoubleSize));
__ std(d10, MemOperand(sp, 2 * kDoubleSize)); __ std(d10, MemOperand(sp, 2 * kDoubleSize));
__ std(d11, MemOperand(sp, 3 * kDoubleSize)); __ std(d11, MemOperand(sp, 3 * kDoubleSize));
__ std(d12, MemOperand(sp, 4 * kDoubleSize)); __ std(d12, MemOperand(sp, 4 * kDoubleSize));
__ std(d13, MemOperand(sp, 5 * kDoubleSize)); __ std(d13, MemOperand(sp, 5 * kDoubleSize));
__ std(d14, MemOperand(sp, 6 * kDoubleSize)); __ std(d14, MemOperand(sp, 6 * kDoubleSize));
__ std(d15, MemOperand(sp, 7 * kDoubleSize)); __ std(d15, MemOperand(sp, 7 * kDoubleSize));
#else #else
// 31bit ABI requires you to store f4 and f6: // 31bit ABI requires you to store f4 and f6:
// http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417 // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
__ lay(sp, MemOperand(sp, -2 * kDoubleSize)); __ lay(sp, MemOperand(sp, -2 * kDoubleSize));
__ std(d4, MemOperand(sp)); __ std(d4, MemOperand(sp));
__ std(d6, MemOperand(sp, kDoubleSize)); __ std(d6, MemOperand(sp, kDoubleSize));
#endif #endif
// zLinux ABI // zLinux ABI
// Incoming parameters: // Incoming parameters:
// r2: code entry // r2: code entry
// r3: function // r3: function
// r4: receiver // r4: receiver
// r5: argc // r5: argc
// r6: argv // r6: argv
// Requires us to save the callee-preserved registers r6-r13 // Requires us to save the callee-preserved registers r6-r13
// General convention is to also save r14 (return addr) and // General convention is to also save r14 (return addr) and
// sp/r15 as well in a single STM/STMG // sp/r15 as well in a single STM/STMG
__ lay(sp, MemOperand(sp, -10 * kPointerSize)); __ lay(sp, MemOperand(sp, -10 * kPointerSize));
__ StoreMultipleP(r6, sp, MemOperand(sp, 0)); __ StoreMultipleP(r6, sp, MemOperand(sp, 0));
// Set up the reserved register for 0.0. // Set up the reserved register for 0.0.
// __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0); // __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
// Push a frame with special values setup to mark it as an entry frame. // Push a frame with special values setup to mark it as an entry frame.
// Bad FP (-1) // Bad FP (-1)
// SMI Marker // SMI Marker
// SMI Marker // SMI Marker
// kCEntryFPAddress // kCEntryFPAddress
// Frame type // Frame type
__ lay(sp, MemOperand(sp, -5 * kPointerSize)); __ lay(sp, MemOperand(sp, -5 * kPointerSize));
// Push a bad frame pointer to fail if it is used. // Push a bad frame pointer to fail if it is used.
__ LoadImmP(r10, Operand(-1)); __ LoadImmP(r10, Operand(-1));
StackFrame::Type marker = type(); StackFrame::Type marker = type();
__ Load(r9, Operand(StackFrame::TypeToMarker(marker))); __ Load(r9, Operand(StackFrame::TypeToMarker(marker)));
__ Load(r8, Operand(StackFrame::TypeToMarker(marker))); __ Load(r8, Operand(StackFrame::TypeToMarker(marker)));
// Save copies of the top frame descriptor on the stack. // Save copies of the top frame descriptor on the stack.
__ mov(r7, Operand(ExternalReference::Create( __ mov(r7, Operand(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate()))); IsolateAddressId::kCEntryFPAddress, isolate())));
__ LoadP(r7, MemOperand(r7)); __ LoadP(r7, MemOperand(r7));
__ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize)); __ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
// Set up frame pointer for the frame to be pushed. // Set up frame pointer for the frame to be pushed.
// Need to add kPointerSize, because sp has one extra // Need to add kPointerSize, because sp has one extra
// frame already for the frame type being pushed later. // frame already for the frame type being pushed later.
__ lay(fp, __ lay(fp, MemOperand(
MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize)); sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
__ InitializeRootRegister(); __ InitializeRootRegister();
}
// If this is the outermost JS call, set js_entry_sp value. // If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js; Label non_outermost_js;
......
This diff is collapsed.
...@@ -179,12 +179,18 @@ class TurboAssembler : public Assembler { ...@@ -179,12 +179,18 @@ class TurboAssembler : public Assembler {
return code_object_; return code_object_;
} }
#ifdef V8_EMBEDDED_BUILTINS
void LookupConstant(Register destination, Handle<Object> object);
void LookupExternalReference(Register destination,
ExternalReference reference);
#endif // V8_EMBEDDED_BUILTINS
// Returns the size of a call in instructions. // Returns the size of a call in instructions.
static int CallSize(Register target); static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working. // Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target); void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al, void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7); CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
...@@ -222,6 +228,7 @@ class TurboAssembler : public Assembler { ...@@ -222,6 +228,7 @@ class TurboAssembler : public Assembler {
// Register move. May do nothing if the registers are identical. // Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); } void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value); void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al); void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src); void Move(DoubleRegister dst, DoubleRegister src);
...@@ -1014,6 +1021,9 @@ class TurboAssembler : public Assembler { ...@@ -1014,6 +1021,9 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister(); void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst); void ComputeCodeStartAddress(Register dst);
bool root_array_available() const { return root_array_available_; }
void set_root_array_available(bool v) { root_array_available_ = v; }
private: private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize; static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
...@@ -1026,6 +1036,7 @@ class TurboAssembler : public Assembler { ...@@ -1026,6 +1036,7 @@ class TurboAssembler : public Assembler {
int num_double_arguments); int num_double_arguments);
bool has_frame_ = false; bool has_frame_ = false;
bool root_array_available_ = true;
Isolate* isolate_; Isolate* isolate_;
// This handle will be patched with the code object on installation. // This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_; Handle<HeapObject> code_object_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment