Commit e5f1b968 authored by Junliang Yan, committed by Commit Bot

PPC/s390: Reland "[builtins] Introduce further constant & external reference indirections"

Port 3f99a376

Original Commit Message:

    This is a reland of f5d30851

    Original change's description:
    > [builtins] Introduce further constant & external reference indirections
    >
    > This introduces further indirections for embedded constants and
    > external references for builtins generated by the macro-assembler.
    > The used mechanisms (LookupConstant and LookupExternalReference) are
    > identical to what we already use in CSA.
    >
    > Almost all builtins are now isolate-independent in both release and
    > debug modes. snapshot_blob.bin is roughly 670K smaller in embedded
    > builds vs. non-embedded builds, while libv8.so is roughly 280K larger.
    >
    > Bug: v8:6666
    > Change-Id: I7a6c2193ef5a763e6cf7543dd51597d6fff6c110
    > Reviewed-on: https://chromium-review.googlesource.com/1006581
    > Commit-Queue: Jakob Gruber <jgruber@chromium.org>
    > Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
    > Cr-Commit-Position: refs/heads/master@{#52810}

R=jgruber@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: I037faebce37a866091dc35e04500790591292622
Reviewed-on: https://chromium-review.googlesource.com/1031397
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#52855}
parent 6379e2a4
...@@ -19,7 +19,7 @@ namespace internal { ...@@ -19,7 +19,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address, void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) { ExitFrameType exit_frame_type) {
__ mov(r15, Operand(ExternalReference::Create(address))); __ Move(r15, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) { if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET); RelocInfo::CODE_TARGET);
...@@ -412,10 +412,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, ...@@ -412,10 +412,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
} }
__ bind(&do_throw); __ bind(&do_throw);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
}
// Throw away the result of the constructor invocation and use the // Throw away the result of the constructor invocation and use the
// on-stack receiver as the result. // on-stack receiver as the result.
...@@ -630,7 +627,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ...@@ -630,7 +627,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r6: argc // r6: argc
// r7: argv // r7: argv
// r0,r8-r9, cp may be clobbered // r0,r8-r9, cp may be clobbered
{
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm); ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ InitializeRootRegister();
}
// Enter an internal frame. // Enter an internal frame.
{ {
...@@ -639,7 +640,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ...@@ -639,7 +640,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate). // Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create( ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate()); IsolateAddressId::kContextAddress, masm->isolate());
__ mov(cp, Operand(context_address)); __ Move(cp, context_address);
__ LoadP(cp, MemOperand(cp)); __ LoadP(cp, MemOperand(cp));
// Push the function and the receiver onto the stack. // Push the function and the receiver onto the stack.
...@@ -851,8 +852,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -851,8 +852,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register scratch2 = bytecode; Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table, DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode)); bytecode));
__ mov(bytecode_size_table, __ Move(bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address())); ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode. // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide; Label process_bytecode, extra_wide;
...@@ -1027,9 +1028,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ...@@ -1027,9 +1028,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// handler at the current bytecode offset. // handler at the current bytecode offset.
Label do_dispatch; Label do_dispatch;
__ bind(&do_dispatch); __ bind(&do_dispatch);
__ mov(kInterpreterDispatchTableRegister, __ Move(
Operand(ExternalReference::interpreter_dispatch_table_address( kInterpreterDispatchTableRegister,
masm->isolate()))); ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister, __ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister)); kInterpreterBytecodeOffsetRegister));
__ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2)); __ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2));
...@@ -1257,9 +1258,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { ...@@ -1257,9 +1258,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ mtlr(r0); __ mtlr(r0);
// Initialize the dispatch table register. // Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister, __ Move(
Operand(ExternalReference::interpreter_dispatch_table_address( kInterpreterDispatchTableRegister,
masm->isolate()))); ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame. // Get the bytecode array pointer from the frame.
__ LoadP(kInterpreterBytecodeArrayRegister, __ LoadP(kInterpreterBytecodeArrayRegister,
......
...@@ -19,7 +19,7 @@ namespace internal { ...@@ -19,7 +19,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address, void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) { ExitFrameType exit_frame_type) {
__ mov(r7, Operand(ExternalReference::Create(address))); __ Move(r7, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) { if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET); RelocInfo::CODE_TARGET);
...@@ -411,10 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, ...@@ -411,10 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ b(&use_receiver); __ b(&use_receiver);
} }
__ bind(&do_throw); __ bind(&do_throw);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
}
// Throw away the result of the constructor invocation and use the // Throw away the result of the constructor invocation and use the
// on-stack receiver as the result. // on-stack receiver as the result.
...@@ -632,7 +629,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ...@@ -632,7 +629,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5: argc // r5: argc
// r6: argv // r6: argv
// r0,r7-r9, cp may be clobbered // r0,r7-r9, cp may be clobbered
{
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm); ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ InitializeRootRegister();
}
// Enter an internal frame. // Enter an internal frame.
{ {
...@@ -642,7 +643,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ...@@ -642,7 +643,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate). // Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create( ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate()); IsolateAddressId::kContextAddress, masm->isolate());
__ mov(cp, Operand(context_address)); __ Move(cp, context_address);
__ LoadP(cp, MemOperand(cp)); __ LoadP(cp, MemOperand(cp));
// Push the function and the receiver onto the stack. // Push the function and the receiver onto the stack.
...@@ -861,8 +862,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -861,8 +862,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register scratch2 = bytecode; Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table, DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode)); bytecode));
__ mov(bytecode_size_table, __ Move(bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address())); ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode. // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide; Label process_bytecode, extra_wide;
...@@ -1263,9 +1264,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { ...@@ -1263,9 +1264,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Code::kHeaderSize - kHeapObjectTag)); Code::kHeaderSize - kHeapObjectTag));
// Initialize the dispatch table register. // Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister, __ Move(
Operand(ExternalReference::interpreter_dispatch_table_address( kInterpreterDispatchTableRegister,
masm->isolate()))); ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame. // Get the bytecode array pointer from the frame.
__ LoadP(kInterpreterBytecodeArrayRegister, __ LoadP(kInterpreterBytecodeArrayRegister,
......
...@@ -258,6 +258,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { ...@@ -258,6 +258,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Called from C // Called from C
__ function_descriptor(); __ function_descriptor();
{
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm); ProfileEntryHookStub::MaybeCallEntryHook(masm);
// PPC LINUX ABI: // PPC LINUX ABI:
...@@ -274,6 +276,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { ...@@ -274,6 +276,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0); __ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
__ InitializeRootRegister(); __ InitializeRootRegister();
}
// Push a frame with special values setup to mark it as an entry frame. // Push a frame with special values setup to mark it as an entry frame.
// r3: code entry // r3: code entry
......
This diff is collapsed.
...@@ -220,6 +220,9 @@ class TurboAssembler : public Assembler { ...@@ -220,6 +220,9 @@ class TurboAssembler : public Assembler {
void LoadPC(Register dst); void LoadPC(Register dst);
void ComputeCodeStartAddress(Register dst); void ComputeCodeStartAddress(Register dst);
bool root_array_available() const { return root_array_available_; }
void set_root_array_available(bool v) { root_array_available_ = v; }
void StoreDouble(DoubleRegister src, const MemOperand& mem, void StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg); Register scratch = no_reg);
void StoreDoubleU(DoubleRegister src, const MemOperand& mem, void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
...@@ -432,6 +435,13 @@ class TurboAssembler : public Assembler { ...@@ -432,6 +435,13 @@ class TurboAssembler : public Assembler {
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low, void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift); Register src_high, uint32_t shift);
#endif #endif
#ifdef V8_EMBEDDED_BUILTINS
void LookupConstant(Register destination, Handle<Object> object);
void LookupExternalReference(Register destination,
ExternalReference reference);
#endif // V8_EMBEDDED_BUILTINS
// Returns the size of a call in instructions. Note, the value returned is // Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between // only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call. // checking the call size and emitting the actual call.
...@@ -504,6 +514,7 @@ class TurboAssembler : public Assembler { ...@@ -504,6 +514,7 @@ class TurboAssembler : public Assembler {
// Register move. May do nothing if the registers are identical. // Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); } void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value); void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al); void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src); void Move(DoubleRegister dst, DoubleRegister src);
...@@ -669,6 +680,7 @@ class TurboAssembler : public Assembler { ...@@ -669,6 +680,7 @@ class TurboAssembler : public Assembler {
static const int kSmiShift = kSmiTagSize + kSmiShiftSize; static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool has_frame_ = false; bool has_frame_ = false;
bool root_array_available_ = true;
Isolate* const isolate_; Isolate* const isolate_;
// This handle will be patched with the code object on installation. // This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_; Handle<HeapObject> code_object_;
......
...@@ -241,6 +241,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { ...@@ -241,6 +241,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit; Label invoke, handler_entry, exit;
{
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm); ProfileEntryHookStub::MaybeCallEntryHook(masm);
// saving floating point registers // saving floating point registers
...@@ -301,10 +303,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) { ...@@ -301,10 +303,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Set up frame pointer for the frame to be pushed. // Set up frame pointer for the frame to be pushed.
// Need to add kPointerSize, because sp has one extra // Need to add kPointerSize, because sp has one extra
// frame already for the frame type being pushed later. // frame already for the frame type being pushed later.
__ lay(fp, __ lay(fp, MemOperand(
MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize)); sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
__ InitializeRootRegister(); __ InitializeRootRegister();
}
// If this is the outermost JS call, set js_entry_sp value. // If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js; Label non_outermost_js;
......
This diff is collapsed.
...@@ -179,12 +179,18 @@ class TurboAssembler : public Assembler { ...@@ -179,12 +179,18 @@ class TurboAssembler : public Assembler {
return code_object_; return code_object_;
} }
#ifdef V8_EMBEDDED_BUILTINS
void LookupConstant(Register destination, Handle<Object> object);
void LookupExternalReference(Register destination,
ExternalReference reference);
#endif // V8_EMBEDDED_BUILTINS
// Returns the size of a call in instructions. // Returns the size of a call in instructions.
static int CallSize(Register target); static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working. // Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target); void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al, void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7); CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
...@@ -222,6 +228,7 @@ class TurboAssembler : public Assembler { ...@@ -222,6 +228,7 @@ class TurboAssembler : public Assembler {
// Register move. May do nothing if the registers are identical. // Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); } void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value); void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al); void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src); void Move(DoubleRegister dst, DoubleRegister src);
...@@ -1014,6 +1021,9 @@ class TurboAssembler : public Assembler { ...@@ -1014,6 +1021,9 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister(); void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst); void ComputeCodeStartAddress(Register dst);
bool root_array_available() const { return root_array_available_; }
void set_root_array_available(bool v) { root_array_available_ = v; }
private: private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize; static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
...@@ -1026,6 +1036,7 @@ class TurboAssembler : public Assembler { ...@@ -1026,6 +1036,7 @@ class TurboAssembler : public Assembler {
int num_double_arguments); int num_double_arguments);
bool has_frame_ = false; bool has_frame_ = false;
bool root_array_available_ = true;
Isolate* isolate_; Isolate* isolate_;
// This handle will be patched with the code object on installation. // This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_; Handle<HeapObject> code_object_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment