Commit e5f1b968 authored by Junliang Yan, committed by Commit Bot

PPC/s390: Reland "[builtins] Introduce further constant & external reference indirections"

Port 3f99a376

Original Commit Message:

    This is a reland of f5d30851

    Original change's description:
    > [builtins] Introduce further constant & external reference indirections
    >
    > This introduces further indirections for embedded constants and
    > external references for builtins generated by the macro-assembler.
    > The used mechanisms (LookupConstant and LookupExternalReference) are
    > identical to what we already use in CSA.
    >
    > Almost all builtins are now isolate-independent in both release and
    > debug modes. snapshot_blob.bin is roughly 670K smaller in embedded
    > builds vs. non-embedded builds, while libv8.so is roughly 280K larger.
    >
    > Bug: v8:6666
    > Change-Id: I7a6c2193ef5a763e6cf7543dd51597d6fff6c110
    > Reviewed-on: https://chromium-review.googlesource.com/1006581
    > Commit-Queue: Jakob Gruber <jgruber@chromium.org>
    > Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
    > Cr-Commit-Position: refs/heads/master@{#52810}

R=jgruber@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: I037faebce37a866091dc35e04500790591292622
Reviewed-on: https://chromium-review.googlesource.com/1031397
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#52855}
parent 6379e2a4
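In essence, the change replaces isolate-dependent immediates (mov(dst, Operand(<address>))) with loads through per-isolate tables reachable from the root register, so the generated builtin code contains no absolute addresses. A minimal stand-alone C++ model of the idea follows; IsolateModel, DirectLoad and IndirectLoad are illustrative names invented here, not V8 API:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical model of the indirection; the real implementations are
// TurboAssembler::LookupConstant / LookupExternalReference in the diff below.
struct IsolateModel {
  std::vector<const void*> external_reference_table;  // per-isolate addresses
};

// Old scheme: the absolute address is baked into the generated code,
// tying the code blob to a single isolate.
const void* DirectLoad(const void* baked_in_address) {
  return baked_in_address;
}

// New scheme: only a table index is baked in; the address is resolved at
// runtime through the isolate, keeping the code isolate-independent.
const void* IndirectLoad(const IsolateModel& isolate, uint32_t index) {
  return isolate.external_reference_table[index];
}

int main() {
  static int runtime_entry = 0;
  IsolateModel isolate;
  isolate.external_reference_table.push_back(&runtime_entry);
  // Both paths yield the same address; only the indirect one keeps the
  // "code" free of embedded pointers.
  std::printf("direct:   %p\n", DirectLoad(&runtime_entry));
  std::printf("indirect: %p\n", IndirectLoad(isolate, 0));
  return 0;
}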
@@ -19,7 +19,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
__ mov(r15, Operand(ExternalReference::Create(address)));
__ Move(r15, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -412,10 +412,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
}
__ bind(&do_throw);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
}
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -630,7 +627,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r6: argc
// r7: argv
// r0,r8-r9, cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
{
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ InitializeRootRegister();
}
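// The trampoline is entered before kRootRegister is set up, so the
// root-relative loads introduced by this patch must be suppressed around the
// entry hook: NoRootArrayScope disables them for its extent, and
// InitializeRootRegister() then establishes the root register for the code
// that follows.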
// Enter an internal frame.
{
@@ -639,7 +640,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
__ mov(cp, Operand(context_address));
__ Move(cp, context_address);
__ LoadP(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
@@ -851,8 +852,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ mov(bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address()));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -1027,9 +1028,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2));
@@ -1257,9 +1258,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ mtlr(r0);
// Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame.
__ LoadP(kInterpreterBytecodeArrayRegister,
......
@@ -19,7 +19,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
__ mov(r7, Operand(ExternalReference::Create(address)));
__ Move(r7, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -411,10 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ b(&use_receiver);
}
__ bind(&do_throw);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
}
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -632,7 +629,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5: argc
// r6: argv
// r0,r7-r9, cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
{
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ InitializeRootRegister();
}
// Enter an internal frame.
{
@@ -642,7 +643,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
__ mov(cp, Operand(context_address));
__ Move(cp, context_address);
__ LoadP(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
@@ -861,8 +862,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ mov(bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address()));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -1263,9 +1264,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Code::kHeaderSize - kHeapObjectTag));
// Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame.
__ LoadP(kInterpreterBytecodeArrayRegister,
......
@@ -258,22 +258,25 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Called from C
__ function_descriptor();
ProfileEntryHookStub::MaybeCallEntryHook(masm);
{
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// PPC LINUX ABI:
// preserve LR in pre-reserved slot in caller's frame
__ mflr(r0);
__ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
// PPC LINUX ABI:
// preserve LR in pre-reserved slot in caller's frame
__ mflr(r0);
__ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved);
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved);
// Save callee-saved double registers.
__ MultiPushDoubles(kCalleeSavedDoubles);
// Set up the reserved register for 0.0.
__ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
// Save callee-saved double registers.
__ MultiPushDoubles(kCalleeSavedDoubles);
// Set up the reserved register for 0.0.
__ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
__ InitializeRootRegister();
__ InitializeRootRegister();
}
// Push a frame with special values setup to mark it as an entry frame.
// r3: code entry
......
@@ -10,6 +10,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-stubs.h"
#include "src/debug/debug.h"
@@ -18,6 +19,7 @@
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/serializer-common.h"
#include "src/ppc/macro-assembler-ppc.h"
@@ -120,6 +122,63 @@ void TurboAssembler::Jump(Register target) {
bctr();
}
#ifdef V8_EMBEDDED_BUILTINS
void TurboAssembler::LookupConstant(Register destination,
Handle<Object> object) {
CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// TODO(jgruber, v8:6666): Support self-references. Currently, we'd end up
// adding the temporary code object to the constants list, before creating the
// final object in Factory::CopyCode.
CHECK(code_object_.is_null() || !object.equals(code_object_));
// Ensure the given object is in the builtins constants table and fetch its
// index.
BuiltinsConstantsTableBuilder* builder =
isolate()->builtins_constants_table_builder();
uint32_t index = builder->AddObject(object);
// TODO(jgruber): Load builtins from the builtins table.
// TODO(jgruber): Ensure that code generation can recognize constant targets
// in kArchCallCodeObject.
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
const uint32_t offset =
FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
LoadP(destination, MemOperand(destination, offset), r0);
}
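// Worked example of the offset computation above, assuming the usual 64-bit
// layout (FixedArray::kHeaderSize = 16, kPointerSize = 8, kHeapObjectTag = 1):
// the constant at index 3 is read from offset 16 + 3 * 8 - 1 = 39, i.e.
// LoadP(destination, MemOperand(destination, 39), r0). The is_uint19 check
// bounds this path to roughly 2^16 constants-table entries.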
void TurboAssembler::LookupExternalReference(Register destination,
ExternalReference reference) {
CHECK(reference.address() !=
ExternalReference::roots_array_start(isolate()).address());
CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// Encode as an index into the external reference table stored on the isolate.
ExternalReferenceEncoder encoder(isolate());
ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
CHECK(!v.is_from_api());
uint32_t index = v.index();
// Generate code to load from the external reference table.
int32_t roots_to_external_reference_offset =
Heap::roots_to_external_reference_table_offset() +
ExternalReferenceTable::OffsetOfEntry(index);
LoadP(destination,
MemOperand(kRootRegister, roots_to_external_reference_offset), r0);
}
#endif // V8_EMBEDDED_BUILTINS
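// Unlike LookupConstant, no handle is needed here: the external reference
// table sits at a fixed offset from the roots array, so one
// kRootRegister-relative LoadP resolves the address. Assuming
// OffsetOfEntry(index) is simply index * kPointerSize, entry 5 on 64-bit
// targets would load from roots_to_external_reference_table_offset() + 40.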
void MacroAssembler::JumpToJSEntry(Register target) {
Move(ip, target);
Jump(ip);
@@ -150,6 +209,18 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ppc code, never THUMB code
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
Register scratch = ip;
LookupConstant(scratch, code);
addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
Label skip;
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(scratch);
bind(&skip);
return;
}
#endif // V8_EMBEDDED_BUILTINS
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}
@@ -219,17 +290,20 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
Label start;
bind(&start);
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallSize(code, rmode, cond);
#endif
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
LookupConstant(ip, code);
addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
Label skip;
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
bind(&skip);
return;
}
#endif // V8_EMBEDDED_BUILTINS
Call(code.address(), rmode, cond);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
void TurboAssembler::Drop(int count) {
@@ -256,9 +330,33 @@ void TurboAssembler::Push(Smi* smi) {
}
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
!value.equals(CodeObject())) {
Heap::RootListIndex root_index;
if (!isolate()->heap()->IsRootHandle(value, &root_index)) {
LookupConstant(dst, value);
} else {
LoadRoot(dst, root_index);
}
return;
}
#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(value));
}
void TurboAssembler::Move(Register dst, ExternalReference reference) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
reference.address() !=
ExternalReference::roots_array_start(isolate()).address()) {
LookupExternalReference(dst, reference);
return;
}
#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(reference));
}
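// With these overloads in place, call sites throughout the patch change from
//   mov(r4, Operand(ExternalReference::Create(f)));
// to
//   Move(r4, ExternalReference::Create(f));
// which picks the root-list indirection when it is available and falls back
// to the plain mov() otherwise.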
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
DCHECK(cond == al);
if (dst != src) {
@@ -425,8 +523,7 @@ void TurboAssembler::CallRecordWriteStub(
pop(slot_parameter);
pop(object_parameter);
mov(isolate_parameter,
Operand(ExternalReference::isolate_address(isolate())));
Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -962,11 +1059,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
mov(r8, Operand(ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate())));
Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
StoreP(fp, MemOperand(r8));
mov(r8, Operand(ExternalReference::Create(IsolateAddressId::kContextAddress,
isolate())));
Move(r8,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
StoreP(cp, MemOperand(r8));
// Optionally save all volatile double registers.
@@ -1028,19 +1125,19 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Clear top frame.
li(r6, Operand::Zero());
mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate())));
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
StoreP(r6, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kContextAddress,
isolate())));
Move(ip,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
LoadP(cp, MemOperand(ip));
#ifdef DEBUG
mov(r6, Operand(Context::kInvalidContext));
mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kContextAddress,
isolate())));
Move(ip,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
StoreP(r6, MemOperand(ip));
#endif
@@ -1195,9 +1292,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& actual) {
Label skip_hook;
ExternalReference debug_hook_avtive =
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r7, Operand(debug_hook_avtive));
Move(r7, debug_hook_active);
LoadByte(r7, MemOperand(r7), r0);
extsb(r7, r7);
CmpSmiLiteral(r7, Smi::kZero, r0);
@@ -1581,7 +1678,7 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// should remove this need and make the runtime routine entry code
// smarter.
mov(r3, Operand(f->nargs));
mov(r4, Operand(ExternalReference::Create(f)));
Move(r4, ExternalReference::Create(f));
CallStubDelayed(new (zone) CEntryStub(nullptr,
#if V8_TARGET_ARCH_PPC64
f->result_size,
@@ -1605,7 +1702,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// should remove this need and make the runtime routine entry code
// smarter.
mov(r3, Operand(num_arguments));
mov(r4, Operand(ExternalReference::Create(f)));
Move(r4, ExternalReference::Create(f));
CEntryStub stub(isolate(),
#if V8_TARGET_ARCH_PPC64
f->result_size,
@@ -1628,7 +1725,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
mov(r4, Operand(builtin));
Move(r4, builtin);
CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -1652,7 +1749,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference::Create(counter)));
Move(scratch2, ExternalReference::Create(counter));
lwz(scratch1, MemOperand(scratch2));
addi(scratch1, scratch1, Operand(value));
stw(scratch1, MemOperand(scratch2));
@@ -1664,7 +1761,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference::Create(counter)));
Move(scratch2, ExternalReference::Create(counter));
lwz(scratch1, MemOperand(scratch2));
subi(scratch1, scratch1, Operand(value));
stw(scratch1, MemOperand(scratch2));
@@ -1909,7 +2006,7 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
mov(ip, Operand(function));
Move(ip, function);
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
......
@@ -220,6 +220,9 @@ class TurboAssembler : public Assembler {
void LoadPC(Register dst);
void ComputeCodeStartAddress(Register dst);
bool root_array_available() const { return root_array_available_; }
void set_root_array_available(bool v) { root_array_available_ = v; }
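// Presumably the hook that NoRootArrayScope (used in the entry-stub changes
// in this patch) toggles to suppress root-relative constant loads.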
void StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
@@ -432,6 +435,13 @@
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
#endif
#ifdef V8_EMBEDDED_BUILTINS
void LookupConstant(Register destination, Handle<Object> object);
void LookupExternalReference(Register destination,
ExternalReference reference);
#endif // V8_EMBEDDED_BUILTINS
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call.
@@ -504,6 +514,7 @@
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
@@ -669,6 +680,7 @@
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool has_frame_ = false;
bool root_array_available_ = true;
Isolate* const isolate_;
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
......
@@ -241,70 +241,73 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
ProfileEntryHookStub::MaybeCallEntryHook(masm);
{
NoRootArrayScope no_root_array(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// saving floating point registers
#if V8_TARGET_ARCH_S390X
// 64bit ABI requires f8 to f15 be saved
__ lay(sp, MemOperand(sp, -8 * kDoubleSize));
__ std(d8, MemOperand(sp));
__ std(d9, MemOperand(sp, 1 * kDoubleSize));
__ std(d10, MemOperand(sp, 2 * kDoubleSize));
__ std(d11, MemOperand(sp, 3 * kDoubleSize));
__ std(d12, MemOperand(sp, 4 * kDoubleSize));
__ std(d13, MemOperand(sp, 5 * kDoubleSize));
__ std(d14, MemOperand(sp, 6 * kDoubleSize));
__ std(d15, MemOperand(sp, 7 * kDoubleSize));
// 64bit ABI requires f8 to f15 be saved
__ lay(sp, MemOperand(sp, -8 * kDoubleSize));
__ std(d8, MemOperand(sp));
__ std(d9, MemOperand(sp, 1 * kDoubleSize));
__ std(d10, MemOperand(sp, 2 * kDoubleSize));
__ std(d11, MemOperand(sp, 3 * kDoubleSize));
__ std(d12, MemOperand(sp, 4 * kDoubleSize));
__ std(d13, MemOperand(sp, 5 * kDoubleSize));
__ std(d14, MemOperand(sp, 6 * kDoubleSize));
__ std(d15, MemOperand(sp, 7 * kDoubleSize));
#else
// 31bit ABI requires you to store f4 and f6:
// http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
__ lay(sp, MemOperand(sp, -2 * kDoubleSize));
__ std(d4, MemOperand(sp));
__ std(d6, MemOperand(sp, kDoubleSize));
// 31bit ABI requires you to store f4 and f6:
// http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
__ lay(sp, MemOperand(sp, -2 * kDoubleSize));
__ std(d4, MemOperand(sp));
__ std(d6, MemOperand(sp, kDoubleSize));
#endif
// zLinux ABI
// Incoming parameters:
// r2: code entry
// r3: function
// r4: receiver
// r5: argc
// r6: argv
// Requires us to save the callee-preserved registers r6-r13
// General convention is to also save r14 (return addr) and
// sp/r15 as well in a single STM/STMG
__ lay(sp, MemOperand(sp, -10 * kPointerSize));
__ StoreMultipleP(r6, sp, MemOperand(sp, 0));
// Set up the reserved register for 0.0.
// __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
// Push a frame with special values setup to mark it as an entry frame.
// Bad FP (-1)
// SMI Marker
// SMI Marker
// kCEntryFPAddress
// Frame type
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
// Push a bad frame pointer to fail if it is used.
__ LoadImmP(r10, Operand(-1));
StackFrame::Type marker = type();
__ Load(r9, Operand(StackFrame::TypeToMarker(marker)));
__ Load(r8, Operand(StackFrame::TypeToMarker(marker)));
// Save copies of the top frame descriptor on the stack.
__ mov(r7, Operand(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate())));
__ LoadP(r7, MemOperand(r7));
__ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
// Set up frame pointer for the frame to be pushed.
// Need to add kPointerSize, because sp has one extra
// frame already for the frame type being pushed later.
__ lay(fp,
MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
__ InitializeRootRegister();
// zLinux ABI
// Incoming parameters:
// r2: code entry
// r3: function
// r4: receiver
// r5: argc
// r6: argv
// Requires us to save the callee-preserved registers r6-r13
// General convention is to also save r14 (return addr) and
// sp/r15 as well in a single STM/STMG
__ lay(sp, MemOperand(sp, -10 * kPointerSize));
__ StoreMultipleP(r6, sp, MemOperand(sp, 0));
// Set up the reserved register for 0.0.
// __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
// Push a frame with special values setup to mark it as an entry frame.
// Bad FP (-1)
// SMI Marker
// SMI Marker
// kCEntryFPAddress
// Frame type
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
// Push a bad frame pointer to fail if it is used.
__ LoadImmP(r10, Operand(-1));
StackFrame::Type marker = type();
__ Load(r9, Operand(StackFrame::TypeToMarker(marker)));
__ Load(r8, Operand(StackFrame::TypeToMarker(marker)));
// Save copies of the top frame descriptor on the stack.
__ mov(r7, Operand(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate())));
__ LoadP(r7, MemOperand(r7));
__ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
// Set up frame pointer for the frame to be pushed.
// Need to add kPointerSize, because sp has one extra
// frame already for the frame type being pushed later.
__ lay(fp, MemOperand(
sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
__ InitializeRootRegister();
}
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
......
@@ -10,6 +10,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-stubs.h"
#include "src/debug/debug.h"
@@ -18,6 +19,7 @@
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/serializer-common.h"
#include "src/s390/macro-assembler-s390.h"
@@ -115,7 +117,64 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
void TurboAssembler::Jump(Register target) { b(target); }
#ifdef V8_EMBEDDED_BUILTINS
void TurboAssembler::LookupConstant(Register destination,
Handle<Object> object) {
CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// TODO(jgruber, v8:6666): Support self-references. Currently, we'd end up
// adding the temporary code object to the constants list, before creating the
// final object in Factory::CopyCode.
CHECK(code_object_.is_null() || !object.equals(code_object_));
// Ensure the given object is in the builtins constants table and fetch its
// index.
BuiltinsConstantsTableBuilder* builder =
isolate()->builtins_constants_table_builder();
uint32_t index = builder->AddObject(object);
// TODO(jgruber): Load builtins from the builtins table.
// TODO(jgruber): Ensure that code generation can recognize constant targets
// in kArchCallCodeObject.
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
const uint32_t offset =
FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
LoadP(destination, MemOperand(destination, offset));
}
void TurboAssembler::LookupExternalReference(Register destination,
ExternalReference reference) {
CHECK(reference.address() !=
ExternalReference::roots_array_start(isolate()).address());
CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// Encode as an index into the external reference table stored on the isolate.
ExternalReferenceEncoder encoder(isolate());
ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
CHECK(!v.is_from_api());
uint32_t index = v.index();
// Generate code to load from the external reference table.
int32_t roots_to_external_reference_offset =
Heap::roots_to_external_reference_table_offset() +
ExternalReferenceTable::OffsetOfEntry(index);
LoadP(destination,
MemOperand(kRootRegister, roots_to_external_reference_offset));
}
#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }
void MacroAssembler::JumpToJSEntry(Register target) {
Move(ip, target);
@@ -145,6 +204,15 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
Register scratch = r1;
LookupConstant(scratch, code);
la(scratch, MemOperand(scratch, Code::kHeaderSize - kHeapObjectTag));
b(cond, scratch);
return;
}
#endif // V8_EMBEDDED_BUILTINS
jump(code, rmode, cond);
}
@@ -217,15 +285,17 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallSize(code, rmode, cond);
Label start;
bind(&start);
#endif
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
LookupConstant(ip, code);
la(ip, MemOperand(ip, Code::kHeaderSize - kHeapObjectTag));
Call(ip);
return;
}
#endif // V8_EMBEDDED_BUILTINS
call(code, rmode);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
void TurboAssembler::Drop(int count) {
@@ -259,12 +329,40 @@ void TurboAssembler::Push(Smi* smi) {
}
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
!value.equals(CodeObject())) {
Heap::RootListIndex root_index;
if (!isolate()->heap()->IsRootHandle(value, &root_index)) {
LookupConstant(dst, value);
} else {
LoadRoot(dst, root_index);
}
return;
}
#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(value));
}
void TurboAssembler::Move(Register dst, ExternalReference reference) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
reference.address() !=
ExternalReference::roots_array_start(isolate()).address()) {
LookupExternalReference(dst, reference);
return;
}
#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(reference));
}
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
if (dst != src) {
LoadRR(dst, src);
if (cond == al) {
LoadRR(dst, src);
} else {
LoadOnConditionP(cond, dst, src);
}
}
}
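// Note the widening here relative to the old body (and to the PPC version,
// which still DCHECKs cond == al): predicated register moves are now handled
// with LoadOnConditionP instead of being unsupported.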
@@ -424,8 +522,7 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter);
Pop(object_parameter);
mov(isolate_parameter,
Operand(ExternalReference::isolate_address(isolate())));
Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -986,11 +1083,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
mov(r1, Operand(ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate())));
Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
StoreP(fp, MemOperand(r1));
mov(r1, Operand(ExternalReference::Create(IsolateAddressId::kContextAddress,
isolate())));
Move(r1,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
StoreP(cp, MemOperand(r1));
// Optionally save all volatile double registers.
@@ -1048,8 +1145,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
// Clear top frame.
mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate())));
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
StoreP(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);
// Restore current context from top and clear it in debug mode.
@@ -1218,9 +1315,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& actual) {
Label skip_hook;
ExternalReference debug_hook_avtive =
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r6, Operand(debug_hook_avtive));
Move(r6, debug_hook_active);
tm(MemOperand(r6), Operand::Zero());
bne(&skip_hook);
@@ -1481,7 +1578,7 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
mov(r2, Operand(f->nargs));
mov(r3, Operand(ExternalReference::Create(f)));
Move(r3, ExternalReference::Create(f));
CallStubDelayed(new (zone) CEntryStub(nullptr,
#if V8_TARGET_ARCH_S390X
f->result_size,
@@ -1505,7 +1602,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// should remove this need and make the runtime routine entry code
// smarter.
mov(r2, Operand(num_arguments));
mov(r3, Operand(ExternalReference::Create(f)));
Move(r3, ExternalReference::Create(f));
CEntryStub stub(isolate(),
#if V8_TARGET_ARCH_S390X
f->result_size,
@@ -1527,7 +1624,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
mov(r3, Operand(builtin));
Move(r3, builtin);
CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -1550,11 +1647,11 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0 && is_int8(value));
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch1, Operand(ExternalReference::Create(counter)));
Move(scratch2, ExternalReference::Create(counter));
// @TODO(john.yan): can be optimized by asi()
LoadW(scratch2, MemOperand(scratch1));
AddP(scratch2, Operand(value));
StoreW(scratch2, MemOperand(scratch1));
LoadW(scratch1, MemOperand(scratch2));
AddP(scratch1, Operand(value));
StoreW(scratch1, MemOperand(scratch2));
}
}
@@ -1562,11 +1659,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0 && is_int8(value));
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch1, Operand(ExternalReference::Create(counter)));
Move(scratch2, ExternalReference::Create(counter));
// @TODO(john.yan): can be optimized by asi()
LoadW(scratch2, MemOperand(scratch1));
AddP(scratch2, Operand(-value));
StoreW(scratch2, MemOperand(scratch1));
LoadW(scratch1, MemOperand(scratch2));
AddP(scratch1, Operand(-value));
StoreW(scratch1, MemOperand(scratch2));
}
}
@@ -1799,7 +1896,7 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
mov(ip, Operand(function));
Move(ip, function);
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
......
@@ -179,12 +179,18 @@ class TurboAssembler : public Assembler {
return code_object_;
}
#ifdef V8_EMBEDDED_BUILTINS
void LookupConstant(Register destination, Handle<Object> object);
void LookupExternalReference(Register destination,
ExternalReference reference);
#endif // V8_EMBEDDED_BUILTINS
// Returns the size of a call in instructions.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -222,6 +228,7 @@
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
@@ -1014,6 +1021,9 @@
void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst);
bool root_array_available() const { return root_array_available_; }
void set_root_array_available(bool v) { root_array_available_ = v; }
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1026,6 +1036,7 @@
int num_double_arguments);
bool has_frame_ = false;
bool root_array_available_ = true;
Isolate* isolate_;
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
......