Commit 2b23e892 authored by Jaideep Bajwa, committed by Commit Bot

PPC/s390: Move TF parts of MacroAssembler into new TurboAssembler.

Port dfdcaf43
Port 2e1f5567

Original Commit Message:

    This CL introduces TurboAssembler, a super-class of Assembler and sub-class
    of MacroAssembler. TurboAssembler contains all the functionality that is used
    by Turbofan and previously was part of MacroAssembler. TurboAssembler has
    access to the isolate but, in contrast to MacroAssembler, does not expect to
    be running on the main thread.

R=neis@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=v8:6048
LOG=N

Change-Id: I3f51771afefe46410db7cda2625472d78c87f8c6
Reviewed-on: https://chromium-review.googlesource.com/583584Reviewed-by: 's avatarGeorg Neis <neis@chromium.org>
Commit-Queue: Jaideep Bajwa <bjaideep@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#46900}
parent 231bb1a2
......@@ -16,8 +16,7 @@ namespace v8 {
namespace internal {
namespace compiler {
#define __ masm()->
#define __ tasm()->
#define kScratchReg r11
......@@ -213,7 +212,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(scratch1_, object_, offset_);
}
if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
......@@ -430,28 +429,27 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_FLOAT_MODULO() \
do { \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
0, 2); \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
#define ASSEMBLE_FLOAT_MODULO() \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
__ CallCFunction( \
ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2); \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
FrameScope scope(masm(), StackFrame::MANUAL); \
FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
0, 1); \
__ CallCFunction( \
ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
......@@ -461,12 +459,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
FrameScope scope(masm(), StackFrame::MANUAL); \
FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
0, 2); \
i.InputDoubleRegister(1)); \
__ CallCFunction( \
ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
......@@ -844,20 +842,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
void FlushPendingPushRegisters(MacroAssembler* masm,
void FlushPendingPushRegisters(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
masm->Push((*pending_pushes)[0]);
tasm->Push((*pending_pushes)[0]);
break;
case 2:
masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
......@@ -868,18 +866,18 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
pending_pushes->resize(0);
}
void AddPendingPushRegister(MacroAssembler* masm,
void AddPendingPushRegister(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes,
Register reg) {
pending_pushes->push_back(reg);
if (pending_pushes->size() == 3 || reg.is(ip)) {
FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
}
}
void AdjustStackPointerForTailCall(
MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
......@@ -887,15 +885,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(masm, state, pending_pushes);
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(masm, state, pending_pushes);
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
state->IncreaseSPDelta(stack_slot_delta);
}
}
......@@ -918,20 +916,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
masm(), frame_access_state(),
tasm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ LoadP(ip, g.SlotToMemOperand(source_location.index()));
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
source_location.GetRegister());
} else if (source.IsImmediate()) {
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else {
// Pushes of non-scalar data types is not supported.
......@@ -939,15 +937,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
move->Eliminate();
}
FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
AdjustStackPointerForTailCall(masm(), frame_access_state(),
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
AdjustStackPointerForTailCall(masm(), frame_access_state(),
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
......@@ -961,7 +959,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
switch (opcode) {
case kArchCallCodeObject: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
masm());
tasm());
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
......@@ -990,7 +988,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
}
......@@ -1008,7 +1006,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallJSFunction: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
masm());
tasm());
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
......@@ -1102,7 +1100,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchStoreWithWriteBarrier: {
......@@ -2059,14 +2058,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
__ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
__ Call(isolate()->builtins()->builtin_handle(trap_id),
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
......@@ -2199,7 +2198,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
__ isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
......@@ -2585,11 +2584,11 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block tramoline pool emission for duration of padding.
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
masm());
tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
......
......@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
namespace compiler {
#define __ masm()->
#define __ tasm()->
#define kScratchReg ip
......@@ -621,26 +621,26 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ LoadlW(i.OutputRegister(), r0); \
} while (0)
#define ASSEMBLE_FLOAT_MODULO() \
do { \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
0, 2); \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
#define ASSEMBLE_FLOAT_MODULO() \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
__ CallCFunction( \
ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2); \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
FrameScope scope(masm(), StackFrame::MANUAL); \
FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
0, 1); \
__ CallCFunction( \
ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
......@@ -649,12 +649,12 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
FrameScope scope(masm(), StackFrame::MANUAL); \
FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
0, 2); \
__ CallCFunction( \
ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
......@@ -1050,20 +1050,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
void FlushPendingPushRegisters(MacroAssembler* masm,
void FlushPendingPushRegisters(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
masm->Push((*pending_pushes)[0]);
tasm->Push((*pending_pushes)[0]);
break;
case 2:
masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
......@@ -1074,17 +1074,17 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
pending_pushes->resize(0);
}
void AddPendingPushRegister(MacroAssembler* masm,
void AddPendingPushRegister(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes,
Register reg) {
pending_pushes->push_back(reg);
if (pending_pushes->size() == 3 || reg.is(ip)) {
FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
}
}
void AdjustStackPointerForTailCall(
MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
......@@ -1092,15 +1092,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(masm, state, pending_pushes);
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(masm, state, pending_pushes);
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
......@@ -1123,20 +1123,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
masm(), frame_access_state(),
tasm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ LoadP(ip, g.SlotToMemOperand(source_location.index()));
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
source_location.GetRegister());
} else if (source.IsImmediate()) {
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else {
// Pushes of non-scalar data types is not supported.
......@@ -1144,15 +1144,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
move->Eliminate();
}
FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
AdjustStackPointerForTailCall(masm(), frame_access_state(),
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
AdjustStackPointerForTailCall(masm(), frame_access_state(),
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
......@@ -1196,7 +1196,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
}
......@@ -1292,7 +1292,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
......@@ -2473,14 +2474,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
__ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
__ Call(isolate()->builtins()->builtin_handle(trap_id),
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
......@@ -2585,7 +2586,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
__ isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
......@@ -2938,7 +2939,7 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % 2);
......
......@@ -200,9 +200,8 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
// See assembler-ppc-inl.h for inlined constructors
Operand::Operand(Handle<HeapObject> handle) {
AllowHandleDereference using_location;
rm_ = no_reg;
value_.immediate = reinterpret_cast<intptr_t>(handle.location());
value_.immediate = reinterpret_cast<intptr_t>(handle.address());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
......
......@@ -2313,20 +2313,20 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
if (tasm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(tasm,
#if V8_TARGET_ARCH_PPC64
14 * Assembler::kInstrSize);
#else
11 * Assembler::kInstrSize);
#endif
__ mflr(r0);
__ Push(r0, ip);
__ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ Pop(r0, ip);
__ mtlr(r0);
tasm->mflr(r0);
tasm->Push(r0, ip);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->Pop(r0, ip);
tasm->mtlr(r0);
}
}
......
......@@ -22,29 +22,19 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, size),
has_frame_(false),
isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate_->heap()->undefined_value(), isolate_);
}
}
: TurboAssembler(isolate, buffer, size, create_code_object) {}
void MacroAssembler::Jump(Register target) {
void TurboAssembler::Jump(Register target) {
mtctr(target);
bctr();
}
void MacroAssembler::JumpToJSEntry(Register target) {
Move(ip, target);
Jump(ip);
}
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
Label skip;
......@@ -59,27 +49,22 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
bind(&skip);
}
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
CRegister cr) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
}
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ppc code, never THUMB code
AllowHandleDereference using_location;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond);
}
int TurboAssembler::CallSize(Register target) { return 2 * kInstrSize; }
int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }
void MacroAssembler::Call(Register target) {
void TurboAssembler::Call(Register target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
......@@ -91,28 +76,24 @@ void MacroAssembler::Call(Register target) {
DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallJSEntry(Register target) {
DCHECK(target.is(ip));
Call(target);
}
int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
}
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(cond == al);
......@@ -137,15 +118,12 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
AllowHandleDereference using_location;
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
return CallSize(code.address(), rmode, cond);
}
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
......@@ -159,31 +137,29 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
int expected_size = CallSize(code, rmode, cond);
#endif
AllowHandleDereference using_location;
Call(reinterpret_cast<Address>(code.location()), rmode, cond);
Call(code.address(), rmode, cond);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::Drop(int count) {
void TurboAssembler::Drop(int count) {
if (count > 0) {
Add(sp, sp, count * kPointerSize, r0);
}
}
void MacroAssembler::Drop(Register count, Register scratch) {
void TurboAssembler::Drop(Register count, Register scratch) {
ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
add(sp, sp, scratch);
}
void MacroAssembler::Call(Label* target) { b(target, SetLK); }
void TurboAssembler::Call(Label* target) { b(target, SetLK); }
void MacroAssembler::Push(Handle<HeapObject> handle) {
void TurboAssembler::Push(Handle<HeapObject> handle) {
mov(r0, Operand(handle));
push(r0);
}
void MacroAssembler::Push(Smi* smi) {
void TurboAssembler::Push(Smi* smi) {
mov(r0, Operand(smi));
push(r0);
}
......@@ -196,27 +172,24 @@ void MacroAssembler::PushObject(Handle<Object> handle) {
}
}
void MacroAssembler::Move(Register dst, Handle<HeapObject> value) {
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
mov(dst, Operand(value));
}
void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
DCHECK(cond == al);
if (!dst.is(src)) {
mr(dst, src);
}
}
void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
if (!dst.is(src)) {
fmr(dst, src);
}
}
void MacroAssembler::MultiPush(RegList regs, Register location) {
void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
......@@ -229,8 +202,7 @@ void MacroAssembler::MultiPush(RegList regs, Register location) {
}
}
void MacroAssembler::MultiPop(RegList regs, Register location) {
void TurboAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
......@@ -242,8 +214,7 @@ void MacroAssembler::MultiPop(RegList regs, Register location) {
addi(location, location, Operand(stack_offset));
}
void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
int16_t num_to_push = NumberOfBitsSet(dregs);
int16_t stack_offset = num_to_push * kDoubleSize;
......@@ -257,8 +228,7 @@ void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
}
}
void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
......@@ -271,14 +241,12 @@ void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
addi(location, location, Operand(stack_offset));
}
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond) {
DCHECK(cond == al);
LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}
void MacroAssembler::InNewSpace(Register object, Register scratch,
Condition cond, Label* branch) {
DCHECK(cond == eq || cond == ne);
......@@ -567,7 +535,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
void MacroAssembler::PushCommonFrame(Register marker_reg) {
void TurboAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
......@@ -607,7 +575,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) {
mtlr(r0);
}
void MacroAssembler::PushStandardFrame(Register function_reg) {
void TurboAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
......@@ -630,7 +598,7 @@ void MacroAssembler::PushStandardFrame(Register function_reg) {
addi(fp, sp, Operand(fp_delta * kPointerSize));
}
void MacroAssembler::RestoreFrameStateForTailCall() {
void TurboAssembler::RestoreFrameStateForTailCall() {
if (FLAG_enable_embedded_constant_pool) {
LoadP(kConstantPoolRegister,
MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
......@@ -708,66 +676,61 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
return MemOperand(sp, doubles_size + register_offset);
}
void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN.
fsub(dst, src, kDoubleRegZero);
}
void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
MovIntToDouble(dst, src, r0);
fcfid(dst, dst);
}
void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
DoubleRegister dst) {
MovUnsignedIntToDouble(dst, src, r0);
fcfid(dst, dst);
}
void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
MovIntToDouble(dst, src, r0);
fcfids(dst, dst);
}
void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
DoubleRegister dst) {
MovUnsignedIntToDouble(dst, src, r0);
fcfids(dst, dst);
}
#if V8_TARGET_ARCH_PPC64
void MacroAssembler::ConvertInt64ToDouble(Register src,
void TurboAssembler::ConvertInt64ToDouble(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfid(double_dst, double_dst);
}
void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfidus(double_dst, double_dst);
}
void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfidu(double_dst, double_dst);
}
void MacroAssembler::ConvertInt64ToFloat(Register src,
void TurboAssembler::ConvertInt64ToFloat(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfids(double_dst, double_dst);
}
#endif
void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
const Register dst_hi,
#endif
......@@ -790,7 +753,7 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
}
#if V8_TARGET_ARCH_PPC64
void MacroAssembler::ConvertDoubleToUnsignedInt64(
void TurboAssembler::ConvertDoubleToUnsignedInt64(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
if (rounding_mode == kRoundToZero) {
......@@ -806,7 +769,7 @@ void MacroAssembler::ConvertDoubleToUnsignedInt64(
#endif
#if !V8_TARGET_ARCH_PPC64
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
......@@ -831,7 +794,7 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
bind(&done);
}
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
......@@ -853,7 +816,7 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
}
}
void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
......@@ -878,7 +841,7 @@ void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
bind(&done);
}
void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
......@@ -900,7 +863,7 @@ void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
}
}
void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high, shift));
......@@ -924,7 +887,7 @@ void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
bind(&done);
}
void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
......@@ -955,19 +918,17 @@ void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}
void MacroAssembler::LoadConstantPoolPointerRegister(Register base,
void TurboAssembler::LoadConstantPoolPointerRegister(Register base,
int code_start_delta) {
add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
code_start_delta);
}
void MacroAssembler::LoadConstantPoolPointerRegister() {
void TurboAssembler::LoadConstantPoolPointerRegister() {
mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
}
void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
......@@ -985,8 +946,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
void MacroAssembler::Prologue(bool code_pre_aging, Register base,
void TurboAssembler::Prologue(bool code_pre_aging, Register base,
int prologue_offset) {
DCHECK(!base.is(no_reg));
{
......@@ -1028,8 +988,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
// Push type explicitly so we can leverage the constant pool.
......@@ -1048,8 +1007,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
}
}
int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
// r3: preserved
// r4: preserved
......@@ -1196,7 +1154,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
int MacroAssembler::ActivationFrameAlignment() {
int TurboAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
......@@ -1256,17 +1214,15 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, d1);
}
void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d1);
}
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
......@@ -1521,7 +1477,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
InvokeFunction(r4, expected, actual, flag);
}
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
......@@ -1820,7 +1775,7 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
cmp(obj, r0);
}
void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
......@@ -1852,8 +1807,7 @@ void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
}
void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
intptr_t right,
Register overflow_dst,
Register scratch) {
......@@ -1878,8 +1832,7 @@ void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
}
}
void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
......@@ -1981,7 +1934,7 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
void TurboAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
// Block constant pool for the call instruction sequence.
......@@ -1996,8 +1949,7 @@ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
......@@ -2049,6 +2001,49 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
fcmpu(double_scratch, double_input);
bind(&done);
}
void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
DoubleRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
// If we fell through then inline version didn't succeed - call stub instead.
mflr(r0);
push(r0);
// Put input on stack.
stfdu(double_input, MemOperand(sp, -kDoubleSize));
CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
addi(sp, sp, Operand(kDoubleSize));
pop(r0);
mtlr(r0);
bind(&done);
}
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
DoubleRegister double_scratch = kScratchDoubleReg;
#if !V8_TARGET_ARCH_PPC64
Register scratch = ip;
#endif
ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_PPC64
scratch,
#endif
result, double_scratch);
// Test for overflow
#if V8_TARGET_ARCH_PPC64
TestIfInt32(result, r0);
#else
TestIfInt32(scratch, result, r0);
#endif
beq(done);
}
void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
int num_least_bits) {
......@@ -2067,7 +2062,7 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
rlwinm(dst, src, 0, 32 - num_least_bits, 31);
}
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
......@@ -2076,7 +2071,13 @@ void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
mov(r3, Operand(f->nargs));
mov(r4, Operand(ExternalReference(f, isolate())));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
CallStubDelayed(new (zone) CEntryStub(nullptr,
#if V8_TARGET_ARCH_PPC64
f->result_size,
#else
1,
#endif
save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
......@@ -2167,15 +2168,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
void MacroAssembler::Assert(Condition cond, BailoutReason reason,
void TurboAssembler::Assert(Condition cond, BailoutReason reason,
CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
Label L;
b(cond, &L, cr);
Abort(reason);
......@@ -2183,8 +2181,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
bind(&L);
}
void MacroAssembler::Abort(BailoutReason reason) {
void TurboAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
......@@ -2572,8 +2569,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
static const int kRegisterPassedArguments = 8;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (num_double_arguments > DoubleRegister::kNumRegisters) {
......@@ -2628,8 +2624,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
SmiUntag(index, index);
}
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
......@@ -2655,20 +2650,16 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
}
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (src2.is(d1)) {
DCHECK(!src1.is(d2));
......@@ -2680,33 +2671,28 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
void MacroAssembler::CallCFunction(ExternalReference function,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
mov(ip, Operand(function));
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(ExternalReference function,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
......@@ -2769,8 +2755,7 @@ void MacroAssembler::DecodeConstantPoolOffset(Register result,
bind(&done);
}
void MacroAssembler::CheckPageFlag(
void TurboAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
int mask, Condition cc, Label* condition_met) {
......@@ -2778,7 +2763,8 @@ void MacroAssembler::CheckPageFlag(
ClearRightImm(scratch, object, Operand(kPageSizeBits));
LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(r0, scratch, Operand(mask), SetRC);
mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC);
if (cc == ne) {
bne(condition_met, cr0);
......@@ -2900,11 +2886,9 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
}
}
void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
void MacroAssembler::ResetRoundingMode() {
void TurboAssembler::ResetRoundingMode() {
mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
}
......@@ -3027,16 +3011,15 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// New MacroAssembler Interfaces added for PPC
//
////////////////////////////////////////////////////////////////////////////////
void MacroAssembler::LoadIntLiteral(Register dst, int value) {
void TurboAssembler::LoadIntLiteral(Register dst, int value) {
mov(dst, Operand(value));
}
void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
mov(dst, Operand(smi));
}
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
Register scratch) {
if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
!(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) {
......@@ -3085,8 +3068,7 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
Register scratch) {
// sign-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
......@@ -3111,8 +3093,7 @@ void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
Register scratch) {
// zero-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
......@@ -3137,8 +3118,7 @@ void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
Register src_hi,
#endif
......@@ -3164,7 +3144,7 @@ void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
#if V8_TARGET_ARCH_PPC64
void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
Register src_hi,
Register src_lo,
Register scratch) {
......@@ -3184,8 +3164,7 @@ void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
}
#endif
void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
......@@ -3204,8 +3183,7 @@ void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
......@@ -3224,8 +3202,7 @@ void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
mffprwz(dst, src);
......@@ -3240,8 +3217,7 @@ void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
mffprd(dst, src);
......@@ -3257,8 +3233,7 @@ void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovDoubleToInt64(
void TurboAssembler::MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
Register dst_hi,
#endif
......@@ -3282,8 +3257,7 @@ void MacroAssembler::MovDoubleToInt64(
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
subi(sp, sp, Operand(kFloatSize));
stw(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
......@@ -3291,8 +3265,7 @@ void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
addi(sp, sp, Operand(kFloatSize));
}
void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
subi(sp, sp, Operand(kFloatSize));
stfs(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
......@@ -3300,8 +3273,7 @@ void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kFloatSize));
}
void MacroAssembler::Add(Register dst, Register src, intptr_t value,
void TurboAssembler::Add(Register dst, Register src, intptr_t value,
Register scratch) {
if (is_int16(value)) {
addi(dst, src, Operand(value));
......@@ -3323,8 +3295,7 @@ void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
}
}
void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
void TurboAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
CRegister cr) {
intptr_t value = src2.immediate();
if (is_uint16(value)) {
......@@ -3335,8 +3306,7 @@ void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
}
}
void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
CRegister cr) {
intptr_t value = src2.immediate();
if (is_int16(value)) {
......@@ -3469,7 +3439,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
// Load a "pointer" sized value from the memory location
void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
......@@ -3496,7 +3466,7 @@ void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
}
}
void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
......@@ -3515,7 +3485,7 @@ void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
}
// Store a "pointer" sized value to the memory location
void MacroAssembler::StoreP(Register src, const MemOperand& mem,
void TurboAssembler::StoreP(Register src, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
......@@ -3547,7 +3517,7 @@ void MacroAssembler::StoreP(Register src, const MemOperand& mem,
}
}
void MacroAssembler::StorePU(Register src, const MemOperand& mem,
void TurboAssembler::StorePU(Register src, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
......@@ -3745,8 +3715,7 @@ void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
}
}
void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......@@ -3772,7 +3741,7 @@ void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
}
}
void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
void TurboAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......@@ -3785,8 +3754,8 @@ void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
}
}
void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......@@ -3798,7 +3767,7 @@ void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
}
}
void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......@@ -3811,8 +3780,8 @@ void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
}
}
void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
Register scratch) {
void TurboAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......@@ -3824,7 +3793,7 @@ void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
}
}
void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
void TurboAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......@@ -3837,8 +3806,8 @@ void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
}
}
void MacroAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
Register scratch) {
void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......
......@@ -105,80 +105,537 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
#define Div divw
#endif
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public Assembler {
class TurboAssembler : public Assembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
}
}
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
Isolate* isolate() const { return isolate_; }
Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |dst|
void ConvertIntToDouble(Register src, DoubleRegister dst);
// Converts the unsigned integer (untagged smi) in |src| to
// a double, storing the result to |dst|
void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
// Converts the integer (untagged smi) in |src| to
// a float, storing the result in |dst|
void ConvertIntToFloat(Register src, DoubleRegister dst);
// Converts the unsigned integer (untagged smi) in |src| to
// a float, storing the result in |dst|
void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
#if V8_TARGET_ARCH_PPC64
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif
// Converts the double_input to an integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
const Register dst_hi,
#endif
const Register dst, const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
#if V8_TARGET_ARCH_PPC64
// Converts the double_input to an unsigned integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToUnsignedInt64(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
#endif
// Activation support.
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
// Push a fixed frame, consisting of lr, fp, constant pool.
void PushCommonFrame(Register marker_reg = no_reg);
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type, Register base = no_reg,
int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function
void PushStandardFrame(Register function_reg);
// Restore caller's frame pointer and return address prior to being
// overwritten by tail call stack preparation.
void RestoreFrameStateForTailCall();
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
}
// These exist to provide portability between 32 and 64bit
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
void LoadDouble(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
// load a literal signed int value <value> to GPR <dst>
void LoadIntLiteral(Register dst, int value);
// load an SMI value <value> to GPR <dst>
void LoadSmiLiteral(Register dst, Smi* smi);
void LoadSingle(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void StoreSingle(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void StoreSingleU(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void Cmpli(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void Cmpwi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
// reset rounding mode to default (kRoundToNearest)
void ResetRoundingMode();
void Add(Register dst, Register src, intptr_t value, Register scratch);
void Push(Register src) { push(src); }
// Push a handle.
void Push(Handle<HeapObject> handle);
void Push(Smi* smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
StorePU(src2, MemOperand(sp, -2 * kPointerSize));
StoreP(src1, MemOperand(sp, kPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
StorePU(src3, MemOperand(sp, -3 * kPointerSize));
StoreP(src2, MemOperand(sp, kPointerSize));
StoreP(src1, MemOperand(sp, 2 * kPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
StorePU(src4, MemOperand(sp, -4 * kPointerSize));
StoreP(src3, MemOperand(sp, kPointerSize));
StoreP(src2, MemOperand(sp, 2 * kPointerSize));
StoreP(src1, MemOperand(sp, 3 * kPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
StorePU(src5, MemOperand(sp, -5 * kPointerSize));
StoreP(src4, MemOperand(sp, kPointerSize));
StoreP(src3, MemOperand(sp, 2 * kPointerSize));
StoreP(src2, MemOperand(sp, 3 * kPointerSize));
StoreP(src1, MemOperand(sp, 4 * kPointerSize));
}
void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
LoadP(src2, MemOperand(sp, 0));
LoadP(src1, MemOperand(sp, kPointerSize));
addi(sp, sp, Operand(2 * kPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
LoadP(src3, MemOperand(sp, 0));
LoadP(src2, MemOperand(sp, kPointerSize));
LoadP(src1, MemOperand(sp, 2 * kPointerSize));
addi(sp, sp, Operand(3 * kPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
LoadP(src4, MemOperand(sp, 0));
LoadP(src3, MemOperand(sp, kPointerSize));
LoadP(src2, MemOperand(sp, 2 * kPointerSize));
LoadP(src1, MemOperand(sp, 3 * kPointerSize));
addi(sp, sp, Operand(4 * kPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
Register src5) {
LoadP(src5, MemOperand(sp, 0));
LoadP(src4, MemOperand(sp, kPointerSize));
LoadP(src3, MemOperand(sp, 2 * kPointerSize));
LoadP(src2, MemOperand(sp, 3 * kPointerSize));
LoadP(src1, MemOperand(sp, 4 * kPointerSize));
addi(sp, sp, Operand(5 * kPointerSize));
}
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
// are word sized. If double arguments are used, this function assumes that
// all double arguments are stored before core registers; otherwise the
// correct alignment of the double values is not guaranteed.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
Register scratch);
void PrepareCallCFunction(int num_reg_arguments, Register scratch);
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1);
// There are two ways of passing double arguments on ARM, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
void MovToFloatParameter(DoubleRegister src);
void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
void MovToFloatResult(DoubleRegister src);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function, int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments);
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
// Print a message to stdout and abort execution.
void Abort(BailoutReason reason);
inline bool AllowThisStubCall(CodeStub* stub);
#if !V8_TARGET_ARCH_PPC64
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
#endif
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
void JumpToJSEntry(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Register target);
void CallJSEntry(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
// Return: branch to the address in the link register.
void Ret() { blr(); }
// Conditional return on |cond| evaluated against condition register field |cr|.
void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
// below the caller's sp.
template <class Descriptor>
// Not implemented on this port: hitting this at code-generation time aborts
// via UNIMPLEMENTED().  The DCHECK documents that it is only meaningful for
// descriptors that pass their last arguments on the stack.
void LoadParameterFromStack(
Register reg, typename Descriptor::ParameterIndices parameter_index,
int sp_to_ra_offset_in_words = 0) {
DCHECK(Descriptor::kPassLastArgsOnStack);
UNIMPLEMENTED();
}
void Call(Label* target);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
void Drop(Register count, Register scratch = r0);
// Drop |drop| pointer-sized stack slots, then return to the caller.
// NOTE: duplicate re-definitions of Ret() / Ret(Condition, CRegister) and a
// duplicate declaration of Call(Label*) were removed here -- they already
// appear earlier in this class, and a member function may not be redefined
// (nor redeclared) within its class definition.
void Ret(int drop) {
Drop(drop);
blr();
}
// If |src| is a NaN, store the canonical NaN into |dst|; otherwise copy the
// value unchanged.
void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
// In-place variant: canonicalizes |value| into itself.
void CanonicalizeNaN(const DoubleRegister value) {
CanonicalizeNaN(value, value);
}
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
// Move values between integer and floating point registers.
void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
Register scratch);
void MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
Register src_hi,
#endif
Register src);
#if V8_TARGET_ARCH_PPC64
void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
Register src_lo, Register scratch);
#endif
void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
void MovDoubleLowToInt(Register dst, DoubleRegister src);
void MovDoubleHighToInt(Register dst, DoubleRegister src);
void MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
Register dst_hi,
#endif
Register dst, DoubleRegister src);
void MovIntToFloat(DoubleRegister dst, Register src);
void MovFloatToInt(Register dst, DoubleRegister src);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
// NOTE: duplicate declarations of MultiPush / MultiPop / MultiPushDoubles /
// MultiPopDoubles were removed here -- they are already declared earlier in
// this class, and an in-class member redeclaration is ill-formed.
// Untag the Smi in |reg| in place.
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
// Untag the Smi in |src| into |dst| via an arithmetic right shift by
// kSmiShift; |rc| optionally records the result in the condition register.
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
ShiftRightArithImm(dst, src, kSmiShift, rc);
}
// ---------------------------------------------------------------------------
// Bit testing/extraction
//
// Bit numbering is such that the least significant bit is bit 0
// (for consistency between 32/64-bit).
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
// Extract consecutive bits (defined by rangeStart - rangeEnd) from src
// and, if !test, shift them into the least significant bits of dst.
// Extract bits [rangeStart:rangeEnd] of |src| into the least significant
// bits of |dst| (when |test| is set, only the condition codes matter).
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd, RCBit rc = LeaveRC,
bool test = false) {
DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
// Rotate left so that bit |rangeEnd| lands at bit 0, then mask to |width|.
int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
int width = rangeStart - rangeEnd + 1;
if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
// Prefer faster andi when applicable.
// (andi. always records to CR0 and takes a 16-bit immediate, hence the
// rc == SetRC and rangeStart < 16 restrictions on this fast path.)
andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
} else {
#if V8_TARGET_ARCH_PPC64
rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
#else
rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
rc);
#endif
}
}
// Single-bit convenience wrapper around ExtractBitRange.
inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
RCBit rc = LeaveRC, bool test = false) {
ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
}
// Extract consecutive bits (defined by mask) from src and place them
// into the least significant bits of dst.
// Extract the bits selected by the (contiguous) |mask| from |src| and place
// them into the least significant bits of |dst|.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC, bool test = false) {
// Scan down from the most significant bit to find the top of the run...
int start = kBitsPerPointer - 1;
int end;
uintptr_t bit = (1L << start);
while (bit && (mask & bit) == 0) {
start--;
bit >>= 1;
}
// ...then keep scanning while bits remain set to find the bottom of it.
end = start;
bit >>= 1;
while (bit && (mask & bit)) {
end--;
bit >>= 1;
}
// 1-bits in mask must be contiguous
DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
ExtractBitRange(dst, src, start, end, rc, test);
}
// Test single bit in value.  The SetRC flag means the result is recorded in
// the condition register; |scratch| receives the extracted bit.
inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
}
// Test consecutive bit range in value. Range is defined by mask
// (1-bits in the mask must be contiguous -- see ExtractBitMask).
inline void TestBitMask(Register value, uintptr_t mask,
Register scratch = r0) {
ExtractBitMask(scratch, value, mask, SetRC, true);
}
// Test consecutive bit range in value. Range is defined by
// rangeStart - rangeEnd.
inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
Register scratch = r0) {
ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
}
// Test the Smi tag bits of |value|; the result is recorded in cr0 (SetRC via
// TestBitRange).  eq means the tag bits are clear, i.e. the value is a Smi.
inline void TestIfSmi(Register value, Register scratch) {
TestBitRange(value, kSmiTagSize - 1, 0, scratch);
}
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
TestIfSmi(value, r0);
beq(smi_label, cr0); // branch if SMI
}
#if V8_TARGET_ARCH_PPC64
inline void TestIfInt32(Register value, Register scratch,
CRegister cr = cr7) {
// High bits must be identical to fit into a 32-bit integer: sign-extend the
// low word and compare against the original value.
extsw(scratch, value);
cmp(scratch, value, cr);
}
#else
inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
CRegister cr = cr7) {
// High bits must be identical to fit into a 32-bit integer: replicate the
// low word's sign bit and compare it against the high word.
srawi(scratch, lo_word, 31);
cmp(scratch, hi_word, cr);
}
#endif
// Overflow handling functions.
// Usage: call the appropriate arithmetic function and then call one of the
// flow control functions with the corresponding label.
// Compute dst = left + right, setting condition codes. dst may be same as
// either left or right (or a unique register). left and right must not be
// the same register.
void AddAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = r0);
void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
Register overflow_dst, Register scratch = r0);
// Compute dst = left - right, setting condition codes. dst may be same as
// either left or right (or a unique register). left and right must not be
// the same register.
void SubAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = r0);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
//
// Only public for the test code in test-code-stubs-arm.cc.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
void TruncateDoubleToIDelayed(Zone* zone, Register result,
DoubleRegister double_input);
// Call a code stub.
void CallStubDelayed(CodeStub* stub);
void LoadConstantPoolPointerRegister();
void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
// Abandon constant-pool emission for the current code object.
void AbortConstantPoolBuilding() {
#ifdef DEBUG
// Avoid DCHECK(!is_linked()) failure in ~Label()
bind(ConstantPoolPosition());
#endif
}
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool has_frame_ = false;
Isolate* const isolate_;
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
// below the caller's sp.
template <class Descriptor>
// Not implemented on this port: hitting this at code-generation time aborts
// via UNIMPLEMENTED().
void LoadParameterFromStack(
Register reg, typename Descriptor::ParameterIndices parameter_index,
int sp_to_ra_offset_in_words = 0) {
DCHECK(Descriptor::kPassLastArgsOnStack);
UNIMPLEMENTED();
}
// ---------------------------------------------------------------------------
// GC Support
......@@ -196,9 +653,7 @@ class MacroAssembler : public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
void JumpToJSEntry(Register target);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
......@@ -269,94 +724,9 @@ class MacroAssembler : public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
void Push(Register src) { push(src); }
// Push a handle.
void Push(Handle<HeapObject> handle);
void Push(Smi* smi);
void PushObject(Handle<Object> handle);
// Push two registers. Pushes leftmost register first (to highest address).
// (In each overload, StorePU both allocates the slots and stores the last
// register; the remaining registers go at positive offsets from the new sp.)
void Push(Register src1, Register src2) {
StorePU(src2, MemOperand(sp, -2 * kPointerSize));
StoreP(src1, MemOperand(sp, kPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
StorePU(src3, MemOperand(sp, -3 * kPointerSize));
StoreP(src2, MemOperand(sp, kPointerSize));
StoreP(src1, MemOperand(sp, 2 * kPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
StorePU(src4, MemOperand(sp, -4 * kPointerSize));
StoreP(src3, MemOperand(sp, kPointerSize));
StoreP(src2, MemOperand(sp, 2 * kPointerSize));
StoreP(src1, MemOperand(sp, 3 * kPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
StorePU(src5, MemOperand(sp, -5 * kPointerSize));
StoreP(src4, MemOperand(sp, kPointerSize));
StoreP(src3, MemOperand(sp, 2 * kPointerSize));
StoreP(src2, MemOperand(sp, 3 * kPointerSize));
StoreP(src1, MemOperand(sp, 4 * kPointerSize));
}
// Pop a single register from the stack.
void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
// All loads happen before the single sp adjustment at the end.
void Pop(Register src1, Register src2) {
LoadP(src2, MemOperand(sp, 0));
LoadP(src1, MemOperand(sp, kPointerSize));
addi(sp, sp, Operand(2 * kPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
LoadP(src3, MemOperand(sp, 0));
LoadP(src2, MemOperand(sp, kPointerSize));
LoadP(src1, MemOperand(sp, 2 * kPointerSize));
addi(sp, sp, Operand(3 * kPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
LoadP(src4, MemOperand(sp, 0));
LoadP(src3, MemOperand(sp, kPointerSize));
LoadP(src2, MemOperand(sp, 2 * kPointerSize));
LoadP(src1, MemOperand(sp, 3 * kPointerSize));
addi(sp, sp, Operand(4 * kPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
Register src5) {
LoadP(src5, MemOperand(sp, 0));
LoadP(src4, MemOperand(sp, kPointerSize));
LoadP(src3, MemOperand(sp, 2 * kPointerSize));
LoadP(src2, MemOperand(sp, 3 * kPointerSize));
LoadP(src1, MemOperand(sp, 4 * kPointerSize));
addi(sp, sp, Operand(5 * kPointerSize));
}
// Push a fixed frame, consisting of lr, fp, constant pool.
void PushCommonFrame(Register marker_reg = no_reg);
// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function
void PushStandardFrame(Register function_reg);
void PopCommonFrame(Register marker_reg = no_reg);
// Restore caller's frame pointer and return address prior to being
// overwritten by tail call stack preparation.
void RestoreFrameStateForTailCall();
void PushObject(Handle<Object> handle);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
......@@ -368,77 +738,18 @@ class MacroAssembler : public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Loads the constant pool pointer (kConstantPoolRegister).
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);
// Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
// from C.
// Does not handle errors.
void FlushICache(Register address, size_t size, Register scratch);
// If the value is a NaN, canonicalize the value else, do nothing.
void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
void CanonicalizeNaN(const DoubleRegister value) {
CanonicalizeNaN(value, value);
}
// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |dst|
void ConvertIntToDouble(Register src, DoubleRegister dst);
// Converts the unsigned integer (untagged smi) in |src| to
// a double, storing the result to |dst|
void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
// Converts the integer (untagged smi) in |src| to
// a float, storing the result in |dst|
void ConvertIntToFloat(Register src, DoubleRegister dst);
// Converts the unsigned integer (untagged smi) in |src| to
// a float, storing the result in |dst|
void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
#if V8_TARGET_ARCH_PPC64
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif
// Converts the double_input to an integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
const Register dst_hi,
#endif
const Register dst, const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
#if V8_TARGET_ARCH_PPC64
// Converts the double_input to an unsigned integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToUnsignedInt64(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
#endif
#if !V8_TARGET_ARCH_PPC64
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
#endif
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type, Register base = no_reg,
int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
......@@ -453,9 +764,6 @@ class MacroAssembler : public Assembler {
bool restore_context,
bool argument_count_is_length = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
......@@ -472,98 +780,35 @@ class MacroAssembler : public Assembler {
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
void LoadGlobalFunctionInitialMap(Register function, Register map,
Register scratch);
// Point kRootRegister at the isolate's roots array so that the root table
// can be addressed off a fixed register.
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
}
// ----------------------------------------------------------------
// new PPC macro-assembler interfaces that are slightly higher level
// than assembler-ppc and may generate variable length sequences
// load a literal signed int value <value> to GPR <dst>
void LoadIntLiteral(Register dst, int value);
// load an SMI value <value> to GPR <dst>
void LoadSmiLiteral(Register dst, Smi* smi);
// load a literal double value <value> to FPR <result>
void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void LoadWordArith(Register dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
void LoadHalfWordArith(Register dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
void LoadByte(Register dst, const MemOperand& mem, Register scratch);
void StoreByte(Register src, const MemOperand& mem, Register scratch);
void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
Register scratch = no_reg);
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
void LoadDouble(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadSingle(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void StoreSingle(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void StoreSingleU(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
// Move values between integer and floating point registers.
void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
Register scratch);
void MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
Register src_hi,
#endif
Register src);
#if V8_TARGET_ARCH_PPC64
void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
Register src_lo, Register scratch);
#endif
void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
void MovDoubleLowToInt(Register dst, DoubleRegister src);
void MovDoubleHighToInt(Register dst, DoubleRegister src);
void MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
Register dst_hi,
#endif
Register dst, DoubleRegister src);
void MovIntToFloat(DoubleRegister dst, Register src);
void MovFloatToInt(Register dst, DoubleRegister src);
void LoadGlobalFunctionInitialMap(Register function, Register map,
Register scratch);
// ----------------------------------------------------------------
// new PPC macro-assembler interfaces that are slightly higher level
// than assembler-ppc and may generate variable length sequences
// load a literal double value <value> to FPR <result>
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void LoadWordArith(Register dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
void LoadHalfWordArith(Register dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
void LoadByte(Register dst, const MemOperand& mem, Register scratch);
void StoreByte(Register src, const MemOperand& mem, Register scratch);
void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
Register scratch = no_reg);
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void Add(Register dst, Register src, intptr_t value, Register scratch);
void Cmpi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void Cmpli(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void Cmpwi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void Cmplwi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
......@@ -579,17 +824,7 @@ class MacroAssembler : public Assembler {
void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
RCBit rc = LeaveRC);
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
// reset rounding mode to default (kRoundToNearest)
void ResetRoundingMode();
// These exist to provide portability between 32 and 64bit
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
// ---------------------------------------------------------------------------
// JavaScript invokes
......@@ -599,9 +834,6 @@ class MacroAssembler : public Assembler {
// Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
......@@ -828,20 +1060,6 @@ class MacroAssembler : public Assembler {
// Usage: call the appropriate arithmetic function and then call one of the
// flow control functions with the corresponding label.
// Compute dst = left + right, setting condition codes. dst may be same as
// either left or right (or a unique register). left and right must not be
// the same register.
void AddAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = r0);
void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
Register overflow_dst, Register scratch = r0);
// Compute dst = left - right, setting condition codes. dst may be same as
// either left or right (or a unique register). left and right must not be
// the same register.
void SubAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = r0);
// Branch on the overflow state left in cr0 by the *AndCheckForOverflow
// helpers above; lt appears to encode "overflow" -- confirm against their
// definitions.
void BranchOnOverflow(Label* label) { blt(label, cr0); }
void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
......@@ -853,17 +1071,16 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub,
Condition cond = al);
void CallStubDelayed(CodeStub* stub);
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
void CallJSEntry(Register target);
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = al);
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
......@@ -890,56 +1107,12 @@ class MacroAssembler : public Assembler {
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
// are word sized. If double arguments are used, this function assumes that
// all double arguments are stored before core registers; otherwise the
// correct alignment of the double values is not guaranteed.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
Register scratch);
void PrepareCallCFunction(int num_reg_arguments, Register scratch);
// There are two ways of passing double arguments on ARM, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
void MovToFloatParameter(DoubleRegister src);
void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
void MovToFloatResult(DoubleRegister src);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function, int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments);
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
......@@ -954,24 +1127,6 @@ class MacroAssembler : public Assembler {
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
// Print a message to stdout and abort execution.
void Abort(BailoutReason reason);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// Number utilities
......@@ -991,81 +1146,7 @@ class MacroAssembler : public Assembler {
Label* zero_and_neg,
Label* not_power_of_two);
// ---------------------------------------------------------------------------
// Bit testing/extraction
//
// Bit numbering is such that the least significant bit is bit 0
// (for consistency between 32/64-bit).
// Extract consecutive bits (defined by rangeStart - rangeEnd) from src
// and, if !test, shift them into the least significant bits of dst.
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd, RCBit rc = LeaveRC,
bool test = false) {
DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
int width = rangeStart - rangeEnd + 1;
if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
// Prefer faster andi when applicable.
andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
} else {
#if V8_TARGET_ARCH_PPC64
rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
#else
rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
rc);
#endif
}
}
inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
RCBit rc = LeaveRC, bool test = false) {
ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
}
// Extract consecutive bits (defined by mask) from src and place them
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC, bool test = false) {
int start = kBitsPerPointer - 1;
int end;
uintptr_t bit = (1L << start);
while (bit && (mask & bit) == 0) {
start--;
bit >>= 1;
}
end = start;
bit >>= 1;
while (bit && (mask & bit)) {
end--;
bit >>= 1;
}
// 1-bits in mask must be contiguous
DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
ExtractBitRange(dst, src, start, end, rc, test);
}
// Test single bit in value.
inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
}
// Test consecutive bit range in value. Range is defined by
// rangeStart - rangeEnd.
inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
Register scratch = r0) {
ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
}
// Test consecutive bit range in value. Range is defined by mask.
inline void TestBitMask(Register value, uintptr_t mask,
Register scratch = r0) {
ExtractBitMask(scratch, value, mask, SetRC, true);
}
// ---------------------------------------------------------------------------
......@@ -1104,11 +1185,6 @@ class MacroAssembler : public Assembler {
bne(not_smi_label, cr0);
}
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
ShiftRightArithImm(dst, src, kSmiShift, rc);
}
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
......@@ -1179,9 +1255,6 @@ class MacroAssembler : public Assembler {
// Souce and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
inline void TestIfSmi(Register value, Register scratch) {
TestBitRange(value, kSmiTagSize - 1, 0, scratch);
}
inline void TestIfPositiveSmi(Register value, Register scratch) {
#if V8_TARGET_ARCH_PPC64
......@@ -1192,11 +1265,6 @@ class MacroAssembler : public Assembler {
#endif
}
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
TestIfSmi(value, r0);
beq(smi_label, cr0); // branch if SMI
}
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value, r0);
......@@ -1212,21 +1280,6 @@ class MacroAssembler : public Assembler {
void AssertSmi(Register object);
#if V8_TARGET_ARCH_PPC64
inline void TestIfInt32(Register value, Register scratch,
CRegister cr = cr7) {
// High bits must be identical to fit into an 32-bit integer
extsw(scratch, value);
cmp(scratch, value, cr);
}
#else
inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
CRegister cr = cr7) {
// High bits must be identical to fit into an 32-bit integer
srawi(scratch, lo_word, 31);
cmp(scratch, hi_word, cr);
}
#endif
#if V8_TARGET_ARCH_PPC64
// Ensure it is permissable to read/write int value directly from
......@@ -1356,11 +1409,6 @@ class MacroAssembler : public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
// Activation support.
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
......@@ -1380,28 +1428,9 @@ class MacroAssembler : public Assembler {
Register scratch2_reg,
Label* no_memento_found);
// Loads the constant pool pointer (kConstantPoolRegister).
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);
void LoadConstantPoolPointerRegister();
void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
void AbortConstantPoolBuilding() {
#ifdef DEBUG
// Avoid DCHECK(!is_linked()) failure in ~Label()
bind(ConstantPoolPosition());
#endif
}
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
......@@ -1426,10 +1455,6 @@ class MacroAssembler : public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool has_frame_;
Isolate* isolate_;
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
......
......@@ -310,7 +310,7 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
Operand::Operand(Handle<HeapObject> handle) {
AllowHandleDereference using_location;
rm_ = no_reg;
value_.immediate = reinterpret_cast<intptr_t>(handle.location());
value_.immediate = reinterpret_cast<intptr_t>(handle.address());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
......
......@@ -336,6 +336,8 @@ class Operand BASE_EMBEDDED {
return is_heap_object_request_;
}
RelocInfo::Mode rmode() const { return rmode_; }
private:
Register rm_;
union Value {
......
......@@ -2248,10 +2248,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
if (tasm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(tasm,
#if V8_TARGET_ARCH_S390X
40);
#elif V8_HOST_ARCH_S390
......@@ -2259,10 +2259,10 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
#else
32);
#endif
__ CleanseP(r14);
__ Push(r14, ip);
__ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ Pop(r14, ip);
tasm->CleanseP(r14);
tasm->Push(r14, ip);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->Pop(r14, ip);
}
}
......
......@@ -22,23 +22,16 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, size),
has_frame_(false),
isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate_->heap()->undefined_value(), isolate_);
}
}
: TurboAssembler(isolate, buffer, size, create_code_object) {}
void MacroAssembler::Jump(Register target) { b(target); }
void TurboAssembler::Jump(Register target) { b(target); }
void MacroAssembler::JumpToJSEntry(Register target) {
Move(ip, target);
Jump(ip);
}
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, CRegister) {
Label skip;
......@@ -52,21 +45,21 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
bind(&skip);
}
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
CRegister cr) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
}
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
jump(code, rmode, cond);
}
int MacroAssembler::CallSize(Register target) { return 2; } // BASR
int TurboAssembler::CallSize(Register target) { return 2; } // BASR
void MacroAssembler::Call(Register target) {
void TurboAssembler::Call(Register target) {
Label start;
bind(&start);
......@@ -81,7 +74,7 @@ void MacroAssembler::CallJSEntry(Register target) {
Call(target);
}
int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
// S390 Assembler::move sequence is IILF / IIHF
int size;
......@@ -106,7 +99,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
return size;
}
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(cond == al);
......@@ -124,12 +117,12 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
return 6; // BRASL
}
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
......@@ -144,7 +137,7 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::Drop(int count) {
void TurboAssembler::Drop(int count) {
if (count > 0) {
int total = count * kPointerSize;
if (is_uint12(total)) {
......@@ -157,19 +150,19 @@ void MacroAssembler::Drop(int count) {
}
}
void MacroAssembler::Drop(Register count, Register scratch) {
void TurboAssembler::Drop(Register count, Register scratch) {
ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
AddP(sp, sp, scratch);
}
void MacroAssembler::Call(Label* target) { b(r14, target); }
void TurboAssembler::Call(Label* target) { b(r14, target); }
void MacroAssembler::Push(Handle<HeapObject> handle) {
void TurboAssembler::Push(Handle<HeapObject> handle) {
mov(r0, Operand(handle));
push(r0);
}
void MacroAssembler::Push(Smi* smi) {
void TurboAssembler::Push(Smi* smi) {
mov(r0, Operand(smi));
push(r0);
}
......@@ -182,23 +175,23 @@ void MacroAssembler::PushObject(Handle<Object> handle) {
}
}
void MacroAssembler::Move(Register dst, Handle<HeapObject> value) {
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
mov(dst, Operand(value));
}
void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
if (!dst.is(src)) {
LoadRR(dst, src);
}
}
void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
if (!dst.is(src)) {
ldr(dst, src);
}
}
void MacroAssembler::MultiPush(RegList regs, Register location) {
void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
......@@ -211,7 +204,7 @@ void MacroAssembler::MultiPush(RegList regs, Register location) {
}
}
void MacroAssembler::MultiPop(RegList regs, Register location) {
void TurboAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
......@@ -223,7 +216,7 @@ void MacroAssembler::MultiPop(RegList regs, Register location) {
AddP(location, location, Operand(stack_offset));
}
void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
int16_t num_to_push = NumberOfBitsSet(dregs);
int16_t stack_offset = num_to_push * kDoubleSize;
......@@ -237,7 +230,7 @@ void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
}
}
void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
......@@ -250,7 +243,7 @@ void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
AddP(location, location, Operand(stack_offset));
}
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition) {
LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}
......@@ -529,7 +522,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
void MacroAssembler::PushCommonFrame(Register marker_reg) {
void TurboAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
CleanseP(r14);
if (marker_reg.is_valid()) {
......@@ -542,7 +535,7 @@ void MacroAssembler::PushCommonFrame(Register marker_reg) {
la(fp, MemOperand(sp, fp_delta * kPointerSize));
}
void MacroAssembler::PopCommonFrame(Register marker_reg) {
void TurboAssembler::PopCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Pop(r14, fp, marker_reg);
} else {
......@@ -550,7 +543,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) {
}
}
void MacroAssembler::PushStandardFrame(Register function_reg) {
void TurboAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
CleanseP(r14);
if (function_reg.is_valid()) {
......@@ -563,7 +556,7 @@ void MacroAssembler::PushStandardFrame(Register function_reg) {
la(fp, MemOperand(sp, fp_delta * kPointerSize));
}
void MacroAssembler::RestoreFrameStateForTailCall() {
void TurboAssembler::RestoreFrameStateForTailCall() {
// if (FLAG_enable_embedded_constant_pool) {
// LoadP(kConstantPoolRegister,
// MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
......@@ -635,7 +628,7 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
return MemOperand(sp, doubles_size + register_offset);
}
void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN
if (!dst.is(src)) ldr(dst, src);
......@@ -643,11 +636,11 @@ void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
sdbr(dst, kDoubleRegZero);
}
void MacroAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
cdfbr(dst, src);
}
void MacroAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
Register src) {
if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
cdlfbr(Condition(5), Condition(0), dst, src);
......@@ -659,36 +652,36 @@ void MacroAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
}
}
void MacroAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
cefbr(Condition(4), dst, src);
}
void MacroAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
Register src) {
celfbr(Condition(4), Condition(0), dst, src);
}
void MacroAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
Register src) {
cegbr(double_dst, src);
}
void MacroAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
Register src) {
cdgbr(double_dst, src);
}
void MacroAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
Register src) {
celgbr(Condition(0), Condition(0), double_dst, src);
}
void MacroAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
Register src) {
cdlgbr(Condition(0), Condition(0), double_dst, src);
}
void MacroAssembler::ConvertFloat32ToInt64(const Register dst,
void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
......@@ -712,7 +705,7 @@ void MacroAssembler::ConvertFloat32ToInt64(const Register dst,
cgebr(m, dst, double_input);
}
void MacroAssembler::ConvertDoubleToInt64(const Register dst,
void TurboAssembler::ConvertDoubleToInt64(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
......@@ -736,7 +729,7 @@ void MacroAssembler::ConvertDoubleToInt64(const Register dst,
cgdbr(m, dst, double_input);
}
void MacroAssembler::ConvertDoubleToInt32(const Register dst,
void TurboAssembler::ConvertDoubleToInt32(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
......@@ -760,7 +753,7 @@ void MacroAssembler::ConvertDoubleToInt32(const Register dst,
cfdbr(m, dst, double_input);
}
void MacroAssembler::ConvertFloat32ToInt32(const Register result,
void TurboAssembler::ConvertFloat32ToInt32(const Register result,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
......@@ -784,7 +777,7 @@ void MacroAssembler::ConvertFloat32ToInt32(const Register result,
cfebr(m, result, double_input);
}
void MacroAssembler::ConvertFloat32ToUnsignedInt32(
void TurboAssembler::ConvertFloat32ToUnsignedInt32(
const Register result, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
......@@ -808,7 +801,7 @@ void MacroAssembler::ConvertFloat32ToUnsignedInt32(
clfebr(m, Condition(0), result, double_input);
}
void MacroAssembler::ConvertFloat32ToUnsignedInt64(
void TurboAssembler::ConvertFloat32ToUnsignedInt64(
const Register result, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
......@@ -832,7 +825,7 @@ void MacroAssembler::ConvertFloat32ToUnsignedInt64(
clgebr(m, Condition(0), result, double_input);
}
void MacroAssembler::ConvertDoubleToUnsignedInt64(
void TurboAssembler::ConvertDoubleToUnsignedInt64(
const Register dst, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
......@@ -856,7 +849,7 @@ void MacroAssembler::ConvertDoubleToUnsignedInt64(
clgdbr(m, Condition(0), dst, double_input);
}
void MacroAssembler::ConvertDoubleToUnsignedInt32(
void TurboAssembler::ConvertDoubleToUnsignedInt32(
const Register dst, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
......@@ -881,7 +874,7 @@ void MacroAssembler::ConvertDoubleToUnsignedInt32(
}
#if !V8_TARGET_ARCH_S390X
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
LoadRR(r0, src_high);
......@@ -891,7 +884,7 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
LoadRR(r0, src_high);
......@@ -901,7 +894,7 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
LoadRR(r0, src_high);
......@@ -911,7 +904,7 @@ void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
LoadRR(r0, src_high);
......@@ -921,7 +914,7 @@ void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
LoadRR(r0, src_high);
......@@ -931,7 +924,7 @@ void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
LoadRR(r0, src_high);
......@@ -942,15 +935,15 @@ void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
}
#endif
void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
lgdr(dst, src);
}
void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
ldgr(dst, src);
}
void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
......@@ -959,7 +952,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
void MacroAssembler::Prologue(bool code_pre_aging, Register base,
void TurboAssembler::Prologue(bool code_pre_aging, Register base,
int prologue_offset) {
DCHECK(!base.is(no_reg));
{
......@@ -996,7 +989,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// We create a stack frame with:
// Return Addr <-- old sp
......@@ -1014,7 +1007,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
}
}
int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer.
LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
......@@ -1110,7 +1103,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Allocate and align the frame preparing for calling the runtime
// function.
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
const int frame_alignment = TurboAssembler::ActivationFrameAlignment();
if (frame_alignment > 0) {
DCHECK(frame_alignment == 8);
ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
......@@ -1124,7 +1117,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
int MacroAssembler::ActivationFrameAlignment() {
int TurboAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
......@@ -1180,15 +1173,15 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, d0);
}
void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d0);
}
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
......@@ -1801,7 +1794,7 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
void TurboAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
call(stub);
}
......@@ -1810,7 +1803,7 @@ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
......@@ -1843,6 +1836,36 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
bind(&done);
}
void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
DoubleRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
// If we fell through then inline version didn't succeed - call stub instead.
push(r14);
// Put input on stack.
lay(sp, MemOperand(sp, -kDoubleSize));
StoreDouble(double_input, MemOperand(sp));
CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
la(sp, MemOperand(sp, kDoubleSize));
pop(r14);
bind(&done);
}
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
ConvertDoubleToInt64(result, double_input);
// Test for overflow
TestIfInt32(result);
beq(done);
}
void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
int num_least_bits) {
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
......@@ -1860,12 +1883,18 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
AndP(dst, src, Operand((1 << num_least_bits) - 1));
}
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
mov(r2, Operand(f->nargs));
mov(r3, Operand(ExternalReference(f, isolate())));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
CallStubDelayed(new (zone) CEntryStub(nullptr,
#if V8_TARGET_ARCH_S390X
f->result_size,
#else
1,
#endif
save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
......@@ -1952,12 +1981,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
void MacroAssembler::Assert(Condition cond, BailoutReason reason,
void TurboAssembler::Assert(Condition cond, BailoutReason reason,
CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
Label L;
b(cond, &L);
Abort(reason);
......@@ -1965,7 +1994,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
bind(&L);
}
void MacroAssembler::Abort(BailoutReason reason) {
void TurboAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
......@@ -2256,8 +2285,7 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required,
MutableMode mode) {
Label* gc_required, MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
......@@ -2345,7 +2373,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
static const int kRegisterPassedArguments = 5;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (num_double_arguments > DoubleRegister::kNumRegisters) {
......@@ -2399,7 +2427,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
SmiUntag(index, index);
}
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
......@@ -2420,16 +2448,16 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
}
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (src2.is(d0)) {
DCHECK(!src1.is(d2));
......@@ -2441,28 +2469,28 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
void MacroAssembler::CallCFunction(ExternalReference function,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
mov(ip, Operand(function));
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(ExternalReference function,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
......@@ -2490,7 +2518,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
}
}
void MacroAssembler::CheckPageFlag(
void TurboAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
int mask, Condition cc, Label* condition_met) {
......@@ -2859,7 +2887,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
void MacroAssembler::mov(Register dst, const Operand& src) {
void TurboAssembler::mov(Register dst, const Operand& src) {
#if V8_TARGET_ARCH_S390X
int64_t value;
#else
......@@ -2872,9 +2900,9 @@ void MacroAssembler::mov(Register dst, const Operand& src) {
value = src.immediate();
}
if (src.rmode_ != kRelocInfo_NONEPTR) {
if (src.rmode() != kRelocInfo_NONEPTR) {
// some form of relocation needed
RecordRelocInfo(src.rmode_, value);
RecordRelocInfo(src.rmode(), value);
}
#if V8_TARGET_ARCH_S390X
......@@ -2888,7 +2916,7 @@ void MacroAssembler::mov(Register dst, const Operand& src) {
#endif
}
void MacroAssembler::Mul32(Register dst, const MemOperand& src1) {
void TurboAssembler::Mul32(Register dst, const MemOperand& src1) {
if (is_uint12(src1.offset())) {
ms(dst, src1);
} else if (is_int20(src1.offset())) {
......@@ -2898,9 +2926,9 @@ void MacroAssembler::Mul32(Register dst, const MemOperand& src1) {
}
}
void MacroAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
void TurboAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
void MacroAssembler::Mul32(Register dst, const Operand& src1) {
void TurboAssembler::Mul32(Register dst, const Operand& src1) {
msfi(dst, src1);
}
......@@ -2911,19 +2939,19 @@ void MacroAssembler::Mul32(Register dst, const Operand& src1) {
srlg(dst, dst, Operand(32)); \
}
void MacroAssembler::MulHigh32(Register dst, Register src1,
void TurboAssembler::MulHigh32(Register dst, Register src1,
const MemOperand& src2) {
Generate_MulHigh32(msgf);
}
void MacroAssembler::MulHigh32(Register dst, Register src1, Register src2) {
void TurboAssembler::MulHigh32(Register dst, Register src1, Register src2) {
if (dst.is(src2)) {
std::swap(src1, src2);
}
Generate_MulHigh32(msgfr);
}
void MacroAssembler::MulHigh32(Register dst, Register src1,
void TurboAssembler::MulHigh32(Register dst, Register src1,
const Operand& src2) {
Generate_MulHigh32(msgfi);
}
......@@ -2937,16 +2965,16 @@ void MacroAssembler::MulHigh32(Register dst, Register src1,
LoadlW(dst, r0); \
}
void MacroAssembler::MulHighU32(Register dst, Register src1,
void TurboAssembler::MulHighU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_MulHighU32(ml);
}
void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) {
void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) {
Generate_MulHighU32(mlr);
}
void MacroAssembler::MulHighU32(Register dst, Register src1,
void TurboAssembler::MulHighU32(Register dst, Register src1,
const Operand& src2) {
USE(dst);
USE(src1);
......@@ -2963,7 +2991,7 @@ void MacroAssembler::MulHighU32(Register dst, Register src1,
cgfr(dst, dst); \
}
void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const MemOperand& src2) {
Register result = dst;
if (src2.rx().is(dst) || src2.rb().is(dst)) dst = r0;
......@@ -2971,7 +2999,7 @@ void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
if (!result.is(dst)) llgfr(result, dst);
}
void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Register src2) {
if (dst.is(src2)) {
std::swap(src1, src2);
......@@ -2979,14 +3007,14 @@ void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Generate_Mul32WithOverflowIfCCUnequal(msgfr);
}
void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const Operand& src2) {
Generate_Mul32WithOverflowIfCCUnequal(msgfi);
}
#undef Generate_Mul32WithOverflowIfCCUnequal
void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
void TurboAssembler::Mul64(Register dst, const MemOperand& src1) {
if (is_int20(src1.offset())) {
msg(dst, src1);
} else {
......@@ -2994,13 +3022,13 @@ void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
}
}
void MacroAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
void TurboAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
void MacroAssembler::Mul64(Register dst, const Operand& src1) {
void TurboAssembler::Mul64(Register dst, const Operand& src1) {
msgfi(dst, src1);
}
void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
void TurboAssembler::Mul(Register dst, Register src1, Register src2) {
if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
MulPWithCondition(dst, src1, src2);
} else {
......@@ -3015,7 +3043,7 @@ void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
}
}
void MacroAssembler::DivP(Register dividend, Register divider) {
void TurboAssembler::DivP(Register dividend, Register divider) {
// have to make sure the src and dst are reg pairs
DCHECK(dividend.code() % 2 == 0);
#if V8_TARGET_ARCH_S390X
......@@ -3032,12 +3060,12 @@ void MacroAssembler::DivP(Register dividend, Register divider) {
LoadlW(dst, r1); \
}
void MacroAssembler::Div32(Register dst, Register src1,
void TurboAssembler::Div32(Register dst, Register src1,
const MemOperand& src2) {
Generate_Div32(dsgf);
}
void MacroAssembler::Div32(Register dst, Register src1, Register src2) {
void TurboAssembler::Div32(Register dst, Register src1, Register src2) {
Generate_Div32(dsgfr);
}
......@@ -3051,12 +3079,12 @@ void MacroAssembler::Div32(Register dst, Register src1, Register src2) {
LoadlW(dst, r1); \
}
void MacroAssembler::DivU32(Register dst, Register src1,
void TurboAssembler::DivU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_DivU32(dl);
}
void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
Generate_DivU32(dlr);
}
......@@ -3069,12 +3097,12 @@ void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
lgr(dst, r1); \
}
void MacroAssembler::Div64(Register dst, Register src1,
void TurboAssembler::Div64(Register dst, Register src1,
const MemOperand& src2) {
Generate_Div64(dsg);
}
void MacroAssembler::Div64(Register dst, Register src1, Register src2) {
void TurboAssembler::Div64(Register dst, Register src1, Register src2) {
Generate_Div64(dsgr);
}
......@@ -3088,12 +3116,12 @@ void MacroAssembler::Div64(Register dst, Register src1, Register src2) {
lgr(dst, r1); \
}
void MacroAssembler::DivU64(Register dst, Register src1,
void TurboAssembler::DivU64(Register dst, Register src1,
const MemOperand& src2) {
Generate_DivU64(dlg);
}
void MacroAssembler::DivU64(Register dst, Register src1, Register src2) {
void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
Generate_DivU64(dlgr);
}
......@@ -3106,12 +3134,12 @@ void MacroAssembler::DivU64(Register dst, Register src1, Register src2) {
LoadlW(dst, r0); \
}
void MacroAssembler::Mod32(Register dst, Register src1,
void TurboAssembler::Mod32(Register dst, Register src1,
const MemOperand& src2) {
Generate_Mod32(dsgf);
}
void MacroAssembler::Mod32(Register dst, Register src1, Register src2) {
void TurboAssembler::Mod32(Register dst, Register src1, Register src2) {
Generate_Mod32(dsgfr);
}
......@@ -3125,12 +3153,12 @@ void MacroAssembler::Mod32(Register dst, Register src1, Register src2) {
LoadlW(dst, r0); \
}
void MacroAssembler::ModU32(Register dst, Register src1,
void TurboAssembler::ModU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_ModU32(dl);
}
void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
Generate_ModU32(dlr);
}
......@@ -3143,12 +3171,12 @@ void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
lgr(dst, r0); \
}
void MacroAssembler::Mod64(Register dst, Register src1,
void TurboAssembler::Mod64(Register dst, Register src1,
const MemOperand& src2) {
Generate_Mod64(dsg);
}
void MacroAssembler::Mod64(Register dst, Register src1, Register src2) {
void TurboAssembler::Mod64(Register dst, Register src1, Register src2) {
Generate_Mod64(dsgr);
}
......@@ -3162,18 +3190,18 @@ void MacroAssembler::Mod64(Register dst, Register src1, Register src2) {
lgr(dst, r0); \
}
void MacroAssembler::ModU64(Register dst, Register src1,
void TurboAssembler::ModU64(Register dst, Register src1,
const MemOperand& src2) {
Generate_ModU64(dlg);
}
void MacroAssembler::ModU64(Register dst, Register src1, Register src2) {
void TurboAssembler::ModU64(Register dst, Register src1, Register src2) {
Generate_ModU64(dlgr);
}
#undef Generate_ModU64
void MacroAssembler::MulP(Register dst, const Operand& opnd) {
void TurboAssembler::MulP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
msgfi(dst, opnd);
#else
......@@ -3181,7 +3209,7 @@ void MacroAssembler::MulP(Register dst, const Operand& opnd) {
#endif
}
void MacroAssembler::MulP(Register dst, Register src) {
void TurboAssembler::MulP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
msgr(dst, src);
#else
......@@ -3189,7 +3217,7 @@ void MacroAssembler::MulP(Register dst, Register src) {
#endif
}
void MacroAssembler::MulPWithCondition(Register dst, Register src1,
void TurboAssembler::MulPWithCondition(Register dst, Register src1,
Register src2) {
CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
#if V8_TARGET_ARCH_S390X
......@@ -3199,7 +3227,7 @@ void MacroAssembler::MulPWithCondition(Register dst, Register src1,
#endif
}
void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
void TurboAssembler::MulP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
if (is_uint16(opnd.offset())) {
ms(dst, opnd);
......@@ -3217,10 +3245,10 @@ void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
#endif
}
void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
sqdbr(result, input);
}
void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
if (is_uint12(input.offset())) {
sqdb(result, input);
} else {
......@@ -3233,7 +3261,7 @@ void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
//----------------------------------------------------------------------------
// Add 32-bit (Register dst = Register dst + Immediate opnd)
void MacroAssembler::Add32(Register dst, const Operand& opnd) {
void TurboAssembler::Add32(Register dst, const Operand& opnd) {
if (is_int16(opnd.immediate()))
ahi(dst, opnd);
else
......@@ -3241,13 +3269,13 @@ void MacroAssembler::Add32(Register dst, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register dst + Immediate opnd)
void MacroAssembler::Add32_RI(Register dst, const Operand& opnd) {
void TurboAssembler::Add32_RI(Register dst, const Operand& opnd) {
// Just a wrapper for above
Add32(dst, opnd);
}
// Add Pointer Size (Register dst = Register dst + Immediate opnd)
void MacroAssembler::AddP(Register dst, const Operand& opnd) {
void TurboAssembler::AddP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
if (is_int16(opnd.immediate()))
aghi(dst, opnd);
......@@ -3259,7 +3287,7 @@ void MacroAssembler::AddP(Register dst, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register src + Immediate opnd)
void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::Add32(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
ahik(dst, src, opnd);
......@@ -3271,14 +3299,14 @@ void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register src + Immediate opnd)
void MacroAssembler::Add32_RRI(Register dst, Register src,
void TurboAssembler::Add32_RRI(Register dst, Register src,
const Operand& opnd) {
// Just a wrapper for above
Add32(dst, src, opnd);
}
// Add Pointer Size (Register dst = Register src + Immediate opnd)
void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
AddPImm_RRI(dst, src, opnd);
......@@ -3290,16 +3318,16 @@ void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register dst + Register src)
void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }
void TurboAssembler::Add32(Register dst, Register src) { ar(dst, src); }
// Add Pointer Size (Register dst = Register dst + Register src)
void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
void TurboAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
// Add Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
void TurboAssembler::AddP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
agfr(dst, src);
#else
......@@ -3308,7 +3336,7 @@ void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
}
// Add 32-bit (Register dst = Register src1 + Register src2)
void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
// as AR is a smaller instruction
......@@ -3325,7 +3353,7 @@ void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
}
// Add Pointer Size (Register dst = Register src1 + Register src2)
void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
// as AR is a smaller instruction
......@@ -3346,7 +3374,7 @@ void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
// Register src2 (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
void TurboAssembler::AddP_ExtendSrc(Register dst, Register src1,
Register src2) {
#if V8_TARGET_ARCH_S390X
if (dst.is(src2)) {
......@@ -3363,7 +3391,7 @@ void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
}
// Add 32-bit (Register-Memory)
void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
void TurboAssembler::Add32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
a(dst, opnd);
......@@ -3372,7 +3400,7 @@ void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
}
// Add Pointer Size (Register-Memory)
void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
void TurboAssembler::AddP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
ag(dst, opnd);
......@@ -3385,7 +3413,7 @@ void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
// (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
void TurboAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
agf(dst, opnd);
......@@ -3395,7 +3423,7 @@ void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
}
// Add 32-bit (Memory - Immediate)
void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
void TurboAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
......@@ -3403,7 +3431,7 @@ void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
}
// Add Pointer-sized (Memory - Immediate)
void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
void TurboAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
......@@ -3419,7 +3447,7 @@ void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
//----------------------------------------------------------------------------
// Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
void TurboAssembler::AddLogicalWithCarry32(Register dst, Register src1,
Register src2) {
if (!dst.is(src2) && !dst.is(src1)) {
lr(dst, src1);
......@@ -3436,7 +3464,7 @@ void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
}
// Add Logical 32-bit (Register dst = Register src1 + Register src2)
void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
void TurboAssembler::AddLogical32(Register dst, Register src1, Register src2) {
if (!dst.is(src2) && !dst.is(src1)) {
lr(dst, src1);
alr(dst, src2);
......@@ -3452,12 +3480,12 @@ void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
}
// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
void TurboAssembler::AddLogical(Register dst, const Operand& imm) {
alfi(dst, imm);
}
// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
void TurboAssembler::AddLogicalP(Register dst, const Operand& imm) {
#ifdef V8_TARGET_ARCH_S390X
algfi(dst, imm);
#else
......@@ -3466,7 +3494,7 @@ void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
}
// Add Logical 32-bit (Register-Memory)
void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
void TurboAssembler::AddLogical(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
al_z(dst, opnd);
......@@ -3475,7 +3503,7 @@ void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
}
// Add Logical Pointer Size (Register-Memory)
void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
void TurboAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
alg(dst, opnd);
......@@ -3490,7 +3518,7 @@ void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
// Subtract Logical With Carry 32-bit (Register dst = Register src1 - Register
// src2)
void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
void TurboAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
Register src2) {
if (!dst.is(src2) && !dst.is(src1)) {
lr(dst, src1);
......@@ -3508,7 +3536,7 @@ void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
}
// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
if (!dst.is(src2) && !dst.is(src1)) {
lr(dst, src1);
slr(dst, src2);
......@@ -3525,36 +3553,36 @@ void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
}
// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
void MacroAssembler::Sub32(Register dst, const Operand& imm) {
void TurboAssembler::Sub32(Register dst, const Operand& imm) {
Add32(dst, Operand(-(imm.immediate())));
}
// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
void MacroAssembler::SubP(Register dst, const Operand& imm) {
void TurboAssembler::SubP(Register dst, const Operand& imm) {
AddP(dst, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register src - Immediate opnd)
void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
void TurboAssembler::Sub32(Register dst, Register src, const Operand& imm) {
Add32(dst, src, Operand(-(imm.immediate())));
}
// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
void TurboAssembler::SubP(Register dst, Register src, const Operand& imm) {
AddP(dst, src, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register dst - Register src)
void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
void TurboAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
// Subtract Pointer Size (Register dst = Register dst - Register src)
void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
void TurboAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
// Subtract Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
void TurboAssembler::SubP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
sgfr(dst, src);
#else
......@@ -3563,7 +3591,7 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
}
// Subtract 32-bit (Register = Register - Register)
void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srk(dst, src1, src2);
......@@ -3583,7 +3611,7 @@ void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
}
// Subtract Pointer Sized (Register = Register - Register)
void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
SubP_RRR(dst, src1, src2);
......@@ -3606,7 +3634,7 @@ void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
void TurboAssembler::SubP_ExtendSrc(Register dst, Register src1,
Register src2) {
#if V8_TARGET_ARCH_S390X
if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
......@@ -3625,7 +3653,7 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
}
// Subtract 32-bit (Register-Memory)
void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
void TurboAssembler::Sub32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
s(dst, opnd);
......@@ -3634,7 +3662,7 @@ void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
}
// Subtract Pointer Sized (Register - Memory)
void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
void TurboAssembler::SubP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
sg(dst, opnd);
#else
......@@ -3642,17 +3670,17 @@ void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
#endif
}
void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
sllg(r0, src, Operand(32));
ldgr(dst, r0);
}
void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
lgdr(dst, src);
srlg(dst, dst, Operand(32));
}
void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
void TurboAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
sgf(dst, opnd);
......@@ -3666,7 +3694,7 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
//----------------------------------------------------------------------------
// Subtract Logical 32-bit (Register - Memory)
void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
void TurboAssembler::SubLogical(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
sl(dst, opnd);
......@@ -3675,7 +3703,7 @@ void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
}
// Subtract Logical Pointer Sized (Register - Memory)
void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
void TurboAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
slgf(dst, opnd);
......@@ -3688,7 +3716,7 @@ void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
// (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
void TurboAssembler::SubLogicalP_ExtendSrc(Register dst,
const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
......@@ -3703,13 +3731,13 @@ void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
//----------------------------------------------------------------------------
// AND 32-bit - dst = dst & src
void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
void TurboAssembler::And(Register dst, Register src) { nr(dst, src); }
// AND Pointer Size - dst = dst & src
void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
void TurboAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
// Non-clobbering AND 32-bit - dst = src1 & src1
void MacroAssembler::And(Register dst, Register src1, Register src2) {
void TurboAssembler::And(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate XR/XGR, over the non clobbering XRK/XRK
// as XR is a smaller instruction
......@@ -3726,7 +3754,7 @@ void MacroAssembler::And(Register dst, Register src1, Register src2) {
}
// Non-clobbering AND pointer size - dst = src1 & src1
void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate XR/XGR, over the non clobbering XRK/XRK
// as XR is a smaller instruction
......@@ -3743,7 +3771,7 @@ void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
}
// AND 32-bit (Reg - Mem)
void MacroAssembler::And(Register dst, const MemOperand& opnd) {
void TurboAssembler::And(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
n(dst, opnd);
......@@ -3752,7 +3780,7 @@ void MacroAssembler::And(Register dst, const MemOperand& opnd) {
}
// AND Pointer Size (Reg - Mem)
void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
void TurboAssembler::AndP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
ng(dst, opnd);
......@@ -3762,10 +3790,10 @@ void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
}
// AND 32-bit - dst = dst & imm
void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
void TurboAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
// AND Pointer Size - dst = dst & imm
void MacroAssembler::AndP(Register dst, const Operand& opnd) {
void TurboAssembler::AndP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
if (value >> 32 != -1) {
......@@ -3779,13 +3807,13 @@ void MacroAssembler::AndP(Register dst, const Operand& opnd) {
}
// AND 32-bit - dst = src & imm
void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::And(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) lr(dst, src);
nilf(dst, opnd);
}
// AND Pointer Size - dst = src & imm
void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
// Try to exploit RISBG first
intptr_t value = opnd.immediate();
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
......@@ -3825,13 +3853,13 @@ void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
}
// OR 32-bit - dst = dst & src
void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); }
// OR Pointer Size - dst = dst & src
void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
void TurboAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
// Non-clobbering OR 32-bit - dst = src1 & src1
void MacroAssembler::Or(Register dst, Register src1, Register src2) {
void TurboAssembler::Or(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate XR/XGR, over the non clobbering XRK/XRK
// as XR is a smaller instruction
......@@ -3848,7 +3876,7 @@ void MacroAssembler::Or(Register dst, Register src1, Register src2) {
}
// Non-clobbering OR pointer size - dst = src1 & src1
void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate XR/XGR, over the non clobbering XRK/XRK
// as XR is a smaller instruction
......@@ -3865,7 +3893,7 @@ void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
}
// OR 32-bit (Reg - Mem)
void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
void TurboAssembler::Or(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
o(dst, opnd);
......@@ -3874,7 +3902,7 @@ void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
}
// OR Pointer Size (Reg - Mem)
void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
void TurboAssembler::OrP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
og(dst, opnd);
......@@ -3884,10 +3912,10 @@ void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
}
// OR 32-bit - dst = dst & imm
void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
void TurboAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
// OR Pointer Size - dst = dst & imm
void MacroAssembler::OrP(Register dst, const Operand& opnd) {
void TurboAssembler::OrP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
if (value >> 32 != 0) {
......@@ -3901,25 +3929,25 @@ void MacroAssembler::OrP(Register dst, const Operand& opnd) {
}
// OR 32-bit - dst = src & imm
void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) lr(dst, src);
oilf(dst, opnd);
}
// OR Pointer Size - dst = src & imm
void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) LoadRR(dst, src);
OrP(dst, opnd);
}
// XOR 32-bit - dst = dst & src
void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); }
// XOR Pointer Size - dst = dst & src
void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
void TurboAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
// Non-clobbering XOR 32-bit - dst = src1 & src1
void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate XR/XGR, over the non clobbering XRK/XRK
// as XR is a smaller instruction
......@@ -3936,7 +3964,7 @@ void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
}
// Non-clobbering XOR pointer size - dst = src1 & src1
void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate XR/XGR, over the non clobbering XRK/XRK
// as XR is a smaller instruction
......@@ -3953,7 +3981,7 @@ void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
}
// XOR 32-bit (Reg - Mem)
void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
void TurboAssembler::Xor(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
x(dst, opnd);
......@@ -3962,7 +3990,7 @@ void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
}
// XOR Pointer Size (Reg - Mem)
void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
void TurboAssembler::XorP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
xg(dst, opnd);
......@@ -3972,10 +4000,10 @@ void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
}
// XOR 32-bit - dst = dst & imm
void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
void TurboAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
// XOR Pointer Size - dst = dst & imm
void MacroAssembler::XorP(Register dst, const Operand& opnd) {
void TurboAssembler::XorP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
xihf(dst, Operand(value >> 32));
......@@ -3986,29 +4014,29 @@ void MacroAssembler::XorP(Register dst, const Operand& opnd) {
}
// XOR 32-bit - dst = src & imm
void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) lr(dst, src);
xilf(dst, opnd);
}
// XOR Pointer Size - dst = src & imm
void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) LoadRR(dst, src);
XorP(dst, opnd);
}
void MacroAssembler::Not32(Register dst, Register src) {
void TurboAssembler::Not32(Register dst, Register src) {
if (!src.is(no_reg) && !src.is(dst)) lr(dst, src);
xilf(dst, Operand(0xFFFFFFFF));
}
void MacroAssembler::Not64(Register dst, Register src) {
void TurboAssembler::Not64(Register dst, Register src) {
if (!src.is(no_reg) && !src.is(dst)) lgr(dst, src);
xihf(dst, Operand(0xFFFFFFFF));
xilf(dst, Operand(0xFFFFFFFF));
}
void MacroAssembler::NotP(Register dst, Register src) {
void TurboAssembler::NotP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
Not64(dst, src);
#else
......@@ -4017,7 +4045,7 @@ void MacroAssembler::NotP(Register dst, Register src) {
}
// works the same as mov
void MacroAssembler::Load(Register dst, const Operand& opnd) {
void TurboAssembler::Load(Register dst, const Operand& opnd) {
intptr_t value = opnd.immediate();
if (is_int16(value)) {
#if V8_TARGET_ARCH_S390X
......@@ -4046,7 +4074,7 @@ void MacroAssembler::Load(Register dst, const Operand& opnd) {
}
}
void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
void TurboAssembler::Load(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
lgf(dst, opnd); // 64<-32
......@@ -4059,7 +4087,7 @@ void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
#endif
}
void MacroAssembler::LoadPositiveP(Register result, Register input) {
void TurboAssembler::LoadPositiveP(Register result, Register input) {
#if V8_TARGET_ARCH_S390X
lpgr(result, input);
#else
......@@ -4067,7 +4095,7 @@ void MacroAssembler::LoadPositiveP(Register result, Register input) {
#endif
}
void MacroAssembler::LoadPositive32(Register result, Register input) {
void TurboAssembler::LoadPositive32(Register result, Register input) {
lpr(result, input);
lgfr(result, result);
}
......@@ -4077,10 +4105,10 @@ void MacroAssembler::LoadPositive32(Register result, Register input) {
//-----------------------------------------------------------------------------
// Compare 32-bit Register vs Register
void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
void TurboAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
// Compare Pointer Sized Register vs Register
void MacroAssembler::CmpP(Register src1, Register src2) {
void TurboAssembler::CmpP(Register src1, Register src2) {
#if V8_TARGET_ARCH_S390X
cgr(src1, src2);
#else
......@@ -4090,8 +4118,8 @@ void MacroAssembler::CmpP(Register src1, Register src2) {
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
if (opnd.rmode_ == kRelocInfo_NONEPTR) {
void TurboAssembler::Cmp32(Register dst, const Operand& opnd) {
if (opnd.rmode() == kRelocInfo_NONEPTR) {
intptr_t value = opnd.immediate();
if (is_int16(value))
chi(dst, opnd);
......@@ -4099,16 +4127,16 @@ void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
cfi(dst, opnd);
} else {
// Need to generate relocation record here
RecordRelocInfo(opnd.rmode_, opnd.immediate());
RecordRelocInfo(opnd.rmode(), opnd.immediate());
cfi(dst, opnd);
}
}
// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
void TurboAssembler::CmpP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
if (opnd.rmode_ == kRelocInfo_NONEPTR) {
if (opnd.rmode() == kRelocInfo_NONEPTR) {
cgfi(dst, opnd);
} else {
mov(r0, opnd); // Need to generate 64-bit relocation
......@@ -4120,7 +4148,7 @@ void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
}
// Compare 32-bit Register vs Memory
void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
void TurboAssembler::Cmp32(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
......@@ -4130,7 +4158,7 @@ void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
}
// Compare Pointer Size Register vs Memory
void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
void TurboAssembler::CmpP(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
......@@ -4145,10 +4173,10 @@ void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
//-----------------------------------------------------------------------------
// Compare Logical 32-bit Register vs Register
void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
void TurboAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
// Compare Logical Pointer Sized Register vs Register
void MacroAssembler::CmpLogicalP(Register dst, Register src) {
void TurboAssembler::CmpLogicalP(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
clgr(dst, src);
#else
......@@ -4157,12 +4185,12 @@ void MacroAssembler::CmpLogicalP(Register dst, Register src) {
}
// Compare Logical 32-bit Register vs Immediate
void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
void TurboAssembler::CmpLogical32(Register dst, const Operand& opnd) {
clfi(dst, opnd);
}
// Compare Logical Pointer Sized Register vs Immediate
void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
void TurboAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
clgfi(dst, opnd);
......@@ -4172,7 +4200,7 @@ void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
}
// Compare Logical 32-bit Register vs Memory
void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
void TurboAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
......@@ -4182,7 +4210,7 @@ void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
}
// Compare Logical Pointer Sized Register vs Memory
void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
void TurboAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
......@@ -4193,7 +4221,7 @@ void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
}
// Compare Logical Byte (Mem - Imm)
void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
void TurboAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
DCHECK(is_uint8(imm.immediate()));
if (is_uint12(mem.offset()))
cli(mem, imm);
......@@ -4201,7 +4229,7 @@ void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
cliy(mem, imm);
}
void MacroAssembler::Branch(Condition c, const Operand& opnd) {
void TurboAssembler::Branch(Condition c, const Operand& opnd) {
intptr_t value = opnd.immediate();
if (is_int16(value))
brc(c, opnd);
......@@ -4210,7 +4238,7 @@ void MacroAssembler::Branch(Condition c, const Operand& opnd) {
}
// Branch On Count. Decrement R1, and branch if R1 != 0.
void MacroAssembler::BranchOnCount(Register r1, Label* l) {
void TurboAssembler::BranchOnCount(Register r1, Label* l) {
int32_t offset = branch_offset(l);
if (is_int16(offset)) {
#if V8_TARGET_ARCH_S390X
......@@ -4224,11 +4252,11 @@ void MacroAssembler::BranchOnCount(Register r1, Label* l) {
}
}
void MacroAssembler::LoadIntLiteral(Register dst, int value) {
void TurboAssembler::LoadIntLiteral(Register dst, int value) {
Load(dst, Operand(value));
}
void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
DCHECK((value & 0xffffffff) == 0);
......@@ -4239,7 +4267,7 @@ void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
#endif
}
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
Register scratch) {
uint32_t hi_32 = value >> 32;
uint32_t lo_32 = static_cast<uint32_t>(value);
......@@ -4257,20 +4285,20 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
}
}
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
Register scratch) {
uint64_t int_val = bit_cast<uint64_t, double>(value);
LoadDoubleLiteral(result, int_val, scratch);
}
void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value,
Register scratch) {
uint64_t int_val = static_cast<uint64_t>(bit_cast<uint32_t, float>(value))
<< 32;
LoadDoubleLiteral(result, int_val, scratch);
}
void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
void TurboAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
......@@ -4284,7 +4312,7 @@ void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#endif
}
void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
void TurboAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
......@@ -4299,7 +4327,7 @@ void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
#endif
}
void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
void TurboAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
......@@ -4314,7 +4342,7 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
#endif
}
void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
......@@ -4329,7 +4357,7 @@ void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
#endif
}
void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
if (!dst.is(src)) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
......@@ -4341,7 +4369,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
}
// Load a "pointer" sized value from the memory location
void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
......@@ -4367,7 +4395,7 @@ void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
}
// Store a "pointer" sized value to the memory location
void MacroAssembler::StoreP(Register src, const MemOperand& mem,
void TurboAssembler::StoreP(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(!scratch.is(no_reg));
......@@ -4390,10 +4418,10 @@ void MacroAssembler::StoreP(Register src, const MemOperand& mem,
}
// Store a "pointer" sized constant to the memory location
void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);
DCHECK(opnd.rmode() == kRelocInfo_NONEPTR);
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
......@@ -4409,7 +4437,7 @@ void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
}
}
void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(mem.offset()));
......@@ -4424,7 +4452,7 @@ void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
#endif
}
void MacroAssembler::StoreMultipleP(Register src1, Register src2,
void TurboAssembler::StoreMultipleP(Register src1, Register src2,
const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(mem.offset()));
......@@ -4439,7 +4467,7 @@ void MacroAssembler::StoreMultipleP(Register src1, Register src2,
#endif
}
void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
const MemOperand& mem) {
if (is_uint12(mem.offset())) {
lm(dst1, dst2, mem);
......@@ -4449,7 +4477,7 @@ void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
}
}
void MacroAssembler::StoreMultipleW(Register src1, Register src2,
void TurboAssembler::StoreMultipleW(Register src1, Register src2,
const MemOperand& mem) {
if (is_uint12(mem.offset())) {
stm(src1, src2, mem);
......@@ -4460,7 +4488,7 @@ void MacroAssembler::StoreMultipleW(Register src1, Register src2,
}
// Load 32-bits and sign extend if necessary.
void MacroAssembler::LoadW(Register dst, Register src) {
void TurboAssembler::LoadW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgfr(dst, src);
#else
......@@ -4469,7 +4497,7 @@ void MacroAssembler::LoadW(Register dst, Register src) {
}
// Load 32-bits and sign extend if necessary.
void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
void TurboAssembler::LoadW(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
......@@ -4495,7 +4523,7 @@ void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
}
// Load 32-bits and zero extend if necessary.
void MacroAssembler::LoadlW(Register dst, Register src) {
void TurboAssembler::LoadlW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgfr(dst, src);
#else
......@@ -4505,7 +4533,7 @@ void MacroAssembler::LoadlW(Register dst, Register src) {
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
void TurboAssembler::LoadlW(Register dst, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
......@@ -4546,7 +4574,7 @@ void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
#endif
}
void MacroAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
void TurboAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
llgh(dst, mem);
#else
......@@ -4554,7 +4582,7 @@ void MacroAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
#endif
}
void MacroAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
void TurboAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llghr(dst, src);
#else
......@@ -4562,7 +4590,7 @@ void MacroAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
#endif
}
void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
void TurboAssembler::LoadB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
#else
......@@ -4570,7 +4598,7 @@ void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
#endif
}
void MacroAssembler::LoadB(Register dst, Register src) {
void TurboAssembler::LoadB(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgbr(dst, src);
#else
......@@ -4578,7 +4606,7 @@ void MacroAssembler::LoadB(Register dst, Register src) {
#endif
}
void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
void TurboAssembler::LoadlB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
#else
......@@ -4586,7 +4614,7 @@ void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#endif
}
void MacroAssembler::LoadlB(Register dst, Register src) {
void TurboAssembler::LoadlB(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgcr(dst, src);
#else
......@@ -4594,22 +4622,21 @@ void MacroAssembler::LoadlB(Register dst, Register src) {
#endif
}
void MacroAssembler::LoadLogicalReversedWordP(Register dst,
void TurboAssembler::LoadLogicalReversedWordP(Register dst,
const MemOperand& mem) {
lrv(dst, mem);
LoadlW(dst, dst);
}
void MacroAssembler::LoadLogicalReversedHalfWordP(Register dst,
const MemOperand& mem) {
void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
const MemOperand& mem) {
lrvh(dst, mem);
LoadLogicalHalfWordP(dst, dst);
}
// Load And Test (Reg <- Reg)
void MacroAssembler::LoadAndTest32(Register dst, Register src) {
void TurboAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
}
......@@ -4617,7 +4644,7 @@ void MacroAssembler::LoadAndTest32(Register dst, Register src) {
// (Register dst(ptr) = Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
void TurboAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
ltgfr(dst, src);
#else
......@@ -4626,7 +4653,7 @@ void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
}
// Load And Test Pointer Sized (Reg <- Reg)
void MacroAssembler::LoadAndTestP(Register dst, Register src) {
void TurboAssembler::LoadAndTestP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
ltgr(dst, src);
#else
......@@ -4635,12 +4662,12 @@ void MacroAssembler::LoadAndTestP(Register dst, Register src) {
}
// Load And Test 32-bit (Reg <- Mem)
void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
void TurboAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
lt_z(dst, mem);
}
// Load And Test Pointer Sized (Reg <- Mem)
void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
ltg(dst, mem);
#else
......@@ -4649,7 +4676,7 @@ void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
}
// Load On Condition Pointer Sized (Reg <- Reg)
void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
Register src) {
#if V8_TARGET_ARCH_S390X
locgr(cond, dst, src);
......@@ -4659,7 +4686,7 @@ void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
}
// Load Double Precision (64-bit) Floating Point number from memory
void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
// for 32bit and 64bit we all use 64bit floating point regs
if (is_uint12(mem.offset())) {
ld(dst, mem);
......@@ -4669,7 +4696,7 @@ void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
}
// Load Single Precision (32-bit) Floating Point number from memory
void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
le_z(dst, mem);
} else {
......@@ -4680,14 +4707,14 @@ void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
// Load Single Precision (32-bit) Floating Point number from memory,
// and convert to Double Precision (64-bit)
void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
void TurboAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
const MemOperand& mem) {
LoadFloat32(dst, mem);
ldebr(dst, dst);
}
// Store Double Precision (64-bit) Floating Point number to memory
void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
void TurboAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
std(dst, mem);
} else {
......@@ -4696,7 +4723,7 @@ void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
}
// Store Single Precision (32-bit) Floating Point number to memory
void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
void TurboAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
ste(src, mem);
} else {
......@@ -4706,14 +4733,14 @@ void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
// Convert Double precision (64-bit) to Single Precision (32-bit)
// and store resulting Float32 to memory
void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
void TurboAssembler::StoreDoubleAsFloat32(DoubleRegister src,
const MemOperand& mem,
DoubleRegister scratch) {
ledbr(scratch, src);
StoreFloat32(scratch, mem);
}
void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
aeb(dst, opnd);
......@@ -4723,7 +4750,7 @@ void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
adb(dst, opnd);
......@@ -4733,7 +4760,7 @@ void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
seb(dst, opnd);
......@@ -4743,7 +4770,7 @@ void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
sdb(dst, opnd);
......@@ -4753,7 +4780,7 @@ void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
meeb(dst, opnd);
......@@ -4763,7 +4790,7 @@ void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
mdb(dst, opnd);
......@@ -4773,7 +4800,7 @@ void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
deb(dst, opnd);
......@@ -4783,7 +4810,7 @@ void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
ddb(dst, opnd);
......@@ -4793,7 +4820,7 @@ void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
void MacroAssembler::LoadFloat32ToDouble(DoubleRegister dst,
void TurboAssembler::LoadFloat32ToDouble(DoubleRegister dst,
const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
......@@ -4806,7 +4833,7 @@ void MacroAssembler::LoadFloat32ToDouble(DoubleRegister dst,
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
void MacroAssembler::StoreW(Register src, const MemOperand& mem,
void TurboAssembler::StoreW(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
......@@ -4839,7 +4866,7 @@ void MacroAssembler::StoreW(Register src, const MemOperand& mem,
// Loads 16-bits half-word value from memory and sign extends to pointer
// sized register
void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
void TurboAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
......@@ -4867,7 +4894,7 @@ void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand current only supports d-form
void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
void TurboAssembler::StoreHalfWord(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
......@@ -4885,7 +4912,7 @@ void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand current only supports d-form
void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
void TurboAssembler::StoreByte(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
......@@ -4902,7 +4929,7 @@ void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
}
// Shift left logical for 32-bit integer types.
void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
void TurboAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
if (dst.is(src)) {
sll(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
......@@ -4914,7 +4941,7 @@ void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
}
// Shift left logical for 32-bit integer types.
void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
void TurboAssembler::ShiftLeft(Register dst, Register src, Register val) {
if (dst.is(src)) {
sll(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
......@@ -4927,7 +4954,7 @@ void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
}
// Shift right logical for 32-bit integer types.
void MacroAssembler::ShiftRight(Register dst, Register src,
void TurboAssembler::ShiftRight(Register dst, Register src,
const Operand& val) {
if (dst.is(src)) {
srl(dst, val);
......@@ -4940,7 +4967,7 @@ void MacroAssembler::ShiftRight(Register dst, Register src,
}
// Shift right logical for 32-bit integer types.
void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
void TurboAssembler::ShiftRight(Register dst, Register src, Register val) {
if (dst.is(src)) {
srl(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
......@@ -4953,7 +4980,7 @@ void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
}
// Shift left arithmetic for 32-bit integer types.
void MacroAssembler::ShiftLeftArith(Register dst, Register src,
void TurboAssembler::ShiftLeftArith(Register dst, Register src,
const Operand& val) {
if (dst.is(src)) {
sla(dst, val);
......@@ -4966,7 +4993,7 @@ void MacroAssembler::ShiftLeftArith(Register dst, Register src,
}
// Shift left arithmetic for 32-bit integer types.
void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
void TurboAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
if (dst.is(src)) {
sla(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
......@@ -4979,7 +5006,7 @@ void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
}
// Shift right arithmetic for 32-bit integer types.
void MacroAssembler::ShiftRightArith(Register dst, Register src,
void TurboAssembler::ShiftRightArith(Register dst, Register src,
const Operand& val) {
if (dst.is(src)) {
sra(dst, val);
......@@ -4992,7 +5019,7 @@ void MacroAssembler::ShiftRightArith(Register dst, Register src,
}
// Shift right arithmetic for 32-bit integer types.
void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
if (dst.is(src)) {
sra(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
......@@ -5005,7 +5032,7 @@ void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
}
// Clear right most # of bits
void MacroAssembler::ClearRightImm(Register dst, Register src,
void TurboAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
int numBitsToClear = val.immediate() % (kPointerSize * 8);
......@@ -5031,7 +5058,7 @@ void MacroAssembler::ClearRightImm(Register dst, Register src,
}
}
void MacroAssembler::Popcnt32(Register dst, Register src) {
void TurboAssembler::Popcnt32(Register dst, Register src) {
DCHECK(!src.is(r0));
DCHECK(!dst.is(r0));
......@@ -5044,7 +5071,7 @@ void MacroAssembler::Popcnt32(Register dst, Register src) {
}
#ifdef V8_TARGET_ARCH_S390X
void MacroAssembler::Popcnt64(Register dst, Register src) {
void TurboAssembler::Popcnt64(Register dst, Register src) {
DCHECK(!src.is(r0));
DCHECK(!dst.is(r0));
......
......@@ -162,29 +162,39 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
#endif
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public Assembler {
class TurboAssembler : public Assembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
}
}
Isolate* isolate() const { return isolate_; }
Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
// Returns the size of a call in instructions.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
void JumpToJSEntry(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
TestIfSmi(value);
beq(smi_label /*, cr0*/); // branch if SMI
}
void Call(Register target);
void CallJSEntry(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
......@@ -194,17 +204,6 @@ class MacroAssembler : public Assembler {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
// below the caller's sp.
template <class Descriptor>
void LoadParameterFromStack(
Register reg, typename Descriptor::ParameterIndices parameter_index,
int sp_to_ra_offset_in_words = 0) {
DCHECK(Descriptor::kPassLastArgsOnStack);
UNIMPLEMENTED();
}
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
......@@ -233,7 +232,6 @@ class MacroAssembler : public Assembler {
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
//--------------------------------------------------------------------------
// S390 Macro Assemblers for Instructions
//--------------------------------------------------------------------------
......@@ -503,94 +501,6 @@ class MacroAssembler : public Assembler {
#endif
}
// ---------------------------------------------------------------------------
// GC Support
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
void HasColor(Register object, Register scratch0, Register scratch1,
Label* has_color, int first_bit, int second_bit);
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black);
// Checks the color of an object. If the object is white we jump to the
// incremental marker.
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
Register context, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
lr_status, save_fp, remembered_set_action, smi_check,
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(Register object, Register map, Register dst,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
void push(Register src) {
lay(sp, MemOperand(sp, -kPointerSize));
......@@ -609,7 +519,6 @@ class MacroAssembler : public Assembler {
// Push a handle.
void Push(Handle<HeapObject> handle);
void Push(Smi* smi);
void PushObject(Handle<Object> handle);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
......@@ -707,16 +616,11 @@ class MacroAssembler : public Assembler {
// overwritten by tail call stack preparation.
void RestoreFrameStateForTailCall();
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
}
// Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
// from C.
......@@ -808,47 +712,8 @@ class MacroAssembler : public Assembler {
int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
void EnterExitFrame(bool save_doubles, int stack_space = 1,
StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
void LoadGlobalObject(Register dst) {
LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
}
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
void LoadGlobalFunctionInitialMap(Register function, Register map,
Register scratch);
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
}
// ----------------------------------------------------------------
// new S390 macro-assembler interfaces that are slightly higher level
// than assembler-s390 and may generate variable length sequences
......@@ -875,11 +740,6 @@ class MacroAssembler : public Assembler {
Register scratch = r0);
void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
Register scratch = no_reg);
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
void AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch = r0);
void SubSmiLiteral(Register dst, Register src, Smi* smi,
......@@ -912,138 +772,260 @@ class MacroAssembler : public Assembler {
#endif
}
// ---------------------------------------------------------------------------
// JavaScript invokes
// Set up call kind marking in ecx. The method takes ecx as an
// explicit first parameter to make the code more readable at the
// call sites.
// void SetCallKind(Register dst, CallKind kind);
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
// ---------------------------------------------------------------------------
// Runtime calls
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// Call a code stub.
void CallStubDelayed(CodeStub* stub);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
const ParameterCount& actual, InvokeFlag flag);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
// are word sized. If double arguments are used, this function assumes that
// all double arguments are stored before core registers; otherwise the
// correct alignment of the double values is not guaranteed.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
Register scratch);
void PrepareCallCFunction(int num_reg_arguments, Register scratch);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
// There are two ways of passing double arguments on ARM, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
void MovToFloatParameter(DoubleRegister src);
void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
void MovToFloatResult(DoubleRegister src);
// Frame restart support
void MaybeDropFrames();
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function, int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments);
// Exception handling
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
// Push a new stack handler and link into stack handler chain.
void PushStackHandler();
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
void TruncateDoubleToIDelayed(Zone* zone, Register result,
DoubleRegister double_input);
void TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input,
Label* done);
// Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
void PopStackHandler();
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
// Print a message to stdout and abort execution.
void Abort(BailoutReason reason);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// Inline caching support
// Bit testing/extraction
//
// Bit numbering is such that the least significant bit is bit 0
// (for consistency between 32/64-bit).
void GetNumberHash(Register t0, Register scratch);
// Extract consecutive bits (defined by rangeStart - rangeEnd) from src
// and place them into the least significant bits of dst.
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd) {
DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Try to use RISBG if possible.
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
int endBit = 63; // End is always LSB after shifting.
int startBit = 63 - rangeStart + rangeEnd;
risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
true);
} else {
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
else if (!dst.is(src)) // If we didn't shift, we might need to copy
LoadRR(dst, src);
int width = rangeStart - rangeEnd + 1;
#if V8_TARGET_ARCH_S390X
uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
nihf(dst, Operand(mask >> 32));
nilf(dst, Operand(mask & 0xFFFFFFFF));
ltgr(dst, dst);
#else
uint32_t mask = (1 << width) - 1;
AndP(dst, Operand(mask));
#endif
}
}
// Check if the given instruction is a 'type' marker.
// i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
// These instructions are generated to mark special location in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
return IsNop(instr, type);
inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
ExtractBitRange(dst, src, bitNumber, bitNumber);
}
static inline int GetCodeMarker(Instr instr) {
int dst_reg_offset = 12;
int dst_mask = 0xf << dst_reg_offset;
int src_mask = 0xf;
int dst_reg = (instr & dst_mask) >> dst_reg_offset;
int src_reg = instr & src_mask;
uint32_t non_register_mask = ~(dst_mask | src_mask);
uint32_t mov_mask = al | 13 << 21;
// Extract consecutive bits (defined by mask) from src and place them
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC) {
int start = kBitsPerPointer - 1;
int end;
uintptr_t bit = (1L << start);
// Return <n> if we have a mov rn rn, else return -1.
int type = ((instr & non_register_mask) == mov_mask) &&
(dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
(dst_reg < LAST_CODE_MARKER)
? src_reg
: -1;
DCHECK((type == -1) ||
((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
return type;
while (bit && (mask & bit) == 0) {
start--;
bit >>= 1;
}
end = start;
bit >>= 1;
while (bit && (mask & bit)) {
end--;
bit >>= 1;
}
// 1-bits in mask must be contiguous
DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
ExtractBitRange(dst, src, start, end);
}
// ---------------------------------------------------------------------------
// Allocation support
// Test single bit in value.
inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
ExtractBitRange(scratch, value, bitNumber, bitNumber);
}
// Allocate an object in new space or old pointer space. The object_size is
// specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as as a heap object.
// All registers are clobbered also when control continues at the gc_required
// label.
void Allocate(int object_size, Register result, Register scratch1,
Register scratch2, Label* gc_required, AllocationFlags flags);
// Test consecutive bit range in value. Range is defined by
// rangeStart - rangeEnd.
inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
Register scratch = r0) {
ExtractBitRange(scratch, value, rangeStart, rangeEnd);
}
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// Test consecutive bit range in value. Range is defined by mask.
inline void TestBitMask(Register value, uintptr_t mask,
Register scratch = r0) {
ExtractBitMask(scratch, value, mask, SetRC);
}
inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Register heap_number_map, Label* gc_required,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
Register scratch1, Register scratch2,
Register heap_number_map,
Label* gc_required);
inline void TestIfSmi(MemOperand value) {
if (is_uint12(value.offset())) {
tm(value, Operand(1));
} else if (is_int20(value.offset())) {
tmy(value, Operand(1));
} else {
LoadB(r0, value);
tmll(r0, Operand(1));
}
}
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch1, Register scratch2,
Label* gc_required);
inline void TestIfInt32(Register value) {
// High bits must be identical to fit into an 32-bit integer
cgfr(value, value);
}
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// the loop, |current_address| points at the next uninitialized field.
// |count| is assumed to be non-zero.
void InitializeNFieldsWithFiller(Register current_address, Register count,
Register filler);
void SmiUntag(Register dst, Register src) {
ShiftRightArithP(dst, src, Operand(kSmiShift));
}
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
void InitializeFieldsWithFiller(Register current_address,
Register end_address, Register filler);
// Activation support.
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
bool has_frame_ = false;
Isolate* isolate_;
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
// below the caller's sp.
template <class Descriptor>
void LoadParameterFromStack(
Register reg, typename Descriptor::ParameterIndices parameter_index,
int sp_to_ra_offset_in_words = 0) {
DCHECK(Descriptor::kPassLastArgsOnStack);
UNIMPLEMENTED();
}
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
void CallStub(CodeStub* stub, Condition cond = al);
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext, int num_arguments);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
// ---------------------------------------------------------------------------
// Support functions.
......@@ -1104,6 +1086,10 @@ class MacroAssembler : public Assembler {
Push(r0);
}
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
CompareRoot(with, index);
......@@ -1149,228 +1135,168 @@ class MacroAssembler : public Assembler {
Register scratch, DoubleRegister double_scratch);
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub,
Condition cond = al);
void CallStubDelayed(CodeStub* stub);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// StatsCounter support
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
void SetCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
// ---------------------------------------------------------------------------
// JavaScript invokes
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext, int num_arguments);
// Set up call kind marking in ecx. The method takes ecx as an
// explicit first parameter to make the code more readable at the
// call sites.
// void SetCallKind(Register dst, CallKind kind);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
// are word sized. If double arguments are used, this function assumes that
// all double arguments are stored before core registers; otherwise the
// correct alignment of the double values is not guaranteed.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
Register scratch);
void PrepareCallCFunction(int num_reg_arguments, Register scratch);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// There are two ways of passing double arguments on ARM, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
void MovToFloatParameter(DoubleRegister src);
void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
void MovToFloatResult(DoubleRegister src);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function, int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
// Frame restart support
void MaybeDropFrames();
Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
// Exception handling
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
// Push a new stack handler and link into stack handler chain.
void PushStackHandler();
// ---------------------------------------------------------------------------
// StatsCounter support
// Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
void PopStackHandler();
void SetCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
void EnterExitFrame(bool save_doubles, int stack_space = 1,
StackFrame::Type frame_type = StackFrame::EXIT);
// ---------------------------------------------------------------------------
// Debugging
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length = false);
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
void LoadContext(Register dst, int context_chain_length);
// Print a message to stdout and abort execution.
void Abort(BailoutReason reason);
// Load the global object from the current context into |dst|
// (native-context slot EXTENSION_INDEX).
void LoadGlobalObject(Register dst) {
LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
}
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// Load the global proxy from the current context into |dst|
// (native-context slot GLOBAL_PROXY_INDEX).
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
// ---------------------------------------------------------------------------
// Number utilities
void LoadNativeContextSlot(int index, Register dst);
// Check whether the value of reg is a power of two and not zero. If not
// control continues at the label not_power_of_two. If reg is a power of two
// the register scratch contains the value of (reg - 1) when control falls
// through.
void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
Label* not_power_of_two_or_zero);
// Check whether the value of reg is a power of two and not zero.
// Control falls through if it is, with scratch containing the mask
// value (reg - 1).
// Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
// zero or negative, or jumps to the 'not_power_of_two' label if the value is
// strictly positive but not a power of two.
void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
Label* zero_and_neg,
Label* not_power_of_two);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
void LoadGlobalFunctionInitialMap(Register function, Register map,
Register scratch);
// ---------------------------------------------------------------------------
// Bit testing/extraction
//
// Bit numbering is such that the least significant bit is bit 0
// (for consistency between 32/64-bit).
// Extract the consecutive bits rangeStart down to rangeEnd (inclusive,
// bit 0 = least significant) from src and place them into the least
// significant bits of dst; higher bits of dst are cleared.
// Requires rangeStart >= rangeEnd.
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd) {
DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
// Try to use RISBG if possible.
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
// RISBG rotates src and inserts the selected bit range into dst in a
// single instruction; the final 'true' zeroes the remaining bits of dst.
int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
int endBit = 63; // End is always LSB after shifting.
int startBit = 63 - rangeStart + rangeEnd;
risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
true);
} else {
// Fallback: shift the range down to bit 0, then mask off everything
// above the (rangeStart - rangeEnd + 1)-bit window.
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
else if (!dst.is(src)) // If we didn't shift, we might need to copy
LoadRR(dst, src);
int width = rangeStart - rangeEnd + 1;
#if V8_TARGET_ARCH_S390X
// 64-bit: apply the mask in two 32-bit halves (high via NIHF, low via
// NILF), then load-and-test so the condition code reflects the result.
uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
nihf(dst, Operand(mask >> 32));
nilf(dst, Operand(mask & 0xFFFFFFFF));
ltgr(dst, dst);
#else
uint32_t mask = (1 << width) - 1;
AndP(dst, Operand(mask));
#endif
}
}
// Extract the single bit |bitNumber| from src into the least significant
// bit of dst (degenerate single-bit range of ExtractBitRange).
inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
ExtractBitRange(dst, src, bitNumber, bitNumber);
}
// Inline caching support
// Extract consecutive bits (defined by mask) from src and place them
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC) {
int start = kBitsPerPointer - 1;
int end;
uintptr_t bit = (1L << start);
void GetNumberHash(Register t0, Register scratch);
while (bit && (mask & bit) == 0) {
start--;
bit >>= 1;
}
end = start;
bit >>= 1;
inline void MarkCode(NopMarkerTypes type) { nop(type); }
while (bit && (mask & bit)) {
end--;
bit >>= 1;
}
// Check if the given instruction is a 'type' marker.
// i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
// These instructions are generated to mark special locations in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
return IsNop(instr, type);
}
// 1-bits in mask must be contiguous
DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
static inline int GetCodeMarker(Instr instr) {
int dst_reg_offset = 12;
int dst_mask = 0xf << dst_reg_offset;
int src_mask = 0xf;
int dst_reg = (instr & dst_mask) >> dst_reg_offset;
int src_reg = instr & src_mask;
uint32_t non_register_mask = ~(dst_mask | src_mask);
uint32_t mov_mask = al | 13 << 21;
ExtractBitRange(dst, src, start, end);
// Return <n> if we have a mov rn rn, else return -1.
int type = ((instr & non_register_mask) == mov_mask) &&
(dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
(dst_reg < LAST_CODE_MARKER)
? src_reg
: -1;
DCHECK((type == -1) ||
((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
return type;
}
// ---------------------------------------------------------------------------
// Allocation support
// Test single bit in value. The extracted bit is left in |scratch|
// (r0 by default); callers inspect scratch or the resulting flags.
inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
ExtractBitRange(scratch, value, bitNumber, bitNumber);
}
// Allocate an object in new space or old pointer space. The object_size is
// specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as a heap object.
// All registers are clobbered also when control continues at the gc_required
// label.
void Allocate(int object_size, Register result, Register scratch1,
Register scratch2, Label* gc_required, AllocationFlags flags);
// Test consecutive bit range in value. Range is defined by
// rangeStart - rangeEnd (inclusive, bit 0 = LSB). The extracted bits are
// left in |scratch| (r0 by default).
inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
Register scratch = r0) {
ExtractBitRange(scratch, value, rangeStart, rangeEnd);
}
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// Test consecutive bit range in value. Range is defined by mask
// (1-bits in mask must be contiguous). Uses SetRC so the condition
// register reflects the extracted bits; result is left in |scratch|.
inline void TestBitMask(Register value, uintptr_t mask,
Register scratch = r0) {
ExtractBitMask(scratch, value, mask, SetRC);
}
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Register heap_number_map, Label* gc_required,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
Register scratch1, Register scratch2,
Register heap_number_map,
Label* gc_required);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch1, Register scratch2,
Label* gc_required);
// ---------------------------------------------------------------------------
// Smi utilities
......@@ -1408,12 +1334,6 @@ class MacroAssembler : public Assembler {
bne(not_smi_label /*, cr0*/);
}
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
void SmiUntag(Register dst, Register src) {
ShiftRightArithP(dst, src, Operand(kSmiShift));
}
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
......@@ -1501,19 +1421,6 @@ class MacroAssembler : public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Test the smi tag bit of |value|; sets the condition code for a following
// beq (smi) / bne (not smi).
inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
// Test the smi tag bit of the value stored at memory operand |value|,
// choosing the shortest test instruction the offset allows.
inline void TestIfSmi(MemOperand value) {
if (is_uint12(value.offset())) {
tm(value, Operand(1));
} else if (is_int20(value.offset())) {
tmy(value, Operand(1));
} else {
// Offset out of TM/TMY range: load the byte into r0 and test there.
// NOTE(review): clobbers r0 in this path.
LoadB(r0, value);
tmll(r0, Operand(1));
}
}
inline void TestIfPositiveSmi(Register value, Register scratch) {
STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
(intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
......@@ -1521,11 +1428,6 @@ class MacroAssembler : public Assembler {
AndP(scratch, value);
}
// Jump to |smi_label| if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
TestIfSmi(value);
beq(smi_label /*, cr0*/); // branch if SMI
}
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value);
......@@ -1540,11 +1442,6 @@ class MacroAssembler : public Assembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
// Set the condition code to 'equal' iff the 64-bit value fits in a 32-bit
// integer, by comparing the register against its own sign-extended low
// 32 bits (CGFR).
inline void TestIfInt32(Register value) {
// High bits must be identical to fit into an 32-bit integer
cgfr(value, value);
}
#if V8_TARGET_ARCH_S390X
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
......@@ -1655,20 +1552,26 @@ class MacroAssembler : public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
// Activation support.
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
// Expects object in r2 and returns map with validated enum cache
// in r2. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// the loop, |current_address| points at the next uninitialized field.
// |count| is assumed to be non-zero.
void InitializeNFieldsWithFiller(Register current_address, Register count,
Register filler);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
void InitializeFieldsWithFiller(Register current_address,
Register end_address, Register filler);
// ---------------------------------------------------------------------------
// GC Support
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
// to another type.
......@@ -1680,19 +1583,137 @@ class MacroAssembler : public Assembler {
Register scratch2_reg,
Label* no_memento_found);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CallJSEntry(Register target);
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
void PushObject(Handle<Object> handle);
void JumpToJSEntry(Register target);
// ---------------------------------------------------------------------------
// Number utilities
// Check whether the value of reg is a power of two and not zero. If not
// control continues at the label not_power_of_two. If reg is a power of two
// the register scratch contains the value of (reg - 1) when control falls
// through.
void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
Label* not_power_of_two_or_zero);
// Check whether the value of reg is a power of two and not zero.
// Control falls through if it is, with scratch containing the mask
// value (reg - 1).
// Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
// zero or negative, or jumps to the 'not_power_of_two' label if the value is
// strictly positive but not a power of two.
void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
Label* zero_and_neg,
Label* not_power_of_two);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
// The 'eq' condition selects the branch-if-NOT-in-new-space sense of
// InNewSpace (cf. JumpIfInNewSpace below, which passes 'ne').
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
// The 'ne' condition selects the branch-if-in-new-space sense of InNewSpace.
void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
void HasColor(Register object, Register scratch0, Register scratch1,
Label* has_color, int first_bit, int second_bit);
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black);
// Checks the color of an object. If the object is white we jump to the
// incremental marker.
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
Register context, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
// Re-add the heap-object tag: RecordWriteField expects the offset from
// the start of the object, not from the tagged pointer.
RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
lr_status, save_fp, remembered_set_action, smi_check,
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(Register object, Register map, Register dst,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
Register scratch = no_reg);
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag);
bool* definitely_mismatches, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch,
......@@ -1713,11 +1734,6 @@ class MacroAssembler : public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool has_frame_;
Isolate* isolate_;
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment