Commit 5a9ecc12 authored by jyan, committed by Commit bot

S390: Initial impl of turbofan compiler

R=danno@chromium.org,jkummerow@chromium.org,jochen@chromium.org,joransiu@ca.ibm.com,michael_dawson@ca.ibm.com,mbrandy@us.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1762743002

Cr-Commit-Position: refs/heads/master@{#34501}
parent 78e23b2b
@@ -1640,6 +1640,10 @@ source_set("v8_base") {
    ]
  } else if (v8_target_arch == "s390" || v8_target_arch == "s390x") {
    sources += [
"src/compiler/s390/code-generator-s390.cc",
"src/compiler/s390/instruction-codes-s390.h",
"src/compiler/s390/instruction-scheduler-s390.cc",
"src/compiler/s390/instruction-selector-s390.cc",
"src/debug/s390/debug-s390.cc", "src/debug/s390/debug-s390.cc",
"src/ic/s390/access-compiler-s390.cc", "src/ic/s390/access-compiler-s390.cc",
"src/ic/s390/handler-compiler-s390.cc", "src/ic/s390/handler-compiler-s390.cc",
......
@@ -123,6 +123,26 @@ LinkageLocation regloc(Register reg) {
      d20.bit() | d21.bit() | d22.bit() | d23.bit() | d24.bit() | d25.bit() | \
      d26.bit() | d27.bit() | d28.bit() | d29.bit() | d30.bit() | d31.bit()
#elif V8_TARGET_ARCH_S390X
// ===========================================================================
// == s390x ==================================================================
// ===========================================================================
#define PARAM_REGISTERS r2, r3, r4, r5, r6
#define CALLEE_SAVE_REGISTERS \
r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
#define CALLEE_SAVE_FP_REGISTERS \
d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
d14.bit() | d15.bit()
#elif V8_TARGET_ARCH_S390
// ===========================================================================
// == s390 ===================================================================
// ===========================================================================
#define PARAM_REGISTERS r2, r3, r4, r5, r6
#define CALLEE_SAVE_REGISTERS \
r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
#define CALLEE_SAVE_FP_REGISTERS (d4.bit() | d6.bit())
#else
// ===========================================================================
// == unknown ================================================================
......
@@ -21,6 +21,8 @@
#include "src/compiler/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC
#include "src/compiler/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/s390/instruction-codes-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/compiler/x87/instruction-codes-x87.h"
#else
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/code-generator.h"
#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/s390/macro-assembler-s390.h"
namespace v8 {
namespace internal {
namespace compiler {
#define __ masm()->
#define kScratchReg ip
// Adds S390-specific methods to convert InstructionOperands.
class S390OperandConverter final : public InstructionOperandConverter {
public:
S390OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
size_t OutputCount() { return instr_->OutputCount(); }
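  // Returns true when the instruction's flags condition is one of the
  // unsigned comparisons, in which case the logical (unsigned) compare
  // instruction must be emitted instead of the signed one.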
bool CompareLogical() const {
switch (instr_->flags_condition()) {
case kUnsignedLessThan:
case kUnsignedGreaterThanOrEqual:
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan:
return true;
default:
return false;
}
UNREACHABLE();
return false;
}
Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
return Operand(constant.ToInt64());
#endif
case Constant::kExternalReference:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
}
UNREACHABLE();
return Operand::Zero();
}
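  // Decodes the addressing mode encoded in the instruction's opcode and
  // consumes the inputs that form the memory reference: base register +
  // displacement for MRI, base + index register for MRR. *first_index is
  // advanced past the consumed inputs.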
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
const size_t index = *first_index;
*mode = AddressingModeField::decode(instr_->opcode());
switch (*mode) {
case kMode_None:
break;
case kMode_MRI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
case kMode_MRR:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
}
UNREACHABLE();
return MemOperand(r0);
}
MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
FrameOffset offset = frame_access_state()->GetFrameOffset(
AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
static inline bool HasRegisterInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsRegister();
}
namespace {
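// Out-of-line stubs used by the checked load/store macros below: on an
// out-of-bounds access they materialize a quiet NaN (float loads) or zero
// (integer loads) instead of performing the memory access.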
class OutOfLineLoadNAN32 final : public OutOfLineCode {
public:
OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
__ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
kScratchReg);
}
private:
DoubleRegister const result_;
};
class OutOfLineLoadNAN64 final : public OutOfLineCode {
public:
OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
__ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
kScratchReg);
}
private:
DoubleRegister const result_;
};
class OutOfLineLoadZero final : public OutOfLineCode {
public:
OutOfLineLoadZero(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
private:
Register const result_;
};
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
Register value, Register scratch0, Register scratch1,
RecordWriteMode mode)
: OutOfLineCode(gen),
object_(object),
offset_(offset),
offset_immediate_(0),
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
RecordWriteMode mode)
: OutOfLineCode(gen),
object_(object),
offset_(no_reg),
offset_immediate_(offset),
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (!frame()->needs_frame()) {
// We need to save and restore r14 if the frame was elided.
__ Push(r14);
}
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) {
__ AddP(scratch1_, object_, Operand(offset_immediate_));
} else {
DCHECK_EQ(0, offset_immediate_);
__ AddP(scratch1_, object_, offset_);
}
__ CallStub(&stub);
if (!frame()->needs_frame()) {
// We need to save and restore r14 if the frame was elided.
__ Pop(r14);
}
}
private:
Register const object_;
Register const offset_;
int32_t const offset_immediate_; // Valid if offset_.is(no_reg).
Register const value_;
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
};
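// Maps a TurboFan FlagsCondition to an S390 condition code. The overflow
// cases depend on the opcode: on 64-bit targets, 32-bit overflow is
// detected with TestIfInt32 (the result is no longer a sign-extended
// int32, hence ne/eq), while Add/SubAndCheckForOverflow signal overflow
// through a negative scratch value (hence lt/ge).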
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
switch (condition) {
case kEqual:
return eq;
case kNotEqual:
return ne;
case kSignedLessThan:
case kUnsignedLessThan:
return lt;
case kSignedGreaterThanOrEqual:
case kUnsignedGreaterThanOrEqual:
return ge;
case kSignedLessThanOrEqual:
case kUnsignedLessThanOrEqual:
return le;
case kSignedGreaterThan:
case kUnsignedGreaterThan:
return gt;
case kOverflow:
// Overflow checked for AddP/SubP only.
switch (op) {
#if V8_TARGET_ARCH_S390X
case kS390_Add:
case kS390_Sub:
return lt;
#endif
case kS390_AddWithOverflow32:
case kS390_SubWithOverflow32:
#if V8_TARGET_ARCH_S390X
return ne;
#else
return lt;
#endif
default:
break;
}
break;
case kNotOverflow:
switch (op) {
#if V8_TARGET_ARCH_S390X
case kS390_Add:
case kS390_Sub:
return ge;
#endif
case kS390_AddWithOverflow32:
case kS390_SubWithOverflow32:
#if V8_TARGET_ARCH_S390X
return eq;
#else
return ge;
#endif
default:
break;
}
break;
default:
break;
}
UNREACHABLE();
return kNoCondition;
}
} // namespace
#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
do { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
} while (0)
#define ASSEMBLE_FLOAT_BINOP(asm_instr) \
do { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
} while (0)
#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
do { \
if (HasRegisterInput(instr, 1)) { \
__ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
i.InputRegister(1)); \
} else { \
__ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
i.InputImmediate(1)); \
} \
} while (0)
#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm) \
do { \
if (HasRegisterInput(instr, 1)) { \
__ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
i.InputRegister(1)); \
} else { \
__ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
i.InputInt32(1)); \
} \
} while (0)
#define ASSEMBLE_ADD_WITH_OVERFLOW() \
do { \
if (HasRegisterInput(instr, 1)) { \
__ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
i.InputRegister(1), kScratchReg, r0); \
} else { \
__ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
i.InputInt32(1), kScratchReg, r0); \
} \
} while (0)
#define ASSEMBLE_SUB_WITH_OVERFLOW() \
do { \
if (HasRegisterInput(instr, 1)) { \
__ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
i.InputRegister(1), kScratchReg, r0); \
} else { \
__ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
-i.InputInt32(1), kScratchReg, r0); \
} \
} while (0)
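// On 64-bit targets, 32-bit overflow is checked by performing the full
// 64-bit AddP/SubP and then testing whether the result is still a valid
// sign-extended 32-bit value.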
#if V8_TARGET_ARCH_S390X
#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
do { \
ASSEMBLE_BINOP(AddP, AddP); \
__ TestIfInt32(i.OutputRegister(), r0); \
} while (0)
#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
do { \
ASSEMBLE_BINOP(SubP, SubP); \
__ TestIfInt32(i.OutputRegister(), r0); \
} while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
do { \
if (HasRegisterInput(instr, 1)) { \
if (i.CompareLogical()) { \
__ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
} else { \
__ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
} \
} else { \
if (i.CompareLogical()) { \
__ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
} else { \
__ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
} \
} \
} while (0)
#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
do { \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));       \
} while (0)
// The divide instruction (dr/dlr) implicitly uses the register pair
// r0 & r1 below:
// R0:R1 = R1 / divisor, leaving the quotient in r1 and the remainder
// in r0; the remainder is then copied to the output register.
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
do { \
__ LoadRR(r0, i.InputRegister(0)); \
__ shift_instr(r0, Operand(32)); \
__ div_instr(r0, i.InputRegister(1)); \
__ ltr(i.OutputRegister(), r0); \
} while (0)
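// For example, kS390_Mod32 expands this as ASSEMBLE_MODULO(dr, srda): srda
// shifts the dividend from r0 into the even/odd pair r0:r1 (sign-extending
// it), dr leaves the quotient in r1 and the remainder in r0, and ltr copies
// the remainder to the output register.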
#define ASSEMBLE_FLOAT_MODULO() \
do { \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
0, 2); \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
do { \
Label ge, done; \
__ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
__ bge(&ge, Label::kNear); \
__ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
__ b(&done, Label::kNear); \
__ bind(&ge); \
__ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ bind(&done); \
} while (0)
#define ASSEMBLE_FLOAT_MIN(double_scratch_reg, general_scratch_reg) \
do { \
Label ge, done; \
__ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
__ bge(&ge, Label::kNear); \
__ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ b(&done, Label::kNear); \
__ bind(&ge); \
__ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
__ bind(&done); \
} while (0)
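// Note: the double_scratch_reg/general_scratch_reg parameters are currently
// unused by these two macros, and an unordered compare (a NaN operand)
// falls through the bge, so NaN inputs simply select the not-ge operand
// rather than being propagated specially.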
// Only the MRI addressing mode is available for these instructions.
#define ASSEMBLE_LOAD_FLOAT(asm_instr) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
__ asm_instr(result, operand); \
} while (0)
#define ASSEMBLE_LOAD_INTEGER(asm_instr) \
do { \
Register result = i.OutputRegister(); \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
__ asm_instr(result, operand); \
} while (0)
#define ASSEMBLE_STORE_FLOAT32() \
do { \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
__ StoreFloat32(value, operand); \
} while (0)
#define ASSEMBLE_STORE_DOUBLE() \
do { \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
__ StoreDouble(value, operand); \
} while (0)
#define ASSEMBLE_STORE_INTEGER(asm_instr) \
do { \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
Register value = i.InputRegister(index); \
__ asm_instr(value, operand); \
} while (0)
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
__ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
__ CmpLogical32(offset, i.InputImmediate(2)); \
} \
auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
__ bge(ool->entry()); \
__ asm_instr(result, operand); \
__ bind(ool->exit()); \
} while (0)
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
Register result = i.OutputRegister(); \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
__ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
__ CmpLogical32(offset, i.InputImmediate(2)); \
} \
auto ool = new (zone()) OutOfLineLoadZero(this, result); \
__ bge(ool->entry()); \
__ asm_instr(result, operand); \
__ bind(ool->exit()); \
} while (0)
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
do { \
Label done; \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
__ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
__ CmpLogical32(offset, i.InputImmediate(2)); \
} \
__ bge(&done); \
DoubleRegister value = i.InputDoubleRegister(3); \
__ StoreFloat32(value, operand); \
__ bind(&done); \
} while (0)
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
do { \
Label done; \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
__ CmpLogical32(offset, i.InputImmediate(2)); \
} \
__ bge(&done); \
DoubleRegister value = i.InputDoubleRegister(3); \
__ StoreDouble(value, operand); \
__ bind(&done); \
} while (0)
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
Label done; \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
__ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
__ CmpLogical32(offset, i.InputImmediate(2)); \
} \
__ bge(&done); \
Register value = i.InputRegister(3); \
__ asm_instr(value, operand); \
__ bind(&done); \
} while (0)
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
__ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
}
frame_access_state()->SetFrameAccessToDefault();
}
void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta < 0) {
__ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
if (frame()->needs_frame()) {
__ RestoreFrameStateForTailCall();
}
frame_access_state()->SetFrameAccessToSP();
}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
S390OperandConverter i(this, instr);
ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
switch (opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
__ AddP(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
} else {
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
if (HasRegisterInput(instr, 0)) {
__ AddP(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
}
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ CmpP(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(ip);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ CmpP(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
frame_access_state()->ClearSPDelta();
break;
}
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
} else {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
AssembleReturn();
break;
case kArchStackPointer:
__ LoadRR(i.OutputRegister(), sp);
break;
case kArchFramePointer:
__ LoadRR(i.OutputRegister(), fp);
break;
case kArchParentFramePointer:
if (frame_access_state()->frame()->needs_frame()) {
__ LoadP(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ LoadRR(i.OutputRegister(), fp);
}
break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
OutOfLineRecordWrite* ool;
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
if (addressing_mode == kMode_MRI) {
int32_t offset = i.InputInt32(1);
ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
scratch0, scratch1, mode);
__ StoreP(value, MemOperand(object, offset));
} else {
DCHECK_EQ(kMode_MRR, addressing_mode);
Register offset(i.InputRegister(1));
ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
scratch0, scratch1, mode);
__ StoreP(value, MemOperand(object, offset));
}
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
__ bind(ool->exit());
break;
}
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
__ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
Operand(offset.offset()));
break;
}
case kS390_And:
ASSEMBLE_BINOP(AndP, AndP);
break;
case kS390_AndComplement:
__ NotP(i.InputRegister(1));
__ AndP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
case kS390_Or:
ASSEMBLE_BINOP(OrP, OrP);
break;
case kS390_OrComplement:
__ NotP(i.InputRegister(1));
__ OrP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
case kS390_Xor:
ASSEMBLE_BINOP(XorP, XorP);
break;
case kS390_ShiftLeft32:
if (HasRegisterInput(instr, 1)) {
if (i.OutputRegister().is(i.InputRegister(1))) {
__ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
}
} else {
ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
}
#if V8_TARGET_ARCH_S390X
__ lgfr(i.OutputRegister(0), i.OutputRegister(0));
#endif
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftLeft64:
ASSEMBLE_BINOP(sllg, sllg);
break;
#endif
case kS390_ShiftRight32:
if (HasRegisterInput(instr, 1)) {
if (i.OutputRegister().is(i.InputRegister(1))) {
__ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
ASSEMBLE_BINOP(ShiftRight, ShiftRight);
}
} else {
ASSEMBLE_BINOP(ShiftRight, ShiftRight);
}
#if V8_TARGET_ARCH_S390X
__ lgfr(i.OutputRegister(0), i.OutputRegister(0));
#endif
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRight64:
ASSEMBLE_BINOP(srlg, srlg);
break;
#endif
case kS390_ShiftRightAlg32:
if (HasRegisterInput(instr, 1)) {
if (i.OutputRegister().is(i.InputRegister(1))) {
__ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
kScratchReg);
} else {
ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
}
} else {
ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
}
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRightAlg64:
ASSEMBLE_BINOP(srag, srag);
break;
#endif
case kS390_RotRight32:
if (HasRegisterInput(instr, 1)) {
__ LoadComplementRR(kScratchReg, i.InputRegister(1));
__ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
__ rll(i.OutputRegister(), i.InputRegister(0),
Operand(32 - i.InputInt32(1)));
}
break;
#if V8_TARGET_ARCH_S390X
case kS390_RotRight64:
if (HasRegisterInput(instr, 1)) {
__ LoadComplementRR(kScratchReg, i.InputRegister(1));
__ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg,
Operand(32));
__ lgfr(i.OutputRegister(), i.OutputRegister());
} else {
UNIMPLEMENTED(); // Not implemented for now
}
break;
#endif
case kS390_Not:
__ LoadRR(i.OutputRegister(), i.InputRegister(0));
__ NotP(i.OutputRegister());
break;
case kS390_RotLeftAndMask32:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = i.InputInt32(1);
int endBit = 63 - i.InputInt32(3);
int startBit = 63 - i.InputInt32(2);
__ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
__ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
Operand(endBit), Operand::Zero(), true);
} else {
UNIMPLEMENTED();
}
break;
#if V8_TARGET_ARCH_S390X
case kS390_RotLeftAndClear64:
UNIMPLEMENTED(); // Find correct instruction
break;
case kS390_RotLeftAndClearLeft64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = i.InputInt32(1);
int endBit = 63;
int startBit = 63 - i.InputInt32(2);
__ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
Operand(endBit), Operand(shiftAmount), true);
} else {
UNIMPLEMENTED();
}
break;
case kS390_RotLeftAndClearRight64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = i.InputInt32(1);
int endBit = 63 - i.InputInt32(2);
int startBit = 0;
__ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
Operand(endBit), Operand(shiftAmount), true);
} else {
UNIMPLEMENTED();
}
break;
#endif
case kS390_Add:
#if V8_TARGET_ARCH_S390X
if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
ASSEMBLE_ADD_WITH_OVERFLOW();
} else {
#endif
ASSEMBLE_BINOP(AddP, AddP);
#if V8_TARGET_ARCH_S390X
}
#endif
break;
case kS390_AddWithOverflow32:
ASSEMBLE_ADD_WITH_OVERFLOW32();
break;
case kS390_AddFloat:
// Ensure we don't clobber right/InputReg(1)
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
ASSEMBLE_FLOAT_UNOP(aebr);
} else {
if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
case kS390_AddDouble:
// Ensure we don't clobber right/InputReg(1)
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
ASSEMBLE_FLOAT_UNOP(adbr);
} else {
if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
case kS390_Sub:
#if V8_TARGET_ARCH_S390X
if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
ASSEMBLE_SUB_WITH_OVERFLOW();
} else {
#endif
ASSEMBLE_BINOP(SubP, SubP);
#if V8_TARGET_ARCH_S390X
}
#endif
break;
case kS390_SubWithOverflow32:
ASSEMBLE_SUB_WITH_OVERFLOW32();
break;
case kS390_SubFloat:
      // OutputDoubleRegister() = InputDoubleRegister(0) - InputDoubleRegister(1)
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
__ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
} else {
if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
}
__ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
case kS390_SubDouble:
      // OutputDoubleRegister() = InputDoubleRegister(0) - InputDoubleRegister(1)
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
__ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
} else {
if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
}
__ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
case kS390_Mul32:
#if V8_TARGET_ARCH_S390X
case kS390_Mul64:
#endif
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
case kS390_MulHigh32:
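      // mr_z multiplies the implicit even/odd pair: r0:r1 = r1 * input, so
      // the high 32 bits of the 64-bit product end up in r0.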
__ LoadRR(r1, i.InputRegister(0));
__ mr_z(r0, i.InputRegister(1));
__ LoadRR(i.OutputRegister(), r0);
break;
case kS390_MulHighU32:
__ LoadRR(r1, i.InputRegister(0));
__ mlr(r0, i.InputRegister(1));
__ LoadRR(i.OutputRegister(), r0);
break;
case kS390_MulFloat:
// Ensure we don't clobber right
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
ASSEMBLE_FLOAT_UNOP(meebr);
} else {
if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
case kS390_MulDouble:
// Ensure we don't clobber right
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
ASSEMBLE_FLOAT_UNOP(mdbr);
} else {
if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
#if V8_TARGET_ARCH_S390X
case kS390_Div64:
#endif
case kS390_Div32:
__ LoadRR(r0, i.InputRegister(0));
__ srda(r0, Operand(32));
__ dr(r0, i.InputRegister(1));
__ ltr(i.OutputRegister(), r1);
break;
#if V8_TARGET_ARCH_S390X
case kS390_DivU64:
__ LoadRR(r1, i.InputRegister(0));
__ LoadImmP(r0, Operand::Zero());
      __ dlgr(r0, i.InputRegister(1));  // R0:R1 = R1 / divisor
      __ ltgr(i.OutputRegister(), r1);  // Copy quotient to output reg
break;
#endif
case kS390_DivU32:
__ LoadRR(r0, i.InputRegister(0));
__ srdl(r0, Operand(32));
      __ dlr(r0, i.InputRegister(1));  // R0:R1 = R1 / divisor
      __ ltr(i.OutputRegister(), r1);  // Copy quotient to output reg
break;
case kS390_DivFloat:
      // OutputDoubleRegister() = InputDoubleRegister(0) / InputDoubleRegister(1)
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
__ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
} else {
if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
case kS390_DivDouble:
      // OutputDoubleRegister() = InputDoubleRegister(0) / InputDoubleRegister(1)
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
__ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
} else {
if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
__ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
case kS390_Mod32:
ASSEMBLE_MODULO(dr, srda);
break;
case kS390_ModU32:
ASSEMBLE_MODULO(dlr, srdl);
break;
#if V8_TARGET_ARCH_S390X
case kS390_Mod64:
ASSEMBLE_MODULO(dr, srda);
break;
case kS390_ModU64:
ASSEMBLE_MODULO(dlr, srdl);
break;
#endif
case kS390_AbsFloat:
__ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_SqrtFloat:
ASSEMBLE_FLOAT_UNOP(sqebr);
break;
case kS390_FloorFloat:
__ FloatFloor32(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
kScratchReg);
break;
case kS390_CeilFloat:
__ FloatCeiling32(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
kScratchReg, kScratchDoubleReg);
break;
case kS390_TruncateFloat:
__ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
break;
// Double operations
case kS390_ModDouble:
ASSEMBLE_FLOAT_MODULO();
break;
case kS390_Neg:
__ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_MaxDouble:
ASSEMBLE_FLOAT_MAX(kScratchDoubleReg, kScratchReg);
break;
case kS390_MinDouble:
ASSEMBLE_FLOAT_MIN(kScratchDoubleReg, kScratchReg);
break;
case kS390_AbsDouble:
__ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_SqrtDouble:
ASSEMBLE_FLOAT_UNOP(sqdbr);
break;
case kS390_FloorDouble:
__ FloatFloor64(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
kScratchReg);
break;
case kS390_CeilDouble:
__ FloatCeiling64(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
kScratchReg, kScratchDoubleReg);
break;
case kS390_TruncateDouble:
__ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
break;
case kS390_RoundDouble:
__ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
break;
case kS390_NegDouble:
ASSEMBLE_FLOAT_UNOP(lcdbr);
break;
case kS390_Cntlz32: {
__ llgfr(i.OutputRegister(), i.InputRegister(0));
__ flogr(r0, i.OutputRegister());
__ LoadRR(i.OutputRegister(), r0);
__ SubP(i.OutputRegister(), Operand(32));
} break;
#if V8_TARGET_ARCH_S390X
case kS390_Cntlz64: {
__ flogr(r0, i.InputRegister(0));
__ LoadRR(i.OutputRegister(), r0);
} break;
#endif
case kS390_Popcnt32:
__ Popcnt32(i.OutputRegister(), i.InputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_Popcnt64:
__ Popcnt64(i.OutputRegister(), i.InputRegister(0));
break;
#endif
case kS390_Cmp32:
ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
break;
#if V8_TARGET_ARCH_S390X
case kS390_Cmp64:
ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
break;
#endif
case kS390_CmpFloat:
__ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
break;
case kS390_CmpDouble:
__ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
break;
case kS390_Tst32:
if (HasRegisterInput(instr, 1)) {
__ AndP(r0, i.InputRegister(0), i.InputRegister(1));
} else {
__ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
}
#if V8_TARGET_ARCH_S390X
// TODO(john.yan): use ltgfr here.
__ lgfr(r0, r0);
__ LoadAndTestP(r0, r0);
#endif
break;
#if V8_TARGET_ARCH_S390X
case kS390_Tst64:
if (HasRegisterInput(instr, 1)) {
__ AndP(r0, i.InputRegister(0), i.InputRegister(1));
} else {
__ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
}
break;
#endif
case kS390_Push:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ lay(sp, MemOperand(sp, -kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ Push(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
}
break;
case kS390_PushFrame: {
int num_slots = i.InputInt32(1);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp, -num_slots * kPointerSize));
} else {
__ StoreP(i.InputRegister(0),
MemOperand(sp, -num_slots * kPointerSize));
}
__ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
break;
}
case kS390_StoreToStackSlot: {
int slot = i.InputInt32(1);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp, slot * kPointerSize));
} else {
__ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
}
break;
}
case kS390_ExtendSignWord8:
#if V8_TARGET_ARCH_S390X
__ lgbr(i.OutputRegister(), i.InputRegister(0));
#else
__ lbr(i.OutputRegister(), i.InputRegister(0));
#endif
break;
case kS390_ExtendSignWord16:
#if V8_TARGET_ARCH_S390X
__ lghr(i.OutputRegister(), i.InputRegister(0));
#else
__ lhr(i.OutputRegister(), i.InputRegister(0));
#endif
break;
#if V8_TARGET_ARCH_S390X
case kS390_ExtendSignWord32:
__ lgfr(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_Uint32ToUint64:
// Zero extend
__ llgfr(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_Int64ToInt32:
// sign extend
__ lgfr(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_Int64ToFloat32:
__ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
break;
case kS390_Int64ToDouble:
__ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
break;
case kS390_Uint64ToFloat32:
__ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
i.OutputDoubleRegister());
break;
case kS390_Uint64ToDouble:
__ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
i.OutputDoubleRegister());
break;
#endif
case kS390_Int32ToFloat32:
__ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
break;
case kS390_Int32ToDouble:
__ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
break;
case kS390_Uint32ToFloat32:
__ ConvertUnsignedIntToFloat(i.InputRegister(0),
i.OutputDoubleRegister());
break;
case kS390_Uint32ToDouble:
__ ConvertUnsignedIntToDouble(i.InputRegister(0),
i.OutputDoubleRegister());
break;
case kS390_DoubleToInt32:
case kS390_DoubleToUint32:
case kS390_DoubleToInt64: {
#if V8_TARGET_ARCH_S390X
bool check_conversion =
(opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
#endif
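      // When the instruction has a second output it receives a success flag:
      // the flag is preset to 0, the conditional branch is taken in the
      // special (out-of-range/NaN) case so the 0 survives, and otherwise the
      // flag is set to 1. The same pattern repeats in the checked
      // conversions below.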
__ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_S390X
kScratchReg,
#endif
i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_S390X
if (check_conversion) {
Label conversion_done;
__ LoadImmP(i.OutputRegister(1), Operand::Zero());
__ b(Condition(1), &conversion_done); // special case
__ LoadImmP(i.OutputRegister(1), Operand(1));
__ bind(&conversion_done);
}
#endif
break;
}
case kS390_Float32ToInt32: {
bool check_conversion = (i.OutputCount() > 1);
__ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
kScratchDoubleReg);
if (check_conversion) {
Label conversion_done;
__ LoadImmP(i.OutputRegister(1), Operand::Zero());
__ b(Condition(1), &conversion_done); // special case
__ LoadImmP(i.OutputRegister(1), Operand(1));
__ bind(&conversion_done);
}
break;
}
case kS390_Float32ToUint32: {
bool check_conversion = (i.OutputCount() > 1);
__ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
i.OutputRegister(0), kScratchDoubleReg);
if (check_conversion) {
Label conversion_done;
__ LoadImmP(i.OutputRegister(1), Operand::Zero());
__ b(Condition(1), &conversion_done); // special case
__ LoadImmP(i.OutputRegister(1), Operand(1));
__ bind(&conversion_done);
}
break;
}
#if V8_TARGET_ARCH_S390X
case kS390_Float32ToUint64: {
bool check_conversion = (i.OutputCount() > 1);
__ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
i.OutputRegister(0), kScratchDoubleReg);
if (check_conversion) {
Label conversion_done;
__ LoadImmP(i.OutputRegister(1), Operand::Zero());
__ b(Condition(1), &conversion_done); // special case
__ LoadImmP(i.OutputRegister(1), Operand(1));
__ bind(&conversion_done);
}
break;
}
#endif
case kS390_Float32ToInt64: {
#if V8_TARGET_ARCH_S390X
bool check_conversion =
(opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
#endif
__ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_S390X
kScratchReg,
#endif
i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_S390X
if (check_conversion) {
Label conversion_done;
__ LoadImmP(i.OutputRegister(1), Operand::Zero());
__ b(Condition(1), &conversion_done); // special case
__ LoadImmP(i.OutputRegister(1), Operand(1));
__ bind(&conversion_done);
}
#endif
break;
}
#if V8_TARGET_ARCH_S390X
case kS390_DoubleToUint64: {
bool check_conversion = (i.OutputCount() > 1);
__ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
i.OutputRegister(0), kScratchDoubleReg);
if (check_conversion) {
Label conversion_done;
__ LoadImmP(i.OutputRegister(1), Operand::Zero());
__ b(Condition(1), &conversion_done); // special case
__ LoadImmP(i.OutputRegister(1), Operand(1));
__ bind(&conversion_done);
}
break;
}
#endif
case kS390_DoubleToFloat32:
__ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_Float32ToDouble:
__ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_DoubleExtractLowWord32:
      // TODO(john.yan): this can cause problems when interrupted;
      // use a freg->greg move instruction instead.
__ stdy(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ LoadlW(i.OutputRegister(),
MemOperand(sp, -kDoubleSize + Register::kMantissaOffset));
break;
case kS390_DoubleExtractHighWord32:
      // TODO(john.yan): this can cause problems when interrupted;
      // use a freg->greg move instruction instead.
__ stdy(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ LoadlW(i.OutputRegister(),
MemOperand(sp, -kDoubleSize + Register::kExponentOffset));
break;
case kS390_DoubleInsertLowWord32:
__ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1));
break;
case kS390_DoubleInsertHighWord32:
__ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1));
break;
case kS390_DoubleConstruct:
      // TODO(john.yan): this can cause problems when interrupted;
      // use a greg->freg move instruction instead.
#if V8_TARGET_LITTLE_ENDIAN
__ StoreW(i.InputRegister(0), MemOperand(sp, -kDoubleSize / 2));
__ StoreW(i.InputRegister(1), MemOperand(sp, -kDoubleSize));
#else
__ StoreW(i.InputRegister(1), MemOperand(sp, -kDoubleSize / 2));
__ StoreW(i.InputRegister(0), MemOperand(sp, -kDoubleSize));
#endif
__ ldy(i.OutputDoubleRegister(), MemOperand(sp, -kDoubleSize));
break;
case kS390_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(LoadlB);
#if V8_TARGET_ARCH_S390X
__ lgbr(i.OutputRegister(), i.OutputRegister());
#else
__ lbr(i.OutputRegister(), i.OutputRegister());
#endif
break;
case kS390_BitcastFloat32ToInt32:
__ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kS390_BitcastInt32ToFloat32:
__ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_BitcastDoubleToInt64:
__ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kS390_BitcastInt64ToDouble:
__ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
break;
#endif
case kS390_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(LoadlB);
break;
case kS390_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
break;
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadW);
break;
#if V8_TARGET_ARCH_S390X
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
break;
#endif
case kS390_LoadFloat32:
ASSEMBLE_LOAD_FLOAT(LoadFloat32);
break;
case kS390_LoadDouble:
ASSEMBLE_LOAD_FLOAT(LoadDouble);
break;
case kS390_StoreWord8:
ASSEMBLE_STORE_INTEGER(StoreByte);
break;
case kS390_StoreWord16:
ASSEMBLE_STORE_INTEGER(StoreHalfWord);
break;
case kS390_StoreWord32:
ASSEMBLE_STORE_INTEGER(StoreW);
break;
#if V8_TARGET_ARCH_S390X
case kS390_StoreWord64:
ASSEMBLE_STORE_INTEGER(StoreP);
break;
#endif
case kS390_StoreFloat32:
ASSEMBLE_STORE_FLOAT32();
break;
case kS390_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
#if V8_TARGET_ARCH_S390X
__ lgbr(i.OutputRegister(), i.OutputRegister());
#else
__ lbr(i.OutputRegister(), i.OutputRegister());
#endif
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
break;
case kCheckedLoadInt16:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
break;
case kCheckedLoadUint16:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
break;
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadW);
break;
case kCheckedLoadWord64:
#if V8_TARGET_ARCH_S390X
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
#else
UNREACHABLE();
#endif
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
break;
case kCheckedStoreWord16:
ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
break;
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
break;
case kCheckedStoreWord64:
#if V8_TARGET_ARCH_S390X
ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
#else
UNREACHABLE();
#endif
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT32();
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_DOUBLE();
break;
default:
UNREACHABLE();
break;
}
} // NOLINT(readability/fn_size)
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
S390OperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
ArchOpcode op = instr->arch_opcode();
FlagsCondition condition = branch->condition;
Condition cond = FlagsConditionToCondition(condition, op);
if (op == kS390_CmpDouble) {
    // Check for unordered operands (NaN) if necessary, branching to
    // flabel/tlabel according to the result expected for unordered inputs.
if (cond == le || cond == eq || cond == lt) {
__ bunordered(flabel);
} else if (cond == gt || cond == ne || cond == ge) {
__ bunordered(tlabel);
}
}
__ b(cond, tlabel);
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
S390OperandConverter i(this, instr);
Label done;
ArchOpcode op = instr->arch_opcode();
  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
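  // Floating-point compares can set the unordered condition (NaN operands);
  // in the cases below the result register is preloaded with the value the
  // unordered case must produce, so a single bunordered branch can exit
  // early.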
// Overflow checked for add/sub only.
DCHECK((condition != kOverflow && condition != kNotOverflow) ||
(op == kS390_AddWithOverflow32 || op == kS390_SubWithOverflow32));
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cond = FlagsConditionToCondition(condition, op);
switch (cond) {
case ne:
case ge:
case gt:
if (check_unordered) {
__ LoadImmP(reg, Operand(1));
__ LoadImmP(kScratchReg, Operand::Zero());
__ bunordered(&done);
Label cond_true;
__ b(cond, &cond_true, Label::kNear);
__ LoadRR(reg, kScratchReg);
__ bind(&cond_true);
} else {
        Label cond_true;
__ LoadImmP(reg, Operand(1));
__ b(cond, &cond_true, Label::kNear);
__ LoadImmP(reg, Operand::Zero());
__ bind(&cond_true);
}
break;
case eq:
case lt:
case le:
if (check_unordered) {
__ LoadImmP(reg, Operand::Zero());
__ LoadImmP(kScratchReg, Operand(1));
__ bunordered(&done);
Label cond_false;
__ b(NegateCondition(cond), &cond_false, Label::kNear);
__ LoadRR(reg, kScratchReg);
__ bind(&cond_false);
} else {
__ LoadImmP(reg, Operand::Zero());
Label cond_false;
__ b(NegateCondition(cond), &cond_false, Label::kNear);
__ LoadImmP(reg, Operand(1));
__ bind(&cond_false);
}
break;
default:
UNREACHABLE();
break;
}
__ bind(&done);
}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
S390OperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ CmpP(input, Operand(i.InputInt32(index + 0)));
__ beq(GetLabel(i.InputRpo(index + 1)));
}
AssembleArchJump(i.InputRpo(1));
}
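// Emits a jump-table dispatch: bounds-check the input against the case
// count, compute the table entry address (larl plus the input scaled by
// the pointer size), load the target label address, and jump to it.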
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
S390OperandConverter i(this, instr);
Register input = i.InputRegister(0);
int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
Label** cases = zone()->NewArray<Label*>(case_count);
for (int32_t index = 0; index < case_count; ++index) {
cases[index] = GetLabel(i.InputRpo(index + 2));
}
Label* const table = AddJumpTable(cases, case_count);
__ CmpLogicalP(input, Operand(case_count));
__ bge(GetLabel(i.InputRpo(1)));
__ larl(kScratchReg, table);
__ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
__ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
__ Jump(kScratchReg);
}
void CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->IsCFunctionCall()) {
__ Push(r14, fp);
__ LoadRR(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue(), ip);
} else if (frame()->needs_frame()) {
if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
// TODO(mbrandy): Restrict only to the wasm wrapper case.
__ StubPrologue();
} else {
__ StubPrologue(ip);
}
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
if (double_saves != 0) {
stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
}
if (stack_shrink_slots > 0) {
__ lay(sp, MemOperand(sp, -stack_shrink_slots * kPointerSize));
}
// Save callee-saved Double registers.
if (double_saves != 0) {
__ MultiPushDoubles(double_saves);
DCHECK(kNumCalleeSavedDoubles ==
base::bits::CountPopulation32(double_saves));
frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
(kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
const int num_saves =
kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
DCHECK(num_saves == base::bits::CountPopulation32(saves));
frame()->AllocateSavedCalleeRegisterSlots(num_saves);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore double registers.
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
if (double_saves != 0) {
__ MultiPopDoubles(double_saves);
}
if (descriptor->IsCFunctionCall()) {
__ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
} else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
return;
} else {
__ bind(&return_label_);
__ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
}
} else {
__ Drop(pop_count);
}
__ Ret();
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
S390OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
__ Move(g.ToRegister(destination), src);
} else {
__ StoreP(src, g.ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsRegister()) {
__ LoadP(g.ToRegister(destination), src);
} else {
Register temp = kScratchReg;
__ LoadP(temp, src, r0);
__ StoreP(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst =
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
__ mov(dst, Operand(src.ToInt32()));
break;
case Constant::kInt64:
__ mov(dst, Operand(src.ToInt64()));
break;
case Constant::kFloat32:
__ Move(dst,
isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break;
case Constant::kFloat64:
__ Move(dst,
isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
int offset;
if (IsMaterializableFromFrame(src_object, &offset)) {
__ LoadP(dst, MemOperand(fp, offset));
} else if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
__ Move(dst, src_object);
}
break;
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
break;
}
if (destination->IsStackSlot()) {
__ StoreP(dst, g.ToMemOperand(destination), r0);
}
} else {
DoubleRegister dst = destination->IsDoubleRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
: src.ToFloat64();
if (src.type() == Constant::kFloat32) {
__ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
} else {
__ LoadDoubleLiteral(dst, value, kScratchReg);
}
if (destination->IsDoubleStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsDoubleRegister()) {
DoubleRegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsDoubleStackSlot());
__ StoreDouble(src, g.ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ LoadDouble(g.ToDoubleRegister(destination), src);
} else {
DoubleRegister temp = kScratchDoubleReg;
__ LoadDouble(temp, src);
__ StoreDouble(temp, g.ToMemOperand(destination));
}
} else {
UNREACHABLE();
}
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
S390OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
// Register-register.
Register temp = kScratchReg;
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ LoadRR(temp, src);
__ LoadRR(src, dst);
__ LoadRR(dst, temp);
} else {
DCHECK(destination->IsStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ LoadRR(temp, src);
__ LoadP(src, dst);
__ StoreP(temp, dst);
}
#if V8_TARGET_ARCH_S390X
} else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
#else
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
#endif
Register temp_0 = kScratchReg;
Register temp_1 = r0;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
__ LoadP(temp_0, src);
__ LoadP(temp_1, dst);
__ StoreP(temp_0, dst);
__ StoreP(temp_1, src);
} else if (source->IsDoubleRegister()) {
DoubleRegister temp = kScratchDoubleReg;
DoubleRegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ ldr(temp, src);
__ ldr(src, dst);
__ ldr(dst, temp);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ ldr(temp, src);
__ LoadDouble(src, dst);
__ StoreDouble(temp, dst);
}
#if !V8_TARGET_ARCH_S390X
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleStackSlot());
DoubleRegister temp_0 = kScratchDoubleReg;
DoubleRegister temp_1 = d0;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
// TODO(joransiu): MVC opportunity
__ LoadDouble(temp_0, src);
__ LoadDouble(temp_1, dst);
__ StoreDouble(temp_0, dst);
__ StoreDouble(temp_1, src);
#endif
} else {
// No other combinations are possible.
UNREACHABLE();
}
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ emit_label_addr(targets[index]);
}
}
void CodeGenerator::AddNopForSmiCodeInlining() {
// We do not insert nops for inlined Smi code.
}
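// Note on the padding below: s390 nops are 2 bytes, so the loop widens the
// gap since the last lazy bailout in 2-byte steps until at least
// Deoptimizer::patch_size() bytes are available for patching.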
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
}
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % 2);
while (padding_size > 0) {
__ nop();
padding_size -= 2;
}
}
}
#undef __
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
#define V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
namespace v8 {
namespace internal {
namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(S390_And) \
V(S390_AndComplement) \
V(S390_Or) \
V(S390_OrComplement) \
V(S390_Xor) \
V(S390_ShiftLeft32) \
V(S390_ShiftLeft64) \
V(S390_ShiftRight32) \
V(S390_ShiftRight64) \
V(S390_ShiftRightAlg32) \
V(S390_ShiftRightAlg64) \
V(S390_RotRight32) \
V(S390_RotRight64) \
V(S390_Not) \
V(S390_RotLeftAndMask32) \
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \
V(S390_Add) \
V(S390_AddWithOverflow32) \
V(S390_AddFloat) \
V(S390_AddDouble) \
V(S390_Sub) \
V(S390_SubWithOverflow32) \
V(S390_SubFloat) \
V(S390_SubDouble) \
V(S390_Mul32) \
V(S390_Mul64) \
V(S390_MulHigh32) \
V(S390_MulHighU32) \
V(S390_MulFloat) \
V(S390_MulDouble) \
V(S390_Div32) \
V(S390_Div64) \
V(S390_DivU32) \
V(S390_DivU64) \
V(S390_DivFloat) \
V(S390_DivDouble) \
V(S390_Mod32) \
V(S390_Mod64) \
V(S390_ModU32) \
V(S390_ModU64) \
V(S390_ModDouble) \
V(S390_Neg) \
V(S390_NegDouble) \
V(S390_SqrtFloat) \
V(S390_FloorFloat) \
V(S390_CeilFloat) \
V(S390_TruncateFloat) \
V(S390_AbsFloat) \
V(S390_SqrtDouble) \
V(S390_FloorDouble) \
V(S390_CeilDouble) \
V(S390_TruncateDouble) \
V(S390_RoundDouble) \
V(S390_MaxDouble) \
V(S390_MinDouble) \
V(S390_AbsDouble) \
V(S390_Cntlz32) \
V(S390_Cntlz64) \
V(S390_Popcnt32) \
V(S390_Popcnt64) \
V(S390_Cmp32) \
V(S390_Cmp64) \
V(S390_CmpFloat) \
V(S390_CmpDouble) \
V(S390_Tst32) \
V(S390_Tst64) \
V(S390_Push) \
V(S390_PushFrame) \
V(S390_StoreToStackSlot) \
V(S390_ExtendSignWord8) \
V(S390_ExtendSignWord16) \
V(S390_ExtendSignWord32) \
V(S390_Uint32ToUint64) \
V(S390_Int64ToInt32) \
V(S390_Int64ToFloat32) \
V(S390_Int64ToDouble) \
V(S390_Uint64ToFloat32) \
V(S390_Uint64ToDouble) \
V(S390_Int32ToFloat32) \
V(S390_Int32ToDouble) \
V(S390_Uint32ToFloat32) \
V(S390_Uint32ToDouble) \
V(S390_Float32ToInt64) \
V(S390_Float32ToUint64) \
V(S390_Float32ToInt32) \
V(S390_Float32ToUint32) \
V(S390_Float32ToDouble) \
V(S390_DoubleToInt32) \
V(S390_DoubleToUint32) \
V(S390_DoubleToInt64) \
V(S390_DoubleToUint64) \
V(S390_DoubleToFloat32) \
V(S390_DoubleExtractLowWord32) \
V(S390_DoubleExtractHighWord32) \
V(S390_DoubleInsertLowWord32) \
V(S390_DoubleInsertHighWord32) \
V(S390_DoubleConstruct) \
V(S390_BitcastInt32ToFloat32) \
V(S390_BitcastFloat32ToInt32) \
V(S390_BitcastInt64ToDouble) \
V(S390_BitcastDoubleToInt64) \
V(S390_LoadWordS8) \
V(S390_LoadWordU8) \
V(S390_LoadWordS16) \
V(S390_LoadWordU16) \
V(S390_LoadWordS32) \
V(S390_LoadWord64) \
V(S390_LoadFloat32) \
V(S390_LoadDouble) \
V(S390_StoreWord8) \
V(S390_StoreWord16) \
V(S390_StoreWord32) \
V(S390_StoreWord64) \
V(S390_StoreFloat32) \
V(S390_StoreDouble)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
V(MRI) /* [%r0 + K] */ \
V(MRR) /* [%r0 + %r1] */
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
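// Classification sketch for the switch below: pure computations return
// kNoOpcodeFlags and may be reordered freely, loads return kIsLoadOperation
// so they are not hoisted across side-effecting instructions, and stores
// and stack operations return kHasSideEffect to preserve their ordering.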
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kS390_And:
case kS390_AndComplement:
case kS390_Or:
case kS390_OrComplement:
case kS390_Xor:
case kS390_ShiftLeft32:
case kS390_ShiftLeft64:
case kS390_ShiftRight32:
case kS390_ShiftRight64:
case kS390_ShiftRightAlg32:
case kS390_ShiftRightAlg64:
case kS390_RotRight32:
case kS390_RotRight64:
case kS390_Not:
case kS390_RotLeftAndMask32:
case kS390_RotLeftAndClear64:
case kS390_RotLeftAndClearLeft64:
case kS390_RotLeftAndClearRight64:
case kS390_Add:
case kS390_AddWithOverflow32:
case kS390_AddFloat:
case kS390_AddDouble:
case kS390_Sub:
case kS390_SubWithOverflow32:
case kS390_SubFloat:
case kS390_SubDouble:
case kS390_Mul32:
case kS390_Mul64:
case kS390_MulHigh32:
case kS390_MulHighU32:
case kS390_MulFloat:
case kS390_MulDouble:
case kS390_Div32:
case kS390_Div64:
case kS390_DivU32:
case kS390_DivU64:
case kS390_DivFloat:
case kS390_DivDouble:
case kS390_Mod32:
case kS390_Mod64:
case kS390_ModU32:
case kS390_ModU64:
case kS390_ModDouble:
case kS390_Neg:
case kS390_NegDouble:
case kS390_SqrtFloat:
case kS390_FloorFloat:
case kS390_CeilFloat:
case kS390_TruncateFloat:
case kS390_AbsFloat:
case kS390_SqrtDouble:
case kS390_FloorDouble:
case kS390_CeilDouble:
case kS390_TruncateDouble:
case kS390_RoundDouble:
case kS390_MaxDouble:
case kS390_MinDouble:
case kS390_AbsDouble:
case kS390_Cntlz32:
case kS390_Cntlz64:
case kS390_Popcnt32:
case kS390_Popcnt64:
case kS390_Cmp32:
case kS390_Cmp64:
case kS390_CmpFloat:
case kS390_CmpDouble:
case kS390_Tst32:
case kS390_Tst64:
case kS390_ExtendSignWord8:
case kS390_ExtendSignWord16:
case kS390_ExtendSignWord32:
case kS390_Uint32ToUint64:
case kS390_Int64ToInt32:
case kS390_Int64ToFloat32:
case kS390_Int64ToDouble:
case kS390_Uint64ToFloat32:
case kS390_Uint64ToDouble:
case kS390_Int32ToFloat32:
case kS390_Int32ToDouble:
case kS390_Uint32ToFloat32:
case kS390_Uint32ToDouble:
case kS390_Float32ToInt32:
case kS390_Float32ToUint32:
case kS390_Float32ToUint64:
case kS390_Float32ToDouble:
case kS390_DoubleToInt32:
case kS390_DoubleToUint32:
case kS390_Float32ToInt64:
case kS390_DoubleToInt64:
case kS390_DoubleToUint64:
case kS390_DoubleToFloat32:
case kS390_DoubleExtractLowWord32:
case kS390_DoubleExtractHighWord32:
case kS390_DoubleInsertLowWord32:
case kS390_DoubleInsertHighWord32:
case kS390_DoubleConstruct:
case kS390_BitcastInt32ToFloat32:
case kS390_BitcastFloat32ToInt32:
case kS390_BitcastInt64ToDouble:
case kS390_BitcastDoubleToInt64:
return kNoOpcodeFlags;
case kS390_LoadWordS8:
case kS390_LoadWordU8:
case kS390_LoadWordS16:
case kS390_LoadWordU16:
case kS390_LoadWordS32:
case kS390_LoadWord64:
case kS390_LoadFloat32:
case kS390_LoadDouble:
return kIsLoadOperation;
case kS390_StoreWord8:
case kS390_StoreWord16:
case kS390_StoreWord32:
case kS390_StoreWord64:
case kS390_StoreFloat32:
case kS390_StoreDouble:
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
return kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture independent code.
UNREACHABLE();
}
UNREACHABLE();
return kNoOpcodeFlags;
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// TODO(all): Add instruction cost modeling.
return 1;
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/s390/frames-s390.h"
namespace v8 {
namespace internal {
namespace compiler {
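// Immediate-operand constraints used during selection. For example
// (illustrative), kInt16Imm_4ByteAligned accepts a signed 16-bit value whose
// low two bits are clear, matching displacement forms that require 4-byte
// alignment, while kNoImmediate always forces a register operand.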
enum ImmediateMode {
kInt16Imm,
kInt16Imm_Unsigned,
kInt16Imm_Negate,
kInt16Imm_4ByteAligned,
kShift32Imm,
kShift64Imm,
kNoImmediate
};
// Adds S390-specific methods for generating operands.
class S390OperandGenerator final : public OperandGenerator {
public:
explicit S390OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
if (CanBeImmediate(node, mode)) {
return UseImmediate(node);
}
return UseRegister(node);
}
bool CanBeImmediate(Node* node, ImmediateMode mode) {
int64_t value;
if (node->opcode() == IrOpcode::kInt32Constant)
value = OpParameter<int32_t>(node);
else if (node->opcode() == IrOpcode::kInt64Constant)
value = OpParameter<int64_t>(node);
else
return false;
return CanBeImmediate(value, mode);
}
bool CanBeImmediate(int64_t value, ImmediateMode mode) {
switch (mode) {
case kInt16Imm:
return is_int16(value);
case kInt16Imm_Unsigned:
return is_uint16(value);
case kInt16Imm_Negate:
return is_int16(-value);
case kInt16Imm_4ByteAligned:
return is_int16(value) && !(value & 3);
case kShift32Imm:
return 0 <= value && value < 32;
case kShift64Imm:
return 0 <= value && value < 64;
case kNoImmediate:
return false;
}
return false;
}
};
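// Typical use (illustrative): g.UseOperand(node->InputAt(1), kInt16Imm)
// yields an immediate operand when the input is a constant in
// [-32768, 32767] and falls back to a register operand otherwise.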
namespace {
void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
S390OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
S390OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
}
void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
ImmediateMode operand_mode) {
S390OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseOperand(node->InputAt(1), operand_mode));
}
#if V8_TARGET_ARCH_S390X
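// Emits a truncation with an optional second output: if the node has a
// Projection(1) user, that projection is defined as the "success" register
// consumed by the TryTruncate* machine operators.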
void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
S390OperandGenerator g(selector);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
InstructionOperand outputs[2];
size_t output_count = 0;
outputs[output_count++] = g.DefineAsRegister(node);
Node* success_output = NodeProperties::FindProjection(node, 1);
if (success_output) {
outputs[output_count++] = g.DefineAsRegister(success_output);
}
selector->Emit(opcode, output_count, outputs, 1, inputs);
}
#endif
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
S390OperandGenerator g(selector);
Matcher m(node);
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
outputs[output_count++] = g.DefineAsRegister(node);
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
DCHECK_NE(0u, input_count);
DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
}
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
ImmediateMode operand_mode) {
FlagsContinuation cont;
VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
ArchOpcode opcode = kArchNop;
ImmediateMode mode = kInt16Imm;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kS390_LoadFloat32;
break;
case MachineRepresentation::kFloat64:
opcode = kS390_LoadDouble;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
break;
#if !V8_TARGET_ARCH_S390X
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kS390_LoadWordS32;
#if V8_TARGET_ARCH_S390X
      // TODO(john.yan): Remove this mode since s390 does not have this
      // restriction
mode = kInt16Imm_4ByteAligned;
#endif
break;
#if V8_TARGET_ARCH_S390X
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kS390_LoadWord64;
mode = kInt16Imm_4ByteAligned;
break;
#else
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(offset, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
} else if (g.CanBeImmediate(base, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MRR),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
}
}
void InstructionSelector::VisitStore(Node* node) {
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK_EQ(MachineRepresentation::kTagged, rep);
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
// OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
// for the store itself, so we must check compatibility with both.
if (g.CanBeImmediate(offset, kInt16Imm)
#if V8_TARGET_ARCH_S390X
&& g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
#endif
) {
inputs[input_count++] = g.UseImmediate(offset);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(offset);
addressing_mode = kMode_MRR;
}
inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
? g.UseRegister(value)
: g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
UNREACHABLE();
break;
case kMapWriteBarrier:
record_write_mode = RecordWriteMode::kValueIsMap;
break;
case kPointerWriteBarrier:
record_write_mode = RecordWriteMode::kValueIsPointer;
break;
case kFullWriteBarrier:
record_write_mode = RecordWriteMode::kValueIsAny;
break;
}
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode = kArchNop;
ImmediateMode mode = kInt16Imm;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kS390_StoreFloat32;
break;
case MachineRepresentation::kFloat64:
opcode = kS390_StoreDouble;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = kS390_StoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kS390_StoreWord16;
break;
#if !V8_TARGET_ARCH_S390X
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kS390_StoreWord32;
break;
#if V8_TARGET_ARCH_S390X
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kS390_StoreWord64;
mode = kInt16Imm_4ByteAligned;
break;
#else
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(offset, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
} else if (g.CanBeImmediate(base, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
}
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
S390OperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
#if V8_TARGET_ARCH_S390X
case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
#endif
case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#if !V8_TARGET_ARCH_S390X
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
AddressingMode addressingMode = kMode_MRR;
Emit(opcode | AddressingModeField::encode(addressingMode),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
g.UseOperand(length, kInt16Imm_Unsigned));
}
void InstructionSelector::VisitCheckedStore(Node* node) {
MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
S390OperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
#if V8_TARGET_ARCH_S390X
case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
#endif
case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#if !V8_TARGET_ARCH_S390X
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
AddressingMode addressingMode = kMode_MRR;
Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
g.UseRegister(base), g.UseRegister(offset),
g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
}
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
bool right_can_cover, ImmediateMode imm_mode) {
S390OperandGenerator g(selector);
// Map instruction to equivalent operation with inverted right input.
ArchOpcode inv_opcode = opcode;
switch (opcode) {
case kS390_And:
inv_opcode = kS390_AndComplement;
break;
case kS390_Or:
inv_opcode = kS390_OrComplement;
break;
default:
UNREACHABLE();
}
// Select Logical(y, ~x) for Logical(Xor(x, -1), y).
if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
Matcher mleft(m->left().node());
if (mleft.right().Is(-1)) {
selector->Emit(inv_opcode, g.DefineAsRegister(node),
g.UseRegister(m->right().node()),
g.UseRegister(mleft.left().node()));
return;
}
}
// Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
right_can_cover) {
Matcher mright(m->right().node());
if (mright.right().Is(-1)) {
// TODO(all): support shifted operand on right.
selector->Emit(inv_opcode, g.DefineAsRegister(node),
g.UseRegister(m->left().node()),
g.UseRegister(mright.left().node()));
return;
}
}
VisitBinop<Matcher>(selector, node, opcode, imm_mode);
}
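// Worked example (illustrative): for value == 0x00FF0000 the set bits span
// positions 16..23, so the check below succeeds with *mb == 23 and
// *me == 16; a value with a gap, such as 0x00FF00FF, fails the
// msb + width + lsb == 32 test and is rejected.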
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation32(value);
int mask_msb = base::bits::CountLeadingZeros32(value);
int mask_lsb = base::bits::CountTrailingZeros32(value);
if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
return false;
*mb = mask_lsb + mask_width - 1;
*me = mask_lsb;
return true;
}
#if V8_TARGET_ARCH_S390X
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation64(value);
int mask_msb = base::bits::CountLeadingZeros64(value);
int mask_lsb = base::bits::CountTrailingZeros64(value);
if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
return false;
*mb = mask_lsb + mask_width - 1;
*me = mask_lsb;
return true;
}
#endif
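// Example (illustrative): (x >> 8) & 0xFF is combined below into a single
// kS390_RotLeftAndMask32 with sh == 24 (a left rotation by 24 equals a
// right rotation by 8), mb == 7 and me == 0, replacing a shift plus an AND.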
void InstructionSelector::VisitWord32And(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
int mb = 0;
int me = 0;
if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
CanCover(node, left)) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 31)) {
left = mleft.left().node();
sh = mleft.right().Value();
if (m.left().IsWord32Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
sh = (32 - sh) & 0x1f;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
}
}
}
if (mb >= me) {
Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
g.TempImmediate(me));
return;
}
}
VisitLogical<Int32BinopMatcher>(
this, node, &m, kS390_And, CanCover(node, m.left().node()),
CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64And(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
int mb = 0;
int me = 0;
if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
CanCover(node, left)) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 63)) {
left = mleft.left().node();
sh = mleft.right().Value();
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
sh = (64 - sh) & 0x3f;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
}
}
}
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
int mask;
if (me == 0) {
match = true;
opcode = kS390_RotLeftAndClearLeft64;
mask = mb;
} else if (mb == 63) {
match = true;
opcode = kS390_RotLeftAndClearRight64;
mask = me;
} else if (sh && me <= sh && m.left().IsWord64Shl()) {
match = true;
opcode = kS390_RotLeftAndClear64;
mask = mb;
}
if (match) {
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
g.TempImmediate(sh), g.TempImmediate(mask));
return;
}
}
}
VisitLogical<Int64BinopMatcher>(
this, node, &m, kS390_And, CanCover(node, m.left().node()),
CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif
void InstructionSelector::VisitWord32Or(Node* node) {
Int32BinopMatcher m(node);
VisitLogical<Int32BinopMatcher>(
this, node, &m, kS390_Or, CanCover(node, m.left().node()),
CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Or(Node* node) {
Int64BinopMatcher m(node);
VisitLogical<Int64BinopMatcher>(
this, node, &m, kS390_Or, CanCover(node, m.left().node()),
CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif
void InstructionSelector::VisitWord32Xor(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().Is(-1)) {
Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
}
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Xor(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.right().Is(-1)) {
Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
}
}
#endif
void InstructionSelector::VisitWord32Shl(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
Int32BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
g.TempImmediate(mb), g.TempImmediate(me));
return;
}
}
}
VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Shl(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
// TODO(mbrandy): eliminate left sign extension if right >= 32
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
Int64BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
int mask;
if (me == 0) {
match = true;
opcode = kS390_RotLeftAndClearLeft64;
mask = mb;
} else if (mb == 63) {
match = true;
opcode = kS390_RotLeftAndClearRight64;
mask = me;
} else if (sh && me <= sh) {
match = true;
opcode = kS390_RotLeftAndClear64;
mask = mb;
}
if (match) {
Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
g.TempImmediate(mask));
return;
}
}
}
}
VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
}
#endif
void InstructionSelector::VisitWord32Shr(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
Int32BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
sh = (32 - sh) & 0x1f;
if (mb >= me) {
Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
g.TempImmediate(mb), g.TempImmediate(me));
return;
}
}
}
VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Shr(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
Int64BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
sh = (64 - sh) & 0x3f;
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
int mask;
if (me == 0) {
match = true;
opcode = kS390_RotLeftAndClearLeft64;
mask = mb;
} else if (mb == 63) {
match = true;
opcode = kS390_RotLeftAndClearRight64;
mask = me;
}
if (match) {
Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
g.TempImmediate(mask));
return;
}
}
}
}
VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
}
#endif
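// Example (illustrative): (x << 16) >> 16 with an arithmetic right shift is
// recognized below and selected as a single kS390_ExtendSignWord16.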
void InstructionSelector::VisitWord32Sar(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
// Replace with sign extension for (x << K) >> K where K is 16 or 24.
if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
return;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
return;
}
}
VisitRRO(this, kS390_ShiftRightAlg32, node, kShift32Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Sar(Node* node) {
VisitRRO(this, kS390_ShiftRightAlg64, node, kShift64Imm);
}
#endif
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kS390_RotRight32, node, kShift32Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
}
#endif
void InstructionSelector::VisitWord32Clz(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_Cntlz32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Clz(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_Cntlz64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
#endif
void InstructionSelector::VisitWord32Popcnt(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_Popcnt32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Popcnt(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_Popcnt64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
#endif
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kS390_Add, kInt16Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm);
}
#endif
void InstructionSelector::VisitInt32Sub(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().Is(0)) {
Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
} else {
VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
}
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Sub(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().Is(0)) {
Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
} else {
VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
}
}
#endif
void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kS390_Mul32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Mul(Node* node) {
VisitRRR(this, kS390_Mul64, node);
}
#endif
void InstructionSelector::VisitInt32MulHigh(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_MulHigh32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_MulHighU32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitInt32Div(Node* node) {
VisitRRR(this, kS390_Div32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Div(Node* node) {
VisitRRR(this, kS390_Div64, node);
}
#endif
void InstructionSelector::VisitUint32Div(Node* node) {
VisitRRR(this, kS390_DivU32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitUint64Div(Node* node) {
VisitRRR(this, kS390_DivU64, node);
}
#endif
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitRRR(this, kS390_Mod32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Mod(Node* node) {
VisitRRR(this, kS390_Mod64, node);
}
#endif
void InstructionSelector::VisitUint32Mod(Node* node) {
VisitRRR(this, kS390_ModU32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitUint64Mod(Node* node) {
VisitRRR(this, kS390_ModU64, node);
}
#endif
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
VisitRR(this, kS390_Float32ToDouble, node);
}
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
VisitRR(this, kS390_Int32ToFloat32, node);
}
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
VisitRR(this, kS390_Uint32ToFloat32, node);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kS390_Int32ToDouble, node);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
VisitRR(this, kS390_Uint32ToDouble, node);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
VisitRR(this, kS390_DoubleToInt32, node);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kS390_DoubleToUint32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
}
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
}
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
}
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kS390_ExtendSignWord32, node);
}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kS390_Uint32ToUint64, node);
}
#endif
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kS390_DoubleToFloat32, node);
}
void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
switch (TruncationModeOf(node->op())) {
case TruncationMode::kJavaScript:
return VisitRR(this, kArchTruncateDoubleToI, node);
case TruncationMode::kRoundToZero:
return VisitRR(this, kS390_DoubleToInt32, node);
}
UNREACHABLE();
}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
VisitRR(this, kS390_Float32ToInt32, node);
}
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
VisitRR(this, kS390_Float32ToUint32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kS390_Int64ToInt32, node);
}
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
VisitRR(this, kS390_Int64ToFloat32, node);
}
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
VisitRR(this, kS390_Int64ToDouble, node);
}
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
VisitRR(this, kS390_Uint64ToFloat32, node);
}
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
VisitRR(this, kS390_Uint64ToDouble, node);
}
#endif
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kS390_BitcastFloat32ToInt32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
VisitRR(this, kS390_BitcastDoubleToInt64, node);
}
#endif
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
VisitRR(this, kS390_BitcastInt32ToFloat32, node);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
VisitRR(this, kS390_BitcastInt64ToDouble, node);
}
#endif
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kS390_AddFloat, node);
}
void InstructionSelector::VisitFloat64Add(Node* node) {
// TODO(mbrandy): detect multiply-add
VisitRRR(this, kS390_AddDouble, node);
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
S390OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
Emit(kS390_NegDouble, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
}
VisitRRR(this, kS390_SubFloat, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
// TODO(mbrandy): detect multiply-subtract
S390OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
if (m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
// -floor(-x) = ceil(x)
Emit(kS390_CeilDouble, g.DefineAsRegister(node),
g.UseRegister(mright0.right().node()));
return;
}
}
}
Emit(kS390_NegDouble, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
}
VisitRRR(this, kS390_SubDouble, node);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kS390_MulFloat, node);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
// TODO(mbrandy): detect negate
VisitRRR(this, kS390_MulDouble, node);
}
void InstructionSelector::VisitFloat32Div(Node* node) {
VisitRRR(this, kS390_DivFloat, node);
}
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kS390_DivDouble, node);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
->MarkAsCall();
}
void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitRR(this, kS390_AbsFloat, node);
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kS390_AbsDouble, node);
}
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kS390_SqrtFloat, node);
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRR(this, kS390_SqrtDouble, node);
}
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
VisitRR(this, kS390_FloorFloat, node);
}
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kS390_FloorDouble, node);
}
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
VisitRR(this, kS390_CeilFloat, node);
}
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
VisitRR(this, kS390_CeilDouble, node);
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
VisitRR(this, kS390_TruncateFloat, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kS390_TruncateDouble, node);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
VisitRR(this, kS390_RoundDouble, node);
}
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
UNREACHABLE();
}
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
UNREACHABLE();
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32,
kInt16Imm, &cont);
}
FlagsContinuation cont;
VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32, kInt16Imm,
&cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
kInt16Imm_Negate, &cont);
}
FlagsContinuation cont;
VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
kInt16Imm_Negate, &cont);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm,
&cont);
}
FlagsContinuation cont;
VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm, &cont);
}
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub,
kInt16Imm_Negate, &cont);
}
FlagsContinuation cont;
VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate, &cont);
}
#endif
static bool CompareLogical(FlagsContinuation* cont) {
switch (cont->condition()) {
case kUnsignedLessThan:
case kUnsignedGreaterThanOrEqual:
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan:
return true;
default:
return false;
}
UNREACHABLE();
return false;
}
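// The word-compare helpers below use CompareLogical() to pick the immediate
// mode: unsigned conditions match kInt16Imm_Unsigned, signed ones kInt16Imm.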
namespace {
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
S390OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
}
}
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
bool commutative, ImmediateMode immediate_mode) {
S390OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, immediate_mode)) {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
cont);
} else if (g.CanBeImmediate(left, immediate_mode)) {
if (!commutative) cont->Commute();
VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
cont);
}
}
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
}
#if V8_TARGET_ARCH_S390X
void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
}
#endif
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
S390OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
VisitCompare(selector, kS390_CmpFloat, g.UseRegister(left),
g.UseRegister(right), cont);
}
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
S390OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
VisitCompare(selector, kS390_CmpDouble, g.UseRegister(left),
g.UseRegister(right), cont);
}
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, InstructionCode opcode,
FlagsContinuation* cont) {
while (selector->CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
// Combine with comparisons against 0 by simply inverting the
// continuation.
Int32BinopMatcher m(value);
if (m.right().Is(0)) {
user = value;
value = m.left().node();
cont->Negate();
continue;
}
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord32Compare(selector, value, cont);
}
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kWord64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord64Compare(selector, value, cont);
#endif
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
if (ProjectionIndexOf(value->op()) == 1u) {
        // We cannot combine the <Operation>WithOverflow with this branch
        // unless the 0th projection (the use of the actual value of the
        // <Operation>) is either nullptr, which means there's no use of
        // the actual value, or was already defined, which means it is
        // scheduled *AFTER* this branch.
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(
selector, node, kS390_AddWithOverflow32, kInt16Imm, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(selector, node,
kS390_SubWithOverflow32,
kInt16Imm_Negate, cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Add,
kInt16Imm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Sub,
kInt16Imm_Negate, cont);
#endif
default:
break;
}
}
}
break;
case IrOpcode::kInt32Sub:
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kWord32And:
return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
kInt16Imm_Unsigned);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt32Add:
// case IrOpcode::kWord32Or:
// case IrOpcode::kWord32Xor:
// case IrOpcode::kWord32Sar:
// case IrOpcode::kWord32Shl:
// case IrOpcode::kWord32Shr:
// case IrOpcode::kWord32Ror:
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64Sub:
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kWord64And:
return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
kInt16Imm_Unsigned);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt64Add:
// case IrOpcode::kWord64Or:
// case IrOpcode::kWord64Xor:
// case IrOpcode::kWord64Sar:
// case IrOpcode::kWord64Shl:
// case IrOpcode::kWord64Shr:
// case IrOpcode::kWord64Ror:
#endif
default:
break;
}
break;
}
  // Branch could not be combined with a compare; emit a compare against 0.
S390OperandGenerator g(selector);
VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
cont);
}
void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
VisitWordCompareZero(selector, user, value, kS390_Cmp32, cont);
}
#if V8_TARGET_ARCH_S390X
void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
VisitWordCompareZero(selector, user, value, kS390_Cmp64, cont);
}
#endif
} // namespace
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
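// Cost-model sketch (illustrative): with 10 dense cases, the table variant
// costs 4 + 10 in space and 3 in (3x-weighted) time for a total of 23,
// versus 3 + 2 * 10 in space and 10 in time for the lookup chain, a total
// of 53, so a jump table is emitted.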
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
S390OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kS390_Sub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
return EmitLookupSwitch(sw, value_operand);
}
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
}
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
}
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
#endif
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
Node* node) {
S390OperandGenerator g(this);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
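    // kStackFrameExtraParamSlot steps over the slots the zSeries ABI
    // reserves at the bottom of a C frame (register save area/backchain),
    // so outgoing stack parameters land where the callee expects them.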
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot));
++slot;
}
} else {
// Push any stack arguments.
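      // The first value both allocates the entire outgoing area and stores
      // into slot 0: kS390_PushFrame takes the total slot count as its
      // immediate. Later values are plain stores into the area just
      // allocated, skipping alignment holes (null nodes).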
int num_slots = static_cast<int>(descriptor->StackParameterCount());
int slot = 0;
for (PushParameter input : (*arguments)) {
if (slot == 0) {
DCHECK(input.node());
Emit(kS390_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(num_slots));
} else {
// Skip any alignment holes in pushed nodes.
if (input.node()) {
Emit(kS390_StoreToStackSlot, g.NoOutput(),
g.UseRegister(input.node()), g.TempImmediate(slot));
}
}
++slot;
}
}
}
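// Returning false keeps tail-call targets in a register; this port does
// not encode the callee address as an immediate.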
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_DoubleExtractHighWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
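// The two insert visitors below recognize the common construction pattern
// Float64InsertLowWord32(Float64InsertHighWord32(x, hi), lo) (and its
// mirror image) and, when CanCover shows the intermediate value has no
// other uses, fuse the pair into a single kS390_DoubleConstruct that
// assembles the double from both 32-bit halves at once.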
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
S390OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
CanCover(node, left)) {
left = left->InputAt(1);
Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
g.UseRegister(right));
return;
}
Emit(kS390_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
S390OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
CanCover(node, left)) {
left = left->InputAt(1);
Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
g.UseRegister(left));
return;
}
Emit(kS390_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
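// Advertise only the optional machine operators this port implements
// directly; TurboFan lowers anything not listed here to a generic code
// sequence or a runtime call before instruction selection.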
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
return MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat64RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;
}
} // namespace compiler
} // namespace internal
} // namespace v8
...@@ -125,6 +125,24 @@ LinkageLocation stackloc(int i) {
#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
#define FP_RETURN_REGISTERS d1, d2
#elif V8_TARGET_ARCH_S390X
// ===========================================================================
// == s390x ==================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
#define GP_RETURN_REGISTERS r2
#define FP_PARAM_REGISTERS d0, d2, d4, d6
#define FP_RETURN_REGISTERS d0, d2, d4, d6
#elif V8_TARGET_ARCH_S390
// ===========================================================================
// == s390 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2
#define FP_RETURN_REGISTERS d0, d2
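// These definitions track the zSeries ELF ABI: the first five integer
// arguments arrive in r2-r6, floating-point arguments in the even FP
// registers (d0/d2 on 31-bit, d0/d2/d4/d6 on 64-bit).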
#else
// ===========================================================================
// == unknown ================================================================
...
...@@ -1560,6 +1560,10 @@
      }],
      ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
        'sources': [  ### gcmole(arch:s390) ###
'../../src/compiler/s390/code-generator-s390.cc',
'../../src/compiler/s390/instruction-codes-s390.h',
'../../src/compiler/s390/instruction-scheduler-s390.cc',
'../../src/compiler/s390/instruction-selector-s390.cc',
          '../../src/debug/s390/debug-s390.cc',
          '../../src/ic/s390/access-compiler-s390.cc',
          '../../src/ic/s390/handler-compiler-s390.cc',
...