Commit ae9130eb authored by Dusan Milosavljevic's avatar Dusan Milosavljevic

MIPS64: Add turbofan support for mips64.

TEST=
BUG=
R=danno@chromium.org, paul.lind@imgtec.com

Review URL: https://codereview.chromium.org/732403002

Cr-Commit-Position: refs/heads/master@{#25424}
parent f07b0f21
@@ -15,6 +15,8 @@
#include "src/compiler/ia32/instruction-codes-ia32.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/compiler/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/x64/instruction-codes-x64.h"
#else
paul.lind@imgtec.com
gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/scopes.h"
namespace v8 {
namespace internal {
namespace compiler {
#define __ masm()->
// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble
// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg) \
PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
__LINE__)
#define TRACE_UNIMPL() \
PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
__LINE__)
// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter FINAL : public InstructionOperandConverter {
public:
MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
FloatRegister OutputSingleRegister(int index = 0) {
return ToSingleRegister(instr_->OutputAt(index));
}
FloatRegister InputSingleRegister(int index) {
return ToSingleRegister(instr_->InputAt(index));
}
FloatRegister ToSingleRegister(InstructionOperand* op) {
// The Single (Float) and Double register namespaces are the same on MIPS64;
// both are typedefs of FPURegister.
return ToDoubleRegister(op);
}
Operand InputImmediate(int index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kInt64:
return Operand(constant.ToInt64());
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
case Constant::kExternalReference:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// maybe not done on arm due to const pool ??
break;
}
UNREACHABLE();
return Operand(zero_reg);
}
Operand InputOperand(int index) {
InstructionOperand* op = instr_->InputAt(index);
if (op->IsRegister()) {
return Operand(ToRegister(op));
}
return InputImmediate(index);
}
MemOperand MemoryOperand(int* first_index) {
const int index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
case kMode_MRI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
case kMode_MRR:
// TODO(plind): r6 address mode, to be implemented ...
UNREACHABLE();
}
UNREACHABLE();
return MemOperand(no_reg);
}
MemOperand MemoryOperand() {
int index = 0;
return MemoryOperand(&index);
}
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK(op != NULL);
DCHECK(!op->IsRegister());
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
// The linkage computes where all spill slots are located.
FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
static inline bool HasRegisterInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsRegister();
}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
MipsOperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
switch (ArchOpcodeField::decode(opcode)) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
}
AddSafepointAndDeopt(instr);
break;
}
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
AddSafepointAndDeopt(instr);
break;
}
case kArchJmp:
__ Branch(GetLabel(i.InputRpo(0)));
break;
case kArchNop:
// don't emit code for nops.
break;
case kArchRet:
AssembleReturn();
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kMips64Add:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dadd:
__ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Sub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dsub:
__ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64MulHigh:
__ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64MulHighU:
__ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Div:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64DivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Mod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64ModU:
__ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dmul:
__ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Ddiv:
__ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64DdivU:
__ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dmod:
__ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64DmodU:
__ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Xor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int32_t imm = i.InputOperand(1).immediate();
__ sll(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int32_t imm = i.InputOperand(1).immediate();
__ srl(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int32_t imm = i.InputOperand(1).immediate();
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
case kMips64Ext:
__ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
break;
case kMips64Dext:
__ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
break;
case kMips64Dshl:
if (instr->InputAt(1)->IsRegister()) {
__ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int32_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
__ dsll(i.OutputRegister(), i.InputRegister(0), imm);
} else {
__ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
}
}
break;
case kMips64Dshr:
if (instr->InputAt(1)->IsRegister()) {
__ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int32_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
__ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
} else {
__ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
}
}
break;
case kMips64Dsar:
if (instr->InputAt(1)->IsRegister()) {
__ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int32_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
__ dsra(i.OutputRegister(), i.InputRegister(0), imm);
} else {
__ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
}
}
break;
case kMips64Ror:
__ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dror:
__ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Tst:
case kMips64Tst32:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kMips64Cmp:
case kMips64Cmp32:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kMips64Mov:
// TODO(plind): Should we combine mov/li like this, or use separate instr?
// - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
if (HasRegisterInput(instr, 0)) {
__ mov(i.OutputRegister(), i.InputRegister(0));
} else {
__ li(i.OutputRegister(), i.InputOperand(0));
}
break;
case kMips64CmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMips64AddD:
// TODO(plind): add special case: combine mult & add.
__ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64SubD:
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64DivD:
__ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64ModD: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
case kMips64FloorD: {
__ floor_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
case kMips64CeilD: {
__ ceil_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
case kMips64RoundTruncateD: {
__ trunc_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
case kMips64SqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64CvtSD: {
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64CvtDS: {
__ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
break;
}
case kMips64CvtDW: {
FPURegister scratch = kScratchDoubleReg;
__ mtc1(i.InputRegister(0), scratch);
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
case kMips64CvtDUw: {
FPURegister scratch = kScratchDoubleReg;
__ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
break;
}
case kMips64TruncWD: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
__ trunc_w_d(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
break;
}
case kMips64TruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
// ... more basic instructions ...
case kMips64Lbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lb:
__ lb(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sb:
__ sb(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lh:
__ lh(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sh:
__ sh(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Lw:
__ lw(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ld:
__ ld(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sw:
__ sw(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Sd:
__ sd(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Lwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
case kMips64Swc1: {
int index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
}
case kMips64Ldc1:
__ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kMips64Sdc1:
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMips64Push:
__ Push(i.InputRegister(0));
break;
case kMips64StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
__ daddu(index, object, index);
__ sd(value, MemOperand(index));
SaveFPRegsMode mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
RAStatus ra_status = kRAHasNotBeenSaved;
__ RecordWrite(object, index, value, ra_status, mode);
break;
}
}
}
#define UNSUPPORTED_COND(opcode, condition) \
OFStream out(stdout); \
out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
UNIMPLEMENTED();
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
FlagsCondition condition) {
MipsOperandConverter i(this, instr);
Label done;
// Emit a branch. The true and false targets are always the last two inputs
// to the instruction.
BasicBlock::RpoNumber tblock =
i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
BasicBlock::RpoNumber fblock =
i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
bool fallthru = IsNextInAssemblyOrder(fblock);
Label* tlabel = GetLabel(tblock);
Label* flabel = fallthru ? &done : GetLabel(fblock);
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other architectures. The compare
// operations emit mips pseudo-instructions, which are handled here by branch
// instructions that do the actual comparison. It is essential that the input
// registers to the compare pseudo-op are not modified before this branch op,
// as they are tested here.
// TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
// not separated by other instructions.
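// Illustrative sketch (not emitted verbatim): for kMips64Cmp with
// kSignedLessThan, the lowering below reduces to a single
//   Branch(tlabel, lt, input0, input1)
// i.e. the compare pseudo-instruction costs no code of its own; the branch
// performs the comparison directly on the still-live input registers.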
if (instr->arch_opcode() == kMips64Tst) {
switch (condition) {
case kNotEqual:
cc = ne;
break;
case kEqual:
cc = eq;
break;
default:
UNSUPPORTED_COND(kMips64Tst, condition);
break;
}
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Tst32) {
switch (condition) {
case kNotEqual:
cc = ne;
break;
case kEqual:
cc = eq;
break;
default:
UNSUPPORTED_COND(kMips64Tst32, condition);
break;
}
// Zero-extend the registers, since on MIPS64 only the 64-bit operand
// branch and compare instructions are available. This is a disadvantage of
// performing 32-bit operations on MIPS64.
// Try to force the front-end to globally prefer the Word64 representation
// on MIPS64, even for Word32.
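// Illustrative example: a register holding 0xFFFFFFFF00000000 has a zero low
// word, so a 32-bit test must ignore the upper 32 bits. The Dext below
// clears bits 32..63 so that the 64-bit compare against zero_reg observes
// only the low word.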
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Dext(at, at, 0, 32);
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
switch (condition) {
case kOverflow:
cc = ne;
break;
case kNotOverflow:
cc = eq;
break;
default:
UNSUPPORTED_COND(kMips64Dadd, condition);
break;
}
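// Overflow check sketch: kMips64Dadd/Dsub perform the full 64-bit operation
// on sign-extended 32-bit inputs. dsra32 extracts bits 63..32 of the result
// and sra replicates bit 31; the 32-bit result overflowed iff the two
// disagree, which is what the branch below tests.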
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64Cmp) {
switch (condition) {
case kEqual:
cc = eq;
break;
case kNotEqual:
cc = ne;
break;
case kSignedLessThan:
cc = lt;
break;
case kSignedGreaterThanOrEqual:
cc = ge;
break;
case kSignedLessThanOrEqual:
cc = le;
break;
case kSignedGreaterThan:
cc = gt;
break;
case kUnsignedLessThan:
cc = lo;
break;
case kUnsignedGreaterThanOrEqual:
cc = hs;
break;
case kUnsignedLessThanOrEqual:
cc = ls;
break;
case kUnsignedGreaterThan:
cc = hi;
break;
default:
UNSUPPORTED_COND(kMips64Cmp, condition);
break;
}
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
__ bind(&done);
} else if (instr->arch_opcode() == kMips64Cmp32) {
switch (condition) {
case kEqual:
cc = eq;
break;
case kNotEqual:
cc = ne;
break;
case kSignedLessThan:
cc = lt;
break;
case kSignedGreaterThanOrEqual:
cc = ge;
break;
case kSignedLessThanOrEqual:
cc = le;
break;
case kSignedGreaterThan:
cc = gt;
break;
case kUnsignedLessThan:
cc = lo;
break;
case kUnsignedGreaterThanOrEqual:
cc = hs;
break;
case kUnsignedLessThanOrEqual:
cc = ls;
break;
case kUnsignedGreaterThan:
cc = hi;
break;
default:
UNSUPPORTED_COND(kMips64Cmp32, condition);
break;
}
switch (condition) {
case kEqual:
case kNotEqual:
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kSignedLessThanOrEqual:
case kSignedGreaterThan:
// Sign-extend the registers, since on MIPS64 only the 64-bit operand
// branch and compare instructions are available.
__ sll(i.InputRegister(0), i.InputRegister(0), 0);
if (instr->InputAt(1)->IsRegister()) {
__ sll(i.InputRegister(1), i.InputRegister(1), 0);
}
break;
case kUnsignedLessThan:
case kUnsignedGreaterThanOrEqual:
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan:
// Zero-extend the registers, since on MIPS64 only the 64-bit operand
// branch and compare instructions are available.
__ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
if (instr->InputAt(1)->IsRegister()) {
__ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
}
break;
default:
UNSUPPORTED_COND(kMips64Cmp32, condition);
break;
}
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
__ bind(&done);
} else if (instr->arch_opcode() == kMips64CmpD) {
// TODO(dusmil): Optimize unordered checks to use fewer instructions, even
// if we have to unfold the BranchF macro.
Label* nan = flabel;
switch (condition) {
case kUnorderedEqual:
cc = eq;
break;
case kUnorderedNotEqual:
cc = ne;
nan = tlabel;
break;
case kUnorderedLessThan:
cc = lt;
break;
case kUnorderedGreaterThanOrEqual:
cc = ge;
nan = tlabel;
break;
case kUnorderedLessThanOrEqual:
cc = le;
break;
case kUnorderedGreaterThan:
cc = gt;
nan = tlabel;
break;
default:
UNSUPPORTED_COND(kMips64CmpD, condition);
break;
}
__ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
__ bind(&done);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
UNIMPLEMENTED();
}
}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
MipsOperandConverter i(this, instr);
Label done;
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label false_value;
DCHECK_NE(0, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other architectures. The compare
// operations emit mips pseudo-instructions, which are checked and handled
// here. For materializations, we use the delay slot to set the result to
// true, and in the false case, where we fall through the branch, we reset
// the result to false.
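// Illustrative sequence for a materialized kEqual on kMips64Cmp:
// li(result, 1) sits in the branch delay slot, so the true path reaches
// 'done' with result == 1, while the false path falls through to
// 'false_value', where result is reset to 0.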
if (instr->arch_opcode() == kMips64Tst) {
switch (condition) {
case kNotEqual:
cc = ne;
break;
case kEqual:
cc = eq;
break;
default:
UNSUPPORTED_COND(kMips64Tst, condition);
break;
}
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64Tst32) {
switch (condition) {
case kNotEqual:
cc = ne;
break;
case kEqual:
cc = eq;
break;
default:
UNSUPPORTED_COND(kMips64Tst32, condition);
break;
}
// Zero-extend the register, since on MIPS64 only the 64-bit operand
// branch and compare instructions are available.
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Dext(at, at, 0, 32);
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
switch (condition) {
case kOverflow:
cc = ne;
break;
case kNotOverflow:
cc = eq;
break;
default:
UNSUPPORTED_COND(kMips64Dadd, condition);
break;
}
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64Cmp) {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
switch (condition) {
case kEqual:
cc = eq;
break;
case kNotEqual:
cc = ne;
break;
case kSignedLessThan:
cc = lt;
break;
case kSignedGreaterThanOrEqual:
cc = ge;
break;
case kSignedLessThanOrEqual:
cc = le;
break;
case kSignedGreaterThan:
cc = gt;
break;
case kUnsignedLessThan:
cc = lo;
break;
case kUnsignedGreaterThanOrEqual:
cc = hs;
break;
case kUnsignedLessThanOrEqual:
cc = ls;
break;
case kUnsignedGreaterThan:
cc = hi;
break;
default:
UNSUPPORTED_COND(kMips64Cmp, condition);
break;
}
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64Cmp32) {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
switch (condition) {
case kEqual:
cc = eq;
break;
case kNotEqual:
cc = ne;
break;
case kSignedLessThan:
cc = lt;
break;
case kSignedGreaterThanOrEqual:
cc = ge;
break;
case kSignedLessThanOrEqual:
cc = le;
break;
case kSignedGreaterThan:
cc = gt;
break;
case kUnsignedLessThan:
cc = lo;
break;
case kUnsignedGreaterThanOrEqual:
cc = hs;
break;
case kUnsignedLessThanOrEqual:
cc = ls;
break;
case kUnsignedGreaterThan:
cc = hi;
break;
default:
UNSUPPORTED_COND(kMips64Cmp32, condition);
break;
}
switch (condition) {
case kEqual:
case kNotEqual:
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kSignedLessThanOrEqual:
case kSignedGreaterThan:
// Sign-extend the registers, since on MIPS64 only the 64-bit operand
// branch and compare instructions are available.
__ sll(left, left, 0);
if (instr->InputAt(1)->IsRegister()) {
__ sll(i.InputRegister(1), i.InputRegister(1), 0);
}
break;
case kUnsignedLessThan:
case kUnsignedGreaterThanOrEqual:
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan:
// Zero-extend the registers, since on MIPS64 only the 64-bit operand
// branch and compare instructions are available.
__ Dext(left, left, 0, 32);
if (instr->InputAt(1)->IsRegister()) {
__ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
}
break;
default:
UNSUPPORTED_COND(kMips64Cmp32, condition);
break;
}
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64CmpD) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
// TODO(plind): Provide NaN-testing macro-asm function without need for
// BranchF.
FPURegister dummy1 = f0;
FPURegister dummy2 = f2;
switch (condition) {
case kUnorderedEqual:
// TODO(plind): improve the NaN testing throughout this function.
__ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
cc = eq;
break;
case kUnorderedNotEqual:
__ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
__ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
cc = ne;
break;
case kUnorderedLessThan:
__ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
cc = lt;
break;
case kUnorderedGreaterThanOrEqual:
__ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
__ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
cc = ge;
break;
case kUnorderedLessThanOrEqual:
__ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
cc = le;
break;
case kUnorderedGreaterThan:
__ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
__ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
cc = gt;
break;
default:
UNSUPPORTED_COND(kMips64CmpD, condition);
break;
}
__ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
__ li(result, Operand(1)); // In delay slot - branch taken returns 1.
// Fall-thru (branch not taken) returns 0.
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
TRACE_UNIMPL();
UNIMPLEMENTED();
}
// Fallthru case is the false materialization.
__ bind(&false_value);
__ li(result, Operand(static_cast<int64_t>(0)));
__ bind(&done);
}
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ Push(ra, fp);
__ mov(fp, sp);
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
// TODO(plind): make callee save size const, possibly DCHECK it.
int register_save_area_size = 0;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
if (!((1 << i) & saves)) continue;
register_save_area_size += kPointerSize;
}
frame()->SetRegisterSaveAreaSize(register_save_area_size);
__ MultiPush(saves);
}
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
// TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
// +2 for return address and saved frame pointer.
int receiver_slot = info->scope()->num_parameters() + 2;
__ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&ok, ne, a2, Operand(at));
__ ld(a2, GlobalObjectOperand());
__ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
__ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
__ bind(&ok);
}
} else {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
}
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
}
__ mov(sp, fp);
__ Pop(ra, fp);
__ Ret();
} else {
__ mov(sp, fp);
__ Pop(ra, fp);
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ DropAndRet(pop_count);
}
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
MipsOperandConverter g(this, NULL);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
__ mov(g.ToRegister(destination), src);
} else {
__ sd(src, g.ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsRegister()) {
__ ld(g.ToRegister(destination), src);
} else {
Register temp = kScratchReg;
__ ld(temp, src);
__ sd(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst =
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
__ li(dst, Operand(src.ToInt32()));
break;
case Constant::kFloat32:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break;
case Constant::kInt64:
__ li(dst, Operand(src.ToInt64()));
break;
case Constant::kFloat64:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
break;
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
break;
case Constant::kHeapObject:
__ li(dst, src.ToHeapObject());
break;
}
if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
FPURegister dst = destination->IsDoubleRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg.low();
// TODO(turbofan): Can we do better here?
__ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ mtc1(at, dst);
if (destination->IsDoubleStackSlot()) {
__ swc1(dst, g.ToMemOperand(destination));
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
DoubleRegister dst = destination->IsDoubleRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
if (destination->IsDoubleStackSlot()) {
__ sdc1(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsDoubleRegister()) {
FPURegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsDoubleStackSlot());
__ sdc1(src, g.ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(g.ToDoubleRegister(destination), src);
} else {
FPURegister temp = kScratchDoubleReg;
__ ldc1(temp, src);
__ sdc1(temp, g.ToMemOperand(destination));
}
} else {
UNREACHABLE();
}
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
MipsOperandConverter g(this, NULL);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
// Register-register.
Register temp = kScratchReg;
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
DCHECK(destination->IsStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ mov(temp, src);
__ ld(src, dst);
__ sd(temp, dst);
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
Register temp_0 = kScratchReg;
Register temp_1 = kScratchReg2;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
__ ld(temp_0, src);
__ ld(temp_1, dst);
__ sd(temp_0, dst);
__ sd(temp_1, src);
} else if (source->IsDoubleRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ ldc1(src, dst);
__ sdc1(temp, dst);
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleStackSlot());
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
// A double stack slot is a single 8-byte slot on MIPS64, so swap it with
// one 64-bit integer copy plus the FPU scratch register; the two 4-byte
// copies at offset and offset + kPointerSize were a 32-bit port leftover.
__ ldc1(temp_1, dst);  // Save destination in temp_1.
__ ld(temp_0, src);    // Then use temp_0 to copy source to destination.
__ sd(temp_0, dst);
__ sdc1(temp_1, src);
} else {
// No other combinations are possible.
UNREACHABLE();
}
}
void CodeGenerator::AddNopForSmiCodeInlining() {
// Unused on 32-bit ARM; still exists on 64-bit ARM.
// TODO(plind): Unclear when this is called now. Understand, fix if needed.
__ nop(); // Maybe PROPERTY_ACCESS_INLINED?
}
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block trampoline pool emission for the duration of the padding.
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
masm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
padding_size -= v8::internal::Assembler::kInstrSize;
}
}
}
MarkLazyDeoptSite();
}
#undef __
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
#define V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
namespace v8 {
namespace internal {
namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(Mips64Add) \
V(Mips64Dadd) \
V(Mips64Sub) \
V(Mips64Dsub) \
V(Mips64Mul) \
V(Mips64MulHigh) \
V(Mips64MulHighU) \
V(Mips64Dmul) \
V(Mips64Div) \
V(Mips64Ddiv) \
V(Mips64DivU) \
V(Mips64DdivU) \
V(Mips64Mod) \
V(Mips64Dmod) \
V(Mips64ModU) \
V(Mips64DmodU) \
V(Mips64And) \
V(Mips64Or) \
V(Mips64Xor) \
V(Mips64Shl) \
V(Mips64Shr) \
V(Mips64Sar) \
V(Mips64Ext) \
V(Mips64Dext) \
V(Mips64Dshl) \
V(Mips64Dshr) \
V(Mips64Dsar) \
V(Mips64Ror) \
V(Mips64Dror) \
V(Mips64Mov) \
V(Mips64Tst) \
V(Mips64Tst32) \
V(Mips64Cmp) \
V(Mips64Cmp32) \
V(Mips64CmpD) \
V(Mips64AddD) \
V(Mips64SubD) \
V(Mips64MulD) \
V(Mips64DivD) \
V(Mips64ModD) \
V(Mips64SqrtD) \
V(Mips64FloorD) \
V(Mips64CeilD) \
V(Mips64RoundTruncateD) \
V(Mips64CvtSD) \
V(Mips64CvtDS) \
V(Mips64TruncWD) \
V(Mips64TruncUwD) \
V(Mips64CvtDW) \
V(Mips64CvtDUw) \
V(Mips64Lb) \
V(Mips64Lbu) \
V(Mips64Sb) \
V(Mips64Lh) \
V(Mips64Lhu) \
V(Mips64Sh) \
V(Mips64Ld) \
V(Mips64Lw) \
V(Mips64Sw) \
V(Mips64Sd) \
V(Mips64Lwc1) \
V(Mips64Swc1) \
V(Mips64Ldc1) \
V(Mips64Sdc1) \
V(Mips64Push) \
V(Mips64StoreWriteBarrier)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
// TODO(plind): Add the new r6 address modes.
#define TARGET_ADDRESSING_MODE_LIST(V) \
V(MRI) /* [%r0 + K] */ \
V(MRR) /* [%r0 + %r1] */
} // namespace compiler
} // namespace internal
} // namespace v8
#endif  // V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE_UNIMPL() \
PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
// Adds MIPS64-specific methods for generating InstructionOperands.
class Mips64OperandGenerator FINAL : public OperandGenerator {
public:
explicit Mips64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
if (CanBeImmediate(node, opcode)) {
return UseImmediate(node);
}
return UseRegister(node);
}
bool CanBeImmediate(Node* node, InstructionCode opcode) {
int64_t value;
if (node->opcode() == IrOpcode::kInt32Constant)
value = OpParameter<int32_t>(node);
else if (node->opcode() == IrOpcode::kInt64Constant)
value = OpParameter<int64_t>(node);
else
return false;
switch (ArchOpcodeField::decode(opcode)) {
case kMips64Shl:
case kMips64Sar:
case kMips64Shr:
return is_uint5(value);
case kMips64Dshl:
case kMips64Dsar:
case kMips64Dshr:
return is_uint6(value);
case kMips64Xor:
return is_uint16(value);
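// Assumption (inherited from the 32-bit port): ldc1/sdc1 accesses may be
// split by the macro-assembler into two word-sized accesses at offset and
// offset + kIntSize, so the high-word offset must also fit in 16 bits.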
case kMips64Ldc1:
case kMips64Sdc1:
return is_int16(value + kIntSize);
default:
return is_int16(value);
}
}
bool CanBeImmediate(Node* node, InstructionCode opcode,
FlagsContinuation* cont) {
int64_t value;
if (node->opcode() == IrOpcode::kInt32Constant)
value = OpParameter<int32_t>(node);
else if (node->opcode() == IrOpcode::kInt64Constant)
value = OpParameter<int64_t>(node);
else
return false;
switch (ArchOpcodeField::decode(opcode)) {
case kMips64Cmp32:
switch (cont->condition()) {
case kUnsignedLessThan:
case kUnsignedGreaterThanOrEqual:
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan:
// Immediate operands for unsigned 32-bit compare operations
// should not be sign-extended.
return is_uint15(value);
default:
return false;
}
default:
return is_int16(value);
}
}
private:
bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
TRACE_UNIMPL();
return false;
}
};
static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Mips64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Mips64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
}
static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Mips64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseOperand(node->InputAt(1), opcode));
}
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
outputs[output_count++] = g.DefineAsRegister(node);
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
DCHECK_NE(0, input_count);
DCHECK_NE(0, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
if (cont->IsBranch()) instr->MarkAsControl();
}
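// A FlagsContinuation records how the comparison result is consumed: for a
// branch it appends the true/false block labels as extra inputs and the
// instruction is marked as control flow; for a boolean materialization
// (IsSet) it adds an extra output register, matching the two paths above.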
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, &cont);
}
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (rep) {
case kRepFloat32:
opcode = kMips64Lwc1;
break;
case kRepFloat64:
opcode = kMips64Ldc1;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
break;
case kRepWord16:
opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
break;
case kRepWord32:
opcode = kMips64Lw;
break;
case kRepTagged: // Fall through.
case kRepWord64:
opcode = kMips64Ld;
break;
default:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
InstructionOperand* addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void InstructionSelector::VisitStore(Node* node) {
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
MachineType rep = RepresentationOf(store_rep.machine_type());
if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
DCHECK(rep == kRepTagged);
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
Emit(kMips64StoreWriteBarrier, NULL, g.UseFixed(base, t0),
g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
return;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
ArchOpcode opcode;
switch (rep) {
case kRepFloat32:
opcode = kMips64Swc1;
break;
case kRepFloat64:
opcode = kMips64Sdc1;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = kMips64Sb;
break;
case kRepWord16:
opcode = kMips64Sh;
break;
case kRepWord32:
opcode = kMips64Sw;
break;
case kRepTagged: // Fall through.
case kRepWord64:
opcode = kMips64Sd;
break;
default:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
InstructionOperand* addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
g.TempImmediate(0), g.UseRegister(value));
}
}
void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kMips64And);
}
void InstructionSelector::VisitWord64And(Node* node) {
VisitBinop(this, node, kMips64And);
}
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kMips64Or);
}
void InstructionSelector::VisitWord64Or(Node* node) {
VisitBinop(this, node, kMips64Or);
}
void InstructionSelector::VisitWord32Xor(Node* node) {
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord64Xor(Node* node) {
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
VisitRRO(this, kMips64Shl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
VisitRRO(this, kMips64Shr, node);
}
void InstructionSelector::VisitWord32Sar(Node* node) {
VisitRRO(this, kMips64Sar, node);
}
void InstructionSelector::VisitWord64Shl(Node* node) {
VisitRRO(this, kMips64Dshl, node);
}
void InstructionSelector::VisitWord64Shr(Node* node) {
VisitRRO(this, kMips64Dshr, node);
}
void InstructionSelector::VisitWord64Sar(Node* node) {
VisitRRO(this, kMips64Dsar, node);
}
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMips64Ror, node);
}
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kMips64Dror, node);
}
void InstructionSelector::VisitInt32Add(Node* node) {
Mips64OperandGenerator g(this);
// TODO(plind): Consider multiply & add optimization from arm port.
VisitBinop(this, node, kMips64Add);
}
void InstructionSelector::VisitInt64Add(Node* node) {
Mips64OperandGenerator g(this);
// TODO(plind): Consider multiply & add optimization from arm port.
VisitBinop(this, node, kMips64Dadd);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
VisitBinop(this, node, kMips64Sub);
}
void InstructionSelector::VisitInt64Sub(Node* node) {
VisitBinop(this, node, kMips64Dsub);
}
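// Strength reduction for multiplication by a constant, mirroring the code
// below: x * 8 becomes x << 3, x * 9 becomes (x << 3) + x, and x * 7 becomes
// (x << 3) - x, trading the multiply for a shift plus at most one add/sub.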
void InstructionSelector::VisitInt32Mul(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
int32_t value = m.right().Value();
if (base::bits::IsPowerOfTwo32(value)) {
Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value)));
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
InstructionOperand* temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
Emit(kMips64Add | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
InstructionOperand* temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
return;
}
}
Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64MulHigh, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
Mips64OperandGenerator g(this);
InstructionOperand* const dmul_operand = g.TempRegister();
Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
g.TempImmediate(32));
}
void InstructionSelector::VisitInt64Mul(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasValue() && m.right().Value() > 0) {
int64_t value = m.right().Value();
if (base::bits::IsPowerOfTwo32(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value)));
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
InstructionOperand* temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
InstructionOperand* temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
return;
}
}
Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitInt32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitUint32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitInt32Mod(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitUint32Mod(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitInt64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitUint64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitInt64Mod(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitUint64Mod(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64CvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64CvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64CvtDUw, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64TruncWD, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64TruncUwD, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Mips64OperandGenerator g(this);
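// On MIPS64 the 32-bit shift instructions sign-extend their result into the
// full 64-bit register, so emitting kMips64Shl with a shift amount of zero
// (an sll by 0) is a one-instruction sign extension of the low word.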
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0));
}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64CvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64Add(Node* node) {
VisitRRR(this, kMips64AddD, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
VisitRRR(this, kMips64SubD, node);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitRRR(this, kMips64MulD, node);
}
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kMips64DivD, node);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64ModD, g.DefineAsFixed(node, f0),
g.UseFixed(node->InputAt(0), f12),
g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64Floor(Node* node) {
VisitRR(this, kMips64FloorD, node);
}
void InstructionSelector::VisitFloat64Ceil(Node* node) {
VisitRR(this, kMips64CeilD, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMips64RoundTruncateD, node);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
UNREACHABLE();
}
void InstructionSelector::VisitCall(Node* node) {
Mips64OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
FrameStateDescriptor* frame_state_descriptor = NULL;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, false);
// TODO(dcarney): might be possible to use claim/poke instead
// Push any stack arguments.
for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
input != buffer.pushed_nodes.rend(); input++) {
// TODO(plind): inefficient for MIPS, use MultiPush here.
// - Also need to align the stack. See arm64.
// - Maybe combine with arg slot stuff in DirectCEntry stub.
Emit(kMips64Push, NULL, g.UseRegister(*input));
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
Instruction* call_instr =
Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
}
namespace {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand* left, InstructionOperand* right,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
}
}
// Shared routine for multiple float compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
cont);
}
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
bool commutative) {
Mips64OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode, cont)) {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
cont);
} else if (g.CanBeImmediate(left, opcode, cont)) {
if (!commutative) cont->Commute();
VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
cont);
}
}
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
VisitWordCompare(selector, node, kMips64Cmp32, cont, false);
}
void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
} // namespace
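// Emits a comparison of |value| against zero, either branching on the result
// or materializing it into a register, depending on the continuation.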
void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
Node* value, FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
InstructionOperand* const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
}
}
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
// Initially set the comparison against 0 to the 64-bit variant, for
// branches that cannot be combined with a narrower comparison.
InstructionCode opcode = kMips64Cmp;
while (selector->CanCover(user, value)) {
if (user->opcode() == IrOpcode::kWord32Equal) {
opcode = kMips64Cmp32;
}
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
// Combine with comparisons against 0 by simply inverting the
// continuation.
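// For example, a branch on Word32Equal(x, 0) becomes a branch on x
// itself with the true and false targets exchanged.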
Int32BinopMatcher m(value);
if (m.right().Is(0)) {
user = value;
value = m.left().node();
cont->Negate();
opcode = kMips64Cmp32;
continue;
}
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord32Compare(selector, value, cont);
}
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kWord64Equal: {
// Combine with comparisons against 0 by simply inverting the
// continuation.
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
user = value;
value = m.left().node();
cont->Negate();
continue;
}
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord64Compare(selector, value, cont);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
if (OpParameter<size_t>(value) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation>) is either NULL, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch.
Node* node = value->InputAt(0);
Node* result = node->FindProjection(0);
if (result == NULL || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64Dadd, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64Dsub, cont);
default:
break;
}
}
}
break;
case IrOpcode::kWord32And:
return VisitWordCompare(selector, value, kMips64Tst32, cont, true);
case IrOpcode::kWord64And:
return VisitWordCompare(selector, value, kMips64Tst, cont, true);
default:
break;
}
break;
}
// Continuation could not be combined with a compare, emit compare against 0.
EmitWordCompareZero(selector, opcode, value, cont);
}
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
// If we can fall through to the true block, invert the branch.
if (IsNextInAssemblyOrder(tbranch)) {
cont.Negate();
cont.SwapBlocks();
}
VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
}
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont(kSignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont(kUnsignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = node->FindProjection(1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dadd, &cont);
}
FlagsContinuation cont;
VisitBinop(this, node, kMips64Dadd, &cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = node->FindProjection(1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dsub, &cont);
}
FlagsContinuation cont;
VisitBinop(this, node, kMips64Dsub, &cont);
}
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
}
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont(kSignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont(kUnorderedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
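// No optional machine operators are supported on MIPS64 yet.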
return MachineOperatorBuilder::kNoFlags;
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
#include "src/compiler/linkage-impl.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
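// Linkage traits for MIPS64: values are returned in v0/v1, and up to eight
// integer arguments are passed in registers a0-a7, per the n64 ABI.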
struct MipsLinkageHelperTraits {
static Register ReturnValueReg() { return v0; }
static Register ReturnValue2Reg() { return v1; }
static Register JSCallFunctionReg() { return a1; }
static Register ContextReg() { return cp; }
static Register RuntimeCallFunctionReg() { return a1; }
static Register RuntimeCallArgCountReg() { return a0; }
static RegList CCalleeSaveRegisters() {
return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
s6.bit() | s7.bit();
}
static Register CRegisterParameter(int i) {
static Register register_parameters[] = {a0, a1, a2, a3, a4, a5, a6, a7};
return register_parameters[i];
}
static int CRegisterParametersLength() { return 8; }
};
typedef LinkageHelper<MipsLinkageHelperTraits> LH;
CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
CallDescriptor::Flags flags) {
return LH::GetJSCallDescriptor(zone, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
Operator::Properties properties, Zone* zone) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
CallDescriptor::Flags flags, Zone* zone) {
return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
flags);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -27,7 +27,8 @@
#endif
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
#define V8_TURBOFAN_BACKEND 1
#else
#define V8_TURBOFAN_BACKEND 0
......
......@@ -207,6 +207,42 @@ class CallHelper {
Simulator::CallArgument::End()};
return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
}
#elif USE_SIMULATOR && V8_TARGET_ARCH_MIPS64
uintptr_t CallSimulator(byte* f, int64_t p1 = 0, int64_t p2 = 0,
int64_t p3 = 0, int64_t p4 = 0) {
Simulator* simulator = Simulator::current(isolate_);
return static_cast<uintptr_t>(simulator->Call(f, 4, p1, p2, p3, p4));
}
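// Each DoCall overload below widens its arguments to int64_t via
// ParameterTraits<P>::Cast before dispatching to the simulator.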
template <typename R, typename F>
R DoCall(F* f) {
return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f)));
}
template <typename R, typename F, typename P1>
R DoCall(F* f, P1 p1) {
return ReturnValueTraits<R>::Cast(
CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1)));
}
template <typename R, typename F, typename P1, typename P2>
R DoCall(F* f, P1 p1, P2 p2) {
return ReturnValueTraits<R>::Cast(
CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
ParameterTraits<P2>::Cast(p2)));
}
template <typename R, typename F, typename P1, typename P2, typename P3>
R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
return ReturnValueTraits<R>::Cast(CallSimulator(
FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3)));
}
template <typename R, typename F, typename P1, typename P2, typename P3,
typename P4>
R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
return ReturnValueTraits<R>::Cast(CallSimulator(
FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
ParameterTraits<P4>::Cast(p4)));
}
#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS)
uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
int32_t p3 = 0, int32_t p4 = 0) {
......
paul.lind@imgtec.com
gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/unittests/compiler/instruction-selector-unittest.h"
namespace v8 {
namespace internal {
namespace compiler {
namespace {
template <typename T>
struct MachInst {
T constructor;
const char* constructor_name;
ArchOpcode arch_opcode;
MachineType machine_type;
};
template <typename T>
std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
return os << mi.constructor_name;
}
typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
// To avoid duplicated code, an IntCmp helper structure is created. It pairs
// a MachInst2 (an instruction taking two nodes) with an expected_size,
// because different cmp instructions expand to different numbers of
// instructions.
struct IntCmp {
MachInst2 mi;
uint32_t expected_size;
};
struct FPCmp {
MachInst2 mi;
FlagsCondition cond;
};
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMips64CmpD,
kMachFloat64},
kUnorderedEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMips64CmpD,
kMachFloat64},
kUnorderedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
kMips64CmpD, kMachFloat64},
kUnorderedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
kMips64CmpD, kMachFloat64},
kUnorderedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
"Float64GreaterThanOrEqual", kMips64CmpD, kMachFloat64},
kUnorderedLessThanOrEqual}};
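// Note: RawMachineAssembler implements Float64GreaterThan(a, b) as
// Float64LessThan(b, a) (and likewise for the OrEqual variant), which is why
// the greater-than entries above still expect the less-than conditions.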
struct Conversion {
// The machine_type field in MachInst1 represents the destination type.
MachInst1 mi;
MachineType src_machine_type;
};
// ----------------------------------------------------------------------------
// Logical instructions.
// ----------------------------------------------------------------------------
const MachInst2 kLogicalInstructions[] = {
{&RawMachineAssembler::Word32And, "Word32And", kMips64And, kMachInt32},
{&RawMachineAssembler::Word64And, "Word64And", kMips64And, kMachInt64},
{&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or, kMachInt32},
{&RawMachineAssembler::Word64Or, "Word64Or", kMips64Or, kMachInt64},
{&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor, kMachInt32},
{&RawMachineAssembler::Word64Xor, "Word64Xor", kMips64Xor, kMachInt64}};
// ----------------------------------------------------------------------------
// Shift instructions.
// ----------------------------------------------------------------------------
const MachInst2 kShiftInstructions[] = {
{&RawMachineAssembler::Word32Shl, "Word32Shl", kMips64Shl, kMachInt32},
{&RawMachineAssembler::Word64Shl, "Word64Shl", kMips64Dshl, kMachInt64},
{&RawMachineAssembler::Word32Shr, "Word32Shr", kMips64Shr, kMachInt32},
{&RawMachineAssembler::Word64Shr, "Word64Shr", kMips64Dshr, kMachInt64},
{&RawMachineAssembler::Word32Sar, "Word32Sar", kMips64Sar, kMachInt32},
{&RawMachineAssembler::Word64Sar, "Word64Sar", kMips64Dsar, kMachInt64},
{&RawMachineAssembler::Word32Ror, "Word32Ror", kMips64Ror, kMachInt32},
{&RawMachineAssembler::Word64Ror, "Word64Ror", kMips64Dror, kMachInt64}};
// ----------------------------------------------------------------------------
// MUL/DIV instructions.
// ----------------------------------------------------------------------------
const MachInst2 kMulDivInstructions[] = {
{&RawMachineAssembler::Int32Mul, "Int32Mul", kMips64Mul, kMachInt32},
{&RawMachineAssembler::Int32Div, "Int32Div", kMips64Div, kMachInt32},
{&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU, kMachUint32},
{&RawMachineAssembler::Int64Mul, "Int64Mul", kMips64Dmul, kMachInt64},
{&RawMachineAssembler::Int64Div, "Int64Div", kMips64Ddiv, kMachInt64},
{&RawMachineAssembler::Uint64Div, "Uint64Div", kMips64DdivU, kMachUint64},
{&RawMachineAssembler::Float64Mul, "Float64Mul", kMips64MulD, kMachFloat64},
{&RawMachineAssembler::Float64Div, "Float64Div", kMips64DivD,
kMachFloat64}};
// ----------------------------------------------------------------------------
// MOD instructions.
// ----------------------------------------------------------------------------
const MachInst2 kModInstructions[] = {
{&RawMachineAssembler::Int32Mod, "Int32Mod", kMips64Mod, kMachInt32},
{&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU, kMachInt32},
{&RawMachineAssembler::Float64Mod, "Float64Mod", kMips64ModD,
kMachFloat64}};
// ----------------------------------------------------------------------------
// Arithmetic FPU instructions.
// ----------------------------------------------------------------------------
const MachInst2 kFPArithInstructions[] = {
{&RawMachineAssembler::Float64Add, "Float64Add", kMips64AddD, kMachFloat64},
{&RawMachineAssembler::Float64Sub, "Float64Sub", kMips64SubD,
kMachFloat64}};
// ----------------------------------------------------------------------------
// IntArithTest instructions, two nodes.
// ----------------------------------------------------------------------------
const MachInst2 kAddSubInstructions[] = {
{&RawMachineAssembler::Int32Add, "Int32Add", kMips64Add, kMachInt32},
{&RawMachineAssembler::Int64Add, "Int64Add", kMips64Dadd, kMachInt64},
{&RawMachineAssembler::Int32Sub, "Int32Sub", kMips64Sub, kMachInt32},
{&RawMachineAssembler::Int64Sub, "Int64Sub", kMips64Dsub, kMachInt64}};
// ----------------------------------------------------------------------------
// IntArithTest instructions, one node.
// ----------------------------------------------------------------------------
const MachInst1 kAddSubOneInstructions[] = {
{&RawMachineAssembler::Int32Neg, "Int32Neg", kMips64Sub, kMachInt32},
{&RawMachineAssembler::Int64Neg, "Int64Neg", kMips64Dsub, kMachInt64}};
// ----------------------------------------------------------------------------
// Arithmetic compare instructions.
// ----------------------------------------------------------------------------
const IntCmp kCmpInstructions[] = {
{{&RawMachineAssembler::WordEqual, "WordEqual", kMips64Cmp, kMachInt64},
1U},
{{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMips64Cmp,
kMachInt64},
1U},
{{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp32,
kMachInt32},
1U},
{{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMips64Cmp32,
kMachInt32},
1U},
{{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp32,
kMachInt32},
1U},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
kMips64Cmp32, kMachInt32},
1U},
{{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMips64Cmp32,
kMachInt32},
1U},
{{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
kMips64Cmp32, kMachInt32},
1U},
{{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp32,
kMachUint32},
1U},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
kMips64Cmp32, kMachUint32},
1U}};
// ----------------------------------------------------------------------------
// Conversion instructions.
// ----------------------------------------------------------------------------
const Conversion kConversionInstructions[] = {
// Conversion instructions are related to machine_operator.h:
// FPU conversions:
// Convert representation of integers between float64 and int32/uint32.
// The precise rounding mode and handling of out of range inputs are *not*
// defined for these operators, since they are intended only for use with
// integers.
// mips instructions:
// mtc1, cvt.d.w
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
kMips64CvtDW, kMachFloat64},
kMachInt32},
// mips instructions:
// cvt.d.uw
{{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
kMips64CvtDUw, kMachFloat64},
kMachInt32},
// mips instructions:
// mfc1, trunc double to word; for more details see the MIPS
// macro-assembler and assembler sources.
{{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
kMips64TruncWD, kMachFloat64},
kMachInt32},
// mips instructions:
// trunc double to unsigned word; for more details see the MIPS
// macro-assembler and assembler sources.
{{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
kMips64TruncUwD, kMachFloat64},
kMachInt32}};
} // namespace
typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
::testing::ValuesIn(kFPCmpInstructions));
// ----------------------------------------------------------------------------
// Arithmetic compare instructions (integers).
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<IntCmp> InstructionSelectorCmpTest;
TEST_P(InstructionSelectorCmpTest, Parameter) {
const IntCmp cmp = GetParam();
const MachineType type = cmp.mi.machine_type;
StreamBuilder m(this, type, type, type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(cmp.expected_size, s.size());
EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
::testing::ValuesIn(kCmpInstructions));
// ----------------------------------------------------------------------------
// Shift instructions.
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<MachInst2>
InstructionSelectorShiftTest;
TEST_P(InstructionSelectorShiftTest, Immediate) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
StreamBuilder m(this, type, type);
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
::testing::ValuesIn(kShiftInstructions));
// ----------------------------------------------------------------------------
// Logical instructions.
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<MachInst2>
InstructionSelectorLogicalTest;
TEST_P(InstructionSelectorLogicalTest, Parameter) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
::testing::ValuesIn(kLogicalInstructions));
// ----------------------------------------------------------------------------
// MUL/DIV instructions.
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<MachInst2>
InstructionSelectorMulDivTest;
TEST_P(InstructionSelectorMulDivTest, Parameter) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
::testing::ValuesIn(kMulDivInstructions));
// ----------------------------------------------------------------------------
// MOD instructions.
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<MachInst2> InstructionSelectorModTest;
TEST_P(InstructionSelectorModTest, Parameter) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorModTest,
::testing::ValuesIn(kModInstructions));
// ----------------------------------------------------------------------------
// Floating point instructions.
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<MachInst2>
InstructionSelectorFPArithTest;
TEST_P(InstructionSelectorFPArithTest, Parameter) {
const MachInst2 fpa = GetParam();
StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
::testing::ValuesIn(kFPArithInstructions));
// ----------------------------------------------------------------------------
// Integer arithmetic
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<MachInst2>
InstructionSelectorIntArithTwoTest;
TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
const MachInst2 intpa = GetParam();
StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
intpa.machine_type);
m.Return((m.*intpa.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorIntArithTwoTest,
::testing::ValuesIn(kAddSubInstructions));
// ----------------------------------------------------------------------------
// One node.
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<MachInst1>
InstructionSelectorIntArithOneTest;
TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
const MachInst1 intpa = GetParam();
StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
intpa.machine_type);
m.Return((m.*intpa.constructor)(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorIntArithOneTest,
::testing::ValuesIn(kAddSubOneInstructions));
// ----------------------------------------------------------------------------
// Conversions.
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<Conversion>
InstructionSelectorConversionTest;
TEST_P(InstructionSelectorConversionTest, Parameter) {
const Conversion conv = GetParam();
StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorConversionTest,
::testing::ValuesIn(kConversionInstructions));
// ----------------------------------------------------------------------------
// Loads and stores.
// ----------------------------------------------------------------------------
namespace {
struct MemoryAccess {
MachineType type;
ArchOpcode load_opcode;
ArchOpcode store_opcode;
};
static const MemoryAccess kMemoryAccesses[] = {
{kMachInt8, kMips64Lb, kMips64Sb},
{kMachUint8, kMips64Lbu, kMips64Sb},
{kMachInt16, kMips64Lh, kMips64Sh},
{kMachUint16, kMips64Lhu, kMips64Sh},
{kMachInt32, kMips64Lw, kMips64Sw},
{kRepFloat32, kMips64Lwc1, kMips64Swc1},
{kRepFloat64, kMips64Ldc1, kMips64Sdc1},
{kMachInt64, kMips64Ld, kMips64Sd}};
struct MemoryAccessImm {
MachineType type;
ArchOpcode load_opcode;
ArchOpcode store_opcode;
bool (InstructionSelectorTest::Stream::*val_predicate)(
const InstructionOperand*) const;
const int32_t immediates[40];
};
std::ostream& operator<<(std::ostream& os, const MemoryAccessImm& acc) {
return os << acc.type;
}
struct MemoryAccessImm1 {
MachineType type;
ArchOpcode load_opcode;
ArchOpcode store_opcode;
bool (InstructionSelectorTest::Stream::*val_predicate)(
const InstructionOperand*) const;
const int32_t immediates[5];
};
std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
return os << acc.type;
}
// ----------------------------------------------------------------------------
// Loads and stores with immediate offsets.
// ----------------------------------------------------------------------------
const MemoryAccessImm kMemoryAccessesImm[] = {
{kMachInt8,
kMips64Lb,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachUint8,
kMips64Lbu,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachInt16,
kMips64Lh,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachUint16,
kMips64Lhu,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachInt32,
kMips64Lw,
kMips64Sw,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachFloat32,
kMips64Lwc1,
kMips64Swc1,
&InstructionSelectorTest::Stream::IsDouble,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachFloat64,
kMips64Ldc1,
kMips64Sdc1,
&InstructionSelectorTest::Stream::IsDouble,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachInt64,
kMips64Ld,
kMips64Sd,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
{kMachInt8,
kMips64Lb,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
{kMachUint8,
kMips64Lbu,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
{kMachInt16,
kMips64Lh,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
{kMachUint16,
kMips64Lhu,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
{kMachInt32,
kMips64Lw,
kMips64Sw,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
{kMachFloat32,
kMips64Lwc1,
kMips64Swc1,
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}},
{kMachFloat64,
kMips64Ldc1,
kMips64Sdc1,
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}},
{kMachInt64,
kMips64Ld,
kMips64Sd,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}}};
} // namespace
typedef InstructionSelectorTestWithParam<MemoryAccess>
InstructionSelectorMemoryAccessTest;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
m.Return(m.Load(memacc.type, m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
}
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
// ----------------------------------------------------------------------------
// Load immediate.
// ----------------------------------------------------------------------------
typedef InstructionSelectorTestWithParam<MemoryAccessImm>
InstructionSelectorMemoryAccessImmTest;
TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, memacc.type, kMachPtr);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
}
}
// ----------------------------------------------------------------------------
// Store immediate.
// ----------------------------------------------------------------------------
TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessImmTest,
::testing::ValuesIn(kMemoryAccessesImm));
// ----------------------------------------------------------------------------
// Load/store offsets wider than 16 bits.
// ----------------------------------------------------------------------------
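// MIPS load/store instructions encode a signed 16-bit offset. Offsets wider
// than that cannot be folded into the memory operand, so the selector first
// materializes the address with kMips64Dadd and the access takes two
// instructions.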
typedef InstructionSelectorTestWithParam<MemoryAccessImm1>
InstructionSelectorMemoryAccessImmMoreThan16bitTest;
TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
LoadWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, memacc.type, kMachPtr);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
// kMips64Dadd is the expected opcode: the offset is wider than 16 bits,
// so the address must first be computed with an add.
EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
StoreWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
// kMips64Dadd is the expected opcode: the offset is wider than 16 bits,
// so the address must first be computed with an add.
EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessImmMoreThan16bitTest,
::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
// ----------------------------------------------------------------------------
// kMips64Cmp with zero testing.
// ----------------------------------------------------------------------------
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
StreamBuilder m(this, kMachInt32, kMachInt32);
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Cmp32, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
StreamBuilder m(this, kMachInt32, kMachInt32);
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Cmp32, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
}
TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
{
StreamBuilder m(this, kMachInt64, kMachInt64);
m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
StreamBuilder m(this, kMachInt64, kMachInt64);
m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -87,6 +87,11 @@
'compiler/mips/instruction-selector-mips-unittest.cc',
],
}],
['v8_target_arch=="mips64el"', {
'sources': [ ### gcmole(arch:mips64el) ###
'compiler/mips64/instruction-selector-mips64-unittest.cc',
],
}],
['v8_target_arch=="x64"', {
'sources': [ ### gcmole(arch:x64) ###
'compiler/x64/instruction-selector-x64-unittest.cc',
......
......@@ -1116,6 +1116,10 @@
'../../src/mips64/regexp-macro-assembler-mips64.cc',
'../../src/mips64/regexp-macro-assembler-mips64.h',
'../../src/mips64/simulator-mips64.cc',
'../../src/compiler/mips64/code-generator-mips64.cc',
'../../src/compiler/mips64/instruction-codes-mips64.h',
'../../src/compiler/mips64/instruction-selector-mips64.cc',
'../../src/compiler/mips64/linkage-mips64.cc',
'../../src/ic/mips64/access-compiler-mips64.cc',
'../../src/ic/mips64/handler-compiler-mips64.cc',
'../../src/ic/mips64/ic-mips64.cc',
......