Commit 27c56efb authored by jyan's avatar jyan Committed by Commit bot

S390: Decouple TF Operator kS390_And/Or/Xor/Not to 32/64

Separate 32 and 64 And/Or/Xor/Not Operation

R=joransiu@ca.ibm.com, michael_dawson@ca.ibm.com, mbrandy@us.ibm.com, bjaideep@ca.ibm.com
BUG=

Review-Url: https://codereview.chromium.org/2216883003
Cr-Commit-Position: refs/heads/master@{#38397}
parent b5fc36ee
...@@ -279,15 +279,15 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { ...@@ -279,15 +279,15 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
i.InputDoubleRegister(1)); \ i.InputDoubleRegister(1)); \
} while (0) } while (0)
#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \ #define ASSEMBLE_BINOP(asm_instr) \
do { \ do { \
if (HasRegisterInput(instr, 1)) { \ if (HasRegisterInput(instr, 1)) { \
__ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \ __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
i.InputRegister(1)); \ i.InputRegister(1)); \
} else { \ } else { \
__ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \ __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
i.InputImmediate(1)); \ i.InputImmediate(1)); \
} \ } \
} while (0) } while (0)
#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm) \ #define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm) \
...@@ -986,14 +986,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -986,14 +986,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset())); Operand(offset.offset()));
break; break;
} }
case kS390_And: case kS390_And32:
ASSEMBLE_BINOP(AndP, AndP); ASSEMBLE_BINOP(And);
break;
case kS390_And64:
ASSEMBLE_BINOP(AndP);
break; break;
case kS390_Or: case kS390_Or32:
ASSEMBLE_BINOP(OrP, OrP); ASSEMBLE_BINOP(Or);
case kS390_Or64:
ASSEMBLE_BINOP(OrP);
break; break;
case kS390_Xor: case kS390_Xor32:
ASSEMBLE_BINOP(XorP, XorP); ASSEMBLE_BINOP(Xor);
break;
case kS390_Xor64:
ASSEMBLE_BINOP(XorP);
break; break;
case kS390_ShiftLeft32: case kS390_ShiftLeft32:
if (HasRegisterInput(instr, 1)) { if (HasRegisterInput(instr, 1)) {
...@@ -1002,16 +1010,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1002,16 +1010,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadRR(kScratchReg, i.InputRegister(1)); __ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg); __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else { } else {
ASSEMBLE_BINOP(ShiftLeft, ShiftLeft); ASSEMBLE_BINOP(ShiftLeft);
} }
} else { } else {
ASSEMBLE_BINOP(ShiftLeft, ShiftLeft); ASSEMBLE_BINOP(ShiftLeft);
} }
__ LoadlW(i.OutputRegister(0), i.OutputRegister(0)); __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
break; break;
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
case kS390_ShiftLeft64: case kS390_ShiftLeft64:
ASSEMBLE_BINOP(sllg, sllg); ASSEMBLE_BINOP(sllg);
break; break;
#endif #endif
case kS390_ShiftRight32: case kS390_ShiftRight32:
...@@ -1021,16 +1029,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1021,16 +1029,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadRR(kScratchReg, i.InputRegister(1)); __ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg); __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else { } else {
ASSEMBLE_BINOP(ShiftRight, ShiftRight); ASSEMBLE_BINOP(ShiftRight);
} }
} else { } else {
ASSEMBLE_BINOP(ShiftRight, ShiftRight); ASSEMBLE_BINOP(ShiftRight);
} }
__ LoadlW(i.OutputRegister(0), i.OutputRegister(0)); __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
break; break;
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
case kS390_ShiftRight64: case kS390_ShiftRight64:
ASSEMBLE_BINOP(srlg, srlg); ASSEMBLE_BINOP(srlg);
break; break;
#endif #endif
case kS390_ShiftRightArith32: case kS390_ShiftRightArith32:
...@@ -1041,16 +1049,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1041,16 +1049,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ShiftRightArith(i.OutputRegister(), i.InputRegister(0), __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
kScratchReg); kScratchReg);
} else { } else {
ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith); ASSEMBLE_BINOP(ShiftRightArith);
} }
} else { } else {
ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith); ASSEMBLE_BINOP(ShiftRightArith);
} }
__ LoadlW(i.OutputRegister(), i.OutputRegister()); __ LoadlW(i.OutputRegister(), i.OutputRegister());
break; break;
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
case kS390_ShiftRightArith64: case kS390_ShiftRightArith64:
ASSEMBLE_BINOP(srag, srag); ASSEMBLE_BINOP(srag);
break; break;
#endif #endif
#if !V8_TARGET_ARCH_S390X #if !V8_TARGET_ARCH_S390X
...@@ -1141,9 +1149,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1141,9 +1149,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
break; break;
#endif #endif
case kS390_Not: case kS390_Not32:
__ LoadRR(i.OutputRegister(), i.InputRegister(0)); __ Not32(i.OutputRegister(), i.InputRegister(0));
__ NotP(i.OutputRegister()); break;
case kS390_Not64:
__ Not64(i.OutputRegister(), i.InputRegister(0));
break; break;
case kS390_RotLeftAndMask32: case kS390_RotLeftAndMask32:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
...@@ -1205,7 +1215,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1205,7 +1215,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ADD_WITH_OVERFLOW(); ASSEMBLE_ADD_WITH_OVERFLOW();
} else { } else {
#endif #endif
ASSEMBLE_BINOP(AddP, AddP); ASSEMBLE_BINOP(AddP);
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
} }
#endif #endif
...@@ -1239,7 +1249,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1239,7 +1249,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SUB_WITH_OVERFLOW(); ASSEMBLE_SUB_WITH_OVERFLOW();
} else { } else {
#endif #endif
ASSEMBLE_BINOP(SubP, SubP); ASSEMBLE_BINOP(SubP);
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
} }
#endif #endif
......
...@@ -12,9 +12,12 @@ namespace compiler { ...@@ -12,9 +12,12 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit. // S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction. // Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \ #define TARGET_ARCH_OPCODE_LIST(V) \
V(S390_And) \ V(S390_And32) \
V(S390_Or) \ V(S390_And64) \
V(S390_Xor) \ V(S390_Or32) \
V(S390_Or64) \
V(S390_Xor32) \
V(S390_Xor64) \
V(S390_ShiftLeft32) \ V(S390_ShiftLeft32) \
V(S390_ShiftLeft64) \ V(S390_ShiftLeft64) \
V(S390_ShiftLeftPair) \ V(S390_ShiftLeftPair) \
...@@ -26,7 +29,8 @@ namespace compiler { ...@@ -26,7 +29,8 @@ namespace compiler {
V(S390_ShiftRightArithPair) \ V(S390_ShiftRightArithPair) \
V(S390_RotRight32) \ V(S390_RotRight32) \
V(S390_RotRight64) \ V(S390_RotRight64) \
V(S390_Not) \ V(S390_Not32) \
V(S390_Not64) \
V(S390_RotLeftAndMask32) \ V(S390_RotLeftAndMask32) \
V(S390_RotLeftAndClear64) \ V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \ V(S390_RotLeftAndClearLeft64) \
......
...@@ -13,9 +13,12 @@ bool InstructionScheduler::SchedulerSupported() { return true; } ...@@ -13,9 +13,12 @@ bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags( int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const { const Instruction* instr) const {
switch (instr->arch_opcode()) { switch (instr->arch_opcode()) {
case kS390_And: case kS390_And32:
case kS390_Or: case kS390_And64:
case kS390_Xor: case kS390_Or32:
case kS390_Or64:
case kS390_Xor32:
case kS390_Xor64:
case kS390_ShiftLeft32: case kS390_ShiftLeft32:
case kS390_ShiftLeft64: case kS390_ShiftLeft64:
case kS390_ShiftLeftPair: case kS390_ShiftLeftPair:
...@@ -27,7 +30,8 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -27,7 +30,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_ShiftRightArithPair: case kS390_ShiftRightArithPair:
case kS390_RotRight32: case kS390_RotRight32:
case kS390_RotRight64: case kS390_RotRight64:
case kS390_Not: case kS390_Not32:
case kS390_Not64:
case kS390_RotLeftAndMask32: case kS390_RotLeftAndMask32:
case kS390_RotLeftAndClear64: case kS390_RotLeftAndClear64:
case kS390_RotLeftAndClearLeft64: case kS390_RotLeftAndClearLeft64:
......
...@@ -477,7 +477,7 @@ void InstructionSelector::VisitWord32And(Node* node) { ...@@ -477,7 +477,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
return; return;
} }
} }
VisitBinop<Int32BinopMatcher>(this, node, kS390_And, kInt16Imm_Unsigned); VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kInt16Imm_Unsigned);
} }
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
...@@ -529,19 +529,19 @@ void InstructionSelector::VisitWord64And(Node* node) { ...@@ -529,19 +529,19 @@ void InstructionSelector::VisitWord64And(Node* node) {
} }
} }
} }
VisitBinop<Int64BinopMatcher>(this, node, kS390_And, kInt16Imm_Unsigned); VisitBinop<Int64BinopMatcher>(this, node, kS390_And64, kInt16Imm_Unsigned);
} }
#endif #endif
void InstructionSelector::VisitWord32Or(Node* node) { void InstructionSelector::VisitWord32Or(Node* node) {
Int32BinopMatcher m(node); Int32BinopMatcher m(node);
VisitBinop<Int32BinopMatcher>(this, node, kS390_Or, kInt16Imm_Unsigned); VisitBinop<Int32BinopMatcher>(this, node, kS390_Or32, kInt16Imm_Unsigned);
} }
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Or(Node* node) { void InstructionSelector::VisitWord64Or(Node* node) {
Int64BinopMatcher m(node); Int64BinopMatcher m(node);
VisitBinop<Int64BinopMatcher>(this, node, kS390_Or, kInt16Imm_Unsigned); VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64, kInt16Imm_Unsigned);
} }
#endif #endif
...@@ -549,9 +549,9 @@ void InstructionSelector::VisitWord32Xor(Node* node) { ...@@ -549,9 +549,9 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
S390OperandGenerator g(this); S390OperandGenerator g(this);
Int32BinopMatcher m(node); Int32BinopMatcher m(node);
if (m.right().Is(-1)) { if (m.right().Is(-1)) {
Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node())); Emit(kS390_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else { } else {
VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned); VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor32, kInt16Imm_Unsigned);
} }
} }
...@@ -560,9 +560,9 @@ void InstructionSelector::VisitWord64Xor(Node* node) { ...@@ -560,9 +560,9 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
S390OperandGenerator g(this); S390OperandGenerator g(this);
Int64BinopMatcher m(node); Int64BinopMatcher m(node);
if (m.right().Is(-1)) { if (m.right().Is(-1)) {
Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node())); Emit(kS390_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else { } else {
VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned); VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64, kInt16Imm_Unsigned);
} }
} }
#endif #endif
......
...@@ -4585,12 +4585,22 @@ void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) { ...@@ -4585,12 +4585,22 @@ void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
XorP(dst, opnd); XorP(dst, opnd);
} }
void MacroAssembler::NotP(Register dst) { void MacroAssembler::Not32(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X if (!src.is(no_reg) && !src.is(dst)) lr(dst, src);
xilf(dst, Operand(0xFFFFFFFF));
}
void MacroAssembler::Not64(Register dst, Register src) {
if (!src.is(no_reg) && !src.is(dst)) lgr(dst, src);
xihf(dst, Operand(0xFFFFFFFF)); xihf(dst, Operand(0xFFFFFFFF));
xilf(dst, Operand(0xFFFFFFFF)); xilf(dst, Operand(0xFFFFFFFF));
}
void MacroAssembler::NotP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
Not64(dst, src);
#else #else
XorP(dst, Operand(0xFFFFFFFF)); Not32(dst, src);
#endif #endif
} }
......
...@@ -406,13 +406,14 @@ class MacroAssembler : public Assembler { ...@@ -406,13 +406,14 @@ class MacroAssembler : public Assembler {
void Xor(Register dst, Register src, const Operand& opnd); void Xor(Register dst, Register src, const Operand& opnd);
void XorP(Register dst, Register src, const Operand& opnd); void XorP(Register dst, Register src, const Operand& opnd);
void Popcnt32(Register dst, Register src); void Popcnt32(Register dst, Register src);
void Not32(Register dst, Register src = no_reg);
void Not64(Register dst, Register src = no_reg);
void NotP(Register dst, Register src = no_reg);
#ifdef V8_TARGET_ARCH_S390X #ifdef V8_TARGET_ARCH_S390X
void Popcnt64(Register dst, Register src); void Popcnt64(Register dst, Register src);
#endif #endif
void NotP(Register dst);
void mov(Register dst, const Operand& src); void mov(Register dst, const Operand& src);
void CleanUInt32(Register x) { void CleanUInt32(Register x) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment