Commit 129ef0a1 authored by Zhao Jiazhong, committed by V8 LUCI CQ

[mips64][loong64][codegen] Sign-extend uint32 values to 64-bit

Due to a MIPS64 ISA requirement, 32-bit values should be sign-extended
when held in 64-bit registers, regardless of whether they are signed or unsigned.

Besides, LoongArch64 also has this feature, and a similar change
was made before the loong64 port landed in V8. This CL also makes
a small fix for loong64.

Change-Id: Ib284662931082365f727925af61781e3653debc8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3193595
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Liu yu <liuyu@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#77154}
parent 6e6385a0
...@@ -1355,37 +1355,21 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { ...@@ -1355,37 +1355,21 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
} }
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// On LoongArch64, int32 values should all be sign-extended to 64-bit, so
// no need to sign-extend them here.
// But when call to a host function in simulator, if the function return an
// int32 value, the simulator do not sign-extend to int64, because in
// simulator we do not know the function whether return an int32 or int64.
#ifdef USE_SIMULATOR #ifdef USE_SIMULATOR
Node* value = node->InputAt(0); Node* value = node->InputAt(0);
if ((value->opcode() == IrOpcode::kLoad || if (value->opcode() == IrOpcode::kCall) {
value->opcode() == IrOpcode::kLoadImmutable) &&
CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
break;
case MachineRepresentation::kWord32:
opcode = kLoong64Ld_w;
break;
default:
UNREACHABLE();
}
EmitLoad(this, value, opcode, node);
} else {
Loong64OperandGenerator g(this); Loong64OperandGenerator g(this);
Emit(kLoong64Sll_w, g.DefineAsRegister(node), Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
g.UseRegister(node->InputAt(0)), g.TempImmediate(0)); g.TempImmediate(0));
return;
} }
#else
EmitIdentity(node);
#endif #endif
EmitIdentity(node);
} }
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
......
...@@ -1032,14 +1032,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1032,14 +1032,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
case kMips64And32: case kMips64And32:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
__ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break; break;
case kMips64Or: case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break; break;
case kMips64Or32: case kMips64Or32:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
__ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break; break;
case kMips64Nor: case kMips64Nor:
if (instr->InputAt(1)->IsRegister()) { if (instr->InputAt(1)->IsRegister()) {
...@@ -1052,11 +1050,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1052,11 +1050,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Nor32: case kMips64Nor32:
if (instr->InputAt(1)->IsRegister()) { if (instr->InputAt(1)->IsRegister()) {
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
__ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
} else { } else {
DCHECK_EQ(0, i.InputOperand(1).immediate()); DCHECK_EQ(0, i.InputOperand(1).immediate());
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
__ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
} }
break; break;
case kMips64Xor: case kMips64Xor:
...@@ -1103,23 +1099,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1103,23 +1099,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
case kMips64Shr: case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) { if (instr->InputAt(1)->IsRegister()) {
__ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else { } else {
int64_t imm = i.InputOperand(1).immediate(); int64_t imm = i.InputOperand(1).immediate();
__ sll(i.OutputRegister(), i.InputRegister(0), 0x0); __ srl(i.OutputRegister(), i.InputRegister(0),
__ srl(i.OutputRegister(), i.OutputRegister(),
static_cast<uint16_t>(imm)); static_cast<uint16_t>(imm));
} }
break; break;
case kMips64Sar: case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) { if (instr->InputAt(1)->IsRegister()) {
__ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else { } else {
int64_t imm = i.InputOperand(1).immediate(); int64_t imm = i.InputOperand(1).immediate();
__ sll(i.OutputRegister(), i.InputRegister(0), 0x0); __ sra(i.OutputRegister(), i.InputRegister(0),
__ sra(i.OutputRegister(), i.OutputRegister(),
static_cast<uint16_t>(imm)); static_cast<uint16_t>(imm));
} }
break; break;
......
...@@ -311,14 +311,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node, ...@@ -311,14 +311,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseOperand(m.right().node(), opcode); inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
} }
if (cont->IsDeoptimize()) {
// If we can deoptimize as a result of the binop, we need to make sure that
// the deopt inputs are not overwritten by the binop result. One way
// to achieve that is to declare the output register as same-as-first.
outputs[output_count++] = g.DefineSameAsFirst(node);
} else {
outputs[output_count++] = g.DefineAsRegister(node); outputs[output_count++] = g.DefineAsRegister(node);
}
DCHECK_NE(0u, input_count); DCHECK_NE(0u, input_count);
DCHECK_EQ(1u, output_count); DCHECK_EQ(1u, output_count);
...@@ -498,7 +491,7 @@ void InstructionSelector::VisitLoad(Node* node) { ...@@ -498,7 +491,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh; opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break; break;
case MachineRepresentation::kWord32: case MachineRepresentation::kWord32:
opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw; opcode = kMips64Lw;
break; break;
case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through.
...@@ -854,7 +847,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) { ...@@ -854,7 +847,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) { m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper // There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway. // 32 bits anyway.
Emit(kMips64Dshl, g.DefineSameAsFirst(node), Emit(kMips64Dshl, g.DefineAsRegister(node),
g.UseRegister(m.left().node()->InputAt(0)), g.UseRegister(m.left().node()->InputAt(0)),
g.UseImmediate(m.right().node())); g.UseImmediate(m.right().node()));
return; return;
...@@ -1446,44 +1439,49 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { ...@@ -1446,44 +1439,49 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
} }
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// On MIPS64, int32 values should all be sign-extended to 64-bit, so
// no need to sign-extend them here.
// But when call to a host function in simulator, if the function return an
// int32 value, the simulator do not sign-extend to int64, because in
// simulator we do not know the function whether return an int32 or int64.
#ifdef USE_SIMULATOR
Node* value = node->InputAt(0); Node* value = node->InputAt(0);
if ((value->opcode() == IrOpcode::kLoad || if (value->opcode() == IrOpcode::kCall) {
value->opcode() == IrOpcode::kLoadImmutable) &&
CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
opcode = kMips64Lw;
break;
default:
UNREACHABLE();
}
EmitLoad(this, value, opcode, node);
} else {
Mips64OperandGenerator g(this); Mips64OperandGenerator g(this);
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0)); g.TempImmediate(0));
return;
} }
#endif
EmitIdentity(node);
} }
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
DCHECK_NE(node->opcode(), IrOpcode::kPhi); DCHECK_NE(node->opcode(), IrOpcode::kPhi);
switch (node->opcode()) { switch (node->opcode()) {
// 32-bit operations will write their result in a 64 bit register, // Comparisons only emit 0/1, so the upper 32 bits must be zero.
// clearing the top 32 bits of the destination register. case IrOpcode::kWord32Equal:
case IrOpcode::kUint32Div: case IrOpcode::kInt32LessThan:
case IrOpcode::kUint32Mod: case IrOpcode::kInt32LessThanOrEqual:
case IrOpcode::kUint32MulHigh: case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
return true; return true;
case IrOpcode::kWord32And: {
Int32BinopMatcher m(node);
if (m.right().HasResolvedValue()) {
uint32_t mask = m.right().ResolvedValue();
return is_uint31(mask);
}
return false;
}
case IrOpcode::kWord32Shr: {
Int32BinopMatcher m(node);
if (m.right().HasResolvedValue()) {
uint8_t sa = m.right().ResolvedValue() & 0x1f;
return sa > 0;
}
return false;
}
case IrOpcode::kLoad: case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable: { case IrOpcode::kLoadImmutable: {
LoadRepresentation load_rep = LoadRepresentationOf(node->op()); LoadRepresentation load_rep = LoadRepresentationOf(node->op());
...@@ -1491,7 +1489,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { ...@@ -1491,7 +1489,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
switch (load_rep.representation()) { switch (load_rep.representation()) {
case MachineRepresentation::kWord8: case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16: case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
return true; return true;
default: default:
return false; return false;
...@@ -1507,10 +1504,24 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { ...@@ -1507,10 +1504,24 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Mips64OperandGenerator g(this); Mips64OperandGenerator g(this);
Node* value = node->InputAt(0); Node* value = node->InputAt(0);
IrOpcode::Value opcode = value->opcode();
if (opcode == IrOpcode::kLoad || opcode == IrOpcode::kUnalignedLoad) {
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
ArchOpcode arch_opcode =
opcode == IrOpcode::kUnalignedLoad ? kMips64Ulwu : kMips64Lwu;
if (load_rep.IsUnsigned() &&
load_rep.representation() == MachineRepresentation::kWord32) {
EmitLoad(this, value, arch_opcode, node);
return;
}
}
if (ZeroExtendsWord32ToWord64(value)) { if (ZeroExtendsWord32ToWord64(value)) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); EmitIdentity(node);
return; return;
} }
Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32)); g.TempImmediate(0), g.TempImmediate(32));
} }
...@@ -1528,7 +1539,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { ...@@ -1528,7 +1539,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Int64BinopMatcher m(value); Int64BinopMatcher m(value);
if (m.right().IsInRange(32, 63)) { if (m.right().IsInRange(32, 63)) {
// After smi untagging no need for truncate. Combine sequence. // After smi untagging no need for truncate. Combine sequence.
Emit(kMips64Dsar, g.DefineSameAsFirst(node), Emit(kMips64Dsar, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node())); g.UseImmediate(m.right().node()));
return; return;
...@@ -1540,8 +1551,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { ...@@ -1540,8 +1551,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
break; break;
} }
} }
Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32)); g.TempImmediate(0));
} }
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
...@@ -1836,7 +1847,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { ...@@ -1836,7 +1847,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh; opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
break; break;
case MachineRepresentation::kWord32: case MachineRepresentation::kWord32:
opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw; opcode = kMips64Ulw;
break; break;
case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through.
......
...@@ -382,6 +382,10 @@ ...@@ -382,6 +382,10 @@
'test-serialize/StartupSerializerTwice': [SKIP], 'test-serialize/StartupSerializerTwice': [SKIP],
'test-serialize/StartupSerializerOnceRunScript': [SKIP], 'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP], 'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
# The uint32 values are sign-extended on MIPS64.
'test-run-load-store/RunLoadStoreZeroExtend64': [SKIP],
'test-run-load-store/RunUnalignedLoadStoreZeroExtend64': [SKIP],
}], # 'arch == mips64el or arch == mips64' }], # 'arch == mips64el or arch == mips64'
############################################################################## ##############################################################################
......
...@@ -235,12 +235,17 @@ const Conversion kConversionInstructions[] = { ...@@ -235,12 +235,17 @@ const Conversion kConversionInstructions[] = {
// LOONG64 instructions that clear the top 32 bits of the destination. // LOONG64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = { const MachInst2 kCanElideChangeUint32ToUint64[] = {
{&RawMachineAssembler::Uint32Div, "Uint32Div", kLoong64Div_wu, {&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp,
MachineType::Uint32()}, MachineType::Uint32()},
{&RawMachineAssembler::Uint32Mod, "Uint32Mod", kLoong64Mod_wu, {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp,
MachineType::Uint32()},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
kLoong64Cmp, MachineType::Uint32()},
{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp,
MachineType::Uint32()}, MachineType::Uint32()},
{&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kLoong64Mulh_wu, {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
MachineType::Uint32()}}; kLoong64Cmp, MachineType::Uint32()},
};
} // namespace } // namespace
...@@ -991,13 +996,10 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) { ...@@ -991,13 +996,10 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
(m.*binop.constructor)(m.Parameter(0), m.Parameter(1)))); (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build(); Stream s = m.Build();
// Make sure the `ChangeUint32ToUint64` node turned into a no-op. // Make sure the `ChangeUint32ToUint64` node turned into a no-op.
ASSERT_EQ(2U, s.size()); ASSERT_EQ(1U, s.size());
EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode()); EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kLoong64Bstrpick_d, s[1]->arch_opcode());
EXPECT_EQ(3U, s[1]->InputCount());
EXPECT_EQ(1U, s[1]->OutputCount());
} }
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
......
...@@ -289,12 +289,17 @@ const Conversion kFloat32RoundInstructions[] = { ...@@ -289,12 +289,17 @@ const Conversion kFloat32RoundInstructions[] = {
// MIPS64 instructions that clear the top 32 bits of the destination. // MIPS64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = { const MachInst2 kCanElideChangeUint32ToUint64[] = {
{&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU, {&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp,
MachineType::Uint32()}, MachineType::Uint32()},
{&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU, {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp,
MachineType::Uint32()},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
kMips64Cmp, MachineType::Uint32()},
{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp,
MachineType::Uint32()}, MachineType::Uint32()},
{&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kMips64MulHighU, {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
MachineType::Uint32()}}; kMips64Cmp, MachineType::Uint32()},
};
} // namespace } // namespace
...@@ -1159,10 +1164,22 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) { ...@@ -1159,10 +1164,22 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
(m.*binop.constructor)(m.Parameter(0), m.Parameter(1)))); (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build(); Stream s = m.Build();
// Make sure the `ChangeUint32ToUint64` node turned into a no-op. // Make sure the `ChangeUint32ToUint64` node turned into a no-op.
if (FLAG_debug_code && binop.arch_opcode == kMips64Cmp) {
ASSERT_EQ(6U, s.size());
EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
EXPECT_EQ(kMips64Dshl, s[1]->arch_opcode());
EXPECT_EQ(kMips64Dshl, s[2]->arch_opcode());
EXPECT_EQ(kMips64Cmp, s[3]->arch_opcode());
EXPECT_EQ(kMips64AssertEqual, s[4]->arch_opcode());
EXPECT_EQ(kMips64Cmp, s[5]->arch_opcode());
EXPECT_EQ(2U, s[5]->InputCount());
EXPECT_EQ(1U, s[5]->OutputCount());
} else {
ASSERT_EQ(1U, s.size()); ASSERT_EQ(1U, s.size());
EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode()); EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(1U, s[0]->OutputCount());
}
} }
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment