Commit 129ef0a1 authored by Zhao Jiazhong's avatar Zhao Jiazhong Committed by V8 LUCI CQ

[mips64][loong64][codegen] Sign-extend uint32 values to 64-bit

Due to a MIPS64 ISA characteristic, 32-bit values must be sign-extended
when held in 64-bit registers, regardless of whether they are signed or unsigned.

Besides, LoongArch64 also has this characteristic, and a similar change
was made before the loong64 port landed in V8. This CL also makes
a small fix for loong64.

Change-Id: Ib284662931082365f727925af61781e3653debc8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3193595
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Liu yu <liuyu@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#77154}
parent 6e6385a0
......@@ -1355,37 +1355,21 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// On LoongArch64, int32 values are already sign-extended to 64-bit, so
// there is no need to sign-extend them here.
// However, when calling a host function from the simulator, if that function
// returns an int32 value, the simulator does not sign-extend it to int64,
// because the simulator cannot know whether the function returns an int32
// or an int64.
#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
if ((value->opcode() == IrOpcode::kLoad ||
value->opcode() == IrOpcode::kLoadImmutable) &&
CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
break;
case MachineRepresentation::kWord32:
opcode = kLoong64Ld_w;
break;
default:
UNREACHABLE();
}
EmitLoad(this, value, opcode, node);
} else {
if (value->opcode() == IrOpcode::kCall) {
Loong64OperandGenerator g(this);
Emit(kLoong64Sll_w, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.TempImmediate(0));
Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
return;
}
#else
EmitIdentity(node);
#endif
EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
......
......@@ -1032,14 +1032,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64And32:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
__ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break;
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Or32:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
__ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break;
case kMips64Nor:
if (instr->InputAt(1)->IsRegister()) {
......@@ -1052,11 +1050,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Nor32:
if (instr->InputAt(1)->IsRegister()) {
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
__ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
} else {
DCHECK_EQ(0, i.InputOperand(1).immediate());
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
__ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
}
break;
case kMips64Xor:
......@@ -1103,23 +1099,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
__ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ sll(i.OutputRegister(), i.InputRegister(0), 0x0);
__ srl(i.OutputRegister(), i.OutputRegister(),
__ srl(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
__ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ sll(i.OutputRegister(), i.InputRegister(0), 0x0);
__ sra(i.OutputRegister(), i.OutputRegister(),
__ sra(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
......
......@@ -311,14 +311,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
}
if (cont->IsDeoptimize()) {
// If we can deoptimize as a result of the binop, we need to make sure that
// the deopt inputs are not overwritten by the binop result. One way
// to achieve that is to declare the output register as same-as-first.
outputs[output_count++] = g.DefineSameAsFirst(node);
} else {
outputs[output_count++] = g.DefineAsRegister(node);
}
outputs[output_count++] = g.DefineAsRegister(node);
DCHECK_NE(0u, input_count);
DCHECK_EQ(1u, output_count);
......@@ -498,7 +491,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
opcode = kMips64Lw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
......@@ -854,7 +847,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
Emit(kMips64Dshl, g.DefineSameAsFirst(node),
Emit(kMips64Dshl, g.DefineAsRegister(node),
g.UseRegister(m.left().node()->InputAt(0)),
g.UseImmediate(m.right().node()));
return;
......@@ -1446,44 +1439,49 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// On MIPS64, int32 values are already sign-extended to 64-bit, so
// there is no need to sign-extend them here.
// However, when calling a host function from the simulator, if that function
// returns an int32 value, the simulator does not sign-extend it to int64,
// because the simulator cannot know whether the function returns an int32
// or an int64.
#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
if ((value->opcode() == IrOpcode::kLoad ||
value->opcode() == IrOpcode::kLoadImmutable) &&
CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
opcode = kMips64Lw;
break;
default:
UNREACHABLE();
}
EmitLoad(this, value, opcode, node);
} else {
if (value->opcode() == IrOpcode::kCall) {
Mips64OperandGenerator g(this);
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
return;
}
#endif
EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
DCHECK_NE(node->opcode(), IrOpcode::kPhi);
switch (node->opcode()) {
// 32-bit operations will write their result in a 64 bit register,
// clearing the top 32 bits of the destination register.
case IrOpcode::kUint32Div:
case IrOpcode::kUint32Mod:
case IrOpcode::kUint32MulHigh:
// Comparisons only emit 0/1, so the upper 32 bits must be zero.
case IrOpcode::kWord32Equal:
case IrOpcode::kInt32LessThan:
case IrOpcode::kInt32LessThanOrEqual:
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
return true;
case IrOpcode::kWord32And: {
Int32BinopMatcher m(node);
if (m.right().HasResolvedValue()) {
uint32_t mask = m.right().ResolvedValue();
return is_uint31(mask);
}
return false;
}
case IrOpcode::kWord32Shr: {
Int32BinopMatcher m(node);
if (m.right().HasResolvedValue()) {
uint8_t sa = m.right().ResolvedValue() & 0x1f;
return sa > 0;
}
return false;
}
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable: {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
......@@ -1491,7 +1489,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
return true;
default:
return false;
......@@ -1507,10 +1504,24 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
IrOpcode::Value opcode = value->opcode();
if (opcode == IrOpcode::kLoad || opcode == IrOpcode::kUnalignedLoad) {
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
ArchOpcode arch_opcode =
opcode == IrOpcode::kUnalignedLoad ? kMips64Ulwu : kMips64Lwu;
if (load_rep.IsUnsigned() &&
load_rep.representation() == MachineRepresentation::kWord32) {
EmitLoad(this, value, arch_opcode, node);
return;
}
}
if (ZeroExtendsWord32ToWord64(value)) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
EmitIdentity(node);
return;
}
Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
......@@ -1528,7 +1539,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Int64BinopMatcher m(value);
if (m.right().IsInRange(32, 63)) {
// After smi untagging no need for truncate. Combine sequence.
Emit(kMips64Dsar, g.DefineSameAsFirst(node),
Emit(kMips64Dsar, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()));
return;
......@@ -1540,8 +1551,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
break;
}
}
Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0));
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
......@@ -1836,7 +1847,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
break;
case MachineRepresentation::kWord32:
opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
opcode = kMips64Ulw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
......
......@@ -382,6 +382,10 @@
'test-serialize/StartupSerializerTwice': [SKIP],
'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
# The uint32 values are sign-extended on MIPS64.
'test-run-load-store/RunLoadStoreZeroExtend64': [SKIP],
'test-run-load-store/RunUnalignedLoadStoreZeroExtend64': [SKIP],
}], # 'arch == mips64el or arch == mips64'
##############################################################################
......
......@@ -235,12 +235,17 @@ const Conversion kConversionInstructions[] = {
// LOONG64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = {
{&RawMachineAssembler::Uint32Div, "Uint32Div", kLoong64Div_wu,
{&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp,
MachineType::Uint32()},
{&RawMachineAssembler::Uint32Mod, "Uint32Mod", kLoong64Mod_wu,
{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp,
MachineType::Uint32()},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
kLoong64Cmp, MachineType::Uint32()},
{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp,
MachineType::Uint32()},
{&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kLoong64Mulh_wu,
MachineType::Uint32()}};
{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
kLoong64Cmp, MachineType::Uint32()},
};
} // namespace
......@@ -991,13 +996,10 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
(m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
// Make sure the `ChangeUint32ToUint64` node turned into a no-op.
ASSERT_EQ(2U, s.size());
ASSERT_EQ(1U, s.size());
EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kLoong64Bstrpick_d, s[1]->arch_opcode());
EXPECT_EQ(3U, s[1]->InputCount());
EXPECT_EQ(1U, s[1]->OutputCount());
}
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
......
......@@ -289,12 +289,17 @@ const Conversion kFloat32RoundInstructions[] = {
// MIPS64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = {
{&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU,
{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp,
MachineType::Uint32()},
{&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU,
{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp,
MachineType::Uint32()},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
kMips64Cmp, MachineType::Uint32()},
{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp,
MachineType::Uint32()},
{&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kMips64MulHighU,
MachineType::Uint32()}};
{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
kMips64Cmp, MachineType::Uint32()},
};
} // namespace
......@@ -1159,10 +1164,22 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
(m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
// Make sure the `ChangeUint32ToUint64` node turned into a no-op.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
if (FLAG_debug_code && binop.arch_opcode == kMips64Cmp) {
ASSERT_EQ(6U, s.size());
EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
EXPECT_EQ(kMips64Dshl, s[1]->arch_opcode());
EXPECT_EQ(kMips64Dshl, s[2]->arch_opcode());
EXPECT_EQ(kMips64Cmp, s[3]->arch_opcode());
EXPECT_EQ(kMips64AssertEqual, s[4]->arch_opcode());
EXPECT_EQ(kMips64Cmp, s[5]->arch_opcode());
EXPECT_EQ(2U, s[5]->InputCount());
EXPECT_EQ(1U, s[5]->OutputCount());
} else {
ASSERT_EQ(1U, s.size());
EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment