Commit edd50ad2 authored by Santiago Aboy Solanes, committed by Commit Bot

[turbofan][64][ptr-compr] Optimize Smi tagging for 31-bit Smis

In both ChangeIntPtrToSmi and ChangeInt32ToSmi we can use bitcasts
instead of change nodes for Smi tagging when we are using 31-bit
Smis on 64-bit architectures with pointer compression enabled.

In ChangeIntPtrToSmi we can omit the truncation as well.

Updated DecompressionOptimizer to match the new pattern.

Change-Id: I4487ba40ba9fda7b1ab31da95ff7bd144407d02d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1924355
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65168}
parent 3b25378e
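Background for the change: with 31-bit Smis a small integer is tagged by shifting it left one bit, so the tagged value lives entirely in the low 32 bits of a register. Under pointer compression only those low 32 bits are ever decompressed, so the sign extension that ChangeInt32ToInt64 performs on the upper word is wasted work; reinterpreting the 32-bit word as a 64-bit word is enough, and on arm64 and x64 that bitcast costs no instructions at all (see EmitIdentity below). A minimal sketch of the tagging arithmetic, as an editorial illustration with a hypothetical TagSmi helper rather than V8 code:

    #include <cstdint>

    // Tag a 31-bit-representable integer as a Smi on a 64-bit target.
    uint64_t TagSmi(int32_t value) {
      // 31-bit Smi tagging: shift left by one; the low tag bit stays 0.
      uint32_t lower_half = static_cast<uint32_t>(value) << 1;
      // Without pointer compression the result must be sign-extended to 64
      // bits (ChangeInt32ToInt64). With pointer compression nothing observes
      // the upper 32 bits ("Smi corruption" is fine), so a free bitcast
      // (BitcastWord32ToWord64) suffices; any upper bits would do.
      uint64_t upper_half = 0;
      return (upper_half << 32) | lower_half;
    }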
@@ -1651,6 +1651,12 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
   Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
 }
 
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+  DCHECK(SmiValuesAre31Bits());
+  DCHECK(COMPRESS_POINTERS_BOOL);
+  EmitIdentity(node);
+}
+
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   Node* value = node->InputAt(0);
   if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
@@ -1570,6 +1570,8 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
     case IrOpcode::kTryTruncateFloat64ToUint64:
       return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
+    case IrOpcode::kBitcastWord32ToWord64:
+      return MarkAsWord64(node), VisitBitcastWord32ToWord64(node);
     case IrOpcode::kChangeInt32ToInt64:
       return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
     case IrOpcode::kChangeUint32ToUint64:
@@ -2437,6 +2439,10 @@ void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
 void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
 
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+  UNIMPLEMENTED();
+}
+
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   UNIMPLEMENTED();
 }
@@ -1225,6 +1225,12 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
   Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
 }
 
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+  DCHECK(SmiValuesAre31Bits());
+  DCHECK(COMPRESS_POINTERS_BOOL);
+  EmitIdentity(node);
+}
+
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   Node* const value = node->InputAt(0);
@@ -80,6 +80,7 @@ void DecompressionOptimizer::MarkNodeInputs(Node* node) {
     case IrOpcode::kUint32LessThanOrEqual:
     case IrOpcode::kWord32And:
     case IrOpcode::kWord32Equal:
+    case IrOpcode::kWord32Shl:
       DCHECK_EQ(node->op()->ValueInputCount(), 2);
       MaybeMarkAndQueueForRevisit(node->InputAt(0),
                                   State::kOnly32BitsObserved);  // value_0
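Adding kWord32Shl to the operations that observe only the low 32 bits of their inputs lets the DecompressionOptimizer see through the new tagging pattern. A sketch of the shape it now recognizes (see the updated unit test below):

    // BitcastWord32ToWord64(Word32Shl(load, smi_shift_bits))
    // Word32Shl needs only the low 32 bits of `load`, so the AnyTagged load
    // can be replaced by a compressed 32-bit load.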
@@ -4425,8 +4425,11 @@ Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
 Node* EffectControlLinearizer::ChangeIntPtrToSmi(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    return __ ChangeInt32ToInt64(
-        __ Word32Shl(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant()));
+    Node* smi_value = __ Word32Shl(value, SmiShiftBitsConstant());
+    // With pointer compression we Smi-corrupt: the upper 32 bits are not
+    // important, so a bitcast is enough.
+    return COMPRESS_POINTERS_BOOL ? __ BitcastWord32ToWord64(smi_value)
+                                  : __ ChangeInt32ToInt64(smi_value);
   }
   return __ WordShl(value, SmiShiftBitsConstant());
 }
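The net effect on the generated graph for a 64-bit build with 31-bit Smis, sketched with the Smi shift amount written out as 1 (kSmiShiftSize + kSmiTagSize):

    // Before: ChangeInt32ToInt64(Word32Shl(TruncateInt64ToInt32(value), 1))
    // After:  BitcastWord32ToWord64(Word32Shl(value, 1))

The truncation can be omitted because Word32Shl reads only the low 32 bits of its input, and the sign extension can be omitted because, under pointer compression, nothing reads the upper 32 bits of the result.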
@@ -4448,7 +4451,7 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
 Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    return __ ChangeInt32ToInt64(__ Word32Shl(value, SmiShiftBitsConstant()));
+    return ChangeIntPtrToSmi(value);
   }
   return ChangeIntPtrToSmi(ChangeInt32ToIntPtr(value));
 }
@@ -4468,7 +4471,11 @@ Node* EffectControlLinearizer::ChangeUint32ToUintPtr(Node* value) {
 Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    return __ ChangeUint32ToUint64(__ Word32Shl(value, SmiShiftBitsConstant()));
+    Node* smi_value = __ Word32Shl(value, SmiShiftBitsConstant());
+    // With pointer compression we Smi-corrupt: the upper 32 bits are not
+    // important, so a bitcast is enough.
+    return COMPRESS_POINTERS_BOOL ? __ BitcastWord32ToWord64(smi_value)
+                                  : __ ChangeUint32ToUint64(smi_value);
   } else {
     return __ WordShl(ChangeUint32ToUintPtr(value), SmiShiftBitsConstant());
   }
@@ -25,6 +25,7 @@ class BasicBlock;
   V(BitcastFloat32ToInt32) \
   V(BitcastFloat64ToInt64) \
   V(BitcastInt32ToFloat32) \
+  V(BitcastWord32ToWord64) \
   V(BitcastInt64ToFloat64) \
   V(ChangeFloat64ToInt32) \
   V(ChangeFloat64ToInt64) \
@@ -455,6 +455,7 @@ class MachineRepresentationChecker {
       case IrOpcode::kRoundInt32ToFloat32:
       case IrOpcode::kRoundUint32ToFloat32:
       case IrOpcode::kBitcastInt32ToFloat32:
+      case IrOpcode::kBitcastWord32ToWord64:
       case IrOpcode::kChangeInt32ToInt64:
      case IrOpcode::kChangeUint32ToUint64:
        MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
@@ -632,6 +632,11 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       if (m.HasValue()) return ReplaceFloat64(FastI2D(m.Value()));
       break;
     }
+    case IrOpcode::kBitcastWord32ToWord64: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt64(m.Value());
+      break;
+    }
     case IrOpcode::kChangeInt32ToInt64: {
       Int32Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt64(m.Value());
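Constant folding for the new bitcast deliberately mirrors the kChangeInt32ToInt64 case that follows it: ReplaceInt64(m.Value()) sign-extends the int32 constant, which is one valid choice given that the upper 32 bits of a BitcastWord32ToWord64 result are unobserved. For example:

    // BitcastWord32ToWord64(Int32Constant(-1)) folds to Int64Constant(-1);
    // any other choice of upper 32 bits would be equally acceptable.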
This diff is collapsed.
@@ -385,6 +385,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* TryTruncateFloat32ToUint64();
   const Operator* TryTruncateFloat64ToUint64();
   const Operator* ChangeInt32ToFloat64();
+  const Operator* BitcastWord32ToWord64();
   const Operator* ChangeInt32ToInt64();
   const Operator* ChangeInt64ToFloat64();
   const Operator* ChangeUint32ToFloat64();
@@ -648,96 +648,97 @@
   V(Word64AtomicExchange) \
   V(Word64AtomicCompareExchange)
 
 #define MACHINE_OP_LIST(V) \
   MACHINE_UNOP_32_LIST(V) \
   MACHINE_BINOP_32_LIST(V) \
   MACHINE_BINOP_64_LIST(V) \
   MACHINE_COMPARE_BINOP_LIST(V) \
   MACHINE_FLOAT32_BINOP_LIST(V) \
   MACHINE_FLOAT32_UNOP_LIST(V) \
   MACHINE_FLOAT64_BINOP_LIST(V) \
   MACHINE_FLOAT64_UNOP_LIST(V) \
   MACHINE_ATOMIC_OP_LIST(V) \
   V(AbortCSAAssert) \
   V(DebugBreak) \
   V(Comment) \
   V(Load) \
   V(PoisonedLoad) \
   V(Store) \
   V(StackSlot) \
   V(Word32Popcnt) \
   V(Word64Popcnt) \
   V(Word64Clz) \
   V(Word64Ctz) \
   V(Word64ReverseBits) \
   V(Word64ReverseBytes) \
   V(Simd128ReverseBytes) \
   V(Int64AbsWithOverflow) \
   V(BitcastTaggedToWord) \
   V(BitcastTaggedToWordForTagAndSmiBits) \
   V(BitcastWordToTagged) \
   V(BitcastWordToTaggedSigned) \
   V(TruncateFloat64ToWord32) \
   V(ChangeFloat32ToFloat64) \
   V(ChangeFloat64ToInt32) \
   V(ChangeFloat64ToInt64) \
   V(ChangeFloat64ToUint32) \
   V(ChangeFloat64ToUint64) \
   V(Float64SilenceNaN) \
   V(TruncateFloat64ToInt64) \
   V(TruncateFloat64ToUint32) \
   V(TruncateFloat32ToInt32) \
   V(TruncateFloat32ToUint32) \
   V(TryTruncateFloat32ToInt64) \
   V(TryTruncateFloat64ToInt64) \
   V(TryTruncateFloat32ToUint64) \
   V(TryTruncateFloat64ToUint64) \
   V(ChangeInt32ToFloat64) \
+  V(BitcastWord32ToWord64) \
   V(ChangeInt32ToInt64) \
   V(ChangeInt64ToFloat64) \
   V(ChangeUint32ToFloat64) \
   V(ChangeUint32ToUint64) \
   V(ChangeTaggedToCompressed) \
   V(TruncateFloat64ToFloat32) \
   V(TruncateInt64ToInt32) \
   V(RoundFloat64ToInt32) \
   V(RoundInt32ToFloat32) \
   V(RoundInt64ToFloat32) \
   V(RoundInt64ToFloat64) \
   V(RoundUint32ToFloat32) \
   V(RoundUint64ToFloat32) \
   V(RoundUint64ToFloat64) \
   V(BitcastFloat32ToInt32) \
   V(BitcastFloat64ToInt64) \
   V(BitcastInt32ToFloat32) \
   V(BitcastInt64ToFloat64) \
   V(Float64ExtractLowWord32) \
   V(Float64ExtractHighWord32) \
   V(Float64InsertLowWord32) \
   V(Float64InsertHighWord32) \
   V(TaggedPoisonOnSpeculation) \
   V(Word32PoisonOnSpeculation) \
   V(Word64PoisonOnSpeculation) \
   V(LoadStackCheckOffset) \
   V(LoadFramePointer) \
   V(LoadParentFramePointer) \
   V(UnalignedLoad) \
   V(UnalignedStore) \
   V(Int32PairAdd) \
   V(Int32PairSub) \
   V(Int32PairMul) \
   V(Word32PairShl) \
   V(Word32PairShr) \
   V(Word32PairSar) \
   V(ProtectedLoad) \
   V(ProtectedStore) \
   V(MemoryBarrier) \
   V(SignExtendWord8ToInt32) \
   V(SignExtendWord16ToInt32) \
   V(SignExtendWord8ToInt64) \
   V(SignExtendWord16ToInt64) \
   V(SignExtendWord32ToInt64) \
   V(UnsafePointerAdd) \
   V(StackPointerGreaterThan)
 
 #define MACHINE_SIMD_OP_LIST(V) \
@@ -1822,6 +1822,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
     case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
     case IrOpcode::kBitcastWordToTagged:
     case IrOpcode::kBitcastWordToTaggedSigned:
+    case IrOpcode::kBitcastWord32ToWord64:
     case IrOpcode::kChangeInt32ToInt64:
     case IrOpcode::kChangeUint32ToUint64:
     case IrOpcode::kChangeTaggedToCompressed:
@@ -195,13 +195,12 @@ TEST_F(DecompressionOptimizerTest, Word32ShlSmiTag) {
   // Create the graph.
   Node* load = graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
                                 object, index, effect, control);
-  Node* truncation = graph()->NewNode(machine()->TruncateInt64ToInt32(), load);
   Node* smi_shift_bits =
       graph()->NewNode(common()->Int32Constant(kSmiShiftSize + kSmiTagSize));
   Node* word32_shl =
-      graph()->NewNode(machine()->Word32Shl(), truncation, smi_shift_bits);
+      graph()->NewNode(machine()->Word32Shl(), load, smi_shift_bits);
   graph()->SetEnd(
-      graph()->NewNode(machine()->ChangeInt32ToInt64(), word32_shl));
+      graph()->NewNode(machine()->BitcastWord32ToWord64(), word32_shl));
   // Change the nodes, and test the change.
   Reduce();
   EXPECT_EQ(LoadMachRep(load), CompressedMachRep(MachineType::AnyTagged()));