Commit c0556c16 authored by Santiago Aboy Solanes, committed by Commit Bot

[Turbofan][ptr-compr] Handle "compress changes" in the instruction selector

These instructions are of the form ChangeTaggedXXXToCompressedXXX and
ChangeCompressedXXXToTaggedXXX, for XXX in ("", "Pointer", "Signed").

This change only affects 64-bit architectures (both x64 and arm64).

Also added tests for the machine operators.
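
For context, the arithmetic these operators lower to (see the arm64 and x64
code generators below) can be sketched in plain C++ as follows. This is a
standalone illustration, not part of the change; the helper names are
illustrative, and it relies only on the fact that Smis carry tag bit 0 and
heap objects carry tag bit 1.

#include <cstdint>

// Compression truncates the 64-bit tagged word to its low 32 bits
// (movl on x64, Uxtw on arm64).
uint32_t Compress(uint64_t tagged) { return static_cast<uint32_t>(tagged); }

// Signed decompression: the value is a Smi, so sign extension is enough
// (movsxlq on x64, Sxtw on arm64).
uint64_t DecompressSigned(uint32_t compressed) {
  return static_cast<uint64_t>(
      static_cast<int64_t>(static_cast<int32_t>(compressed)));
}

// Pointer decompression: add the isolate root to the sign-extended offset
// (movsxlq + addq on x64, Add with an SXTW operand on arm64).
uint64_t DecompressPointer(uint64_t isolate_root, uint32_t compressed) {
  return isolate_root + DecompressSigned(compressed);
}

// "Any" decompression picks between the two branchlessly: -(tag bit) & root
// is 0 for a Smi (tag 0) and the isolate root for a heap object (tag 1).
uint64_t DecompressAny(uint64_t isolate_root, uint32_t compressed) {
  uint64_t masked_root =
      (0 - static_cast<uint64_t>(compressed & 1)) & isolate_root;
  return masked_root + DecompressSigned(compressed);
}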

Cq-Include-Trybots: luci.v8.try:v8_linux64_pointer_compression_rel_ng,v8_linux64_arm64_pointer_compression_rel_ng
Bug: v8:8977
Change-Id: I239d9de7f214424852e75b5d56996e8dfdacd400
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1526009
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60393}
parent f0a95688
@@ -1572,6 +1572,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Str:
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64DecompressSigned: {
__ Sxtw(i.OutputRegister(), i.InputRegister(0));
break;
}
case kArm64DecompressPointer: {
__ Add(i.OutputRegister(), kRootRegister,
Operand(i.InputRegister(0), SXTW));
break;
}
case kArm64DecompressAny: {
// TODO(solanes): Do branchful compute?
// Branchlessly compute |masked_root|:
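// masked_root = (input has the Smi tag, i.e. low bit 0) ? 0 : kRootRegister.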
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
UseScratchRegisterScope temps(tasm());
Register masked_root = temps.AcquireX();
// Sign extend tag bit to entire register.
__ Sbfx(masked_root, i.InputRegister(0), 0, kSmiTagSize);
__ And(masked_root, masked_root, kRootRegister);
// Now this add operation will either leave the value unchanged if it is a
// smi or add the isolate root if it is a heap object.
__ Add(i.OutputRegister(), masked_root,
Operand(i.InputRegister(0), SXTW));
break;
}
// TODO(solanes): Combine into one Compress? They seem to be identical.
// TODO(solanes): We might get away with doing a no-op in these three cases.
// The Uxtw instruction is the conservative way for the moment.
case kArm64CompressSigned: {
__ Uxtw(i.OutputRegister(), i.InputRegister(0));
break;
}
case kArm64CompressPointer: {
__ Uxtw(i.OutputRegister(), i.InputRegister(0));
break;
}
case kArm64CompressAny: {
__ Uxtw(i.OutputRegister(), i.InputRegister(0));
break;
}
case kArm64LdrS:
__ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
@@ -163,6 +163,12 @@ namespace compiler {
V(Arm64LdrDecompressAnyTagged) \
V(Arm64Str) \
V(Arm64StrCompressTagged) \
V(Arm64DecompressSigned) \
V(Arm64DecompressPointer) \
V(Arm64DecompressAny) \
V(Arm64CompressSigned) \
V(Arm64CompressPointer) \
V(Arm64CompressAny) \
V(Arm64DsbIsb) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
@@ -282,6 +282,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
case kArm64CompareAndBranch:
case kArm64DecompressSigned:
case kArm64DecompressPointer:
case kArm64DecompressAny:
case kArm64CompressSigned:
case kArm64CompressPointer:
case kArm64CompressAny:
return kNoOpcodeFlags;
case kArm64LdrS:
@@ -1657,6 +1657,46 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}
void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kArm64CompressAny, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer(
Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kArm64CompressPointer, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kArm64CompressSigned, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
Emit(kArm64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
Emit(kArm64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1517,6 +1517,22 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
case IrOpcode::kChangeTaggedToCompressed:
return MarkAsWord32(node), VisitChangeTaggedToCompressed(node);
case IrOpcode::kChangeTaggedPointerToCompressedPointer:
return MarkAsWord32(node),
VisitChangeTaggedPointerToCompressedPointer(node);
case IrOpcode::kChangeTaggedSignedToCompressedSigned:
return MarkAsWord32(node),
VisitChangeTaggedSignedToCompressedSigned(node);
case IrOpcode::kChangeCompressedToTagged:
return MarkAsWord64(node), VisitChangeCompressedToTagged(node);
case IrOpcode::kChangeCompressedPointerToTaggedPointer:
return MarkAsWord64(node),
VisitChangeCompressedPointerToTaggedPointer(node);
case IrOpcode::kChangeCompressedSignedToTaggedSigned:
return MarkAsWord64(node),
VisitChangeCompressedSignedToTaggedSigned(node);
case IrOpcode::kTruncateFloat64ToFloat32:
return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
case IrOpcode::kTruncateFloat64ToWord32:
@@ -2282,6 +2298,34 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer(
Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
@@ -1951,6 +1951,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
case kX64DecompressSigned: {
CHECK(instr->HasOutput());
__ movsxlq(i.OutputRegister(), i.InputRegister(0));
break;
}
case kX64DecompressPointer: {
CHECK(instr->HasOutput());
__ movsxlq(i.OutputRegister(), i.InputRegister(0));
__ addq(i.OutputRegister(), kRootRegister);
break;
}
case kX64DecompressAny: {
CHECK(instr->HasOutput());
__ movsxlq(i.OutputRegister(), i.InputRegister(0));
// TODO(solanes): Do branchful compute?
// Branchlessly compute |masked_root|:
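// masked_root = (value has the Smi tag, i.e. low bit 0) ? 0 : kRootRegister.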
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
Register masked_root = kScratchRegister;
__ movl(masked_root, i.OutputRegister());
__ andl(masked_root, Immediate(kSmiTagMask));
__ negq(masked_root);
__ andq(masked_root, kRootRegister);
// Now this add operation will either leave the value unchanged if it is a
// smi or add the isolate root if it is a heap object.
__ addq(i.OutputRegister(), masked_root);
break;
}
// TODO(solanes): Combine into one Compress? They seem to be identical.
// TODO(solanes): We might get away with doing a no-op in these three cases.
// The movl instruction is the conservative way for the moment.
case kX64CompressSigned: {
__ movl(i.OutputRegister(), i.InputRegister(0));
break;
}
case kX64CompressPointer: {
__ movl(i.OutputRegister(), i.InputRegister(0));
break;
}
case kX64CompressAny: {
__ movl(i.OutputRegister(), i.InputRegister(0));
break;
}
case kX64Movq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
@@ -136,6 +136,12 @@ namespace compiler {
V(X64MovqDecompressTaggedPointer) \
V(X64MovqDecompressAnyTagged) \
V(X64MovqCompressTagged) \
V(X64DecompressSigned) \
V(X64DecompressPointer) \
V(X64DecompressAny) \
V(X64CompressSigned) \
V(X64CompressPointer) \
V(X64CompressAny) \
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
@@ -265,6 +265,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S8x2Reverse:
case kX64S1x16AnyTrue:
case kX64S1x16AllTrue:
case kX64DecompressSigned:
case kX64DecompressPointer:
case kX64DecompressAny:
case kX64CompressSigned:
case kX64CompressPointer:
case kX64CompressAny:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -1265,6 +1265,46 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kX64CompressAny, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer(
Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kX64CompressPointer, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kX64CompressSigned, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
}
namespace {
void VisitRO(InstructionSelector* selector, Node* node,
@@ -703,6 +703,24 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* ChangeUint32ToUint64(Node* a) {
return AddNode(machine()->ChangeUint32ToUint64(), a);
}
Node* ChangeTaggedToCompressed(Node* a) {
return AddNode(machine()->ChangeTaggedToCompressed(), a);
}
Node* ChangeTaggedPointerToCompressedPointer(Node* a) {
return AddNode(machine()->ChangeTaggedPointerToCompressedPointer(), a);
}
Node* ChangeTaggedSignedToCompressedSigned(Node* a) {
return AddNode(machine()->ChangeTaggedSignedToCompressedSigned(), a);
}
Node* ChangeCompressedToTagged(Node* a) {
return AddNode(machine()->ChangeCompressedToTagged(), a);
}
Node* ChangeCompressedPointerToTaggedPointer(Node* a) {
return AddNode(machine()->ChangeCompressedPointerToTaggedPointer(), a);
}
Node* ChangeCompressedSignedToTaggedSigned(Node* a) {
return AddNode(machine()->ChangeCompressedSignedToTaggedSigned(), a);
}
Node* TruncateFloat64ToFloat32(Node* a) {
return AddNode(machine()->TruncateFloat64ToFloat32(), a);
}
@@ -397,6 +397,53 @@ TEST(RunWord64Popcnt) {
CHECK_EQ(22, m.Call(uint64_t(0xE00DC103E00DC103)));
CHECK_EQ(18, m.Call(uint64_t(0x000DC107000DC107)));
}
#ifdef V8_COMPRESS_POINTERS
TEST(CompressDecompressTaggedAnyPointer) {
RawMachineAssemblerTester<void*> m;
Handle<HeapNumber> value = m.isolate()->factory()->NewHeapNumber(11.2);
Node* node = m.HeapConstant(value);
m.Return(m.ChangeCompressedToTagged(m.ChangeTaggedToCompressed(node)));
HeapObject result =
HeapObject::cast(Object(reinterpret_cast<Address>(m.Call())));
CHECK_EQ(result, *value);
}
TEST(CompressDecompressTaggedAnySigned) {
RawMachineAssemblerTester<int64_t> m;
Smi smi = Smi::FromInt(123);
int64_t smiPointer = static_cast<int64_t>(smi.ptr());
Node* node = m.Int64Constant(smiPointer);
m.Return(m.ChangeCompressedToTagged(m.ChangeTaggedToCompressed(node)));
CHECK_EQ(smiPointer, m.Call());
}
TEST(CompressDecompressTaggedPointer) {
RawMachineAssemblerTester<void*> m;
Handle<HeapNumber> value = m.isolate()->factory()->NewHeapNumber(11.2);
Node* node = m.HeapConstant(value);
m.Return(m.ChangeCompressedPointerToTaggedPointer(
m.ChangeTaggedPointerToCompressedPointer(node)));
HeapObject result =
HeapObject::cast(Object(reinterpret_cast<Address>(m.Call())));
CHECK_EQ(result, *value);
}
TEST(CompressDecompressTaggedSigned) {
RawMachineAssemblerTester<int64_t> m;
Smi smi = Smi::FromInt(123);
int64_t smiPointer = static_cast<int64_t>(smi.ptr());
Node* node = m.Int64Constant(smiPointer);
m.Return(m.ChangeCompressedSignedToTaggedSigned(
m.ChangeTaggedSignedToCompressedSigned(node)));
CHECK_EQ(smiPointer, m.Call());
}
#endif // V8_COMPRESS_POINTERS
#endif // V8_TARGET_ARCH_64_BIT