Commit 88f17ba6 authored by Igor Sheludko's avatar Igor Sheludko Committed by Commit Bot

[ptr-compr][arm64] Implement decompression snippets for Arm64

Currently, in debug mode the snippets check that the result of decompression
equals the full value stored in the field.

Bug: v8:7703
Change-Id: I43d20f15510de57582ee00ca23d676dfd4d06636
Reviewed-on: https://chromium-review.googlesource.com/c/1440049
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59200}
parent 9d61228b
...@@ -2002,13 +2002,18 @@ void TurboAssembler::Call(ExternalReference target) { ...@@ -2002,13 +2002,18 @@ void TurboAssembler::Call(ExternalReference target) {
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
// The builtin_pointer register contains the builtin index as a Smi. // The builtin_pointer register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below. // Untagging is folded into the indexing operand below.
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kSmiShiftSize == 0);
Lsl(builtin_pointer, builtin_pointer, kSystemPointerSizeLog2 - kSmiShift);
#else
STATIC_ASSERT(kSmiShiftSize == 31);
Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2); Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2);
#endif
Add(builtin_pointer, builtin_pointer, Add(builtin_pointer, builtin_pointer,
IsolateData::builtin_entry_table_offset()); IsolateData::builtin_entry_table_offset());
Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer)); Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
...@@ -2733,6 +2738,89 @@ void MacroAssembler::JumpIfNotRoot(const Register& obj, RootIndex index, ...@@ -2733,6 +2738,89 @@ void MacroAssembler::JumpIfNotRoot(const Register& obj, RootIndex index,
B(ne, if_not_equal); B(ne, if_not_equal);
} }
// Loads a compressed tagged Smi from |field_operand| and decompresses it into
// |destination| by sign-extending the low 32 bits. A Smi needs no
// isolate-root addition (contrast with DecompressTaggedPointer below).
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
                                            const MemOperand& field_operand) {
  RecordComment("[ DecompressTaggedSigned");
#ifdef DEBUG
  // In debug builds, also load the full 64-bit value stored in the field so
  // the decompressed result can be compared against it below.
  UseScratchRegisterScope temps(this);
  Register expected_value = temps.AcquireX();
  DCHECK(!AreAliased(destination, expected_value));
  Ldr(expected_value, field_operand);
  mov(destination, expected_value);
#else
  // TODO(ishell): use Ldrsw instead of Ldr,SXTW once kTaggedSize is shrunk
  Ldr(destination, field_operand);
#endif
  // Decompress: sign-extend the low 32 bits into the full register.
  Sxtw(destination, destination);
#ifdef DEBUG
  // Trap (brk) if decompression did not reproduce the full field value.
  Label check_passed;
  Cmp(destination, expected_value);
  B(eq, &check_passed);
  RecordComment("DecompressTaggedSigned failed");
  brk(0);
  bind(&check_passed);
#endif
  RecordComment("]");
}
// Loads a compressed tagged heap-object pointer from |field_operand| and
// decompresses it into |destination|: sign-extend the low 32 bits and add
// the isolate root (kRootRegister).
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
                                             const MemOperand& field_operand) {
  RecordComment("[ DecompressTaggedPointer");
#ifdef DEBUG
  // In debug builds, also load the full 64-bit value stored in the field so
  // the decompressed result can be compared against it below.
  UseScratchRegisterScope temps(this);
  Register expected_value = temps.AcquireX();
  DCHECK(!AreAliased(destination, expected_value));
  Ldr(expected_value, field_operand);
  mov(destination, expected_value);
#else
  // TODO(ishell): use Ldrsw instead of Ldr,SXTW once kTaggedSize is shrunk
  Ldr(destination, field_operand);
#endif
  // Decompress: isolate root + sign-extended 32-bit offset.
  Add(destination, kRootRegister, Operand(destination, SXTW));
#ifdef DEBUG
  // Trap (brk) if decompression did not reproduce the full field value.
  Label check_passed;
  Cmp(destination, expected_value);
  B(eq, &check_passed);
  RecordComment("DecompressTaggedPointer failed");
  brk(0);
  bind(&check_passed);
#endif
  RecordComment("]");
}
// Loads a compressed tagged value (Smi or heap-object pointer) from
// |field_operand| and decompresses it into |destination|. The Smi/pointer
// distinction is resolved branchlessly: the isolate root is added only when
// the value's tag bit marks it as a heap object.
void TurboAssembler::DecompressAnyTagged(const Register& destination,
                                         const MemOperand& field_operand) {
  RecordComment("[ DecompressAnyTagged");
  UseScratchRegisterScope temps(this);
#ifdef DEBUG
  // In debug builds, also load the full 64-bit value stored in the field so
  // the decompressed result can be compared against it below.
  Register expected_value = temps.AcquireX();
  DCHECK(!AreAliased(destination, expected_value));
  Ldr(expected_value, field_operand);
  mov(destination, expected_value);
#else
  // TODO(ishell): use Ldrsw instead of Ldr,SXTW once kTaggedSize is shrunk
  Ldr(destination, field_operand);
#endif
  // Branchlessly compute |masked_root|:
  // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
  Register masked_root = temps.AcquireX();
  // Sign extend tag bit to entire register: 0 for a Smi (tag bit 0),
  // all-ones for a heap object (tag bit 1).
  Sbfx(masked_root, destination, 0, kSmiTagSize);
  And(masked_root, masked_root, kRootRegister);
  // Now this add operation will either leave the value unchanged if it is a smi
  // or add the isolate root if it is a heap object.
  Add(destination, masked_root, Operand(destination, SXTW));
#ifdef DEBUG
  // Trap (brk) if decompression did not reproduce the full field value.
  Label check_passed;
  Cmp(destination, expected_value);
  B(eq, &check_passed);
  RecordComment("Decompression failed: Tagged");
  brk(0);
  bind(&check_passed);
#endif
  RecordComment("]");
}
void MacroAssembler::CompareAndSplit(const Register& lhs, void MacroAssembler::CompareAndSplit(const Register& lhs,
const Operand& rhs, const Operand& rhs,
......
...@@ -1175,6 +1175,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { ...@@ -1175,6 +1175,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister(); void ResetSpeculationPoisonRegister();
// ---------------------------------------------------------------------------
// Pointer compression support
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand);
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
protected: protected:
// The actual Push and Pop implementations. These don't generate any code // The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows // other than that required for the push or pop. This allows
......
...@@ -1555,6 +1555,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1555,6 +1555,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ldr(i.OutputRegister(), i.MemoryOperand()); __ Ldr(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break; break;
case kArm64LdrDecompressTaggedSigned:
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedPointer:
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressAnyTagged:
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Str: case kArm64Str:
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break; break;
......
...@@ -158,6 +158,9 @@ namespace compiler { ...@@ -158,6 +158,9 @@ namespace compiler {
V(Arm64LdrW) \ V(Arm64LdrW) \
V(Arm64StrW) \ V(Arm64StrW) \
V(Arm64Ldr) \ V(Arm64Ldr) \
V(Arm64LdrDecompressTaggedSigned) \
V(Arm64LdrDecompressTaggedPointer) \
V(Arm64LdrDecompressAnyTagged) \
V(Arm64Str) \ V(Arm64Str) \
V(Arm64DsbIsb) \ V(Arm64DsbIsb) \
V(Arm64F32x4Splat) \ V(Arm64F32x4Splat) \
......
...@@ -294,6 +294,9 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -294,6 +294,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldrsw: case kArm64Ldrsw:
case kArm64LdrW: case kArm64LdrW:
case kArm64Ldr: case kArm64Ldr:
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
case kArm64Peek: case kArm64Peek:
return kIsLoadOperation; return kIsLoadOperation;
...@@ -415,6 +418,9 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { ...@@ -415,6 +418,9 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Ror32: case kArm64Ror32:
return 1; return 1;
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
case kArm64Ldr: case kArm64Ldr:
case kArm64LdrD: case kArm64LdrD:
case kArm64LdrS: case kArm64LdrS:
......
...@@ -620,9 +620,24 @@ void InstructionSelector::VisitLoad(Node* node) { ...@@ -620,9 +620,24 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64LdrW; opcode = kArm64LdrW;
immediate_mode = kLoadStoreImm32; immediate_mode = kLoadStoreImm32;
break; break;
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
opcode = kArm64LdrDecompressTaggedSigned;
immediate_mode = kLoadStoreImm32;
break;
case MachineRepresentation::kTaggedPointer:
opcode = kArm64LdrDecompressTaggedPointer;
immediate_mode = kLoadStoreImm32;
break;
case MachineRepresentation::kTagged:
opcode = kArm64LdrDecompressAnyTagged;
immediate_mode = kLoadStoreImm32;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through. case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord64: case MachineRepresentation::kWord64:
opcode = kArm64Ldr; opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64; immediate_mode = kLoadStoreImm64;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment