Commit b6c625f3 authored by Igor Sheludko and committed by Commit Bot

[ptr-compr][x64][arm64] Add support for smi-corrupting decompression

... in disabled state. It will be enabled in a follow-up CL.

Bug: v8:9706
Change-Id: I43b482a4fd1bf9af0c6ba535b453e72463bee0f8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1826731
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Clemens Backes [né Hammacher] <clemensb@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64039}
parent 02d0b14f
......@@ -1025,7 +1025,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
} else {
DCHECK(SmiValuesAre31Bits());
if (COMPRESS_POINTERS_BOOL) {
Ldrsw(dst, src);
if (kUseSmiCorruptingPtrDecompression) {
Ldr(dst.W(), src);
} else {
Ldrsw(dst, src);
}
} else {
Ldr(dst, src);
}
......
......@@ -2736,18 +2736,8 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
Ldrsw(destination, field_operand);
if (kUseBranchlessPtrDecompressionInGeneratedCode) {
UseScratchRegisterScope temps(this);
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
Register masked_root = temps.AcquireX();
// Sign extend tag bit to entire register.
Sbfx(masked_root, destination, 0, kSmiTagSize);
And(masked_root, masked_root, kRootRegister);
// Now this add operation will either leave the value unchanged if it is a
// smi or add the isolate root if it is a heap object.
Add(destination, masked_root, destination);
if (kUseSmiCorruptingPtrDecompression) {
Add(destination, kRootRegister, destination);
} else {
Label done;
JumpIfSmi(destination, &done);
......@@ -2760,18 +2750,8 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const Register& source) {
RecordComment("[ DecompressAnyTagged");
if (kUseBranchlessPtrDecompressionInGeneratedCode) {
UseScratchRegisterScope temps(this);
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
Register masked_root = temps.AcquireX();
// Sign extend tag bit to entire register.
Sbfx(masked_root, source, 0, kSmiTagSize);
And(masked_root, masked_root, kRootRegister);
// Now this add operation will either leave the value unchanged if it is a
// smi or add the isolate root if it is a heap object.
Add(destination, masked_root, Operand(source, SXTW));
if (kUseSmiCorruptingPtrDecompression) {
Add(destination, kRootRegister, Operand(source, SXTW));
} else {
Label done;
Sxtw(destination, source);
......
......@@ -747,10 +747,13 @@ TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) {
}
TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
TNode<Smi> smi =
BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant()));
return smi;
if (COMPRESS_POINTERS_BOOL && kUseSmiCorruptingPtrDecompression) {
static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1),
"Use shifting instead of add");
return BitcastWordToTaggedSigned(
ChangeUint32ToWord(Int32Add(value, value)));
}
return SmiTag(ChangeInt32ToIntPtr(value));
}
TNode<Smi> CodeStubAssembler::SmiFromUint32(TNode<Uint32T> value) {
......@@ -776,6 +779,9 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
if (ToInt32Constant(value, &constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(constant_value);
}
if (COMPRESS_POINTERS_BOOL && kUseSmiCorruptingPtrDecompression) {
return SmiFromInt32(TruncateIntPtrToInt32(value));
}
TNode<Smi> smi =
BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
return smi;
......@@ -786,11 +792,19 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
if (ToIntPtrConstant(value, &constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
if (COMPRESS_POINTERS_BOOL && kUseSmiCorruptingPtrDecompression) {
return ChangeInt32ToIntPtr(SmiToInt32(value));
}
return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
  // With smi-corrupting decompression enabled, untag entirely in 32-bit
  // space: truncate the tagged word first, then arithmetic-shift the tag
  // bits away.
  if (COMPRESS_POINTERS_BOOL && kUseSmiCorruptingPtrDecompression) {
    TNode<Int32T> raw =
        TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value));
    return Signed(Word32Sar(raw, SmiShiftBitsConstant32()));
  }
  // Otherwise untag at full word width and narrow the result.
  return TruncateIntPtrToInt32(SmiUntag(value));
}
......@@ -9653,7 +9667,11 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
index = smi_index.value();
} else {
if (COMPRESS_POINTERS_BOOL) {
CSA_ASSERT(this, IsValidSmiIndex(smi_index_node));
if (kUseSmiCorruptingPtrDecompression) {
smi_index_node = NormalizeSmiIndex(smi_index_node);
} else {
CSA_ASSERT(this, IsValidSmiIndex(smi_index_node));
}
}
}
intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(smi_index_node);
......
......@@ -313,17 +313,8 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressRegisterAnyTagged(Register destination,
Register scratch) {
if (kUseBranchlessPtrDecompressionInGeneratedCode) {
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
Register masked_root = scratch;
xorq(masked_root, masked_root);
Condition smi = CheckSmi(destination);
cmovq(NegateCondition(smi), masked_root, kRootRegister);
// Now this add operation will either leave the value unchanged if it is
// a smi or add the isolate root if it is a heap object.
addq(destination, masked_root);
if (kUseSmiCorruptingPtrDecompression) {
addq(destination, kRootRegister);
} else {
Label done;
JumpIfSmi(destination, &done);
......
......@@ -246,10 +246,9 @@ using AtomicTagged_t = base::AtomicWord;
#endif // V8_COMPRESS_POINTERS
// Defines whether the branchless or branchful implementation of pointer
// Defines whether to use smi-corrupting or branchful implementation of pointer
// decompression should be used.
constexpr bool kUseBranchlessPtrDecompressionInRuntime = false;
constexpr bool kUseBranchlessPtrDecompressionInGeneratedCode = false;
constexpr bool kUseSmiCorruptingPtrDecompression = false;
STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES);
......
......@@ -65,19 +65,12 @@ V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
if (kUseBranchlessPtrDecompressionInRuntime) {
// Current compression scheme requires |raw_value| to be sign-extended
// from int32_t to intptr_t.
intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
// |root_mask| is 0 if the |value| was a smi or -1 otherwise.
Address root_mask = static_cast<Address>(-(value & kSmiTagMask));
Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr);
return root_or_zero + static_cast<Address>(value);
} else {
return HAS_SMI_TAG(raw_value)
? DecompressTaggedSigned(raw_value)
: DecompressTaggedPointer(on_heap_addr, raw_value);
if (kUseSmiCorruptingPtrDecompression) {
return DecompressTaggedPointer(on_heap_addr, raw_value);
}
return HAS_SMI_TAG(raw_value)
? DecompressTaggedSigned(raw_value)
: DecompressTaggedPointer(on_heap_addr, raw_value);
}
#ifdef V8_COMPRESS_POINTERS
......
......@@ -1474,7 +1474,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerScopedExceptionHandler {
} // namespace compiler
#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
#if defined(V8_HOST_ARCH_32_BIT)
#define BINT_IS_SMI
using BInt = Smi;
#elif defined(V8_HOST_ARCH_64_BIT)
......
......@@ -3030,6 +3030,14 @@ bool CanCover(Node* value, IrOpcode::Value opcode) {
return true;
}
// Narrows a word-sized value to int32 on 64-bit targets; on 32-bit targets
// the value already has int32 width and is returned unchanged.
Node* WasmGraphBuilder::BuildTruncateIntPtrToInt32(Node* value) {
  if (!mcgraph()->machine()->Is64()) return value;
  return graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
}
Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
if (mcgraph()->machine()->Is64()) {
value = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value);
......@@ -3038,12 +3046,20 @@ Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
}
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
  // Smi-corrupting decompression mode tags with a 32-bit shift instead of
  // widening the value to word size first.
  const bool tag_in_32bit =
      COMPRESS_POINTERS_BOOL && kUseSmiCorruptingPtrDecompression;
  if (tag_in_32bit) {
    return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
                            BuildSmiShiftBitsConstant32());
  }
  // Default scheme: sign-extend to pointer width, then shift in the tag.
  Node* widened = BuildChangeInt32ToIntPtr(value);
  return graph()->NewNode(mcgraph()->machine()->WordShl(), widened,
                          BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) {
  // Smi-corrupting decompression mode tags with a 32-bit shift; otherwise
  // zero-extend the (at most 31-bit) value to pointer width and shift at
  // full word width.
  const bool tag_in_32bit =
      COMPRESS_POINTERS_BOOL && kUseSmiCorruptingPtrDecompression;
  if (tag_in_32bit) {
    return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
                            BuildSmiShiftBitsConstant32());
  }
  Node* widened = Uint32ToUintptr(value);
  return graph()->NewNode(mcgraph()->machine()->WordShl(), widened,
                          BuildSmiShiftBitsConstant());
}
......@@ -3052,16 +3068,32 @@ Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
return mcgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
// 32-bit variant of BuildSmiShiftBitsConstant(): the total shift applied
// when tagging/untagging a Smi (shift size plus tag size), as an Int32.
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
  constexpr int kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
  return mcgraph()->Int32Constant(kSmiShiftBits);
}
Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
value = graph()->NewNode(mcgraph()->machine()->WordSar(), value,
BuildSmiShiftBitsConstant());
if (mcgraph()->machine()->Is64()) {
if (COMPRESS_POINTERS_BOOL && kUseSmiCorruptingPtrDecompression) {
value =
graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
value = graph()->NewNode(mcgraph()->machine()->Word32Sar(), value,
BuildSmiShiftBitsConstant32());
} else {
value = BuildChangeSmiToIntPtr(value);
value = BuildTruncateIntPtrToInt32(value);
}
return value;
}
Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
  if (COMPRESS_POINTERS_BOOL && kUseSmiCorruptingPtrDecompression) {
    // Untag in 32-bit space first, then sign-extend to pointer width.
    return BuildChangeInt32ToIntPtr(BuildChangeSmiToInt32(value));
  }
  // Default scheme: arithmetic shift removes the tag at full word width.
  return graph()->NewNode(mcgraph()->machine()->WordSar(), value,
                          BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
uint32_t maxval) {
DCHECK(Smi::IsValid(maxval));
......@@ -3278,10 +3310,7 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
Node* result =
graph()->NewNode(mcgraph()->machine()->WordShr(), mem_size,
mcgraph()->Int32Constant(wasm::kWasmPageSizeLog2));
if (mcgraph()->machine()->Is64()) {
result =
graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), result);
}
result = BuildTruncateIntPtrToInt32(result);
return result;
}
......@@ -5597,7 +5626,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* function_index_smi = LOAD_RAW(
function_data,
WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag,
MachineType::TypeCompressedTagged());
MachineType::TypeCompressedTaggedSigned());
Node* function_index = BuildChangeSmiToInt32(function_index_smi);
return function_index;
}
......@@ -5606,8 +5635,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* jump_table_offset_smi = LOAD_RAW(
function_data,
WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag,
MachineType::TypeCompressedTagged());
Node* jump_table_offset = BuildChangeSmiToInt32(jump_table_offset_smi);
MachineType::TypeCompressedTaggedSigned());
Node* jump_table_offset = BuildChangeSmiToIntPtr(jump_table_offset_smi);
return jump_table_offset;
}
......
......@@ -574,11 +574,14 @@ class WasmGraphBuilder {
MachineType result_type, wasm::TrapReason trap_zero,
wasm::WasmCodePosition position);
Node* BuildTruncateIntPtrToInt32(Node* value);
Node* BuildChangeInt32ToIntPtr(Node* value);
Node* BuildChangeInt32ToSmi(Node* value);
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
Node* BuildSmiShiftBitsConstant32();
Node* BuildChangeSmiToInt32(Node* value);
Node* BuildChangeSmiToIntPtr(Node* value);
// generates {index > max ? Smi(max) : Smi(index)}
Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment