Commit 19dfa835 authored by Santiago Aboy Solanes, committed by Commit Bot

[ptr-compr][turbofan] Removing unaligned tagged loads and stores

This is the last CL in the string: all tagged loads and stores have been
eliminated, so no unaligned tagged accesses remain.

That also lets us clean up the corresponding TODO in machine-type.h.

Cq-Include-Trybots: luci.v8.try:v8_linux64_pointer_compression_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_linux64_arm64_pointer_compression_rel_ng
Bug: v8:8977, v8:7703
Change-Id: Icb23b396d0cbb6ee914637e34e26b52435f0000c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1593085
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61300}
parent 1a04b5ee
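
For context: under pointer compression, tagged values are stored on the heap as 32-bit offsets from a per-heap base rather than as full 64-bit pointers. A minimal sketch of the idea, with illustrative names and a deliberately simplified scheme (V8's actual implementation differs in detail):

#include <cstdint>

// Illustrative stand-ins, not V8's real types or helpers.
using Tagged_t = std::uint64_t;      // full tagged word on a 64-bit build
using Compressed_t = std::uint32_t;  // compressed 32-bit on-heap slot

// "Compression" keeps the low 32 bits of the tagged word.
Compressed_t Compress(Tagged_t tagged) {
  return static_cast<Compressed_t>(tagged);
}

// "Decompression" rebases the 32-bit value against the heap base.
Tagged_t Decompress(std::uint64_t heap_base, Compressed_t compressed) {
  return heap_base + compressed;
}

Because every on-heap tagged slot becomes a 32-bit compressed slot, TurboFan rewrites tagged stores into compressed stores, which is what the RawMachineAssembler changes below do.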
@@ -165,12 +165,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
     }
     return load;
   }
-  Node* Store(MachineRepresentation rep, Node* base, Node* value,
-              WriteBarrierKind write_barrier) {
-    return Store(rep, base, IntPtrConstant(0), value, write_barrier);
-  }
-  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value,
-              WriteBarrierKind write_barrier) {
+  std::pair<MachineRepresentation, Node*> InsertCompressionIfNeeded(
+      MachineRepresentation rep, Node* value) {
     if (COMPRESS_POINTERS_BOOL) {
       switch (rep) {
         case MachineRepresentation::kTaggedPointer:
...@@ -191,31 +187,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { ...@@ -191,31 +187,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
break; break;
} }
} }
return std::make_pair(rep, value);
}
Node* Store(MachineRepresentation rep, Node* base, Node* value,
WriteBarrierKind write_barrier) {
return Store(rep, base, IntPtrConstant(0), value, write_barrier);
}
Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value,
WriteBarrierKind write_barrier) {
std::tie(rep, value) = InsertCompressionIfNeeded(rep, value);
return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)), return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
base, index, value); base, index, value);
} }
void OptimizedStoreField(MachineRepresentation rep, Node* object, int offset, void OptimizedStoreField(MachineRepresentation rep, Node* object, int offset,
Node* value, WriteBarrierKind write_barrier) { Node* value, WriteBarrierKind write_barrier) {
if (COMPRESS_POINTERS_BOOL) { std::tie(rep, value) = InsertCompressionIfNeeded(rep, value);
switch (rep) {
case MachineRepresentation::kTaggedPointer:
rep = MachineRepresentation::kCompressedPointer;
value = AddNode(machine()->ChangeTaggedPointerToCompressedPointer(),
value);
break;
case MachineRepresentation::kTaggedSigned:
rep = MachineRepresentation::kCompressedSigned;
value =
AddNode(machine()->ChangeTaggedSignedToCompressedSigned(), value);
break;
case MachineRepresentation::kTagged:
rep = MachineRepresentation::kCompressed;
value = AddNode(machine()->ChangeTaggedToCompressed(), value);
break;
default:
break;
}
}
AddNode(simplified()->StoreField(FieldAccess( AddNode(simplified()->StoreField(FieldAccess(
BaseTaggedness::kTaggedBase, offset, MaybeHandle<Name>(), BaseTaggedness::kTaggedBase, offset, MaybeHandle<Name>(),
MaybeHandle<Map>(), Type::Any(), MaybeHandle<Map>(), Type::Any(),
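
The hunk above hoists the previously duplicated tagged-to-compressed switch out of Store() and OptimizedStoreField() into a shared InsertCompressionIfNeeded() helper that returns a (representation, value) pair, which each call site unpacks with std::tie. A self-contained sketch of that pattern, with stand-in types rather than V8's Node graph:

#include <cstdio>
#include <tuple>
#include <utility>

enum class Rep { kTagged, kCompressed };

// Stand-in for InsertCompressionIfNeeded: rewrite the representation (the
// real helper also wraps the value node in a change operator).
std::pair<Rep, int> InsertCompressionIfNeeded(Rep rep, int value) {
  if (rep == Rep::kTagged) rep = Rep::kCompressed;
  return std::make_pair(rep, value);
}

int main() {
  Rep rep = Rep::kTagged;
  int value = 42;
  // Each store site unpacks the possibly-rewritten pair in one line.
  std::tie(rep, value) = InsertCompressionIfNeeded(rep, value);
  std::printf("compressed: %d value: %d\n", rep == Rep::kCompressed, value);
}

Returning a pair keeps the helper free of out-parameters while letting both store paths share a single switch.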
@@ -235,7 +221,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
     return UnalignedLoad(type, base, IntPtrConstant(0));
   }
   Node* UnalignedLoad(MachineType type, Node* base, Node* index) {
-    if (machine()->UnalignedLoadSupported(type.representation())) {
+    MachineRepresentation rep = type.representation();
+    // Tagged or compressed should never be unaligned
+    DCHECK(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
+    if (machine()->UnalignedLoadSupported(rep)) {
       return AddNode(machine()->Load(type), base, index);
     } else {
       return AddNode(machine()->UnalignedLoad(type), base, index);
@@ -246,6 +235,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   }
   Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* index,
                        Node* value) {
+    // Tagged or compressed should never be unaligned
+    DCHECK(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
     if (machine()->UnalignedStoreSupported(rep)) {
       return AddNode(machine()->Store(StoreRepresentation(
                          rep, WriteBarrierKind::kNoWriteBarrier)),
......
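
The two DCHECKs added above encode the invariant that tagged and compressed slots live at aligned offsets on the V8 heap, so they should never reach the unaligned-access helpers. Like assert, a DCHECK only fires in debug builds; a simplified sketch of the pattern, with assert standing in for V8's DCHECK macro:

#include <cassert>

enum class Rep { kWord32, kTagged, kCompressed };

bool IsAnyTagged(Rep rep) { return rep == Rep::kTagged; }
bool IsAnyCompressed(Rep rep) { return rep == Rep::kCompressed; }

void UnalignedStore(Rep rep) {
  // Tagged/compressed heap slots are always aligned; fail loudly in debug
  // builds if one is routed through the unaligned path.
  assert(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
  // ... emit the actual store ...
}

int main() {
  UnalignedStore(Rep::kWord32);    // fine: raw word stores may be unaligned
  // UnalignedStore(Rep::kTagged); // would trip the assert in a debug build
}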
@@ -404,11 +404,9 @@ inline bool IsAnyCompressed(MachineRepresentation rep) {
          rep == MachineRepresentation::kCompressedSigned;
 }
-// TODO(solanes): remove '|| IsAnyTagged(rep)' when all the representation
-// changes are in place
 inline bool IsAnyCompressedTagged(MachineRepresentation rep) {
 #ifdef V8_COMPRESS_POINTERS
-  return IsAnyCompressed(rep) || IsAnyTagged(rep);
+  return IsAnyCompressed(rep);
 #else
   return IsAnyTagged(rep);
 #endif
 }
......
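
With the TODO resolved, IsAnyCompressedTagged() no longer needs the IsAnyTagged() fallback on pointer-compression builds, because no tagged loads or stores survive to that point. A self-contained sketch of the resulting predicate, with an abbreviated enum (the real one is MachineRepresentation in machine-type.h):

enum class Rep {
  kTagged, kTaggedPointer, kTaggedSigned,
  kCompressed, kCompressedPointer, kCompressedSigned,
};

bool IsAnyTagged(Rep rep) {
  return rep == Rep::kTagged || rep == Rep::kTaggedPointer ||
         rep == Rep::kTaggedSigned;
}

bool IsAnyCompressed(Rep rep) {
  return rep == Rep::kCompressed || rep == Rep::kCompressedPointer ||
         rep == Rep::kCompressedSigned;
}

bool IsAnyCompressedTagged(Rep rep) {
#ifdef V8_COMPRESS_POINTERS
  return IsAnyCompressed(rep);  // tagged reps are fully rewritten by now
#else
  return IsAnyTagged(rep);
#endif
}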
@@ -391,10 +391,6 @@ TEST(RunUnalignedLoadImmIndex) {
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
-                              TestAlignment::kUnaligned);
-  RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -428,10 +424,6 @@ TEST(RunUnalignedLoadStore) {
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
   RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
-  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
-  RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
-                           TestAlignment::kUnaligned);
-  RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -445,10 +437,6 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
   RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
   RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
   RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
-  RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
-  RunUnalignedLoadStoreUnalignedAccess<HeapObject>(
-      MachineType::TaggedPointer());
-  RunUnalignedLoadStoreUnalignedAccess<Object>(MachineType::AnyTagged());
   RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
   RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
 #if V8_TARGET_ARCH_64_BIT
......