Commit 86ab4039 authored by Michael Achenbach, committed by Commit Bot

Revert "[turbofan][ptr-compr] Remove redundant ChangeTaggedToCompressed"

This reverts commit c924f54e.

Reason for revert: Suspect for GPU failures:
https://ci.chromium.org/p/v8/builders/ci/Linux%20V8%20FYI%20Release%20(NVIDIA)/7641

Original change's description:
> [turbofan][ptr-compr] Remove redundant ChangeTaggedToCompressed
> 
> The final goal is to eliminate it altogether. This CL just
> eliminate the redundant ones.
> 
> Bug: v8:7703
> Change-Id: If6e718c373fca7c65ce46c347533ec4550fbc444
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1950968
> Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
> Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#65398}

TBR=tebbi@chromium.org,solanes@chromium.org

Change-Id: Id280d7b0a850e3aacd56f839a0fc36d02a88f34c
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7703
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1960286
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65403}
parent cb08d966
......@@ -4988,6 +4988,16 @@ Node* EffectControlLinearizer::BuildTypedArrayDataPointer(Node* base,
return external;
} else {
if (COMPRESS_POINTERS_BOOL) {
// TurboFan does not support loading of compressed fields without
// decompression so we add the following operations to workaround that.
// We can't load the base value as word32 because in that case the
// value will not be marked as tagged in the pointer map and will not
// survive GC.
// Compress the base value back in order to be able to decompress it by
// doing an unsafe add below. Both decompression and compression
// will be removed by the decompression elimination pass.
base = __ ChangeTaggedToCompressed(base);
base = __ BitcastTaggedToWord(base);
// Zero-extend Tagged_t to UintPtr according to current compression
// scheme so that the addition with |external_pointer| (which already
// contains compensated offset value) will decompress the tagged value.
......
......@@ -447,26 +447,26 @@ Node* GraphAssembler::IntPtrEqual(Node* left, Node* right) {
// Compares two tagged values for equality.
// With pointer compression enabled, equality of the lower 32 compressed
// bits is sufficient (both values decompress against the same base), so we
// compress both operands and compare as Word32; otherwise compare the full
// machine words. The page render interleaved the removed and restored diff
// lines without markers; this is the coherent post-revert body, which
// restores the ChangeTaggedToCompressed operations.
Node* GraphAssembler::TaggedEqual(Node* left, Node* right) {
  if (COMPRESS_POINTERS_BOOL) {
    return Word32Equal(ChangeTaggedToCompressed(left),
                       ChangeTaggedToCompressed(right));
  }
  return WordEqual(left, right);
}
// Subtracts two Smi (small integer) values.
// With pointer compression enabled, Smis fit in the lower 32 bits, so the
// operands are first compressed and the subtraction is done as Int32;
// otherwise a full word-sized IntSub is used. Reconstructed post-revert
// body: the diff render had merged old and new lines, leaving a dangling
// else with unreachable duplicated returns.
Node* GraphAssembler::SmiSub(Node* left, Node* right) {
  if (COMPRESS_POINTERS_BOOL) {
    return Int32Sub(ChangeTaggedToCompressed(left),
                    ChangeTaggedToCompressed(right));
  }
  return IntSub(left, right);
}
// Signed less-than comparison of two Smi (small integer) values.
// With pointer compression enabled, the comparison happens on the 32-bit
// compressed representation (Int32LessThan after ChangeTaggedToCompressed);
// otherwise on full machine words via IntLessThan. Reconstructed
// post-revert body from the marker-less interleaved diff render.
Node* GraphAssembler::SmiLessThan(Node* left, Node* right) {
  if (COMPRESS_POINTERS_BOOL) {
    return Int32LessThan(ChangeTaggedToCompressed(left),
                         ChangeTaggedToCompressed(right));
  }
  return IntLessThan(left, right);
}
Node* GraphAssembler::Float64RoundDown(Node* value) {
......
......@@ -40,6 +40,7 @@ class BasicBlock;
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeInt64ToFloat64) \
V(ChangeTaggedToCompressed) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(Float64Abs) \
......
......@@ -402,6 +402,9 @@ bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
switch (value->opcode()) {
case IrOpcode::kBitcastWordToTaggedSigned:
return false;
case IrOpcode::kChangeTaggedToCompressed:
value = NodeProperties::GetValueInput(value, 0);
continue;
case IrOpcode::kHeapConstant: {
RootIndex root_index;
if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
......
......@@ -240,6 +240,22 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
const Edge edge) {
if (COMPRESS_POINTERS_BOOL &&
node->opcode() == IrOpcode::kChangeTaggedToCompressed) {
// In Pointer Compression we might have a Compress node between an
// AllocateRaw and the value used as input. This case is trickier since we
// have to check all of the Compress node edges to test for a StoreField.
for (Edge const new_edge : node->use_edges()) {
if (AllocationTypeNeedsUpdateToOld(new_edge.from(), new_edge)) {
return true;
}
}
// If we arrived here, we tested all the edges of the Compress node and
// didn't find it necessary to update the AllocationType.
return false;
}
// Test to see if we need to update the AllocationType.
if (node->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
Node* parent = node->InputAt(0);
......@@ -267,6 +283,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
Node* child = user->InputAt(1);
// In Pointer Compression we might have a Compress node between an
// AllocateRaw and the value used as input. If so, we need to update
// child to point to the StoreField.
if (COMPRESS_POINTERS_BOOL &&
child->opcode() == IrOpcode::kChangeTaggedToCompressed) {
child = child->InputAt(0);
}
if (child->opcode() == IrOpcode::kAllocateRaw &&
AllocationTypeOf(child->op()) == AllocationType::kYoung) {
NodeProperties::ChangeOp(child, node->op());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment