Commit 0a424ac1 authored by Santiago Aboy Solanes, committed by Commit Bot

[ptr-compr][gc] Propagate tenuring in StoreField with Compress nodes.

The propagation of tenuring from outer allocations to inner allocations
was blocked under pointer compression, since we now have Compress nodes
between AllocateRaw and StoreField.

This was causing issues in GC: it showed up as big regressions in the
Octane2.1/Splay benchmark.

This CL updates the memory optimizer so that it can use the Compress nodes
as bridges and perform the optimization successfully. Note that the
Compress nodes only appear on the value input of the StoreField (the
pattern is sketched below).

Cq-Include-Trybots: luci.v8.try:v8_linux64_pointer_compression_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_linux64_arm64_pointer_compression_rel_ng
Bug: v8:7703, v8:9519
Change-Id: I6b0cc67955c6cc696e8c426b85c87a1794098ed0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1714650
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62938}
parent e25fcd26
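
To illustrate the pattern the CL targets, here is a minimal sketch (not part
of the CL; labels are informal) of the relevant node shapes. Previously the
memory optimizer found the inner AllocateRaw directly on the StoreField's
value input; with pointer compression a Compress node sits in between and
hides it:

    Without pointer compression:
      AllocateRaw(kOld)   --input 0 (object)--> StoreField
      AllocateRaw(kYoung) --input 1 (value)---> StoreField

    With pointer compression:
      AllocateRaw(kOld)   --input 0 (object)--> StoreField
      AllocateRaw(kYoung) --> ChangeTaggedPointerToCompressedPointer --input 1 (value)---> StoreField

Since the inner AllocateRaw was no longer a direct input of the StoreField,
the young allocation was not promoted to old space even though it was stored
into an old-space object.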
@@ -101,6 +101,12 @@ bool CanAllocate(const Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kBitcastTaggedToWord:
     case IrOpcode::kBitcastWordToTagged:
+    case IrOpcode::kChangeCompressedToTagged:
+    case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+    case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+    case IrOpcode::kChangeTaggedToCompressed:
+    case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+    case IrOpcode::kChangeTaggedPointerToCompressedPointer:
     case IrOpcode::kComment:
     case IrOpcode::kAbortCSAAssert:
     case IrOpcode::kDebugBreak:
@@ -258,6 +264,35 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
 
 #define __ gasm()->
 
+bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
+                                                     const Edge edge) {
+  if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) {
+    // In Pointer Compression we might have a Compress node between an
+    // AllocateRaw and the value used as input. This case is trickier since we
+    // have to check all of the Compress node edges to test for a StoreField.
+    for (Edge const new_edge : node->use_edges()) {
+      if (AllocationTypeNeedsUpdateToOld(new_edge.from(), new_edge)) {
+        return true;
+      }
+    }
+
+    // If we arrived here, we tested all the edges of the Compress node and
+    // didn't find it necessary to update the AllocationType.
+    return false;
+  }
+
+  // Test to see if we need to update the AllocationType.
+  if (node->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
+    Node* parent = node->InputAt(0);
+    if (parent->opcode() == IrOpcode::kAllocateRaw &&
+        AllocationTypeOf(parent->op()) == AllocationType::kOld) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
 void MemoryOptimizer::VisitAllocateRaw(Node* node,
                                        AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
@@ -278,8 +313,17 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
   if (allocation_type == AllocationType::kOld) {
     for (Edge const edge : node->use_edges()) {
       Node* const user = edge.from();
       if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
-        Node* const child = user->InputAt(1);
+        Node* child = user->InputAt(1);
+
+        // In Pointer Compression we might have a Compress node between an
+        // AllocateRaw and the value used as input. If so, we need to update
+        // child to point to the StoreField.
+        if (COMPRESS_POINTERS_BOOL &&
+            IrOpcode::IsCompressOpcode(child->opcode())) {
+          child = child->InputAt(0);
+        }
+
         if (child->opcode() == IrOpcode::kAllocateRaw &&
             AllocationTypeOf(child->op()) == AllocationType::kYoung) {
           NodeProperties::ChangeOp(child, node->op());
@@ -291,13 +335,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
     DCHECK_EQ(AllocationType::kYoung, allocation_type);
     for (Edge const edge : node->use_edges()) {
       Node* const user = edge.from();
-      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
-        Node* const parent = user->InputAt(0);
-        if (parent->opcode() == IrOpcode::kAllocateRaw &&
-            AllocationTypeOf(parent->op()) == AllocationType::kOld) {
-          allocation_type = AllocationType::kOld;
-          break;
-        }
-      }
+      if (AllocationTypeNeedsUpdateToOld(user, edge)) {
+        allocation_type = AllocationType::kOld;
+        break;
+      }
     }
   }
@@ -142,6 +142,11 @@ class MemoryOptimizer final {
   bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
 
+  // Returns true if the AllocationType of the current AllocateRaw node that we
+  // are visiting needs to be updated to kOld, due to propagation of tenuring
+  // from outer to inner allocations.
+  bool AllocationTypeNeedsUpdateToOld(Node* const user, const Edge edge);
+
   AllocationState const* empty_state() const { return empty_state_; }
 
   Graph* graph() const;
   Isolate* isolate() const;
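
To make the bridging logic concrete, here is a self-contained toy model
(plain C++, not V8 internals: Node, Opcode, and NeedsUpdateToOld below are
simplified stand-ins invented for illustration) of how the optimizer looks
through a Compress node when deciding whether to promote a young allocation:

// Toy model of the tenuring propagation above. This is NOT V8 code; the
// graph classes are reduced to the bare minimum needed to run the example.
#include <cstdio>
#include <vector>

enum class Opcode { kAllocateRaw, kStoreField, kCompress };
enum class AllocationType { kYoung, kOld };

struct Node {
  Opcode opcode;
  AllocationType allocation;  // meaningful for kAllocateRaw only
  std::vector<Node*> inputs;  // StoreField: [object, value]; Compress: [value]
  std::vector<Node*> uses;    // reverse edges
};

// Counterpart of the CL's AllocationTypeNeedsUpdateToOld: `value` flows into
// `user`; return true if `user` is (or, looking through a Compress node,
// leads to) a StoreField whose object input is an old-space AllocateRaw and
// whose value input is `value`.
bool NeedsUpdateToOld(Node* user, Node* value) {
  if (user->opcode == Opcode::kCompress) {
    // Use the Compress node as a bridge: test all of its own use edges.
    for (Node* next : user->uses) {
      if (NeedsUpdateToOld(next, user)) return true;
    }
    return false;
  }
  if (user->opcode == Opcode::kStoreField && user->inputs[1] == value) {
    Node* parent = user->inputs[0];
    return parent->opcode == Opcode::kAllocateRaw &&
           parent->allocation == AllocationType::kOld;
  }
  return false;
}

int main() {
  // Build StoreField(outer, Compress(inner)): an old-space object stores a
  // young allocation whose value is wrapped in a Compress node.
  Node outer{Opcode::kAllocateRaw, AllocationType::kOld, {}, {}};
  Node inner{Opcode::kAllocateRaw, AllocationType::kYoung, {}, {}};
  Node compress{Opcode::kCompress, AllocationType::kYoung, {&inner}, {}};
  Node store{Opcode::kStoreField, AllocationType::kYoung,
             {&outer, &compress}, {}};
  inner.uses.push_back(&compress);
  compress.uses.push_back(&store);
  outer.uses.push_back(&store);

  // Visiting the young AllocateRaw: promote it if any use requires it, as in
  // the rewritten loop in VisitAllocateRaw.
  for (Node* user : inner.uses) {
    if (NeedsUpdateToOld(user, &inner)) {
      inner.allocation = AllocationType::kOld;
      break;
    }
  }
  std::printf("inner allocation: %s\n",
              inner.allocation == AllocationType::kOld ? "kOld" : "kYoung");
  // Prints "kOld": tenuring propagated through the Compress bridge.
}

The recursion mirrors the CL's AllocationTypeNeedsUpdateToOld: a Compress
node defers the decision to its own use edges, so the StoreField test still
fires even though the inner allocation is no longer a direct input of the
StoreField.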