Commit 6d5e9b79 authored by Ross McIlroy, committed by Commit Bot

[TurboProp] Split out MemoryLowering from MemoryOptimizer

Separates the memory lowering operations into a separate MemoryLowering
class, which is used by the MemoryOptimizer. This will enable TurboProp
to reduce memory operations without having to do a full memory
optimization pass.

BUG=v8:9684

Change-Id: I1b333f1360fd342612672842bf879f44ab1ee60c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1815243
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63966}
parent 63e9a7d9
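For context: MemoryLowering is implemented as a regular Reducer (see memory-lowering.h below), so a lighter-weight pipeline phase could lower memory access and allocation nodes by plugging it into a GraphReducer instead of running the full MemoryOptimizer walk. The sketch below is illustrative only; the helper name, the GraphReducer wiring, and the reliance on MemoryLowering's default arguments are assumptions, not part of this change.

// Illustrative sketch only (not part of this CL). Assumes the usual
// GraphReducer wiring; MemoryLowering's allocation-folding, write-barrier
// callback and debug-name parameters are left at their defaults.
#include "src/compiler/graph-reducer.h"
#include "src/compiler/memory-lowering.h"

void LowerMemoryNodesOnly(JSGraph* jsgraph, Zone* temp_zone,
                          PoisoningMitigationLevel poisoning_level) {
  GraphReducer graph_reducer(temp_zone, jsgraph->graph(), jsgraph->Dead());
  MemoryLowering memory_lowering(jsgraph, temp_zone, poisoning_level);
  graph_reducer.AddReducer(&memory_lowering);
  graph_reducer.ReduceGraph();
}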
@@ -1880,6 +1880,8 @@ v8_compiler_sources = [
"src/compiler/machine-operator.h",
"src/compiler/map-inference.cc",
"src/compiler/map-inference.h",
"src/compiler/memory-lowering.cc",
"src/compiler/memory-lowering.h",
"src/compiler/memory-optimizer.cc",
"src/compiler/memory-optimizer.h",
"src/compiler/node-aux-data.h",
...
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/memory-lowering.h"
#include "src/codegen/interface-descriptors.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
// An allocation group represents a set of allocations that have been folded
// together.
class MemoryLowering::AllocationGroup final : public ZoneObject {
public:
AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
AllocationGroup(Node* node, AllocationType allocation, Node* size,
Zone* zone);
~AllocationGroup() = default;
void Add(Node* object);
bool Contains(Node* object) const;
bool IsYoungGenerationAllocation() const {
return allocation() == AllocationType::kYoung;
}
AllocationType allocation() const { return allocation_; }
Node* size() const { return size_; }
private:
ZoneSet<NodeId> node_ids_;
AllocationType const allocation_;
Node* const size_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};
MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
WriteBarrierAssertFailedCallback callback,
const char* function_debug_name)
: jsgraph_(jsgraph),
zone_(zone),
graph_assembler_(jsgraph, nullptr, nullptr, zone),
allocation_folding_(allocation_folding),
poisoning_level_(poisoning_level),
write_barrier_assert_failed_(callback),
function_debug_name_(function_debug_name) {}
Reduction MemoryLowering::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kAllocate:
// Allocate nodes were purged from the graph in effect-control
// linearization.
UNREACHABLE();
case IrOpcode::kAllocateRaw:
return ReduceAllocateRaw(node);
case IrOpcode::kLoadFromObject:
return ReduceLoadFromObject(node);
case IrOpcode::kLoadElement:
return ReduceLoadElement(node);
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kStoreToObject:
return ReduceStoreToObject(node);
case IrOpcode::kStoreElement:
return ReduceStoreElement(node);
case IrOpcode::kStoreField:
return ReduceStoreField(node);
case IrOpcode::kStore:
return ReduceStore(node);
default:
return NoChange();
}
}
#define __ gasm()->
Reduction MemoryLowering::ReduceAllocateRaw(
Node* node, AllocationType allocation_type,
AllowLargeObjects allow_large_objects, AllocationState const** state_ptr) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
state_ptr != nullptr);
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
gasm()->Reset(effect, control);
Node* allocate_builtin;
if (allocation_type == AllocationType::kYoung) {
if (allow_large_objects == AllowLargeObjects::kTrue) {
allocate_builtin = __ AllocateInYoungGenerationStubConstant();
} else {
allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
}
} else {
if (allow_large_objects == AllowLargeObjects::kTrue) {
allocate_builtin = __ AllocateInOldGenerationStubConstant();
} else {
allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
}
}
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
IntPtrMatcher m(size);
if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
intptr_t const object_size = m.Value();
AllocationState const* state = *state_ptr;
if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->allocation() == allocation_type) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
intptr_t const state_size = state->size() + object_size;
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
if (machine()->Is64()) {
if (OpParameter<int64_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(group->size(),
common()->Int64Constant(state_size));
}
} else {
if (OpParameter<int32_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(
group->size(),
common()->Int32Constant(static_cast<int32_t>(state_size)));
}
}
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
Node* top = __ IntAdd(state->top(), size);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
// Compute the effective inner allocated address.
value = __ BitcastWordToTagged(
__ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
// Extend the allocation {group}.
group->Add(value);
*state_ptr =
AllocationState::Open(group, state_size, top, effect, zone());
} else {
auto call_runtime = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineType::PointerRepresentation());
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
Node* size = __ UniqueIntPtrConstant(object_size);
// Load allocation top and limit.
Node* top =
__ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
Node* limit =
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
__ GotoIfNot(check, &call_runtime);
__ Goto(&done, top);
__ Bind(&call_runtime);
{
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ BitcastTaggedToWord(
__ Call(allocate_operator_.get(), allocate_builtin, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
__ Bind(&done);
// Compute the new top and write it back.
top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
// Compute the initial object address.
value = __ BitcastWordToTagged(
__ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
// Start a new allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation_type, size, zone());
*state_ptr =
AllocationState::Open(group, object_size, top, effect, zone());
}
} else {
auto call_runtime = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
// Load allocation top and limit.
Node* top =
__ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
Node* limit =
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
Node* new_top = __ IntAdd(top, size);
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
__ GotoIfNot(check, &call_runtime);
if (allow_large_objects == AllowLargeObjects::kTrue) {
__ GotoIfNot(
__ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
&call_runtime);
}
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), new_top);
__ Goto(&done, __ BitcastWordToTagged(
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
__ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
__ Bind(&done);
value = done.PhiAt(0);
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
if (state_ptr) {
// Create an unfoldable allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation_type, zone());
*state_ptr = AllocationState::Closed(group, effect, zone());
}
}
// Replace all effect uses of {node} with the {effect} and replace
// all value uses of {node} with the {value}.
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
edge.UpdateTo(effect);
} else if (NodeProperties::IsValueEdge(edge)) {
edge.UpdateTo(value);
} else {
DCHECK(NodeProperties::IsControlEdge(edge));
edge.UpdateTo(control);
}
}
// Kill the {node} to make sure we don't leave dangling dead uses.
node->Kill();
return Replace(value);
}
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
return Changed(node);
}
Reduction MemoryLowering::ReduceLoadElement(Node* node) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
return Changed(node);
}
Reduction MemoryLowering::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
return Changed(node);
}
Reduction MemoryLowering::ReduceStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
return Changed(node);
}
Reduction MemoryLowering::ReduceStoreElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
ElementAccess const& access = ElementAccessOf(node->op());
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
node->ReplaceInput(1, ComputeIndex(access, index));
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
return Changed(node);
}
Reduction MemoryLowering::ReduceStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
return Changed(node);
}
Reduction MemoryLowering::ReduceStore(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
StoreRepresentation representation = StoreRepresentationOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, representation.write_barrier_kind());
if (write_barrier_kind != representation.write_barrier_kind()) {
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
representation.representation(), write_barrier_kind)));
return Changed(node);
}
return NoChange();
}
Node* MemoryLowering::ComputeIndex(ElementAccess const& access, Node* index) {
int const element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
index = __ WordShl(index, __ IntPtrConstant(element_size_shift));
}
int const fixed_offset = access.header_size - access.tag();
if (fixed_offset) {
index = __ IntAdd(index, __ IntPtrConstant(fixed_offset));
}
return index;
}
#undef __
namespace {
bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
while (true) {
switch (value->opcode()) {
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kChangeTaggedSignedToCompressedSigned:
case IrOpcode::kChangeTaggedToCompressedSigned:
return false;
case IrOpcode::kChangeTaggedPointerToCompressedPointer:
case IrOpcode::kChangeTaggedToCompressed:
value = NodeProperties::GetValueInput(value, 0);
continue;
case IrOpcode::kHeapConstant: {
RootIndex root_index;
if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
&root_index) &&
RootsTable::IsImmortalImmovable(root_index)) {
return false;
}
break;
}
default:
break;
}
return true;
}
}
} // namespace
Reduction MemoryLowering::ReduceAllocateRaw(Node* node) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
const AllocateParameters& allocation = AllocateParametersOf(node->op());
return ReduceAllocateRaw(node, allocation.allocation_type(),
allocation.allow_large_objects(), nullptr);
}
WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
Node* node, Node* object, Node* value, AllocationState const* state,
WriteBarrierKind write_barrier_kind) {
if (state && state->IsYoungGenerationAllocation() &&
state->group()->Contains(object)) {
write_barrier_kind = kNoWriteBarrier;
}
if (!ValueNeedsWriteBarrier(value, isolate())) {
write_barrier_kind = kNoWriteBarrier;
}
if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
write_barrier_assert_failed_(node, object, function_debug_name_, zone());
}
return write_barrier_kind;
}
bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
// Safe loads do not need poisoning.
if (load_sensitivity == LoadSensitivity::kSafe) return false;
switch (poisoning_level_) {
case PoisoningMitigationLevel::kDontPoison:
return false;
case PoisoningMitigationLevel::kPoisonAll:
return true;
case PoisoningMitigationLevel::kPoisonCriticalOnly:
return load_sensitivity == LoadSensitivity::kCritical;
}
UNREACHABLE();
}
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
: node_ids_(zone), allocation_(allocation), size_(nullptr) {
node_ids_.insert(node->id());
}
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Node* size, Zone* zone)
: node_ids_(zone), allocation_(allocation), size_(size) {
node_ids_.insert(node->id());
}
void MemoryLowering::AllocationGroup::Add(Node* node) {
node_ids_.insert(node->id());
}
bool MemoryLowering::AllocationGroup::Contains(Node* node) const {
// Additions should stay within the same allocated object, so it's safe to
// ignore them.
while (node_ids_.find(node->id()) == node_ids_.end()) {
switch (node->opcode()) {
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
node = NodeProperties::GetValueInput(node, 0);
break;
default:
return false;
}
}
return true;
}
MemoryLowering::AllocationState::AllocationState()
: group_(nullptr),
size_(std::numeric_limits<int>::max()),
top_(nullptr),
effect_(nullptr) {}
MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
Node* effect)
: group_(group),
size_(std::numeric_limits<int>::max()),
top_(nullptr),
effect_(effect) {}
MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
intptr_t size, Node* top,
Node* effect)
: group_(group), size_(size), top_(top), effect_(effect) {}
bool MemoryLowering::AllocationState::IsYoungGenerationAllocation() const {
return group() && group()->IsYoungGenerationAllocation();
}
Graph* MemoryLowering::graph() const { return jsgraph()->graph(); }
Isolate* MemoryLowering::isolate() const { return jsgraph()->isolate(); }
CommonOperatorBuilder* MemoryLowering::common() const {
return jsgraph()->common();
}
MachineOperatorBuilder* MemoryLowering::machine() const {
return jsgraph()->machine();
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_MEMORY_LOWERING_H_
#define V8_COMPILER_MEMORY_LOWERING_H_
#include "src/compiler/graph-assembler.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
struct ElementAccess;
class Graph;
class JSGraph;
class MachineOperatorBuilder;
class Node;
class Operator;
// Provides operations to lower all simplified memory access and allocation
// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine
// operators.
class MemoryLowering final : public Reducer {
public:
enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
class AllocationGroup;
// An allocation state is propagated on the effect paths through the graph.
class AllocationState final : public ZoneObject {
public:
static AllocationState const* Empty(Zone* zone) {
return new (zone) AllocationState();
}
static AllocationState const* Closed(AllocationGroup* group, Node* effect,
Zone* zone) {
return new (zone) AllocationState(group, effect);
}
static AllocationState const* Open(AllocationGroup* group, intptr_t size,
Node* top, Node* effect, Zone* zone) {
return new (zone) AllocationState(group, size, top, effect);
}
bool IsYoungGenerationAllocation() const;
AllocationGroup* group() const { return group_; }
Node* top() const { return top_; }
Node* effect() const { return effect_; }
intptr_t size() const { return size_; }
private:
AllocationState();
explicit AllocationState(AllocationGroup* group, Node* effect);
AllocationState(AllocationGroup* group, intptr_t size, Node* top,
Node* effect);
AllocationGroup* const group_;
// The upper bound of the combined allocated object size on the current path
// (max int if allocation folding is impossible on this path).
intptr_t const size_;
Node* const top_;
Node* const effect_;
DISALLOW_COPY_AND_ASSIGN(AllocationState);
};
using WriteBarrierAssertFailedCallback = std::function<void(
Node* node, Node* object, const char* name, Zone* temp_zone)>;
MemoryLowering(
JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding =
AllocationFolding::kDontAllocationFolding,
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
Zone*) { UNREACHABLE(); },
const char* function_debug_name = nullptr);
~MemoryLowering() = default;
const char* reducer_name() const override { return "MemoryReducer"; }
// Perform memory lowering reduction on the given Node.
Reduction Reduce(Node* node) override;
// Specific reducers for each optype to enable keeping track of
// AllocationState by the MemoryOptimizer.
Reduction ReduceAllocateRaw(Node* node, AllocationType allocation_type,
AllowLargeObjects allow_large_objects,
AllocationState const** state);
Reduction ReduceLoadFromObject(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceStoreToObject(Node* node,
AllocationState const* state = nullptr);
Reduction ReduceStoreElement(Node* node,
AllocationState const* state = nullptr);
Reduction ReduceStoreField(Node* node,
AllocationState const* state = nullptr);
Reduction ReduceStore(Node* node, AllocationState const* state = nullptr);
private:
Reduction ReduceAllocateRaw(Node* node);
WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
Node* value,
AllocationState const* state,
WriteBarrierKind);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
Graph* graph() const;
Isolate* isolate() const;
Zone* zone() const { return zone_; }
JSGraph* jsgraph() const { return jsgraph_; }
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
GraphAssembler* gasm() { return &graph_assembler_; }
SetOncePointer<const Operator> allocate_operator_;
JSGraph* const jsgraph_;
Zone* zone_;
GraphAssembler graph_assembler_;
AllocationFolding allocation_folding_;
PoisoningMitigationLevel poisoning_level_;
WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
const char* function_debug_name_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryLowering);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_MEMORY_LOWERING_H_
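Note that the write-barrier assert hook is injectable via the WriteBarrierAssertFailedCallback constructor parameter above, so a user of MemoryLowering other than MemoryOptimizer can install its own failure handler. A hypothetical caller-side sketch follows; the handler body and the "my_function" debug name are made up for illustration and are not part of this change.

// Hypothetical usage of the injectable write-barrier-assert callback; the
// handler body and debug name are illustrative, not taken from this CL.
MemoryLowering lowering(
    jsgraph, temp_zone, PoisoningMitigationLevel::kDontPoison,
    MemoryLowering::AllocationFolding::kDontAllocationFolding,
    [](Node* node, Node*, const char* name, Zone*) {
      FATAL("Unexpected write barrier assert on node #%d in %s",
            static_cast<int>(node->id()), name);
    },
    "my_function");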
@@ -11,90 +11,12 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
const char* function_debug_name,
TickCounter* tick_counter)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
zone_(zone),
graph_assembler_(jsgraph, nullptr, nullptr, zone),
poisoning_level_(poisoning_level),
allocation_folding_(allocation_folding),
function_debug_name_(function_debug_name),
tick_counter_(tick_counter) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
while (!tokens_.empty()) {
Token const token = tokens_.front();
tokens_.pop();
VisitNode(token.node, token.state);
}
DCHECK(pending_.empty());
DCHECK(tokens_.empty());
}
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
: node_ids_(zone), allocation_(allocation), size_(nullptr) {
node_ids_.insert(node->id());
}
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Node* size, Zone* zone)
: node_ids_(zone), allocation_(allocation), size_(size) {
node_ids_.insert(node->id());
}
void MemoryOptimizer::AllocationGroup::Add(Node* node) {
node_ids_.insert(node->id());
}
bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
// Additions should stay within the same allocated object, so it's safe to
// ignore them.
while (node_ids_.find(node->id()) == node_ids_.end()) {
switch (node->opcode()) {
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
node = NodeProperties::GetValueInput(node, 0);
break;
default:
return false;
}
}
return true;
}
MemoryOptimizer::AllocationState::AllocationState()
: group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
: group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
intptr_t size, Node* top)
: group_(group), size_(size), top_(top) {}
bool MemoryOptimizer::AllocationState::IsYoungGenerationAllocation() const {
return group() && group()->IsYoungGenerationAllocation();
}
namespace {
bool CanAllocate(const Node* node) {
@@ -221,8 +143,67 @@ Node* EffectPhiForPhi(Node* phi) {
return nullptr;
}
void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
Zone* temp_zone) {
std::stringstream str;
str << "MemoryOptimizer could not remove write barrier for node #"
<< node->id() << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< node->id() << " to break in CSA code.\n";
Node* object_position = object;
if (object_position->opcode() == IrOpcode::kPhi) {
object_position = EffectPhiForPhi(object_position);
}
Node* allocating_node = nullptr;
if (object_position && object_position->op()->EffectOutputCount() > 0) {
allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
}
if (allocating_node) {
str << "\n There is a potentially allocating node in between:\n";
str << " " << *allocating_node << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< allocating_node->id() << " to break there.\n";
if (allocating_node->opcode() == IrOpcode::kCall) {
str << " If this is a never-allocating runtime call, you can add an "
"exception to Runtime::MayAllocate.\n";
}
} else {
str << "\n It seems the store happened to something different than a "
"direct "
"allocation:\n";
str << " " << *object << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< object->id() << " to break there.\n";
}
FATAL("%s", str.str().c_str());
}
} // namespace
MemoryOptimizer::MemoryOptimizer(
JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
: memory_lowering_(jsgraph, zone, poisoning_level, allocation_folding,
WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
zone_(zone),
tick_counter_(tick_counter) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
while (!tokens_.empty()) {
Token const token = tokens_.front();
tokens_.pop();
VisitNode(token.node, token.state);
}
DCHECK(pending_.empty());
DCHECK(tokens_.empty());
}
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
tick_counter_->DoTick();
DCHECK(!node->IsDead());
@@ -259,8 +240,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
#define __ gasm()->
bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
const Edge edge) {
if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) {
@@ -293,13 +272,6 @@ bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
void MemoryOptimizer::VisitAllocateRaw(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
gasm()->Reset(effect, control);
const AllocateParameters& allocation = AllocateParametersOf(node->op());
AllocationType allocation_type = allocation.allocation_type();
@@ -310,7 +282,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
if (allocation_type == AllocationType::kOld) {
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
Node* child = user->InputAt(1);
// In Pointer Compression we might have a Compress node between an
@@ -339,299 +310,62 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
Node* allocate_builtin;
if (allocation_type == AllocationType::kYoung) {
if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
allocate_builtin = __ AllocateInYoungGenerationStubConstant();
} else {
allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
}
} else {
if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
allocate_builtin = __ AllocateInOldGenerationStubConstant();
} else {
allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
}
}
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
IntPtrMatcher m(size);
if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new) {
intptr_t const object_size = m.Value();
if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->allocation() == allocation_type) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
intptr_t const state_size = state->size() + object_size;
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
if (machine()->Is64()) {
if (OpParameter<int64_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(group->size(),
common()->Int64Constant(state_size));
}
} else {
if (OpParameter<int32_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(
group->size(),
common()->Int32Constant(static_cast<int32_t>(state_size)));
}
}
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
Node* top = __ IntAdd(state->top(), size);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
// Compute the effective inner allocated address.
value = __ BitcastWordToTagged(
__ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
// Extend the allocation {group}.
group->Add(value);
state = AllocationState::Open(group, state_size, top, zone());
} else {
auto call_runtime = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineType::PointerRepresentation());
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
Node* size = __ UniqueIntPtrConstant(object_size);
// Load allocation top and limit.
Node* top =
__ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
Node* limit =
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
__ GotoIfNot(check, &call_runtime);
__ Goto(&done, top);
__ Bind(&call_runtime);
{
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ BitcastTaggedToWord(
__ Call(allocate_operator_.get(), allocate_builtin, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
__ Bind(&done);
// Compute the new top and write it back.
top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
// Compute the initial object address.
value = __ BitcastWordToTagged(
__ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
// Start a new allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation_type, size, zone());
state = AllocationState::Open(group, object_size, top, zone());
}
} else {
auto call_runtime = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
// Load allocation top and limit.
Node* top =
__ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
Node* limit =
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
Node* new_top = __ IntAdd(top, size);
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
__ GotoIfNot(check, &call_runtime);
if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
__ GotoIfNot(
__ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
&call_runtime);
}
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), new_top);
__ Goto(&done, __ BitcastWordToTagged(
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
__ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
__ Bind(&done);
value = done.PhiAt(0);
// Create an unfoldable allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation_type, zone());
state = AllocationState::Closed(group, zone());
}
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
// Replace all effect uses of {node} with the {effect}, enqueue the
// effect uses for further processing, and replace all value uses of
// {node} with the {value}.
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
EnqueueUse(edge.from(), edge.index(), state);
edge.UpdateTo(effect);
} else if (NodeProperties::IsValueEdge(edge)) {
edge.UpdateTo(value);
} else {
DCHECK(NodeProperties::IsControlEdge(edge));
edge.UpdateTo(control);
}
}
// Kill the {node} to make sure we don't leave dangling dead uses.
node->Kill();
memory_lowering()->ReduceAllocateRaw(
node, allocation_type, allocation.allow_large_objects(), &state);
EnqueueUses(state->effect(), state);
}
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
memory_lowering()->ReduceLoadFromObject(node);
ObjectAccess const& access = ObjectAccessOf(node->op());
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
memory_lowering()->ReduceStoreToObject(node, state);
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
EnqueueUses(node, state);
}
#undef __
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kCall, node->opcode());
// If the call can allocate, we start with a fresh state.
if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
state = empty_state();
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
memory_lowering()->ReduceLoadElement(node);
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
memory_lowering()->ReduceLoadField(node);
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
memory_lowering()->ReduceStoreElement(node, state);
ElementAccess const& access = ElementAccessOf(node->op());
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
node->ReplaceInput(1, ComputeIndex(access, index));
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
memory_lowering()->ReduceStoreField(node, state);
FieldAccess const& access = FieldAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
memory_lowering()->ReduceStore(node, state);
StoreRepresentation representation = StoreRepresentationOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, representation.write_barrier_kind());
if (write_barrier_kind != representation.write_barrier_kind()) {
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
representation.representation(), write_barrier_kind)));
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kCall, node->opcode());
// If the call can allocate, we start with a fresh state.
if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
state = empty_state();
}
EnqueueUses(node, state);
}
@@ -641,109 +375,12 @@ void MemoryOptimizer::VisitOtherEffect(Node* node,
EnqueueUses(node, state);
}
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
int const element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
index = graph()->NewNode(machine()->WordShl(), index,
jsgraph()->IntPtrConstant(element_size_shift));
}
int const fixed_offset = access.header_size - access.tag();
if (fixed_offset) {
index = graph()->NewNode(machine()->IntAdd(), index,
jsgraph()->IntPtrConstant(fixed_offset));
}
return index;
}
namespace {
bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
while (true) {
switch (value->opcode()) {
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kChangeTaggedSignedToCompressedSigned:
case IrOpcode::kChangeTaggedToCompressedSigned:
return false;
case IrOpcode::kChangeTaggedPointerToCompressedPointer:
case IrOpcode::kChangeTaggedToCompressed:
value = NodeProperties::GetValueInput(value, 0);
continue;
case IrOpcode::kHeapConstant: {
RootIndex root_index;
if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
&root_index) &&
RootsTable::IsImmortalImmovable(root_index)) {
return false;
}
break;
}
default:
break;
}
return true;
}
}
void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
Zone* temp_zone) {
std::stringstream str;
str << "MemoryOptimizer could not remove write barrier for node #"
<< node->id() << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< node->id() << " to break in CSA code.\n";
Node* object_position = object;
if (object_position->opcode() == IrOpcode::kPhi) {
object_position = EffectPhiForPhi(object_position);
}
Node* allocating_node = nullptr;
if (object_position && object_position->op()->EffectOutputCount() > 0) {
allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
}
if (allocating_node) {
str << "\n There is a potentially allocating node in between:\n";
str << " " << *allocating_node << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< allocating_node->id() << " to break there.\n";
if (allocating_node->opcode() == IrOpcode::kCall) {
str << " If this is a never-allocating runtime call, you can add an "
"exception to Runtime::MayAllocate.\n";
}
} else {
str << "\n It seems the store happened to something different than a "
"direct "
"allocation:\n";
str << " " << *object << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< object->id() << " to break there.\n";
}
FATAL("%s", str.str().c_str());
}
} // namespace
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
Node* node, Node* object, Node* value, AllocationState const* state,
WriteBarrierKind write_barrier_kind) {
if (state->IsYoungGenerationAllocation() &&
state->group()->Contains(object)) {
write_barrier_kind = kNoWriteBarrier;
}
if (!ValueNeedsWriteBarrier(value, isolate())) {
write_barrier_kind = kNoWriteBarrier;
}
if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
WriteBarrierAssertFailed(node, object, function_debug_name_, zone());
}
return write_barrier_kind;
}
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
AllocationStates const& states) {
// Check if all states are the same; or at least if all allocation
// states belong to the same allocation group.
AllocationState const* state = states.front();
AllocationGroup* group = state->group();
MemoryLowering::AllocationGroup* group = state->group();
for (size_t i = 1; i < states.size(); ++i) {
if (states[i] != state) state = nullptr;
if (states[i]->group() != group) group = nullptr;
@@ -755,7 +392,7 @@ MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
// TODO(bmeurer): We could potentially just create a Phi here to merge
// the various tops; but we need to pay special attention not to create
// an unschedulable graph.
state = AllocationState::Closed(group, zone());
state = AllocationState::Closed(group, nullptr, zone());
} else {
// The states are from different allocation groups.
state = empty_state();
@@ -830,31 +467,6 @@ void MemoryOptimizer::EnqueueUse(Node* node, int index,
Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
CommonOperatorBuilder* MemoryOptimizer::common() const {
return jsgraph()->common();
}
MachineOperatorBuilder* MemoryOptimizer::machine() const {
return jsgraph()->machine();
}
bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
// Safe loads do not need poisoning.
if (load_sensitivity == LoadSensitivity::kSafe) return false;
switch (poisoning_level_) {
case PoisoningMitigationLevel::kDontPoison:
return false;
case PoisoningMitigationLevel::kPoisonAll:
return true;
case PoisoningMitigationLevel::kPoisonCriticalOnly:
return load_sensitivity == LoadSensitivity::kCritical;
}
UNREACHABLE();
}
} // namespace compiler
} // namespace internal
} // namespace v8
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
#define V8_COMPILER_MEMORY_OPTIMIZER_H_
#include "src/compiler/graph-assembler.h"
#include "src/compiler/memory-lowering.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -15,95 +15,29 @@ class TickCounter;
namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
struct ElementAccess;
class Graph;
class JSGraph;
class MachineOperatorBuilder;
class Graph;
class Node;
class Operator;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
using NodeId = uint32_t;
// Lowers all simplified memory access and allocation related nodes (i.e.
// Allocate, LoadField, StoreField and friends) to machine operators.
// Performs allocation folding and store write barrier elimination
// implicitly.
// implicitly, while lowering all simplified memory access and allocation
// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine
// operators.
class MemoryOptimizer final {
public:
enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
void Optimize();
private:
using AllocationState = MemoryLowering::AllocationState;
// An allocation group represents a set of allocations that have been folded
// together.
class AllocationGroup final : public ZoneObject {
public:
AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
AllocationGroup(Node* node, AllocationType allocation, Node* size,
Zone* zone);
~AllocationGroup() = default;
void Add(Node* object);
bool Contains(Node* object) const;
bool IsYoungGenerationAllocation() const {
return allocation() == AllocationType::kYoung;
}
AllocationType allocation() const { return allocation_; }
Node* size() const { return size_; }
private:
ZoneSet<NodeId> node_ids_;
AllocationType const allocation_;
Node* const size_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};
// An allocation state is propagated on the effect paths through the graph.
class AllocationState final : public ZoneObject {
public:
static AllocationState const* Empty(Zone* zone) {
return new (zone) AllocationState();
}
static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
return new (zone) AllocationState(group);
}
static AllocationState const* Open(AllocationGroup* group, intptr_t size,
Node* top, Zone* zone) {
return new (zone) AllocationState(group, size, top);
}
bool IsYoungGenerationAllocation() const;
AllocationGroup* group() const { return group_; }
Node* top() const { return top_; }
intptr_t size() const { return size_; }
private:
AllocationState();
explicit AllocationState(AllocationGroup* group);
AllocationState(AllocationGroup* group, intptr_t size, Node* top);
AllocationGroup* const group_;
// The upper bound of the combined allocated object size on the current path
// (max int if allocation folding is impossible on this path).
intptr_t const size_;
Node* const top_;
DISALLOW_COPY_AND_ASSIGN(AllocationState);
};
// An array of allocation states used to collect states on merges.
using AllocationStates = ZoneVector<AllocationState const*>;
@@ -127,44 +61,29 @@ class MemoryOptimizer final {
void VisitStore(Node*, AllocationState const*);
void VisitOtherEffect(Node*, AllocationState const*);
Node* ComputeIndex(ElementAccess const&, Node*);
WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
Node* value,
AllocationState const* state,
WriteBarrierKind);
AllocationState const* MergeStates(AllocationStates const& states);
void EnqueueMerge(Node*, int, AllocationState const*);
void EnqueueUses(Node*, AllocationState const*);
void EnqueueUse(Node*, int, AllocationState const*);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
// Returns true if the AllocationType of the current AllocateRaw node that we
// are visiting needs to be updated to kOld, due to propagation of tenuring
// from outer to inner allocations.
bool AllocationTypeNeedsUpdateToOld(Node* const user, const Edge edge);
AllocationState const* empty_state() const { return empty_state_; }
MemoryLowering* memory_lowering() { return &memory_lowering_; }
Graph* graph() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
Zone* zone() const { return zone_; }
GraphAssembler* gasm() { return &graph_assembler_; }
SetOncePointer<const Operator> allocate_operator_;
MemoryLowering memory_lowering_;
JSGraph* const jsgraph_;
JSGraph* jsgraph_;
AllocationState const* const empty_state_;
ZoneMap<NodeId, AllocationStates> pending_;
ZoneQueue<Token> tokens_;
Zone* const zone_;
GraphAssembler graph_assembler_;
PoisoningMitigationLevel poisoning_level_;
AllocationFolding allocation_folding_;
const char* function_debug_name_;
TickCounter* const tick_counter_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
...
@@ -1727,8 +1727,8 @@ struct MemoryOptimizationPhase {
MemoryOptimizer optimizer(
data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
data->info()->is_allocation_folding_enabled()
? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
data->debug_name(), &data->info()->tick_counter());
optimizer.Optimize();
}
...