Commit 5f3cc635 authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Rudimentary check elimination

Add a simple forward check elimination based on a side hashmap of "known
node aspects", namely the node type and node map (if any). This set of
aspects is cloned when a new merge state is created, and destructively
merged into existing merge states -- destructive merging here means
removing any mismatching information. This preserves information
established in dominators.
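
Concretely, a merge is a lock-step intersection of two ZoneMaps; the
snippet below is condensed from KnownNodeAspects::Merge in this change
(comments added here):

  void Merge(const KnownNodeAspects& other) {
    // Keep only facts that hold on both incoming paths; drop a node's
    // entry once its type intersection degenerates to kUnknown.
    DestructivelyIntersect(node_infos, other.node_infos,
                           [](NodeInfo& lhs, const NodeInfo& rhs) {
                             lhs.MergeWith(rhs);
                             return lhs.type != NodeType::kUnknown;
                           });
    // A known map survives the merge only if both paths agree on it.
    DestructivelyIntersect(stable_maps, other.stable_maps,
                           [](compiler::MapRef lhs, compiler::MapRef rhs) {
                             return lhs.equals(rhs);
                           });
    DestructivelyIntersect(unstable_maps, other.unstable_maps,
                           [](compiler::MapRef lhs, compiler::MapRef rhs) {
                             return lhs.equals(rhs);
                           });
  }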

Maps are kept separate from node types because we want to distinguish
between stable and unstable maps, where the former need a dependency and
the latter must be flushed across side-effecting calls.
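
A rough sketch of how that plays out in BuildMapCheck and
MarkPossibleSideEffect (condensed from the change below):

  // BuildMapCheck: record the checked map in the bucket matching its
  // stability, and take a dependency on stable maps so the fact stays
  // valid across side effects.
  auto& map_of_maps = map.is_stable() ? known_node_aspects().stable_maps
                                      : known_node_aspects().unstable_maps;
  map_of_maps.emplace(object, map);
  if (map.is_stable()) {
    compilation_unit_->broker()->dependencies()->DependOnStableMap(map);
  }

  // MarkPossibleSideEffect: a potentially side-effecting node may
  // transition objects away from unstable maps, so only that bucket is
  // cleared; stable-map knowledge is protected by the dependency above.
  known_node_aspects().unstable_maps.clear();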

The representation of this known information is currently very
inefficient, and won't win us any compilation speed prizes -- just
ZoneMaps keyed on ValueNode*. We should optimize this to take into
account some sort of liveness information, and clear out nodes that
aren't reachable anymore. There is also a lot more information we could
store per Node, e.g. known loaded fields or alternative representations;
depending on what we want to store and how that has to be invalidated,
we will likely need an alternative way of representing it. This
implementation is good enough for now though, for measuring the impact
of check elimination.
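
For context, the elimination itself is just an early-out in the new
Build* check helpers, e.g. BuildCheckSmi below (copied from the change,
with comments added):

  void MaglevGraphBuilder::BuildCheckSmi(ValueNode* object) {
    NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
    // A dominating check already proved this node is a Smi; emit nothing.
    if (NodeInfo::IsSmi(known_info)) return;
    AddNewNode<CheckSmi>({object});
    // Remember the fact so later checks on the same node fold away.
    known_node_aspects().InsertOrUpdateNodeType(object, known_info,
                                                NodeType::kSmi);
  }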

Bug: v8:7700
Change-Id: I2f001dedf8ab5d86f8acaa22416617bd80701982
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3865160
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82911}
parent fef977e7
......@@ -266,6 +266,11 @@ class V8_EXPORT_PRIVATE ObjectRef {
return lhs.equals(rhs);
}
};
struct Less {
bool operator()(const ObjectRef& lhs, const ObjectRef& rhs) const {
return lhs.data_ < rhs.data_;
}
};
protected:
JSHeapBroker* broker() const;
......@@ -293,6 +298,9 @@ template <class T>
using ZoneRefUnorderedSet =
ZoneUnorderedSet<T, ObjectRef::Hash, ObjectRef::Equal>;
template <class K, class V>
using ZoneRefMap = ZoneMap<K, V, ObjectRef::Less>;
// Temporary class that carries information from a Map. We'd like to remove
// this class and use MapRef instead, but we can't as long as we support the
// kDisabled broker mode. That's because obtaining the MapRef via
......
......@@ -1261,8 +1261,6 @@ void FeedbackNexus::Print(std::ostream& os) {
case FeedbackSlotKind::kDefineKeyedOwn:
case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kInstanceOf:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral:
case FeedbackSlotKind::kStoreGlobalSloppy:
......@@ -1276,6 +1274,19 @@ void FeedbackNexus::Print(std::ostream& os) {
os << InlineCacheState2String(ic_state());
break;
}
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
os << InlineCacheState2String(ic_state());
if (ic_state() == InlineCacheState::MONOMORPHIC) {
os << "\n ";
if (GetFeedback().GetHeapObjectOrSmi().IsPropertyCell()) {
os << Brief(GetFeedback());
} else {
LoadHandler::PrintHandler(GetFeedback().GetHeapObjectOrSmi(), os);
}
}
break;
}
case FeedbackSlotKind::kLoadProperty: {
os << InlineCacheState2String(ic_state());
if (ic_state() == InlineCacheState::MONOMORPHIC) {
......
......@@ -517,6 +517,8 @@ void LoadHandler::PrintHandler(Object handler, std::ostream& os) {
} else if (handler.IsCodeT()) {
os << "LoadHandler(Code)("
<< Builtins::name(CodeT::cast(handler).builtin_id()) << ")";
} else if (handler.IsSymbol()) {
os << "LoadHandler(Symbol)(" << Brief(Symbol::cast(handler)) << ")";
} else {
LoadHandler load_handler = LoadHandler::cast(handler);
int raw_handler = load_handler.smi_handler().ToSmi().value();
......
......@@ -542,15 +542,12 @@ void MaglevGraphBuilder::VisitCompareOperation() {
ValueNode *left, *right;
if (IsRegisterEqualToAccumulator(0)) {
left = right = LoadRegisterTagged(0);
AddNewNode<CheckHeapObject>({left});
AddNewNode<CheckSymbol>({left});
BuildCheckSymbol(left);
} else {
left = LoadRegisterTagged(0);
right = GetAccumulatorTagged();
AddNewNode<CheckHeapObject>({left});
AddNewNode<CheckSymbol>({left});
AddNewNode<CheckHeapObject>({right});
AddNewNode<CheckSymbol>({right});
BuildCheckSymbol(left);
BuildCheckSymbol(right);
}
if (TryBuildCompareOperation<BranchIfReferenceCompare>(kOperation, left,
right)) {
......@@ -892,14 +889,78 @@ void MaglevGraphBuilder::VisitStaLookupSlot() {
SetAccumulator(BuildCallRuntime(StaLookupSlotFunction(flags), {name, value}));
}
void MaglevGraphBuilder::BuildCheckSmi(ValueNode* object) {
NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
if (NodeInfo::IsSmi(known_info)) return;
// TODO(leszeks): Figure out a way to also handle CheckedSmiUntag.
AddNewNode<CheckSmi>({object});
known_node_aspects().InsertOrUpdateNodeType(object, known_info,
NodeType::kSmi);
}
void MaglevGraphBuilder::BuildCheckHeapObject(ValueNode* object) {
NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
if (NodeInfo::IsAnyHeapObject(known_info)) return;
AddNewNode<CheckHeapObject>({object});
known_node_aspects().InsertOrUpdateNodeType(object, known_info,
NodeType::kAnyHeapObject);
}
void MaglevGraphBuilder::BuildCheckString(ValueNode* object) {
NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
if (NodeInfo::IsString(known_info)) return;
if (!NodeInfo::IsAnyHeapObject(known_info)) {
AddNewNode<CheckHeapObject>({object});
}
AddNewNode<CheckString>({object});
known_node_aspects().InsertOrUpdateNodeType(object, known_info,
NodeType::kString);
}
void MaglevGraphBuilder::BuildCheckSymbol(ValueNode* object) {
NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
if (NodeInfo::IsSymbol(known_info)) return;
if (!NodeInfo::IsAnyHeapObject(known_info)) {
AddNewNode<CheckHeapObject>({object});
}
AddNewNode<CheckSymbol>({object});
known_node_aspects().InsertOrUpdateNodeType(object, known_info,
NodeType::kSymbol);
}
void MaglevGraphBuilder::BuildMapCheck(ValueNode* object,
const compiler::MapRef& map) {
AddNewNode<CheckHeapObject>({object});
ZoneMap<ValueNode*, compiler::MapRef>& map_of_maps =
map.is_stable() ? known_node_aspects().stable_maps
: known_node_aspects().unstable_maps;
auto it = map_of_maps.find(object);
if (it != map_of_maps.end()) {
if (it->second.equals(map)) {
// Map is already checked.
return;
}
// TODO(leszeks): Insert an unconditional deopt if the known type doesn't
// match the required type.
}
NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
if (!NodeInfo::IsAnyHeapObject(known_info)) {
AddNewNode<CheckHeapObject>({object});
}
if (map.is_migration_target()) {
AddNewNode<CheckMapsWithMigration>({object}, map);
} else {
AddNewNode<CheckMaps>({object}, map);
}
map_of_maps.emplace(object, map);
if (map.is_stable()) {
compilation_unit_->broker()->dependencies()->DependOnStableMap(map);
}
known_node_aspects().InsertOrUpdateNodeType(
object, known_info, NodeType::kHeapObjectWithKnownMap);
}
bool MaglevGraphBuilder::TryBuildMonomorphicLoad(ValueNode* object,
......@@ -975,8 +1036,7 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler(
// Check for string maps before checking if we need to do an access check.
// Primitive strings always get the prototype from the native context
// they're operated on, so they don't need the access check.
AddNewNode<CheckHeapObject>({object});
AddNewNode<CheckString>({object});
BuildCheckString(object);
} else if (do_access_check_on_lookup_start_object) {
return false;
} else {
......@@ -1203,7 +1263,7 @@ bool MaglevGraphBuilder::TryBuildMonomorphicStoreFromSmiHandler(
ValueNode* value = GetAccumulatorTagged();
if (representation == Representation::kSmi) {
AddNewNode<CheckSmi>({value});
BuildCheckSmi(value);
AddNewNode<StoreTaggedFieldNoWriteBarrier>({store_target, value}, offset);
return true;
}
......@@ -1228,7 +1288,7 @@ bool MaglevGraphBuilder::TryBuildMonomorphicStoreFromSmiHandler(
BuildMapCheck(value, *maybe_field_map);
} else {
AddNewNode<CheckHeapObject>({value});
BuildCheckHeapObject(value);
}
}
AddNewNode<StoreTaggedFieldWithWriteBarrier>({store_target, value}, offset);
......@@ -2138,7 +2198,7 @@ void MaglevGraphBuilder::BuildToNumberOrToNumeric(Object::Conversion mode) {
switch (broker()->GetFeedbackForBinaryOperation(
compiler::FeedbackSource(feedback(), slot))) {
case BinaryOperationHint::kSignedSmall:
AddNewNode<CheckSmi>({value});
BuildCheckSmi(value);
break;
case BinaryOperationHint::kSignedSmallInputs:
UNREACHABLE();
......
......@@ -510,12 +510,16 @@ class MaglevGraphBuilder {
ValueNode* GetConstant(const compiler::ObjectRef& ref) {
if (ref.IsSmi()) return GetSmiConstant(ref.AsSmi());
// TODO(verwaest): Cache and handle roots.
// TODO(verwaest): Handle roots.
const compiler::HeapObjectRef& constant = ref.AsHeapObject();
Constant* node = CreateNewNode<Constant>(0, constant);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
graph_->AddConstant(node);
return node;
auto it = graph_->constants().find(constant);
if (it == graph_->constants().end()) {
Constant* node = CreateNewNode<Constant>(0, constant);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
graph_->constants().emplace(constant, node);
return node;
}
return it->second;
}
bool IsConstantNodeTheHole(ValueNode* value) {
......@@ -772,6 +776,13 @@ class MaglevGraphBuilder {
void MarkPossibleSideEffect() {
// If there was a potential side effect, invalidate the previous checkpoint.
latest_checkpointed_state_.reset();
// A side effect could change existing objects' maps. For stable maps we
// know this hasn't happened (because we added a dependency on the maps
// staying stable and therefore not possible to transition away from), but
// we can no longer assume that objects with unstable maps still have the
// same map.
known_node_aspects().unstable_maps.clear();
}
int next_offset() const {
......@@ -869,6 +880,10 @@ class MaglevGraphBuilder {
bool TryBuildPropertyCellAccess(
const compiler::GlobalAccessFeedback& global_access_feedback);
void BuildCheckSmi(ValueNode* object);
void BuildCheckHeapObject(ValueNode* object);
void BuildCheckString(ValueNode* object);
void BuildCheckSymbol(ValueNode* object);
void BuildMapCheck(ValueNode* object, const compiler::MapRef& map);
bool TryBuildMonomorphicLoad(ValueNode* object, const compiler::MapRef& map,
......@@ -1004,6 +1019,9 @@ class MaglevGraphBuilder {
MaglevGraphLabeller* graph_labeller() const {
return compilation_unit_->graph_labeller();
}
KnownNodeAspects& known_node_aspects() {
return current_interpreter_frame_.known_node_aspects();
}
// True when this graph builder is building the subgraph of an inlined
// function.
......
......@@ -82,8 +82,9 @@ class GraphProcessor {
node_processor_.PreProcessGraph(compilation_info_, graph);
for (Constant* constant : graph->constants()) {
for (const auto& [ref, constant] : graph->constants()) {
node_processor_.Process(constant, GetCurrentState());
USE(ref);
}
for (const auto& [index, constant] : graph->root()) {
node_processor_.Process(constant, GetCurrentState());
......
......@@ -7,6 +7,7 @@
#include <vector>
#include "src/compiler/heap-refs.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/zone/zone-allocator.h"
......@@ -64,15 +65,15 @@ class Graph final : public ZoneObject {
ZoneMap<int, SmiConstant*>& smi() { return smi_; }
ZoneMap<int, Int32Constant*>& int32() { return int_; }
ZoneMap<double, Float64Constant*>& float64() { return float_; }
ZoneVector<Constant*>& constants() { return constants_; }
compiler::ZoneRefMap<compiler::ObjectRef, Constant*>& constants() {
return constants_;
}
Float64Constant* nan() const { return nan_; }
void set_nan(Float64Constant* nan) {
DCHECK_NULL(nan_);
nan_ = nan;
}
void AddConstant(Constant* constant) { constants_.emplace_back(constant); }
private:
uint32_t tagged_stack_slots_ = kMaxUInt32;
uint32_t untagged_stack_slots_ = kMaxUInt32;
......@@ -81,7 +82,7 @@ class Graph final : public ZoneObject {
ZoneMap<int, SmiConstant*> smi_;
ZoneMap<int, Int32Constant*> int_;
ZoneMap<double, Float64Constant*> float_;
ZoneVector<Constant*> constants_;
compiler::ZoneRefMap<compiler::ObjectRef, Constant*> constants_;
Float64Constant* nan_ = nullptr;
};
......
......@@ -24,22 +24,164 @@ namespace maglev {
class BasicBlock;
class MergePointInterpreterFrameState;
class InterpreterFrameState {
public:
explicit InterpreterFrameState(const MaglevCompilationUnit& info)
: frame_(info) {}
// Destructively intersects the right map into the left map, such that the
// left map is mutated to become the result of the intersection. Values that
// are in both maps are passed to the merging function to be merged with each
// other -- again, the LHS here is expected to be mutated.
template <typename Value, typename MergeFunc>
void DestructivelyIntersect(ZoneMap<ValueNode*, Value>& lhs_map,
const ZoneMap<ValueNode*, Value>& rhs_map,
MergeFunc&& func) {
// Walk the two maps in lock step. This relies on the fact that ZoneMaps are
// sorted.
typename ZoneMap<ValueNode*, Value>::iterator lhs_it = lhs_map.begin();
typename ZoneMap<ValueNode*, Value>::const_iterator rhs_it = rhs_map.begin();
while (lhs_it != lhs_map.end() && rhs_it != rhs_map.end()) {
if (lhs_it->first < rhs_it->first) {
// Remove from LHS elements that are not in RHS.
lhs_it = lhs_map.erase(lhs_it);
} else if (rhs_it->first < lhs_it->first) {
// Skip over elements that are only in RHS.
++rhs_it;
} else {
// Apply the merge function to the values of the two iterators. If the
// function returns false, remove the value.
bool keep_value = func(lhs_it->second, rhs_it->second);
if (keep_value) {
++lhs_it;
} else {
lhs_it = lhs_map.erase(lhs_it);
}
++rhs_it;
}
}
}
// The intersection (using `&`) of any two NodeTypes must be a valid NodeType
// (possibly "kUnknown").
// TODO(leszeks): Figure out how to represent Number/Numeric with this encoding.
enum class NodeType {
kUnknown = 0,
kSmi = (1 << 0),
kAnyHeapObject = (1 << 1),
// All heap object types include the heap object bit, so that they can be
// checked for AnyHeapObject with a single bit check.
kString = (1 << 2) | kAnyHeapObject,
kSymbol = (1 << 3) | kAnyHeapObject,
kHeapNumber = (1 << 4) | kAnyHeapObject,
kHeapObjectWithKnownMap = (1 << 5) | kAnyHeapObject,
};
struct NodeInfo {
NodeType type;
// TODO(leszeks): Consider adding more info for nodes here, e.g. alternative
// representations or previously loaded fields.
static bool IsSmi(const NodeInfo* info) {
if (!info) return false;
return info->type == NodeType::kSmi;
}
static bool IsAnyHeapObject(const NodeInfo* info) {
if (!info) return false;
return static_cast<int>(info->type) &
static_cast<int>(NodeType::kAnyHeapObject);
}
static bool IsString(const NodeInfo* info) {
if (!info) return false;
return info->type == NodeType::kString;
}
static bool IsSymbol(const NodeInfo* info) {
if (!info) return false;
return info->type == NodeType::kSymbol;
}
InterpreterFrameState(const MaglevCompilationUnit& info,
const InterpreterFrameState& state)
: frame_(info) {
frame_.CopyFrom(info, state.frame_, nullptr);
// Mutate this node info by merging in another node info, with the result
// being a node info that is the subset of information valid in both inputs.
void MergeWith(const NodeInfo& other) {
type = static_cast<NodeType>(static_cast<int>(type) &
static_cast<int>(other.type));
}
};
void CopyFrom(const MaglevCompilationUnit& info,
const InterpreterFrameState& state) {
frame_.CopyFrom(info, state.frame_, nullptr);
struct KnownNodeAspects {
explicit KnownNodeAspects(Zone* zone)
: node_infos(zone), stable_maps(zone), unstable_maps(zone) {}
KnownNodeAspects(const KnownNodeAspects& other) = delete;
KnownNodeAspects& operator=(const KnownNodeAspects& other) = delete;
KnownNodeAspects(KnownNodeAspects&& other) = delete;
KnownNodeAspects& operator=(KnownNodeAspects&& other) = delete;
KnownNodeAspects* Clone(Zone* zone) const {
KnownNodeAspects* clone = zone->New<KnownNodeAspects>(zone);
clone->node_infos = node_infos;
clone->stable_maps = stable_maps;
clone->unstable_maps = unstable_maps;
return clone;
}
// Loop headers can safely clone the node types, since those won't be
// invalidated in the loop body, and similarly stable maps will have
// dependencies installed. Unstable maps however might be invalidated by
// calls, and we don't know about these until it's too late.
KnownNodeAspects* CloneWithoutUnstableMaps(Zone* zone) const {
KnownNodeAspects* clone = zone->New<KnownNodeAspects>(zone);
clone->node_infos = node_infos;
clone->stable_maps = stable_maps;
return clone;
}
NodeInfo* GetInfoFor(ValueNode* node) {
auto it = node_infos.find(node);
if (it == node_infos.end()) return nullptr;
return &it->second;
}
void InsertOrUpdateNodeType(ValueNode* node, NodeInfo* existing_info,
NodeType new_type) {
if (existing_info == nullptr) {
DCHECK_EQ(node_infos.find(node), node_infos.end());
node_infos.emplace(node, NodeInfo{new_type});
} else {
DCHECK_EQ(&node_infos.find(node)->second, existing_info);
existing_info->type = new_type;
}
}
void Merge(const KnownNodeAspects& other) {
DestructivelyIntersect(node_infos, other.node_infos,
[](NodeInfo& lhs, const NodeInfo& rhs) {
lhs.MergeWith(rhs);
return lhs.type != NodeType::kUnknown;
});
DestructivelyIntersect(stable_maps, other.stable_maps,
[](compiler::MapRef lhs, compiler::MapRef rhs) {
return lhs.equals(rhs);
});
DestructivelyIntersect(unstable_maps, other.unstable_maps,
[](compiler::MapRef lhs, compiler::MapRef rhs) {
return lhs.equals(rhs);
});
}
// TODO(leszeks): Store these more efficiently than with std::map -- in
// particular, clear out entries that are no longer reachable, perhaps also
// allow lookup by interpreter register rather than by node pointer.
// Permanently valid if checked in a dominator.
ZoneMap<ValueNode*, NodeInfo> node_infos;
// Valid across side-effecting calls, as long as we install a dependency.
ZoneMap<ValueNode*, compiler::MapRef> stable_maps;
// Flushed after side-effecting calls.
ZoneMap<ValueNode*, compiler::MapRef> unstable_maps;
};
class InterpreterFrameState {
public:
explicit InterpreterFrameState(const MaglevCompilationUnit& info)
: frame_(info),
known_node_aspects_(info.zone()->New<KnownNodeAspects>(info.zone())) {}
inline void CopyFrom(const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
......@@ -69,8 +211,14 @@ class InterpreterFrameState {
const RegisterFrameArray<ValueNode*>& frame() const { return frame_; }
KnownNodeAspects& known_node_aspects() { return *known_node_aspects_; }
const KnownNodeAspects& known_node_aspects() const {
return *known_node_aspects_;
}
private:
RegisterFrameArray<ValueNode*> frame_;
KnownNodeAspects* known_node_aspects_;
};
class CompactInterpreterFrameState {
......@@ -255,7 +403,8 @@ class MergePointInterpreterFrameState {
predecessors_so_far_(1),
is_loop_header_(false),
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
frame_state_(info, liveness, state) {
frame_state_(info, liveness, state),
known_node_aspects_(state.known_node_aspects().Clone(info.zone())) {
predecessors_[0] = predecessor;
}
......@@ -296,6 +445,16 @@ class MergePointInterpreterFrameState {
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessors_[predecessors_so_far_] = predecessor;
if (known_node_aspects_ == nullptr) {
DCHECK(is_unmerged_loop());
DCHECK_EQ(predecessors_so_far_, 0);
known_node_aspects_ =
unmerged.known_node_aspects().CloneWithoutUnstableMaps(
compilation_unit.zone());
} else {
known_node_aspects_->Merge(unmerged.known_node_aspects());
}
if (FLAG_trace_maglev_graph_building) {
std::cout << "Merging..." << std::endl;
}
......@@ -614,6 +773,7 @@ class MergePointInterpreterFrameState {
CompactInterpreterFrameState frame_state_;
MergePointRegisterState register_state_;
KnownNodeAspects* known_node_aspects_ = nullptr;
};
void InterpreterFrameState::CopyFrom(
......@@ -623,6 +783,9 @@ void InterpreterFrameState::CopyFrom(
info, [&](ValueNode* value, interpreter::Register reg) {
frame_[reg] = value;
});
// Move "what we know" across without copying -- we can safely mutate it
// now, as we won't be entering this merge point again.
known_node_aspects_ = state.known_node_aspects_;
}
} // namespace maglev
......
......@@ -288,8 +288,9 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
printing_visitor_->PreProcessGraph(compilation_info_, graph_);
}
for (Constant* constant : graph_->constants()) {
for (const auto& [ref, constant] : graph_->constants()) {
constant->SetConstantLocation();
USE(ref);
}
for (const auto& [index, constant] : graph_->root()) {
constant->SetConstantLocation();
......