Commit 73a8eded authored by Seth Brenith, committed by Commit Bot

[torque] Generate shorter code for indexed field accesses

Currently, when accessing a field that doesn't have a constant offset,
Torque emits code to compute each preceding indexed field's length and
add them all together. This works, but such code can get super long if a
class has many indexed fields, and especially if the length expressions
of some indexed fields refer to other indexed fields. We'd like the
output of the new C++ backend to be short enough to go in inline headers
which will be included in many compilation units.

This change attempts to reorganize the code so that the computation of
each length expression is emitted exactly once. This only
shortens the generated C++ code; the resulting TurboFan output should be
identical. There are two main parts:
1. For each indexed field, we already generate a macro that can get a
   Slice referring to that field. Update these macros to not use the dot
   operator on that field. Using the dot operator on the predecessor
   field is allowed.
2. Update the dot operator for indexed fields to emit a call to the
   macro from step 1.

This sort of reverses the dependency added by the previous change
https://crrev.com/c/2429566 : rather than the slice macros depending on
the dot operator, this change makes the dot operator depend on the slice
macros.

The overall torque_generated directory shrinks by under 1% with this
change, but the runtime_macros.cc file (which should eventually become
inline headers) shrinks by 24%. More to the point, this change keeps
runtime_macros.cc from ballooning out of control when we add a
work-in-progress Torque definition for ScopeInfo
( https://crrev.com/c/2357758 ).

Bug: v8:7793
Change-Id: I989dda9c3666f1a49281fef03acb35baebb5b63a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2432070
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Cr-Commit-Position: refs/heads/master@{#70325}
parent 76ad3ab5
......@@ -226,6 +226,11 @@ macro DownCastForTorqueClass<T : type extends HeapObject>(o: HeapObject):
extern macro StaticAssert(bool, constexpr string);
// This is for the implementation of the dot operator. In any context where the
// dot operator is available, the correct way to get the length of an indexed
// field x from object o is `(&o.x).length`.
intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string);
} // namespace torque_internal
// Indicates that an array-field should not be initialized.
......
......@@ -1211,15 +1211,15 @@ void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
intptr_t offset;
intptr_t length;
std::tie(std::ignore, offset, length) =
TqRuntimeFieldRefSmallOrderedHashSetDataTable(isolate, *this);
TqRuntimeFieldSliceSmallOrderedHashSetDataTable(isolate, *this);
CHECK_EQ(offset, DataTableStartOffset());
CHECK_EQ(length, Capacity());
std::tie(std::ignore, offset, length) =
TqRuntimeFieldRefSmallOrderedHashSetHashTable(isolate, *this);
TqRuntimeFieldSliceSmallOrderedHashSetHashTable(isolate, *this);
CHECK_EQ(offset, GetBucketsStartOffset());
CHECK_EQ(length, NumberOfBuckets());
std::tie(std::ignore, offset, length) =
TqRuntimeFieldRefSmallOrderedHashSetChainTable(isolate, *this);
TqRuntimeFieldSliceSmallOrderedHashSetChainTable(isolate, *this);
CHECK_EQ(offset, GetChainTableOffset());
CHECK_EQ(length, Capacity());
}
......
......@@ -47,6 +47,7 @@ namespace torque {
#define AST_TYPE_EXPRESSION_NODE_KIND_LIST(V) \
V(BasicTypeExpression) \
V(FunctionTypeExpression) \
V(PrecomputedTypeExpression) \
V(UnionTypeExpression)
#define AST_STATEMENT_NODE_KIND_LIST(V) \
......@@ -651,6 +652,17 @@ struct FunctionTypeExpression : TypeExpression {
TypeExpression* return_type;
};
// A PrecomputedTypeExpression is never created directly by the parser. Later
// stages can use this to insert AST snippets where the type has already been
// resolved.
class Type;
// Wraps an already-resolved Type so later compiler stages can splice it into
// an AST position that expects a TypeExpression. Never produced by the parser.
struct PrecomputedTypeExpression : TypeExpression {
  DEFINE_AST_NODE_LEAF_BOILERPLATE(PrecomputedTypeExpression)
  PrecomputedTypeExpression(SourcePosition pos, const Type* type)
      : TypeExpression(kKind, pos), type(type) {}
  // The pre-resolved type; TypeVisitor::ComputeType returns this directly.
  const Type* type;
};
struct UnionTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(UnionTypeExpression)
UnionTypeExpression(SourcePosition pos, TypeExpression* a, TypeExpression* b)
......@@ -1237,6 +1249,58 @@ T* MakeNode(Args... args) {
std::make_unique<T>(CurrentSourcePosition::Get(), std::move(args)...));
}
// Convenience factory: builds an AST node for `object.field` at the current
// source position.
inline FieldAccessExpression* MakeFieldAccessExpression(Expression* object,
                                                        std::string field) {
  Identifier* field_name = MakeNode<Identifier>(std::move(field));
  return MakeNode<FieldAccessExpression>(object, field_name);
}
// Convenience factory: builds an identifier expression, optionally qualified
// by namespaces and carrying explicit generic arguments.
inline IdentifierExpression* MakeIdentifierExpression(
    std::vector<std::string> namespace_qualification, std::string name,
    std::vector<TypeExpression*> args = {}) {
  Identifier* id = MakeNode<Identifier>(std::move(name));
  return MakeNode<IdentifierExpression>(std::move(namespace_qualification), id,
                                        std::move(args));
}
// Shorthand for an unqualified identifier with no generic arguments.
inline IdentifierExpression* MakeIdentifierExpression(std::string name) {
  return MakeIdentifierExpression(std::vector<std::string>{}, std::move(name));
}
// Convenience factory: builds a call expression from an already-constructed
// callee node, with optional otherwise-labels.
inline CallExpression* MakeCallExpression(
    IdentifierExpression* callee, std::vector<Expression*> arguments,
    std::vector<Identifier*> labels = {}) {
  CallExpression* call =
      MakeNode<CallExpression>(callee, std::move(arguments), std::move(labels));
  return call;
}
// Convenience overload: the callee is given by name and wrapped in an
// unqualified identifier expression.
inline CallExpression* MakeCallExpression(
    std::string callee, std::vector<Expression*> arguments,
    std::vector<Identifier*> labels = {}) {
  IdentifierExpression* callee_expression =
      MakeIdentifierExpression(std::move(callee));
  return MakeCallExpression(callee_expression, std::move(arguments),
                            std::move(labels));
}
// Convenience factory: builds `const name = initializer;`. The declared type
// is left empty, so it is inferred from the initializer.
inline VarDeclarationStatement* MakeConstDeclarationStatement(
    std::string name, Expression* initializer) {
  Identifier* declared_name = MakeNode<Identifier>(std::move(name));
  return MakeNode<VarDeclarationStatement>(/*const_qualified=*/true,
                                           declared_name,
                                           base::Optional<TypeExpression*>{},
                                           initializer);
}
// Convenience factory: builds a (possibly namespace-qualified, possibly
// generic) named type expression.
inline BasicTypeExpression* MakeBasicTypeExpression(
    std::vector<std::string> namespace_qualification, std::string name,
    std::vector<TypeExpression*> generic_arguments = {}) {
  BasicTypeExpression* type_expression = MakeNode<BasicTypeExpression>(
      std::move(namespace_qualification), std::move(name),
      std::move(generic_arguments));
  return type_expression;
}
// Convenience factory: builds a struct-literal expression of the given type
// from named field initializers.
inline StructExpression* MakeStructExpression(
    TypeExpression* type, std::vector<NameAndExpression> initializers) {
  StructExpression* result =
      MakeNode<StructExpression>(type, std::move(initializers));
  return result;
}
} // namespace torque
} // namespace internal
} // namespace v8
......
......@@ -1313,53 +1313,20 @@ InitializerResults ImplementationVisitor::VisitInitializerResults(
// Returns a LocationReference for `field` of `object` (of type `class_type`).
//
// Indexed (array) fields are delegated to the per-field slice macro generated
// for the class: emitting a call to that macro (instead of inlining the
// length computations of all preceding indexed fields) ensures each length
// expression is emitted exactly once, keeping the generated code short.
//
// NOTE(review): the scraped span fused the pre-change and post-change diff
// bodies (a redeclared `offset`, a dead offset-accumulation loop, and
// duplicate return paths); this is the reconstructed post-commit version.
LocationReference ImplementationVisitor::GenerateFieldReference(
    VisitResult object, const Field& field, const ClassType* class_type) {
  if (field.index.has_value()) {
    // Indexed field: call the generated slice macro and wrap its result.
    return LocationReference::HeapSlice(
        GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
  }
  // Non-indexed fields always have a statically-known constant offset.
  DCHECK(field.offset.has_value());
  StackRange result_range = assembler().TopRange(0);
  result_range.Extend(GenerateCopy(object).stack_range());
  VisitResult offset =
      VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
  offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
  result_range.Extend(offset.stack_range());
  const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
                                                  field.const_qualified);
  return LocationReference::HeapReference(VisitResult(type, result_range));
}
// This is used to generate field references during initialization, where we can
......@@ -2858,6 +2825,15 @@ VisitResult ImplementationVisitor::GenerateCall(
result << constexpr_arguments[0];
result << ")";
return VisitResult(return_type, result.str());
} else if (intrinsic->ExternalName() == "%IndexedFieldLength") {
const Type* type = specialization_types[0];
const ClassType* class_type = ClassType::DynamicCast(type);
if (!class_type) {
ReportError("%IndexedFieldLength must take a class type parameter");
}
const Field& field =
class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
return GenerateArrayLength(VisitResult(type, argument_range), field);
} else {
assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
constexpr_arguments});
......
......@@ -354,14 +354,17 @@ const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
UnionTypeExpression::DynamicCast(type_expression)) {
return TypeOracle::GetUnionType(ComputeType(union_type->a),
ComputeType(union_type->b));
} else {
auto* function_type_exp = FunctionTypeExpression::cast(type_expression);
} else if (auto* function_type_exp =
FunctionTypeExpression::DynamicCast(type_expression)) {
TypeVector argument_types;
for (TypeExpression* type_exp : function_type_exp->parameters) {
argument_types.push_back(ComputeType(type_exp));
}
return TypeOracle::GetBuiltinPointerType(
argument_types, ComputeType(function_type_exp->return_type));
} else {
auto* precomputed = PrecomputedTypeExpression::cast(type_expression);
return precomputed->type;
}
}
......
This diff is collapsed.
......@@ -704,6 +704,13 @@ class ClassType final : public AggregateType {
base::Optional<ObjectSlotKind> ComputeArraySlotKind() const;
bool HasNoPointerSlots() const;
bool HasIndexedFieldsIncludingInParents() const;
const Field* GetFieldPreceding(size_t field_index) const;
// Given that the field exists in this class or a superclass, returns the
// specific class that declared the field.
const ClassType* GetClassDeclaringField(const Field& f) const;
std::string GetSliceMacroName(const Field& field) const;
const InstanceTypeConstraints& GetInstanceTypeConstraints() const {
return decl_->instance_type_constraints;
......@@ -734,6 +741,8 @@ class ClassType final : public AggregateType {
ClassFlags flags, const std::string& generates,
const ClassDeclaration* decl, const TypeAlias* alias);
void GenerateSliceAccessor(size_t field_index);
size_t header_size_;
ResidueClass size_;
mutable ClassFlags flags_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment