Commit 88a2d011 authored by Seth Brenith, committed by Commit Bot

[torque] Verify nested struct fields in classes

As one of several steps involved in supporting struct-valued fields
within classes, this CL generates type verification code for the data
contained in those structs.

In order to generate verification code, Torque needs to know about
struct field offsets and the total size of structs. Those calculations
are added to StructType itself and the function TypeVisitor::ComputeType
which initializes the StructType. I repurposed the Field::offset value
to behave in structs more like it does in classes (it had previously
indicated the index of a field within a struct, but nobody used that
value). Overall this works okay, and I think it's less confusing to have
Field::offset mean the same thing everywhere. However, some struct
fields have types with unknown size (Field::GetFieldSizeInformation
fails), so those fields are now marked with offset Field::kInvalidOffset
to indicate that the structs containing them should not be used within
class fields or anywhere else that requires packed layout.

Bug: v8:7793
Change-Id: If2677c8c81efc85e63b4bfb831d818a748427e18
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1897247
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65016}
parent ef651b34
...@@ -569,13 +569,6 @@ void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) { ...@@ -569,13 +569,6 @@ void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) { void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*this, isolate); TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*this, isolate);
for (int i = 0; i < number_of_all_descriptors(); i++) {
MaybeObjectSlot slot(GetDescriptorSlot(i));
MaybeObject::VerifyMaybeObjectPointer(isolate, *(slot + kEntryKeyIndex));
MaybeObject::VerifyMaybeObjectPointer(isolate,
*(slot + kEntryDetailsIndex));
MaybeObject::VerifyMaybeObjectPointer(isolate, *(slot + kEntryValueIndex));
}
if (number_of_all_descriptors() == 0) { if (number_of_all_descriptors() == 0) {
CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(), *this); CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(), *this);
CHECK_EQ(0, number_of_all_descriptors()); CHECK_EQ(0, number_of_all_descriptors());
......
...@@ -155,7 +155,7 @@ class DescriptorArray : public HeapObject { ...@@ -155,7 +155,7 @@ class DescriptorArray : public HeapObject {
return OffsetOfDescriptorAt(number_of_all_descriptors); return OffsetOfDescriptorAt(number_of_all_descriptors);
} }
static constexpr int OffsetOfDescriptorAt(int descriptor) { static constexpr int OffsetOfDescriptorAt(int descriptor) {
return kHeaderSize + descriptor * kEntrySize * kTaggedSize; return kDescriptorsOffset + descriptor * kEntrySize * kTaggedSize;
} }
inline ObjectSlot GetFirstPointerSlot(); inline ObjectSlot GetFirstPointerSlot();
inline ObjectSlot GetDescriptorSlot(int descriptor); inline ObjectSlot GetDescriptorSlot(int descriptor);
...@@ -164,6 +164,8 @@ class DescriptorArray : public HeapObject { ...@@ -164,6 +164,8 @@ class DescriptorArray : public HeapObject {
"Weak fields follow strong fields."); "Weak fields follow strong fields.");
static_assert(kEndOfWeakFieldsOffset == kHeaderSize, static_assert(kEndOfWeakFieldsOffset == kHeaderSize,
"Weak fields extend up to the end of the header."); "Weak fields extend up to the end of the header.");
static_assert(kDescriptorsOffset == kHeaderSize,
"Variable-size array follows header.");
// We use this visitor to also visit the enum_cache, which is // We use this visitor to also visit the enum_cache, which is
// the only tagged field in the header, and placed at the end of the header. // the only tagged field in the header, and placed at the end of the header.
using BodyDescriptor = FlexibleWeakBodyDescriptor<kStartOfStrongFieldsOffset>; using BodyDescriptor = FlexibleWeakBodyDescriptor<kStartOfStrongFieldsOffset>;
......
...@@ -9,8 +9,15 @@ extern class EnumCache extends Struct { ...@@ -9,8 +9,15 @@ extern class EnumCache extends Struct {
indices: FixedArray; indices: FixedArray;
} }
// We make this class abstract because it is missing the variable-sized part, @export
// which is still impossible to express in Torque. struct DescriptorEntry {
key: Name|Undefined;
details: Smi|Undefined;
value: JSAny|Weak<Map>|AccessorInfo|AccessorPair|ClassPositions;
}
// We make this class abstract because kHeaderSize is a more natural name for
// the part of the class before the variable-size array.
@abstract @abstract
@dirtyInstantiatedAbstractClass @dirtyInstantiatedAbstractClass
extern class DescriptorArray extends HeapObject { extern class DescriptorArray extends HeapObject {
...@@ -19,6 +26,5 @@ extern class DescriptorArray extends HeapObject { ...@@ -19,6 +26,5 @@ extern class DescriptorArray extends HeapObject {
raw_number_of_marked_descriptors: uint16; raw_number_of_marked_descriptors: uint16;
filler16_bits: uint16; filler16_bits: uint16;
enum_cache: EnumCache; enum_cache: EnumCache;
// DescriptorEntry needs to be a struct with three fields. descriptors[number_of_all_descriptors]: DescriptorEntry;
// descriptors : DescriptorEntry[number_of_all_descriptors]
} }
...@@ -82,6 +82,7 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, ...@@ -82,6 +82,7 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
for (const Field& field : type.fields()) { for (const Field& field : type.fields()) {
const Type* field_type = field.name_and_type.type; const Type* field_type = field.name_and_type.type;
if (field_type == TypeOracle::GetVoidType()) continue; if (field_type == TypeOracle::GetVoidType()) continue;
if (field_type->IsStructType()) continue; // Not yet supported.
const std::string& field_name = field.name_and_type.name; const std::string& field_name = field.name_and_type.name;
bool is_field_tagged = field_type->IsSubtypeOf(TypeOracle::GetTaggedType()); bool is_field_tagged = field_type->IsSubtypeOf(TypeOracle::GetTaggedType());
base::Optional<const ClassType*> field_class_type = base::Optional<const ClassType*> field_class_type =
......
...@@ -2881,7 +2881,10 @@ class FieldOffsetsGenerator { ...@@ -2881,7 +2881,10 @@ class FieldOffsetsGenerator {
// Allow void type for marker constants of size zero. // Allow void type for marker constants of size zero.
return current_section_; return current_section_;
} }
if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) { // Currently struct-valued fields are only allowed to have tagged data; see
// TypeVisitor::VisitClassFieldsAndMethods.
if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
f.name_and_type.type->IsStructType()) {
if (f.is_weak) { if (f.is_weak) {
return FieldSectionType::kWeakSection; return FieldSectionType::kWeakSection;
} else { } else {
...@@ -3445,47 +3448,33 @@ void ImplementationVisitor::GeneratePrintDefinitions( ...@@ -3445,47 +3448,33 @@ void ImplementationVisitor::GeneratePrintDefinitions(
namespace { namespace {
void GenerateClassFieldVerifier(const std::string& class_name, // Generate verification code for a single piece of class data, which might be
const ClassType& class_type, const Field& f, // nested within a struct or might be a single element in an indexed field (or
std::ostream& h_contents, // both).
void GenerateFieldValueVerifier(const std::string& class_name,
const Field& class_field,
const Field& leaf_field, size_t struct_offset,
std::string field_size,
std::ostream& cc_contents) { std::ostream& cc_contents) {
if (!f.generate_verify) return; const Type* field_type = leaf_field.name_and_type.type;
const Type* field_type = f.name_and_type.type;
// We only verify tagged types, not raw numbers or pointers.
if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) return;
// Do not verify if the field may be uninitialized.
if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;
if (f.index) {
const Type* index_type = f.index->type;
if (index_type != TypeOracle::GetSmiType()) {
Error("Expected type Smi for indexed field but found type ", *index_type)
.Position(f.pos);
}
// We already verified the index field because it was listed earlier, so we
// can assume it's safe to read here.
cc_contents << " for (int i = 0; i < TaggedField<Smi, " << class_name
<< "::k" << CamelifyString(f.index->name)
<< "Offset>::load(o).value(); ++i) {\n";
} else {
cc_contents << " {\n";
}
bool maybe_object = bool maybe_object =
!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetStrongTaggedType()); !field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
const char* object_type = maybe_object ? "MaybeObject" : "Object"; const char* object_type = maybe_object ? "MaybeObject" : "Object";
const char* verify_fn = const char* verify_fn =
maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer"; maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer";
const char* index_offset = f.index ? "i * kTaggedSize" : "0"; std::string index_offset = std::to_string(struct_offset);
if (class_field.index) {
index_offset += " + i * " + field_size;
}
// Name the local var based on the field name for nicer CHECK output. // Name the local var based on the field name for nicer CHECK output.
const std::string value = f.name_and_type.name + "__value"; const std::string value = leaf_field.name_and_type.name + "__value";
// Read the field. // Read the field.
cc_contents << " " << object_type << " " << value << " = TaggedField<" cc_contents << " " << object_type << " " << value << " = TaggedField<"
<< object_type << ", " << class_name << "::k" << object_type << ", " << class_name << "::k"
<< CamelifyString(f.name_and_type.name) << "Offset>::load(o, " << CamelifyString(class_field.name_and_type.name)
<< index_offset << ");\n"; << "Offset>::load(o, " << index_offset << ");\n";
// Call VerifyPointer or VerifyMaybeObjectPointer on it. // Call VerifyPointer or VerifyMaybeObjectPointer on it.
cc_contents << " " << object_type << "::" << verify_fn << "(isolate, " cc_contents << " " << object_type << "::" << verify_fn << "(isolate, "
...@@ -3494,7 +3483,7 @@ void GenerateClassFieldVerifier(const std::string& class_name, ...@@ -3494,7 +3483,7 @@ void GenerateClassFieldVerifier(const std::string& class_name,
// Check that the value is of an appropriate type. We can skip this part for // Check that the value is of an appropriate type. We can skip this part for
// the Object type because it would not check anything beyond what we already // the Object type because it would not check anything beyond what we already
// checked with VerifyPointer. // checked with VerifyPointer.
if (f.name_and_type.type != TypeOracle::GetObjectType()) { if (field_type != TypeOracle::GetObjectType()) {
std::stringstream type_check; std::stringstream type_check;
bool at_start = true; bool at_start = true;
// If weak pointers are allowed, then start by checking for a cleared value. // If weak pointers are allowed, then start by checking for a cleared value.
...@@ -3524,6 +3513,58 @@ void GenerateClassFieldVerifier(const std::string& class_name, ...@@ -3524,6 +3513,58 @@ void GenerateClassFieldVerifier(const std::string& class_name,
} }
cc_contents << " CHECK(" << type_check.str() << ");\n"; cc_contents << " CHECK(" << type_check.str() << ");\n";
} }
}
// Emits C++ verification code for a single Torque class field into
// |cc_contents|. Only tagged fields and struct-valued fields are verified;
// fields whose type admits an uninitialized value, or that have
// generate_verify disabled, are skipped. Indexed fields get a for-loop over
// each element; struct fields get one check per struct member at its packed
// offset. (|h_contents| is accepted for signature symmetry but unused here.)
void GenerateClassFieldVerifier(const std::string& class_name,
const ClassType& class_type, const Field& f,
std::ostream& h_contents,
std::ostream& cc_contents) {
if (!f.generate_verify) return;
const Type* field_type = f.name_and_type.type;
// We only verify tagged types, not raw numbers or pointers. Structs
// consisting of tagged types are also included.
if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
!field_type->IsStructType())
return;
// Do not verify if the field may be uninitialized.
if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;
if (f.index) {
// Indexed field: open a loop whose bound is read from the index field.
const Type* index_type = f.index->type;
std::string index_offset =
class_name + "::k" + CamelifyString(f.index->name) + "Offset";
cc_contents << " for (int i = 0; i < ";
if (index_type == TypeOracle::GetSmiType()) {
// We already verified the index field because it was listed earlier, so
// we can assume it's safe to read here.
cc_contents << "TaggedField<Smi, " << index_offset
<< ">::load(o).value()";
} else {
// Non-Smi index: read it via the index type's constexpr (C++)
// representation; report an error if the type has none.
const Type* constexpr_version = index_type->ConstexprVersion();
if (constexpr_version == nullptr) {
Error("constexpr representation for type ", index_type->ToString(),
" is required due to usage as index")
.Position(f.pos);
}
cc_contents << "o.ReadField<" << constexpr_version->GetGeneratedTypeName()
<< ">(" << index_offset << ")";
}
cc_contents << "; ++i) {\n";
} else {
// Non-indexed field: plain block so both paths share the same closer.
cc_contents << " {\n";
}
if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
// Struct-valued field: verify each member at its offset within the
// struct; the element stride is the struct's packed size.
for (const Field& field : struct_type->fields()) {
GenerateFieldValueVerifier(class_name, f, field, field.offset,
std::to_string(struct_type->PackedSize()),
cc_contents);
}
} else {
// Plain tagged field: single value at offset 0 with kTaggedSize stride.
GenerateFieldValueVerifier(class_name, f, f, 0, "kTaggedSize", cc_contents);
}
cc_contents << " }\n"; cc_contents << " }\n";
} }
......
...@@ -138,6 +138,7 @@ const StructType* TypeVisitor::ComputeType( ...@@ -138,6 +138,7 @@ const StructType* TypeVisitor::ComputeType(
CurrentSourcePosition::Scope position_activator(decl->pos); CurrentSourcePosition::Scope position_activator(decl->pos);
size_t offset = 0; size_t offset = 0;
bool packable = true;
for (auto& field : decl->fields) { for (auto& field : decl->fields) {
CurrentSourcePosition::Scope position_activator( CurrentSourcePosition::Scope position_activator(
field.name_and_type.type->pos); field.name_and_type.type->pos);
...@@ -146,15 +147,36 @@ const StructType* TypeVisitor::ComputeType( ...@@ -146,15 +147,36 @@ const StructType* TypeVisitor::ComputeType(
ReportError("struct field \"", field.name_and_type.name->value, ReportError("struct field \"", field.name_and_type.name->value,
"\" carries constexpr type \"", *field_type, "\""); "\" carries constexpr type \"", *field_type, "\"");
} }
struct_type->RegisterField({field.name_and_type.name->pos, Field f{field.name_and_type.name->pos,
struct_type, struct_type,
base::nullopt, base::nullopt,
{field.name_and_type.name->value, field_type}, {field.name_and_type.name->value, field_type},
offset, offset,
false, false,
field.const_qualified, field.const_qualified,
false}); false};
offset += LoweredSlotCount(field_type); auto optional_size = f.GetOptionalFieldSizeInformation();
// Structs may contain fields that aren't representable in packed form. If
// so, then this field and any subsequent fields should have their offsets
// marked as invalid.
if (!optional_size.has_value()) {
packable = false;
}
if (!packable) {
f.offset = Field::kInvalidOffset;
}
struct_type->RegisterField(f);
// Offsets are assigned based on an assumption of no space between members.
// This might lead to invalid alignment in some cases, but most structs are
// never actually packed in memory together (they just represent a batch of
// CSA TNode values that should be passed around together). For any struct
// that is used as a class field, we verify its offsets when setting up the
// class type.
if (optional_size.has_value()) {
size_t field_size = 0;
std::tie(field_size, std::ignore) = *optional_size;
offset += field_size;
}
} }
return struct_type; return struct_type;
} }
...@@ -298,6 +320,22 @@ void TypeVisitor::VisitClassFieldsAndMethods( ...@@ -298,6 +320,22 @@ void TypeVisitor::VisitClassFieldsAndMethods(
ReportError("non-extern classes do not support weak fields"); ReportError("non-extern classes do not support weak fields");
} }
} }
if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
for (const Field& struct_field : struct_type->fields()) {
if (!struct_field.name_and_type.type->IsSubtypeOf(
TypeOracle::GetTaggedType())) {
// If we ever actually need different sizes of struct fields, then we
// can define the packing and alignment rules. Until then, let's keep
// it simple. This restriction also helps keep the tagged and untagged
// regions separate in the class layout (see also
// FieldOffsetsGenerator::GetSectionFor).
Error(
"Classes do not support fields which are structs containing "
"untagged data.");
}
}
}
base::Optional<NameAndType> index_field;
if (field_expression.index) { if (field_expression.index) {
if (seen_indexed_field || if (seen_indexed_field ||
(super_class && super_class->HasIndexedField())) { (super_class && super_class->HasIndexedField())) {
...@@ -305,18 +343,8 @@ void TypeVisitor::VisitClassFieldsAndMethods( ...@@ -305,18 +343,8 @@ void TypeVisitor::VisitClassFieldsAndMethods(
"only one indexable field is currently supported per class"); "only one indexable field is currently supported per class");
} }
seen_indexed_field = true; seen_indexed_field = true;
const NameAndType& index_field = index_field = class_type->LookupFieldInternal(*field_expression.index)
class_type->LookupFieldInternal(*field_expression.index) .name_and_type;
.name_and_type;
class_type->RegisterField(
{field_expression.name_and_type.name->pos,
class_type,
index_field,
{field_expression.name_and_type.name->value, field_type},
class_offset,
field_expression.weak,
field_expression.const_qualified,
field_expression.generate_verify});
} else { } else {
if (seen_indexed_field) { if (seen_indexed_field) {
ReportError("cannot declare non-indexable field \"", ReportError("cannot declare non-indexable field \"",
...@@ -324,27 +352,26 @@ void TypeVisitor::VisitClassFieldsAndMethods( ...@@ -324,27 +352,26 @@ void TypeVisitor::VisitClassFieldsAndMethods(
"\" after an indexable field " "\" after an indexable field "
"declaration"); "declaration");
} }
const Field& field = class_type->RegisterField( }
{field_expression.name_and_type.name->pos, const Field& field = class_type->RegisterField(
class_type, {field_expression.name_and_type.name->pos,
base::nullopt, class_type,
{field_expression.name_and_type.name->value, field_type}, index_field,
class_offset, {field_expression.name_and_type.name->value, field_type},
field_expression.weak, class_offset,
field_expression.const_qualified, field_expression.weak,
field_expression.generate_verify}); field_expression.const_qualified,
size_t field_size; field_expression.generate_verify});
std::string size_string; size_t field_size;
std::string machine_type; std::tie(field_size, std::ignore) = field.GetFieldSizeInformation();
std::tie(field_size, size_string) = field.GetFieldSizeInformation(); // Our allocations don't support alignments beyond kTaggedSize.
// Our allocations don't support alignments beyond kTaggedSize. size_t alignment = std::min(
size_t alignment = std::min( static_cast<size_t>(TargetArchitecture::TaggedSize()), field_size);
static_cast<size_t>(TargetArchitecture::TaggedSize()), field_size); if (alignment > 0 && class_offset % alignment != 0) {
if (alignment > 0 && class_offset % alignment != 0) { ReportError("field ", field_expression.name_and_type.name, " at offset ",
ReportError("field ", field_expression.name_and_type.name, class_offset, " is not ", alignment, "-byte aligned.");
" at offset ", class_offset, " is not ", alignment, }
"-byte aligned."); if (!field_expression.index) {
}
class_offset += field_size; class_offset += field_size;
} }
} }
......
...@@ -336,6 +336,29 @@ std::string StructType::GetGeneratedTypeNameImpl() const { ...@@ -336,6 +336,29 @@ std::string StructType::GetGeneratedTypeNameImpl() const {
return generated_type_name_; return generated_type_name_;
} }
// Returns the struct's total byte size when laid out with no padding: the
// last field's offset plus that field's size. If any field was marked with
// Field::kInvalidOffset (its size is unknown, so the struct has no packed
// form), reports an error at the first such field and returns 0. An empty
// struct has packed size 0.
size_t StructType::PackedSize() const {
size_t result = 0;
if (!fields_.empty()) {
const Field& last = fields_.back();
// Invalid offsets propagate to all subsequent fields, so checking the
// last field detects whether any field was unpackable.
if (last.offset == Field::kInvalidOffset) {
// This struct can't be packed. Find the first invalid field and use its
// name and position for the error.
for (const Field& field : fields_) {
if (field.offset == Field::kInvalidOffset) {
Error("Cannot compute packed size of ", ToString(), " due to field ",
field.name_and_type.name, " of unknown size")
.Position(field.pos);
return 0;
}
}
}
size_t field_size = 0;
std::tie(field_size, std::ignore) = last.GetFieldSizeInformation();
result = last.offset + field_size;
}
return result;
}
// static // static
std::string Type::ComputeName(const std::string& basename, std::string Type::ComputeName(const std::string& basename,
MaybeSpecializationKey specialized_from) { MaybeSpecializationKey specialized_from) {
...@@ -695,7 +718,18 @@ VisitResult VisitResult::NeverResult() { ...@@ -695,7 +718,18 @@ VisitResult VisitResult::NeverResult() {
} }
std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const { std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
std::string size_string = "#no size"; auto optional = GetOptionalFieldSizeInformation();
if (optional.has_value()) {
return *optional;
}
Error("fields of type ", *name_and_type.type, " are not (yet) supported")
.Position(pos);
return std::make_tuple(0, "#no size");
}
base::Optional<std::tuple<size_t, std::string>>
Field::GetOptionalFieldSizeInformation() const {
std::string size_string;
const Type* field_type = this->name_and_type.type; const Type* field_type = this->name_and_type.type;
size_t field_size = 0; size_t field_size = 0;
if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) { if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
...@@ -734,8 +768,12 @@ std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const { ...@@ -734,8 +768,12 @@ std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
} else if (field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) { } else if (field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) {
field_size = TargetArchitecture::RawPtrSize(); field_size = TargetArchitecture::RawPtrSize();
size_string = "kIntptrSize"; size_string = "kIntptrSize";
} else if (const StructType* struct_type =
StructType::DynamicCast(field_type)) {
field_size = struct_type->PackedSize();
size_string = std::to_string(field_size);
} else { } else {
ReportError("fields of type ", *field_type, " are not (yet) supported"); return {};
} }
return std::make_tuple(field_size, size_string); return std::make_tuple(field_size, size_string);
} }
......
...@@ -183,14 +183,28 @@ struct Field { ...@@ -183,14 +183,28 @@ struct Field {
// reliance of string types is quite clunky. // reliance of string types is quite clunky.
std::tuple<size_t, std::string> GetFieldSizeInformation() const; std::tuple<size_t, std::string> GetFieldSizeInformation() const;
// Like GetFieldSizeInformation, but rather than raising an error if the
// field's size is unknown, it returns no value.
base::Optional<std::tuple<size_t, std::string>>
GetOptionalFieldSizeInformation() const;
SourcePosition pos; SourcePosition pos;
const AggregateType* aggregate; const AggregateType* aggregate;
base::Optional<NameAndType> index; base::Optional<NameAndType> index;
NameAndType name_and_type; NameAndType name_and_type;
// The byte offset of this field from the beginning of the containing class or
// struct. Most structs are never packed together in memory, and are only used
// to hold a batch of related CSA TNode values, in which case |offset| is
// irrelevant. In structs, this value can be set to kInvalidOffset to indicate
// that the struct should never be used in packed form.
size_t offset; size_t offset;
bool is_weak; bool is_weak;
bool const_qualified; bool const_qualified;
bool generate_verify; bool generate_verify;
static constexpr size_t kInvalidOffset = SIZE_MAX;
}; };
std::ostream& operator<<(std::ostream& os, const Field& name_and_type); std::ostream& operator<<(std::ostream& os, const Field& name_and_type);
...@@ -503,6 +517,9 @@ class StructType final : public AggregateType { ...@@ -503,6 +517,9 @@ class StructType final : public AggregateType {
std::string GetGeneratedTypeNameImpl() const override; std::string GetGeneratedTypeNameImpl() const override;
// Returns the sum of the size of all members. Does not validate alignment.
size_t PackedSize() const;
private: private:
friend class TypeOracle; friend class TypeOracle;
StructType(Namespace* nspace, const StructDeclaration* decl, StructType(Namespace* nspace, const StructDeclaration* decl,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment