Commit 8ed9be48 authored by Tobias Tebbi, committed by Commit Bot

[torque] allow allocation of strings

This CL generalizes and improves how we handle allocations in Torque.

Overview of the changes:
- Remove the obsolete special handling for JSObject classes; it was
  incomplete and breaks as soon as slack tracking is active.
- Handle array initialization using slices.
- Properly align allocation sizes; this is what enables allocating
  strings (see the worked example below).
- Port AllocateSeq{One,Two}ByteString to Torque, which is much simpler
  than the old CSA code, since allocation-size alignment and
  large-object-space allocation now happen out of the box.
- Remove obsolete or unnecessary intrinsics; some of them turn into
  macros in the torque_internal namespace.
- Distinguish between header size and overall size for ClassType, and
  make the size optional, defined only when it is statically known.
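
As a worked example of the alignment rule, the following standalone C++
sketch mirrors the AlignTagged/IsTaggedAligned Torque macros added in this
CL (assumptions: a 64-bit build without pointer compression, so kTaggedSize
is 8 and kObjectAlignmentMask is 7):

#include <cassert>
#include <cstdint>

constexpr intptr_t kTaggedSize = 8;
constexpr intptr_t kObjectAlignmentMask = kTaggedSize - 1;

// Round a byte count up to the next multiple of kTaggedSize.
constexpr intptr_t AlignTagged(intptr_t x) {
  return (x + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}

int main() {
  assert(AlignTagged(29) == 32);  // 29 rounds up to the next multiple of 8
  assert(AlignTagged(32) == 32);  // already-aligned sizes are unchanged
  assert((AlignTagged(29) & kObjectAlignmentMask) == 0);  // IsTaggedAligned
}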


Bug: v8:10004, v8:7793
Change-Id: I623db233e7fb4deed54e8039ae0c24705e9a44e8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1932356
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65397}
parent 3ab75534
@@ -190,6 +190,8 @@ type FrameArray extends FixedArray;
const kTaggedSize: constexpr int31 generates 'kTaggedSize';
const kDoubleSize: constexpr int31 generates 'kDoubleSize';
const kVariableSizeSentinel:
constexpr int31 generates 'kVariableSizeSentinel';
const kSmiTagSize: constexpr int31 generates 'kSmiTagSize';
const V8_INFINITY: constexpr float64 generates 'V8_INFINITY';
@@ -920,12 +922,18 @@ extern macro FixedCOWArrayMapConstant(): Map;
extern macro EmptyByteArrayConstant(): ByteArray;
extern macro EmptyFixedArrayConstant(): EmptyFixedArray;
extern macro PromiseCapabilityMapConstant(): Map;
extern macro OneByteStringMapConstant(): Map;
extern macro StringMapConstant(): Map;
const kFixedArrayMap: Map = FixedArrayMapConstant();
const kCOWMap: Map = FixedCOWArrayMapConstant();
const kEmptyByteArray: ByteArray = EmptyByteArrayConstant();
const kEmptyFixedArray: EmptyFixedArray = EmptyFixedArrayConstant();
const kPromiseCapabilityMap: Map = PromiseCapabilityMapConstant();
// The map of a non-internalized internal SeqOneByteString.
const kOneByteStringMap: Map = OneByteStringMapConstant();
// The map of a non-internalized internal SeqTwoByteString.
const kStringMap: Map = StringMapConstant();
extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map):
bool;
@@ -1281,8 +1289,6 @@ transitioning macro GetMethod(implicit context: Context)(
extern macro NumberToString(Number): String;
extern macro IsOneByteStringInstanceType(InstanceType): bool;
extern macro AllocateSeqOneByteString(uint32): String;
extern macro AllocateSeqTwoByteString(uint32): String;
// After converting an index to an integer, calculate a relative index:
// return index < 0 ? max(length + index, 0) : min(index, length)
......
@@ -56,6 +56,9 @@ FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
FromConstexpr<uint32, constexpr int31>(i: constexpr int31): uint32 {
return Unsigned(Int32Constant(i));
}
FromConstexpr<uint32, constexpr uint32>(i: constexpr uint32): uint32 {
return Unsigned(%FromConstexpr<int32>(i));
}
FromConstexpr<uintptr, constexpr int31>(i: constexpr int31): uintptr {
return ChangeUint32ToWord(i);
}
@@ -131,6 +134,9 @@ Convert<uintptr, uint32>(ui: uint32): uintptr {
Convert<intptr, uint16>(ui: uint16): intptr {
return Signed(ChangeUint32ToWord(ui));
}
Convert<intptr, uint8>(ui: uint8): intptr {
return Signed(ChangeUint32ToWord(ui));
}
Convert<int32, uint8>(i: uint8): int32 {
return Signed(Convert<uint32>(i));
}
......
@@ -101,43 +101,62 @@ namespace torque_internal {
unsafeMarker: Unsafe;
}
} // namespace torque_internal
macro AddIndexedFieldSizeToObjectSize(
    baseSize: intptr, arrayLength: intptr,
    fieldSize: constexpr int32): intptr {
  const arrayLength = Convert<int32>(arrayLength);
  const byteLength = TryInt32Mul(arrayLength, fieldSize)
      otherwise unreachable;
  return TryIntPtrAdd(baseSize, Convert<intptr>(byteLength))
      otherwise unreachable;
}
// These intrinsics should never be called from Torque code. They're used
// internally by the 'new' operator and only declared here because it's simpler
// than building the definition from C++.
intrinsic %GetAllocationBaseSize<Class : type extends HeapObject>(map: Map):
    intptr;
intrinsic %Allocate<Class : type extends HeapObject>(size: intptr): Class;
intrinsic %GetStructMap(instanceKind: constexpr InstanceType): Map;
intrinsic %AddIndexedFieldSizeToObjectSize<T: type>(
    baseSize: intptr, indexSize: T, fieldSize: int32): intptr {
  const convertedIndexSize = Convert<int32>(indexSize);
  const variableSize: int32 =
      TryInt32Mul(convertedIndexSize, fieldSize) otherwise unreachable;
  const convertedVariableSize = Convert<intptr>(variableSize);
  return TryIntPtrAdd(baseSize, convertedVariableSize) otherwise unreachable;
}
intrinsic
%InitializeFieldsFromIterator<Container: type, Index: type, Iterator: type>(
    c: Container, length: Index, i: Iterator) {
  try {
    let mutableIterator = i;
    let current: Index = 0;
    while (current < length) {
      // TODO(danno): The indexed accessor on the container requires that the
      // '[]=' operator be defined explicitly for the Container
      // (e.g. FixedArray). We should change this to use slice references
      // once they are implemented.
      c[current++] = mutableIterator.Next() otherwise NoMore;
    }
  }
  label NoMore deferred {
    unreachable;
  }
}
macro AlignTagged(x: intptr): intptr {
  // Round up to a multiple of kTaggedSize.
  return (x + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}
macro IsTaggedAligned(x: intptr): bool {
  return (x & kObjectAlignmentMask) == 0;
}
macro ValidAllocationSize(sizeInBytes: intptr, map: Map): bool {
  if (sizeInBytes <= 0) return false;
  if (!IsTaggedAligned(sizeInBytes)) return false;
  const instanceSizeInWords = Convert<intptr>(map.instance_size_in_words);
  return instanceSizeInWords == kVariableSizeSentinel ||
      instanceSizeInWords * kTaggedSize == sizeInBytes;
}
type UninitializedHeapObject extends HeapObject;
extern macro AllocateAllowLOS(intptr): UninitializedHeapObject;
extern macro GetStructMap(constexpr InstanceType): Map;
macro Allocate(sizeInBytes: intptr, map: Map): UninitializedHeapObject {
  assert(ValidAllocationSize(sizeInBytes, map));
  return AllocateAllowLOS(sizeInBytes);
}
macro InitializeFieldsFromIterator<T: type, Iterator: type>(
    target: Slice<T>, originIterator: Iterator) {
  let targetIterator = target.Iterator();
  let originIterator = originIterator;
  while (true) {
    const ref: &T = targetIterator.Next() otherwise break;
    *ref = originIterator.Next() otherwise unreachable;
  }
}
// Dummy implementations: do not initialize for UninitializedIterator.
InitializeFieldsFromIterator<char8, UninitializedIterator>(
    _target: Slice<char8>, _originIterator: UninitializedIterator) {}
InitializeFieldsFromIterator<char16, UninitializedIterator>(
    _target: Slice<char16>, _originIterator: UninitializedIterator) {}
} // namespace torque_internal
// Indicates that an array-field should not be initialized.
// For safety reasons, this is only allowed for untagged types.
struct UninitializedIterator {}
// %RawDownCast should *never* be used anywhere in Torque code except for
// in Torque-based UnsafeCast operators preceded by an appropriate
......
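For readability, the ValidAllocationSize check above amounts to the
following plain C++ (a sketch under the same 64-bit assumptions as the
example in the commit message; kVariableSizeSentinel is 0 in V8 and marks
maps whose instances are variable-sized, such as strings):

#include <cstdint>

constexpr intptr_t kTaggedSize = 8;
constexpr intptr_t kObjectAlignmentMask = kTaggedSize - 1;
constexpr intptr_t kVariableSizeSentinel = 0;

// A size is allocatable iff it is positive, tagged-aligned, and consistent
// with the instance size recorded in the map (unless that size is the
// variable-size sentinel).
bool ValidAllocationSize(intptr_t size_in_bytes,
                         intptr_t instance_size_in_words) {
  if (size_in_bytes <= 0) return false;
  if ((size_in_bytes & kObjectAlignmentMask) != 0) return false;
  return instance_size_in_words == kVariableSizeSentinel ||
         instance_size_in_words * kTaggedSize == size_in_bytes;
}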
@@ -3349,60 +3349,6 @@ TNode<BoolT> CodeStubAssembler::IsZeroOrContext(SloppyTNode<Object> object) {
[=] { return IsContext(CAST(object)); });
}
TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
TNode<Uint32T> length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
Label if_lengthiszero(this), if_sizeissmall(this),
if_notsizeissmall(this, Label::kDeferred), if_join(this);
GotoIf(Word32Equal(length, Uint32Constant(0)), &if_lengthiszero);
TNode<IntPtrT> raw_size = GetArrayAllocationSize(
Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
TNode<IntPtrT> size =
WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
BIND(&if_sizeissmall);
{
// Just allocate the SeqOneByteString in new space.
TNode<HeapObject> result =
AllocateInNewSpace(UncheckedCast<IntPtrT>(size), flags);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kOneByteStringMap));
StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
length, MachineRepresentation::kWord32);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
Int32Constant(String::kEmptyHashField),
MachineRepresentation::kWord32);
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
TNode<Object> result =
CallRuntime(Runtime::kAllocateSeqOneByteString, NoContextConstant(),
ChangeUint32ToTagged(length));
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_lengthiszero);
{
var_result.Bind(EmptyStringConstant());
Goto(&if_join);
}
BIND(&if_join);
return CAST(var_result.value());
}
TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
uint32_t length, AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
@@ -3421,60 +3367,6 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
return CAST(result);
}
TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
TNode<Uint32T> length, AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
// Compute the SeqTwoByteString size and check if it fits into new space.
Label if_lengthiszero(this), if_sizeissmall(this),
if_notsizeissmall(this, Label::kDeferred), if_join(this);
GotoIf(Word32Equal(length, Uint32Constant(0)), &if_lengthiszero);
TNode<IntPtrT> raw_size = GetArrayAllocationSize(
Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
TNode<IntPtrT> size =
WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
BIND(&if_sizeissmall);
{
// Just allocate the SeqTwoByteString in new space.
TNode<HeapObject> result =
AllocateInNewSpace(UncheckedCast<IntPtrT>(size), flags);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kStringMap));
StoreMapNoWriteBarrier(result, RootIndex::kStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
length, MachineRepresentation::kWord32);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
Int32Constant(String::kEmptyHashField),
MachineRepresentation::kWord32);
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
TNode<Object> result =
CallRuntime(Runtime::kAllocateSeqTwoByteString, NoContextConstant(),
ChangeUint32ToTagged(length));
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_lengthiszero);
{
var_result.Bind(EmptyStringConstant());
Goto(&if_join);
}
BIND(&if_join);
return CAST(var_result.value());
}
TNode<String> CodeStubAssembler::AllocateSlicedString(RootIndex map_root_index,
TNode<Uint32T> length,
TNode<String> parent,
......
@@ -115,6 +115,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(number_to_string, number_to_string, NumberToString) \
V(Object_string, Object_string, ObjectString) \
V(object_to_string, object_to_string, ObjectToString) \
V(OneByteStringMap, one_byte_string_map, OneByteStringMap) \
V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \
V(PreparseDataMap, preparse_data_map, PreparseDataMap) \
@@ -137,6 +138,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
V(string_string, string_string, StringString) \
V(string_to_string, string_to_string, StringToString) \
V(StringMap, string_map, StringMap) \
V(SymbolMap, symbol_map, SymbolMap) \
V(TheHoleValue, the_hole_value, TheHole) \
V(then_string, then_string, ThenString) \
@@ -803,6 +805,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
TNode<HeapObject> Allocate(TNode<IntPtrT> size,
AllocationFlags flags = kNone);
TNode<HeapObject> AllocateAllowLOS(TNode<IntPtrT> size) {
return Allocate(size, AllocationFlag::kAllowLargeObjectAllocation);
}
TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous,
@@ -1756,13 +1762,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Allocate a SeqOneByteString with the given length.
TNode<String> AllocateSeqOneByteString(uint32_t length,
AllocationFlags flags = kNone);
TNode<String> AllocateSeqOneByteString(TNode<Uint32T> length,
AllocationFlags flags = kNone);
using TorqueGeneratedExportedMacrosAssembler::AllocateSeqOneByteString;
// Allocate a SeqTwoByteString with the given length.
TNode<String> AllocateSeqTwoByteString(uint32_t length,
AllocationFlags flags = kNone);
TNode<String> AllocateSeqTwoByteString(TNode<Uint32T> length,
AllocationFlags flags = kNone);
using TorqueGeneratedExportedMacrosAssembler::AllocateSeqTwoByteString;
// Allocate a SlicedOneByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
......
@@ -38,11 +38,7 @@ macro NewJSObject(implicit context: Context)(): JSObject {
const objectFunction: JSFunction = GetObjectFunction();
const map: Map = Cast<Map>(objectFunction.prototype_or_initial_map)
otherwise unreachable;
return new JSObject{
map,
properties_or_hash: kEmptyFixedArray,
elements: kEmptyFixedArray
};
return AllocateJSObjectFromMap(map);
}
@abstract
......
@@ -19,3 +19,6 @@ extern class Symbol extends Name {
type PublicSymbol extends Symbol;
type PrivateSymbol extends Symbol;
const kNameEmptyHashField:
constexpr uint32 generates 'Name::kEmptyHashField';
@@ -56,3 +56,27 @@ extern class ThinString extends String {
// A direct string can be accessed directly through CSA without going into the
// C++ runtime. See also: ToDirectStringAssembler.
type DirectString extends String;
@export
macro AllocateSeqOneByteString(length: uint32): String {
assert(length <= kStringMaxLength);
if (length == 0) return kEmptyString;
return new SeqOneByteString{
map: kOneByteStringMap,
hash_field: kNameEmptyHashField,
length: Signed(length),
chars: ...UninitializedIterator {}
};
}
@export
macro AllocateSeqTwoByteString(length: uint32): String {
assert(length <= kStringMaxLength);
if (length == 0) return kEmptyString;
return new SeqTwoByteString{
map: kStringMap,
hash_field: kNameEmptyHashField,
length: Signed(length),
chars: ...UninitializedIterator {}
};
}
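
A short worked example of the sizes these macros end up allocating (a
sketch; assumes the 64-bit layout used above, with a 16-byte SeqString
header holding map, hash_field and length):

#include <cstdint>
#include <iostream>

constexpr intptr_t kSeqStringHeaderSize = 16;  // map + hash_field + length
constexpr intptr_t kObjectAlignmentMask = 7;

// Overall object size for a sequential string of `length` characters that
// are `charWidth` bytes wide, rounded up to a multiple of kTaggedSize.
constexpr intptr_t SeqStringSize(intptr_t length, intptr_t charWidth) {
  return (kSeqStringHeaderSize + length * charWidth + kObjectAlignmentMask) &
         ~kObjectAlignmentMask;
}

int main() {
  std::cout << SeqStringSize(13, 1) << "\n";  // SeqOneByteString: 29 -> 32
  std::cout << SeqStringSize(13, 2) << "\n";  // SeqTwoByteString: 42 -> 48
}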
@@ -35,6 +35,8 @@ static const char* const SMI_TYPE_STRING = "Smi";
static const char* const TAGGED_TYPE_STRING = "Tagged";
static const char* const STRONG_TAGGED_TYPE_STRING = "StrongTagged";
static const char* const UNINITIALIZED_TYPE_STRING = "Uninitialized";
static const char* const UNINITIALIZED_HEAP_OBJECT_TYPE_STRING =
"UninitializedHeapObject";
static const char* const RAWPTR_TYPE_STRING = "RawPtr";
static const char* const CONST_STRING_TYPE_STRING = "constexpr string";
static const char* const STRING_TYPE_STRING = "String";
@@ -56,6 +58,8 @@ static const char* const TORQUE_INTERNAL_NAMESPACE_STRING = "torque_internal";
static const char* const REFERENCE_TYPE_STRING = "Reference";
static const char* const SLICE_TYPE_STRING = "Slice";
static const char* const WEAK_TYPE_STRING = "Weak";
static const char* const UNINITIALIZED_ITERATOR_TYPE_STRING =
"UninitializedIterator";
static const char* const GENERIC_TYPE_INSTANTIATION_NAMESPACE_STRING =
"_generic_type_instantiation_namespace";
......
@@ -198,13 +198,18 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
if (parameter_types.size() != 1) {
ReportError("%RawDownCast must take a single parameter");
}
if (!return_type->IsSubtypeOf(parameter_types[0])) {
const Type* original_type = parameter_types[0];
bool is_subtype =
return_type->IsSubtypeOf(original_type) ||
(original_type == TypeOracle::GetUninitializedHeapObjectType() &&
return_type->IsSubtypeOf(TypeOracle::GetHeapObjectType()));
if (!is_subtype) {
ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ",
*parameter_types[0]);
*original_type);
}
if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
if (return_type->GetGeneratedTNodeTypeName() !=
parameter_types[0]->GetGeneratedTNodeTypeName()) {
original_type->GetGeneratedTNodeTypeName()) {
out_ << "TORQUE_CAST";
}
}
@@ -238,29 +243,6 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
s << "%FromConstexpr does not support return type " << *return_type;
ReportError(s.str());
}
} else if (instruction.intrinsic->ExternalName() ==
"%GetAllocationBaseSize") {
if (instruction.specialization_types.size() != 1) {
ReportError(
"incorrect number of type parameters for "
"%GetAllocationBaseSize (should be one)");
}
const ClassType* class_type =
ClassType::cast(instruction.specialization_types[0]);
// Special case classes that may not always have a fixed size (e.g.
// JSObjects). Their size must be fetched from the map.
if (class_type != TypeOracle::GetJSObjectType()) {
out_ << "CodeStubAssembler(state_).IntPtrConstant((";
args[0] = std::to_string(class_type->size());
} else {
out_ << "CodeStubAssembler(state_).TimesTaggedSize(CodeStubAssembler("
"state_).LoadMapInstanceSizeInWords(";
}
} else if (instruction.intrinsic->ExternalName() == "%Allocate") {
out_ << "ca_.UncheckedCast<" << return_type->GetGeneratedTNodeTypeName()
<< ">(CodeStubAssembler(state_).Allocate";
} else if (instruction.intrinsic->ExternalName() == "%GetStructMap") {
out_ << "CodeStubAssembler(state_).GetStructMap";
} else {
ReportError("no built in intrinsic with name " +
instruction.intrinsic->ExternalName());
@@ -268,22 +250,11 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
out_ << "(";
PrintCommaSeparatedList(out_, args);
if (instruction.intrinsic->ExternalName() == "%Allocate") out_ << ")";
if (instruction.intrinsic->ExternalName() == "%GetAllocationBaseSize")
out_ << "))";
if (return_type->IsStructType()) {
out_ << ").Flatten();\n";
} else {
out_ << ");\n";
}
if (instruction.intrinsic->ExternalName() == "%Allocate") {
out_ << " CodeStubAssembler(state_).InitializeFieldsWithRoot("
<< results[0] << ", ";
out_ << "CodeStubAssembler(state_).IntPtrConstant("
<< std::to_string(ClassType::cast(return_type)->size()) << "), ";
PrintCommaSeparatedList(out_, args);
out_ << ", RootIndex::kUndefinedValue);\n";
}
}
void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
......
@@ -117,8 +117,12 @@ void DeclarationVisitor::Visit(ExternalBuiltinDeclaration* decl) {
void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl) {
Signature signature = TypeVisitor::MakeSignature(decl);
if (signature.parameter_types.types.size() == 0 ||
!(signature.parameter_types.types[0] == TypeOracle::GetContextType())) {
if (signature.parameter_types.types.size() == 0) {
ReportError(
"Missing parameters for runtime function, at least the context "
"parameter is required.");
}
if (!(signature.parameter_types.types[0] == TypeOracle::GetContextType())) {
ReportError(
"first parameter to runtime functions has to be the context and have "
"type Context, but found type ",
......
@@ -46,9 +46,9 @@ void CheckAlreadyDeclared(const std::string& name, const char* new_type) {
} // namespace
std::vector<Declarable*> Declarations::LookupGlobalScope(
const std::string& name) {
const QualifiedName& name) {
std::vector<Declarable*> d =
GlobalContext::GetDefaultNamespace()->Lookup(QualifiedName(name));
GlobalContext::GetDefaultNamespace()->Lookup(name);
if (d.empty()) {
std::stringstream s;
s << "cannot find \"" << name << "\" in global scope";
@@ -76,7 +76,7 @@ const Type* Declarations::LookupType(const Identifier* name) {
return alias->type();
}
const Type* Declarations::LookupGlobalType(const std::string& name) {
const Type* Declarations::LookupGlobalType(const QualifiedName& name) {
TypeAlias* declaration = EnsureUnique(
FilterDeclarables<TypeAlias>(LookupGlobalScope(name)), name, "type");
return declaration->type();
@@ -139,8 +139,9 @@ GenericType* Declarations::LookupUniqueGenericType(const QualifiedName& name) {
GenericType* Declarations::LookupGlobalUniqueGenericType(
const std::string& name) {
return EnsureUnique(FilterDeclarables<GenericType>(LookupGlobalScope(name)),
name, "generic type");
return EnsureUnique(
FilterDeclarables<GenericType>(LookupGlobalScope(QualifiedName(name))),
name, "generic type");
}
base::Optional<GenericType*> Declarations::TryLookupGenericType(
......
@@ -56,12 +56,12 @@ class Declarations {
return d;
}
static std::vector<Declarable*> LookupGlobalScope(const std::string& name);
static std::vector<Declarable*> LookupGlobalScope(const QualifiedName& name);
static const TypeAlias* LookupTypeAlias(const QualifiedName& name);
static const Type* LookupType(const QualifiedName& name);
static const Type* LookupType(const Identifier* identifier);
static const Type* LookupGlobalType(const std::string& name);
static const Type* LookupGlobalType(const QualifiedName& name);
static Builtin* FindSomeInternalBuiltinWithType(
const BuiltinPointerType* type);
......
This diff is collapsed.
@@ -170,6 +170,7 @@ class LocationReference {
struct InitializerResults {
std::vector<Identifier*> names;
std::map<std::string, VisitResult> field_value_map;
std::map<std::string, VisitResult> array_lengths;
};
template <class T>
@@ -369,13 +370,19 @@ class ImplementationVisitor {
InitializerResults VisitInitializerResults(
const ClassType* class_type,
const std::vector<NameAndExpression>& expressions);
void InitializeFieldFromSpread(VisitResult object, const Field& field,
LocationReference GenerateFieldReference(VisitResult object,
const NameAndType& field,
const ClassType* class_type);
VisitResult GenerateArrayLength(VisitResult object, const Field& field);
VisitResult GenerateArrayLength(const ClassType* class_type,
const InitializerResults& initializer_results,
const Field& field);
VisitResult GenerateObjectSize(const ClassType* class_type,
const InitializerResults& initializer_results);
VisitResult AddVariableObjectSize(
VisitResult object_size, const ClassType* current_class,
const InitializerResults& initializer_results);
void InitializeFieldFromSpread(VisitResult object, const Field& field,
const InitializerResults& initializer_results,
const ClassType* class_type);
void InitializeClass(const ClassType* class_type, VisitResult allocate_result,
const InitializerResults& initializer_results);
......
@@ -292,7 +292,9 @@ void UnsafeCastInstruction::TypeInstruction(Stack<const Type*>* stack,
void CreateFieldReferenceInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
ExpectSubtype(stack->Top(), type);
if (stack->Top() != TypeOracle::GetUninitializedHeapObjectType()) {
ExpectSubtype(stack->Top(), type);
}
stack->Push(TypeOracle::GetIntPtrType());
}
......
@@ -206,9 +206,12 @@ struct NamespaceConstantInstruction : InstructionBase {
struct CreateFieldReferenceInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
CreateFieldReferenceInstruction(const Type* type, std::string field_name)
: type(type), field_name(std::move(field_name)) {}
const Type* type;
CreateFieldReferenceInstruction(const ClassType* type, std::string field_name)
: type(type), field_name(std::move(field_name)) {
// Trigger errors early.
this->type->LookupField(this->field_name);
}
const ClassType* type;
std::string field_name;
};
......
@@ -190,6 +190,12 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(UNINITIALIZED_TYPE_STRING);
}
static const Type* GetUninitializedHeapObjectType() {
return Get().GetBuiltinType(
QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
UNINITIALIZED_HEAP_OBJECT_TYPE_STRING));
}
static const Type* GetSmiType() {
return Get().GetBuiltinType(SMI_TYPE_STRING);
}
@@ -270,6 +276,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(JS_FUNCTION_TYPE_STRING);
}
static const Type* GetUninitializedIteratorType() {
return Get().GetBuiltinType(UNINITIALIZED_ITERATOR_TYPE_STRING);
}
static bool IsImplicitlyConvertableFrom(const Type* to, const Type* from) {
for (GenericCallable* from_constexpr :
Declarations::LookupGeneric(kFromConstexprMacroName)) {
@@ -295,9 +305,12 @@ class TypeOracle : public ContextualClass<TypeOracle> {
static Namespace* CreateGenericTypeInstantiationNamespace();
private:
const Type* GetBuiltinType(const std::string& name) {
const Type* GetBuiltinType(const QualifiedName& name) {
return Declarations::LookupGlobalType(name);
}
const Type* GetBuiltinType(const std::string& name) {
return GetBuiltinType(QualifiedName(name));
}
Deduplicator<BuiltinPointerType> function_pointer_types_;
std::vector<const BuiltinPointerType*> all_builtin_pointer_types_;
......
@@ -307,7 +307,8 @@ const ClassType* TypeVisitor::ComputeType(
}
const Type* super_type = TypeVisitor::ComputeType(*decl->super);
const ClassType* super_class = ClassType::DynamicCast(super_type);
const Type* struct_type = Declarations::LookupGlobalType("Struct");
const Type* struct_type =
Declarations::LookupGlobalType(QualifiedName("Struct"));
if (!super_class || super_class != struct_type) {
ReportError("Intern class ", decl->name->value,
" must extend class Struct.");
@@ -385,7 +386,10 @@ Signature TypeVisitor::MakeSignature(const CallableDeclaration* declaration) {
void TypeVisitor::VisitClassFieldsAndMethods(
ClassType* class_type, const ClassDeclaration* class_declaration) {
const ClassType* super_class = class_type->GetSuperClass();
size_t class_offset = super_class ? super_class->size() : 0;
size_t class_offset = super_class ? super_class->header_size() : 0;
size_t header_size = class_offset;
DCHECK_IMPLIES(super_class && !super_class->size(),
class_declaration->fields.empty());
bool seen_indexed_field = false;
for (const ClassFieldExpression& field_expression :
class_declaration->fields) {
@@ -467,9 +471,18 @@ void TypeVisitor::VisitClassFieldsAndMethods(
}
if (!field_expression.index) {
class_offset += field_size;
// In-object properties are not considered part of the header.
if (!class_type->IsShape()) {
header_size = class_offset;
}
}
}
class_type->SetSize(class_offset);
DCHECK_GT(header_size, 0);
class_type->header_size_ = header_size;
if ((!super_class || super_class->size()) && !seen_indexed_field) {
DCHECK_GE(class_offset, header_size);
class_type->size_ = class_offset;
}
class_type->GenerateAccessors();
DeclareMethods(class_type, class_declaration->methods);
}
......
@@ -434,7 +434,6 @@ ClassType::ClassType(const Type* parent, Namespace* nspace,
const std::string& generates, const ClassDeclaration* decl,
const TypeAlias* alias)
: AggregateType(Kind::kClassType, parent, nspace, name),
size_(0),
flags_(flags & ~(kInternalFlags)),
generates_(generates),
decl_(decl),
......
@@ -608,12 +608,18 @@ class ClassType final : public AggregateType {
bool IsShape() const { return flags_ & ClassFlag::kIsShape; }
bool HasStaticSize() const;
bool HasIndexedField() const override;
size_t size() const { return size_; }
size_t header_size() const {
if (!is_finalized_) Finalize();
return header_size_;
}
base::Optional<size_t> size() const {
if (!is_finalized_) Finalize();
return size_;
}
const ClassType* GetSuperClass() const {
if (parent() == nullptr) return nullptr;
return parent()->IsClassType() ? ClassType::DynamicCast(parent()) : nullptr;
}
void SetSize(size_t size) { size_ = size; }
void GenerateAccessors();
bool AllowInstantiation() const;
const Field& RegisterField(Field field) override {
@@ -647,7 +653,8 @@ class ClassType final : public AggregateType {
ClassFlags flags, const std::string& generates,
const ClassDeclaration* decl, const TypeAlias* alias);
size_t size_;
size_t header_size_;
base::Optional<size_t> size_;
mutable ClassFlags flags_;
const std::string generates_;
const ClassDeclaration* decl_;
......
@@ -25,6 +25,7 @@ namespace torque_internal {
const object: HeapObject;
const offset: intptr;
}
type UninitializedHeapObject extends HeapObject;
}
type Tagged generates 'TNode<MaybeObject>' constexpr 'MaybeObject';
......