Commit 59e8d45a authored by Tobias Tebbi, committed by Commit Bot

[torque] multiple arrays in one object

This allows the definition of classes with several arrays and ports
SmallOrderedHashTable subclasses to Torque as an example, including
the existing CSA allocation functions for them.

Overview of changes:
- Introduce ResidueClass to encapsulate the modulo-arithmetic
  necessary to do alignment checks.
- Add MachineOperatorReducer to the CSA pipeline to address now
  missing CSA ad-hoc constant folding that got blocked by a
  temporary phi.
- Allow assignments to references to structs. This is needed to
  initialize the data_table part of SmallOrderedHashMap.
- Make the NumberLiteralExpression AST-node store a double instead
  of a string. This is necessary to detect arrays with constant size
  used for padding.
- Turn offsets into base::Optional<size_t> to ensure we don't use
  an invalid or statically unknown offset.
- Remove CreateFieldReferenceInstruction since it doesn't work for
  complex offset computations and the logic can be expressed better
  in ImplementationVisitor.
- Validate alignment of structs embedded in classes.

Bug: v8:10004 v8:7793
Change-Id: Ifa414b42278e572a0c577bf9da3d37f80771a258
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1958011
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65538}
parent d2528de4
...@@ -88,6 +88,15 @@ constexpr bool all(Args... rest) { ...@@ -88,6 +88,15 @@ constexpr bool all(Args... rest) {
return fold(std::logical_and<>{}, true, rest...); return fold(std::logical_and<>{}, true, rest...);
} }
// Corresponds to C++17's std::void_t.
// Used for SFINAE based on type errors.
//
// Implemented through a helper struct rather than as a direct
// `using void_t = void;` alias so that the parameter pack is guaranteed to
// participate in template substitution on pre-C++17 compilers
// (workaround for CWG issue 1558).
template <class... Ts>
struct make_void {
  typedef void type;
};
template <class... Ts>
using void_t = typename make_void<Ts...>::type;
} // namespace base } // namespace base
} // namespace v8 } // namespace v8
......
...@@ -1531,3 +1531,17 @@ macro ReplaceTheHoleWithUndefined(o: JSAny|TheHole): JSAny { ...@@ -1531,3 +1531,17 @@ macro ReplaceTheHoleWithUndefined(o: JSAny|TheHole): JSAny {
} }
extern macro DecodeScopeInfoHasContextExtension(intptr): intptr; extern macro DecodeScopeInfoHasContextExtension(intptr): intptr;
// An iterator that yields the same constant value indefinitely. Used to
// spread-initialize variable-length array fields with a fixed filler value,
// e.g. `...ConstantIterator<uint8>(0)`.
struct ConstantIterator<T: type> {
// Never empty: the iterator produces values forever; the consumer decides
// how many elements to take.
macro Empty(): bool {
return false;
}
// Always yields the stored value; the _NoMore label is never taken.
macro Next(): T labels _NoMore {
return this.value;
}
value: T;
}
// Convenience constructor macro: wraps `value` in a ConstantIterator.
macro ConstantIterator<T: type>(value: T): ConstantIterator<T> {
return ConstantIterator{value};
}
...@@ -47,6 +47,11 @@ FromConstexpr<Number, constexpr float64>(f: constexpr float64): Number { ...@@ -47,6 +47,11 @@ FromConstexpr<Number, constexpr float64>(f: constexpr float64): Number {
FromConstexpr<Number, constexpr int31>(i: constexpr int31): Number { FromConstexpr<Number, constexpr int31>(i: constexpr int31): Number {
return %FromConstexpr<Number>(i); return %FromConstexpr<Number>(i);
} }
// Converts a constexpr int31 to a runtime uint8, statically checking that
// the value fits in a byte.
FromConstexpr<uint8, constexpr int31>(i: constexpr int31): uint8 {
const i: uint32 = i;
// Compile-time guarantee that the %RawDownCast below cannot truncate.
StaticAssert(i <= 255);
return %RawDownCast<uint8>(i);
}
FromConstexpr<Number, constexpr Smi>(s: constexpr Smi): Number { FromConstexpr<Number, constexpr Smi>(s: constexpr Smi): Number {
return SmiConstant(s); return SmiConstant(s);
} }
...@@ -121,6 +126,9 @@ Convert<intptr, uint16>(ui: uint16): intptr { ...@@ -121,6 +126,9 @@ Convert<intptr, uint16>(ui: uint16): intptr {
Convert<intptr, uint8>(ui: uint8): intptr { Convert<intptr, uint8>(ui: uint8): intptr {
return Signed(ChangeUint32ToWord(ui)); return Signed(ChangeUint32ToWord(ui));
} }
// Truncates an intptr to its low byte. The explicit & 0xFF mask makes the
// value provably fit in a uint8, so the %RawDownCast is sound.
Convert<uint8, intptr>(i: intptr): uint8 {
return %RawDownCast<uint8>(Unsigned(TruncateIntPtrToInt32(i)) & 0xFF);
}
Convert<int32, uint8>(i: uint8): int32 { Convert<int32, uint8>(i: uint8): int32 {
return Signed(Convert<uint32>(i)); return Signed(Convert<uint32>(i));
} }
......
...@@ -3545,100 +3545,6 @@ CodeStubAssembler::AllocateOrderedHashTable<OrderedHashMap>(); ...@@ -3545,100 +3545,6 @@ CodeStubAssembler::AllocateOrderedHashTable<OrderedHashMap>();
template TNode<OrderedHashSet> template TNode<OrderedHashSet>
CodeStubAssembler::AllocateOrderedHashTable<OrderedHashSet>(); CodeStubAssembler::AllocateOrderedHashTable<OrderedHashSet>();
// Allocates and initializes an empty SmallOrderedHashTable subclass
// (SmallOrderedHashSet or SmallOrderedHashMap) with the given capacity.
// Layout after the fixed header: data table (capacity * kEntrySize tagged
// slots), hash table (capacity / kLoadFactor bytes), chain table
// (capacity bytes), with the total size rounded up to a tagged-size
// multiple.
template <typename CollectionType>
TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
TNode<IntPtrT> capacity) {
CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
CSA_ASSERT(this, IntPtrLessThan(
capacity, IntPtrConstant(CollectionType::kMaxCapacity)));
// Compute the offsets and byte sizes of the three variable-length parts.
TNode<IntPtrT> data_table_start_offset =
IntPtrConstant(CollectionType::DataTableStartOffset());
TNode<IntPtrT> data_table_size = IntPtrMul(
capacity, IntPtrConstant(CollectionType::kEntrySize * kTaggedSize));
TNode<Int32T> hash_table_size =
Int32Div(TruncateIntPtrToInt32(capacity),
Int32Constant(CollectionType::kLoadFactor));
TNode<IntPtrT> hash_table_start_offset =
IntPtrAdd(data_table_start_offset, data_table_size);
TNode<IntPtrT> hash_table_and_chain_table_size =
IntPtrAdd(ChangeInt32ToIntPtr(hash_table_size), capacity);
TNode<IntPtrT> total_size =
IntPtrAdd(hash_table_start_offset, hash_table_and_chain_table_size);
// Round total_size up to the next multiple of kTaggedSize:
// add (kTaggedSize - 1), divide, then multiply back.
TNode<IntPtrT> total_size_word_aligned =
IntPtrAdd(total_size, IntPtrConstant(kTaggedSize - 1));
total_size_word_aligned = ChangeInt32ToIntPtr(
Int32Div(TruncateIntPtrToInt32(total_size_word_aligned),
Int32Constant(kTaggedSize)));
total_size_word_aligned =
UncheckedCast<IntPtrT>(TimesTaggedSize(total_size_word_aligned));
// Allocate the table and add the proper map.
TNode<Map> small_ordered_hash_map =
CAST(LoadRoot(CollectionType::GetMapRootIndex()));
TNode<HeapObject> table_obj = AllocateInNewSpace(total_size_word_aligned);
StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map);
TNode<CollectionType> table = UncheckedCast<CollectionType>(table_obj);
{
// This store overlaps with the header fields stored below.
// Since it happens first, it effectively still just zero-initializes the
// padding.
constexpr int offset =
RoundDown<kTaggedSize>(CollectionType::PaddingOffset());
STATIC_ASSERT(offset + kTaggedSize == CollectionType::PaddingOffset() +
CollectionType::PaddingSize());
StoreObjectFieldNoWriteBarrier(table, offset, SmiConstant(0));
}
// Initialize the SmallOrderedHashTable fields.
StoreObjectByteNoWriteBarrier(
table, CollectionType::NumberOfBucketsOffset(),
Word32And(Int32Constant(0xFF), hash_table_size));
StoreObjectByteNoWriteBarrier(table, CollectionType::NumberOfElementsOffset(),
Int32Constant(0));
StoreObjectByteNoWriteBarrier(
table, CollectionType::NumberOfDeletedElementsOffset(), Int32Constant(0));
// Untagged base address for the memset/field-fill below.
TNode<IntPtrT> table_address =
IntPtrSub(BitcastTaggedToWord(table), IntPtrConstant(kHeapObjectTag));
TNode<IntPtrT> hash_table_start_address =
IntPtrAdd(table_address, hash_table_start_offset);
// Initialize the HashTable part.
// NOTE(review): every byte of the hash and chain tables is set to 0xFF —
// presumably the kNotFound sentinel; confirm against
// SmallOrderedHashTable's definition.
TNode<ExternalReference> memset =
ExternalConstant(ExternalReference::libc_memset_function());
CallCFunction(
memset, MachineType::AnyTagged(),
std::make_pair(MachineType::Pointer(), hash_table_start_address),
std::make_pair(MachineType::IntPtr(), IntPtrConstant(0xFF)),
std::make_pair(MachineType::UintPtr(), hash_table_and_chain_table_size));
// Initialize the DataTable part.
TNode<Oddball> filler = TheHoleConstant();
TNode<IntPtrT> data_table_start_address =
IntPtrAdd(table_address, data_table_start_offset);
TNode<IntPtrT> data_table_end_address =
IntPtrAdd(data_table_start_address, data_table_size);
StoreFieldsNoWriteBarrier(data_table_start_address, data_table_end_address,
filler);
return table;
}
template V8_EXPORT_PRIVATE TNode<SmallOrderedHashMap>
CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashMap>(
TNode<IntPtrT> capacity);
template V8_EXPORT_PRIVATE TNode<SmallOrderedHashSet>
CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
TNode<IntPtrT> capacity);
TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap( TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
TNode<Map> map, base::Optional<TNode<HeapObject>> properties, TNode<Map> map, base::Optional<TNode<HeapObject>> properties,
base::Optional<TNode<FixedArray>> elements, AllocationFlags flags, base::Optional<TNode<FixedArray>> elements, AllocationFlags flags,
......
...@@ -133,6 +133,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; ...@@ -133,6 +133,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \ V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
V(SloppyArgumentsElementsMap, sloppy_arguments_elements_map, \ V(SloppyArgumentsElementsMap, sloppy_arguments_elements_map, \
SloppyArgumentsElementsMap) \ SloppyArgumentsElementsMap) \
V(SmallOrderedHashSetMap, small_ordered_hash_set_map, \
SmallOrderedHashSetMap) \
V(SmallOrderedHashMapMap, small_ordered_hash_map_map, \
SmallOrderedHashMapMap) \
V(SmallOrderedNameDictionaryMap, small_ordered_name_dictionary_map, \
SmallOrderedNameDictionaryMap) \
V(species_symbol, species_symbol, SpeciesSymbol) \ V(species_symbol, species_symbol, SpeciesSymbol) \
V(StaleRegister, stale_register, StaleRegister) \ V(StaleRegister, stale_register, StaleRegister) \
V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
...@@ -1792,9 +1798,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler ...@@ -1792,9 +1798,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <typename CollectionType> template <typename CollectionType>
TNode<CollectionType> AllocateOrderedHashTable(); TNode<CollectionType> AllocateOrderedHashTable();
template <typename CollectionType>
TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);
TNode<JSObject> AllocateJSObjectFromMap( TNode<JSObject> AllocateJSObjectFromMap(
TNode<Map> map, TNode<Map> map,
base::Optional<TNode<HeapObject>> properties = base::nullopt, base::Optional<TNode<HeapObject>> properties = base::nullopt,
......
...@@ -150,6 +150,12 @@ template <class Type, class Enable = void> ...@@ -150,6 +150,12 @@ template <class Type, class Enable = void>
struct MachineRepresentationOf { struct MachineRepresentationOf {
static const MachineRepresentation value = Type::kMachineRepresentation; static const MachineRepresentation value = Type::kMachineRepresentation;
}; };
// If T defines kMachineType, then we take the machine representation from
// there.
// The second template argument uses void_t for SFINAE: this partial
// specialization is only viable when `T::kMachineType` is a well-formed
// expression.
template <class T>
struct MachineRepresentationOf<T, base::void_t<decltype(T::kMachineType)>> {
static const MachineRepresentation value = T::kMachineType.representation();
};
template <class T> template <class T>
struct MachineRepresentationOf< struct MachineRepresentationOf<
T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> { T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> {
......
...@@ -1891,6 +1891,7 @@ struct CsaEarlyOptimizationPhase { ...@@ -1891,6 +1891,7 @@ struct CsaEarlyOptimizationPhase {
GraphReducer graph_reducer(temp_zone, data->graph(), GraphReducer graph_reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), &data->info()->tick_counter(),
data->jsgraph()->Dead()); data->jsgraph()->Dead());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
BranchElimination branch_condition_elimination(&graph_reducer, BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone); data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
...@@ -1901,6 +1902,7 @@ struct CsaEarlyOptimizationPhase { ...@@ -1901,6 +1902,7 @@ struct CsaEarlyOptimizationPhase {
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone()); ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(), CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone); temp_zone);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &branch_condition_elimination); AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination); AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer); AddReducer(data, &graph_reducer, &common_reducer);
......
...@@ -230,16 +230,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) { ...@@ -230,16 +230,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
break; break;
case FILLER_TYPE: case FILLER_TYPE:
break; break;
case SMALL_ORDERED_HASH_SET_TYPE:
SmallOrderedHashSet::cast(*this).SmallOrderedHashSetVerify(isolate);
break;
case SMALL_ORDERED_HASH_MAP_TYPE:
SmallOrderedHashMap::cast(*this).SmallOrderedHashMapVerify(isolate);
break;
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
SmallOrderedNameDictionary::cast(*this).SmallOrderedNameDictionaryVerify(
isolate);
break;
case CODE_DATA_CONTAINER_TYPE: case CODE_DATA_CONTAINER_TYPE:
CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate); CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate);
break; break;
......
...@@ -244,9 +244,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT ...@@ -244,9 +244,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case THIN_ONE_BYTE_STRING_TYPE: case THIN_ONE_BYTE_STRING_TYPE:
case UNCACHED_EXTERNAL_STRING_TYPE: case UNCACHED_EXTERNAL_STRING_TYPE:
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE: case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
// TODO(all): Handle these types too. // TODO(all): Handle these types too.
os << "UNKNOWN TYPE " << map().instance_type(); os << "UNKNOWN TYPE " << map().instance_type();
UNREACHABLE(); UNREACHABLE();
...@@ -1339,6 +1336,22 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) { ...@@ -1339,6 +1336,22 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
} }
} }
// Minimal printers for the SmallOrderedHash* heap objects: they emit only
// the common heap-object header; field contents are not yet printed.
void SmallOrderedHashSet::SmallOrderedHashSetPrint(std::ostream& os) {
PrintHeader(os, "SmallOrderedHashSet");
// TODO(tebbi): Print all fields.
}
void SmallOrderedHashMap::SmallOrderedHashMapPrint(std::ostream& os) {
PrintHeader(os, "SmallOrderedHashMap");
// TODO(tebbi): Print all fields.
}
void SmallOrderedNameDictionary::SmallOrderedNameDictionaryPrint(
std::ostream& os) {
PrintHeader(os, "SmallOrderedNameDictionary");
// TODO(tebbi): Print all fields.
}
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "SharedFunctionInfo"); PrintHeader(os, "SharedFunctionInfo");
os << "\n - name: "; os << "\n - name: ";
......
...@@ -2,9 +2,112 @@ ...@@ -2,9 +2,112 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#include 'src/objects/ordered-hash-table.h'
// Using int as a dummy type-parameter to get access to these constants which
// don't actually depend on the derived class. This avoids accidentally
// depending on something from a concrete derived class.
const kSmallOrderedHashTableMaxCapacity: constexpr int31
generates 'SmallOrderedHashTable<int>::kMaxCapacity';
const kSmallOrderedHashTableNotFound: constexpr int31
generates 'SmallOrderedHashTable<int>::kNotFound';
const kSmallOrderedHashTableLoadFactor: constexpr int31
generates 'SmallOrderedHashTable<int>::kLoadFactor';
@noVerifier
@abstract @abstract
extern class SmallOrderedHashTable extends HeapObject extern class SmallOrderedHashTable extends HeapObject
generates 'TNode<HeapObject>'; generates 'TNode<HeapObject>' {
extern class SmallOrderedHashMap extends SmallOrderedHashTable; }
extern class SmallOrderedHashSet extends SmallOrderedHashTable;
extern class SmallOrderedNameDictionary extends SmallOrderedHashTable; extern macro SmallOrderedHashSetMapConstant(): Map;
const kSmallOrderedHashSetMap: Map = SmallOrderedHashSetMapConstant();
// Torque layout of SmallOrderedHashSet. Three variable-length arrays follow
// the fixed header; their lengths are expressions over number_of_buckets,
// so offsets beyond data_table are only known at runtime.
@noVerifier
extern class SmallOrderedHashSet extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
number_of_buckets: uint8;
// Constant-sized padding array aligning the header to a tagged-size
// boundary (3 + 5 = 8 bytes when tagged size is 8, else 3 + 1 = 4).
@if(TAGGED_SIZE_8_BYTES) padding[5]: uint8;
@ifnot(TAGGED_SIZE_8_BYTES) padding[1]: uint8;
// One slot per element of capacity (= buckets * load factor).
data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
JSAny|TheHole;
hash_table[number_of_buckets]: uint8;
chain_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
uint8;
}
// Allocates an empty SmallOrderedHashSet for `capacity` elements
// (capacity / kLoadFactor buckets). Hash and chain tables start out filled
// with the kNotFound sentinel, the data table with TheHole.
@export
macro AllocateSmallOrderedHashSet(capacity: intptr): SmallOrderedHashSet {
const hashTableSize = capacity / kSmallOrderedHashTableLoadFactor;
// NOTE(review): this bounds hashTableSize (the bucket count), not capacity
// itself — confirm that is the intended invariant.
assert(
0 <= hashTableSize && hashTableSize <= kSmallOrderedHashTableMaxCapacity);
return new SmallOrderedHashSet{
map: kSmallOrderedHashSetMap,
number_of_elements: 0,
number_of_deleted_elements: 0,
number_of_buckets: (Convert<uint8>(hashTableSize)),
padding: ...ConstantIterator<uint8>(0),
data_table: ...ConstantIterator(TheHole),
hash_table: ...ConstantIterator<uint8>(kSmallOrderedHashTableNotFound),
chain_table: ...ConstantIterator<uint8>(kSmallOrderedHashTableNotFound)
};
}
// One key/value pair in SmallOrderedHashMap's data table. Unused entries
// hold TheHole in both slots.
struct HashMapEntry {
key: JSAny|TheHole;
value: JSAny|TheHole;
}
// CSA accessor for the SmallOrderedHashMap map root constant.
extern macro SmallOrderedHashMapMapConstant(): Map;
const kSmallOrderedHashMapMap: Map = SmallOrderedHashMapMapConstant();
// Torque layout of SmallOrderedHashMap. Identical to SmallOrderedHashSet
// except that each data-table slot is a HashMapEntry (key + value) struct.
@noVerifier
extern class SmallOrderedHashMap extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
number_of_buckets: uint8;
// Constant-sized padding array aligning the header to a tagged-size
// boundary.
@if(TAGGED_SIZE_8_BYTES) padding[5]: uint8;
@ifnot(TAGGED_SIZE_8_BYTES) padding[1]: uint8;
data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
HashMapEntry;
hash_table[number_of_buckets]: uint8;
chain_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
uint8;
}
// Allocates an empty SmallOrderedHashMap for `capacity` elements
// (capacity / kLoadFactor buckets). Hash and chain tables start out filled
// with the kNotFound sentinel; every data-table entry is {TheHole, TheHole}.
@export
macro AllocateSmallOrderedHashMap(capacity: intptr): SmallOrderedHashMap {
const hashTableSize = capacity / kSmallOrderedHashTableLoadFactor;
// NOTE(review): this bounds hashTableSize (the bucket count), not capacity
// itself — confirm that is the intended invariant.
assert(
0 <= hashTableSize && hashTableSize <= kSmallOrderedHashTableMaxCapacity);
return new SmallOrderedHashMap{
map: kSmallOrderedHashMapMap,
number_of_elements: 0,
number_of_deleted_elements: 0,
number_of_buckets: (Convert<uint8>(hashTableSize)),
padding: ...ConstantIterator<uint8>(0),
data_table: ...ConstantIterator(HashMapEntry{key: TheHole, value: TheHole}),
hash_table: ...ConstantIterator<uint8>(kSmallOrderedHashTableNotFound),
chain_table: ...ConstantIterator<uint8>(kSmallOrderedHashTableNotFound)
};
}
// One entry in SmallOrderedNameDictionary's data table: key, value, and the
// property details Smi. Unused entries hold TheHole.
struct NameDictionaryEntry {
key: JSAny|TheHole;
value: JSAny|TheHole;
property_details: Smi|TheHole;
}
// Torque layout of SmallOrderedNameDictionary. Unlike the set/map variants,
// it carries an extra int32 hash field, and its padding is a single plain
// byte field (int32 + 3 uint8 + 1 = 8 bytes of header after the map).
@noVerifier
extern class SmallOrderedNameDictionary extends SmallOrderedHashTable {
hash: int32;
number_of_elements: uint8;
number_of_deleted_elements: uint8;
number_of_buckets: uint8;
padding: uint8;
data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
NameDictionaryEntry;
hash_table[number_of_buckets]: uint8;
// NOTE(review): sized [number_of_buckets], whereas the set/map variants use
// buckets * load factor for the chain table — confirm this asymmetry is
// intentional.
chain_table[number_of_buckets]: uint8;
}
...@@ -426,14 +426,14 @@ struct StringLiteralExpression : Expression { ...@@ -426,14 +426,14 @@ struct StringLiteralExpression : Expression {
struct NumberLiteralExpression : Expression { struct NumberLiteralExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(NumberLiteralExpression) DEFINE_AST_NODE_LEAF_BOILERPLATE(NumberLiteralExpression)
NumberLiteralExpression(SourcePosition pos, std::string name) NumberLiteralExpression(SourcePosition pos, double number)
: Expression(kKind, pos), number(std::move(name)) {} : Expression(kKind, pos), number(number) {}
void VisitAllSubExpressions(VisitCallback callback) override { void VisitAllSubExpressions(VisitCallback callback) override {
callback(this); callback(this);
} }
std::string number; double number;
}; };
struct ElementAccessExpression : LocationExpression { struct ElementAccessExpression : LocationExpression {
......
...@@ -118,7 +118,7 @@ void GenerateFieldAddressAccessor(const Field& field, ...@@ -118,7 +118,7 @@ void GenerateFieldAddressAccessor(const Field& field,
h_contents << " uintptr_t " << address_getter << "() const;\n"; h_contents << " uintptr_t " << address_getter << "() const;\n";
cc_contents << "\nuintptr_t Tq" << class_name << "::" << address_getter cc_contents << "\nuintptr_t Tq" << class_name << "::" << address_getter
<< "() const {\n"; << "() const {\n";
cc_contents << " return address_ - i::kHeapObjectTag + " << field.offset cc_contents << " return address_ - i::kHeapObjectTag + " << *field.offset
<< ";\n"; << ";\n";
cc_contents << "}\n"; cc_contents << "}\n";
} }
...@@ -262,7 +262,7 @@ void GenerateGetPropsChunkForField(const Field& field, ...@@ -262,7 +262,7 @@ void GenerateGetPropsChunkForField(const Field& field,
<< struct_field_type.GetOriginalType(kAsStoredInHeap) << struct_field_type.GetOriginalType(kAsStoredInHeap)
<< "\", \"" << "\", \""
<< struct_field_type.GetOriginalType(kUncompressed) << struct_field_type.GetOriginalType(kUncompressed)
<< "\", " << struct_field.offset << "));\n"; << "\", " << *struct_field.offset << "));\n";
} }
struct_field_list = "std::move(" + struct_field_list + ")"; struct_field_list = "std::move(" + struct_field_list + ")";
} }
...@@ -396,6 +396,10 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, ...@@ -396,6 +396,10 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
for (const Field& field : type.fields()) { for (const Field& field : type.fields()) {
if (field.name_and_type.type == TypeOracle::GetVoidType()) continue; if (field.name_and_type.type == TypeOracle::GetVoidType()) continue;
if (!field.offset.has_value()) {
// Fields with dynamic offset are currently unsupported.
continue;
}
GenerateFieldAddressAccessor(field, name, h_contents, cc_contents); GenerateFieldAddressAccessor(field, name, h_contents, cc_contents);
GenerateFieldValueAccessor(field, name, h_contents, cc_contents); GenerateFieldValueAccessor(field, name, h_contents, cc_contents);
base::Optional<NameAndType> array_length; base::Optional<NameAndType> array_length;
......
...@@ -207,10 +207,13 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction, ...@@ -207,10 +207,13 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ", ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ",
*original_type); *original_type);
} }
if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
if (return_type->GetGeneratedTNodeTypeName() != if (return_type->GetGeneratedTNodeTypeName() !=
original_type->GetGeneratedTNodeTypeName()) { original_type->GetGeneratedTNodeTypeName()) {
if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
out_ << "TORQUE_CAST"; out_ << "TORQUE_CAST";
} else {
out_ << "ca_.UncheckedCast<" << return_type->GetGeneratedTNodeTypeName()
<< ">";
} }
} }
} else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") { } else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
...@@ -675,26 +678,6 @@ void CSAGenerator::EmitInstruction(const UnsafeCastInstruction& instruction, ...@@ -675,26 +678,6 @@ void CSAGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
">(" + stack->Top() + ")"); ">(" + stack->Top() + ")");
} }
// Emits CSA code for a CreateFieldReferenceInstruction: pushes a fresh
// IntPtrT node holding the constant byte offset of `field_name` within
// `instruction.type`'s closest class supertype
// (e.g. `ca_.IntPtrConstant(Foo::kBarOffset)`).
void CSAGenerator::EmitInstruction(
const CreateFieldReferenceInstruction& instruction,
Stack<std::string>* stack) {
base::Optional<const ClassType*> class_type =
instruction.type->ClassSupertype();
if (!class_type.has_value()) {
ReportError("Cannot create field reference of type ", instruction.type,
" which does not inherit from a class type");
}
const Field& field = class_type.value()->LookupField(instruction.field_name);
std::string offset_name = FreshNodeName();
stack->Push(offset_name);
// The emitted constant names the generated kFooOffset member of the class.
out_ << " TNode<IntPtrT> " << offset_name << " = ca_.IntPtrConstant(";
out_ << field.aggregate->name() << "::k"
<< CamelifyString(field.name_and_type.name) << "Offset";
// USE() suppresses unused-variable warnings when the offset is dead code.
out_ << ");\n"
<< " USE(" << stack->Top() << ");\n";
}
void CSAGenerator::EmitInstruction(const LoadReferenceInstruction& instruction, void CSAGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
Stack<std::string>* stack) { Stack<std::string>* stack) {
std::string result_name = FreshNodeName(); std::string result_name = FreshNodeName();
...@@ -716,7 +699,9 @@ void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction, ...@@ -716,7 +699,9 @@ void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
std::string offset = stack->Pop(); std::string offset = stack->Pop();
std::string object = stack->Pop(); std::string object = stack->Pop();
out_ << " CodeStubAssembler(state_).StoreReference(CodeStubAssembler::" out_ << " CodeStubAssembler(state_).StoreReference<"
<< instruction.type->GetGeneratedTNodeTypeName()
<< ">(CodeStubAssembler::"
"Reference{" "Reference{"
<< object << ", " << offset << "}, " << value << ");\n"; << object << ", " << offset << "}, " << value << ");\n";
} }
......
...@@ -99,12 +99,13 @@ class TargetArchitecture : public ContextualClass<TargetArchitecture> { ...@@ -99,12 +99,13 @@ class TargetArchitecture : public ContextualClass<TargetArchitecture> {
public: public:
explicit TargetArchitecture(bool force_32bit); explicit TargetArchitecture(bool force_32bit);
static int TaggedSize() { return Get().tagged_size_; } static size_t TaggedSize() { return Get().tagged_size_; }
static int RawPtrSize() { return Get().raw_ptr_size_; } static size_t RawPtrSize() { return Get().raw_ptr_size_; }
static size_t MaxHeapAlignment() { return TaggedSize(); }
private: private:
const int tagged_size_; const size_t tagged_size_;
const int raw_ptr_size_; const size_t raw_ptr_size_;
}; };
} // namespace torque } // namespace torque
......
This diff is collapsed.
...@@ -24,6 +24,7 @@ namespace torque { ...@@ -24,6 +24,7 @@ namespace torque {
template <typename T> template <typename T>
class Binding; class Binding;
struct LocalValue; struct LocalValue;
class ImplementationVisitor;
// LocationReference is the representation of an l-value, so a value that might // LocationReference is the representation of an l-value, so a value that might
// allow for assignment. For uniformity, this class can also represent // allow for assignment. For uniformity, this class can also represent
...@@ -201,7 +202,12 @@ class LocationReference { ...@@ -201,7 +202,12 @@ class LocationReference {
struct InitializerResults { struct InitializerResults {
std::vector<Identifier*> names; std::vector<Identifier*> names;
std::map<std::string, VisitResult> field_value_map; std::map<std::string, VisitResult> field_value_map;
};
struct LayoutForInitialization {
std::map<std::string, VisitResult> array_lengths; std::map<std::string, VisitResult> array_lengths;
std::map<std::string, VisitResult> offsets;
VisitResult size;
}; };
template <class T> template <class T>
...@@ -402,8 +408,11 @@ class ImplementationVisitor { ...@@ -402,8 +408,11 @@ class ImplementationVisitor {
const ClassType* class_type, const ClassType* class_type,
const std::vector<NameAndExpression>& expressions); const std::vector<NameAndExpression>& expressions);
LocationReference GenerateFieldReference(VisitResult object, LocationReference GenerateFieldReference(VisitResult object,
const NameAndType& field, const Field& field,
const ClassType* class_type); const ClassType* class_type);
LocationReference GenerateFieldReference(
VisitResult object, const Field& field,
const LayoutForInitialization& layout);
VisitResult GenerateArrayLength( VisitResult GenerateArrayLength(
Expression* array_length, Namespace* nspace, Expression* array_length, Namespace* nspace,
const std::map<std::string, LocationReference>& bindings); const std::map<std::string, LocationReference>& bindings);
...@@ -411,15 +420,13 @@ class ImplementationVisitor { ...@@ -411,15 +420,13 @@ class ImplementationVisitor {
VisitResult GenerateArrayLength(const ClassType* class_type, VisitResult GenerateArrayLength(const ClassType* class_type,
const InitializerResults& initializer_results, const InitializerResults& initializer_results,
const Field& field); const Field& field);
VisitResult GenerateObjectSize(const ClassType* class_type, LayoutForInitialization GenerateLayoutForInitialization(
const ClassType* class_type,
const InitializerResults& initializer_results); const InitializerResults& initializer_results);
void InitializeFieldFromSpread(VisitResult object, const Field& field,
const InitializerResults& initializer_results,
const ClassType* class_type);
void InitializeClass(const ClassType* class_type, VisitResult allocate_result, void InitializeClass(const ClassType* class_type, VisitResult allocate_result,
const InitializerResults& initializer_results); const InitializerResults& initializer_results,
const LayoutForInitialization& layout);
VisitResult Visit(StructExpression* decl); VisitResult Visit(StructExpression* decl);
...@@ -427,6 +434,9 @@ class ImplementationVisitor { ...@@ -427,6 +434,9 @@ class ImplementationVisitor {
LocationReference GetLocationReference(IdentifierExpression* expr); LocationReference GetLocationReference(IdentifierExpression* expr);
LocationReference GetLocationReference(DereferenceExpression* expr); LocationReference GetLocationReference(DereferenceExpression* expr);
LocationReference GetLocationReference(FieldAccessExpression* expr); LocationReference GetLocationReference(FieldAccessExpression* expr);
LocationReference GenerateFieldAccess(
LocationReference reference, const std::string& fieldname,
base::Optional<SourcePosition> pos = {});
LocationReference GetLocationReference(ElementAccessExpression* expr); LocationReference GetLocationReference(ElementAccessExpression* expr);
VisitResult GenerateFetchFromLocation(const LocationReference& reference); VisitResult GenerateFetchFromLocation(const LocationReference& reference);
......
...@@ -290,14 +290,6 @@ void UnsafeCastInstruction::TypeInstruction(Stack<const Type*>* stack, ...@@ -290,14 +290,6 @@ void UnsafeCastInstruction::TypeInstruction(Stack<const Type*>* stack,
stack->Poke(stack->AboveTop() - 1, destination_type); stack->Poke(stack->AboveTop() - 1, destination_type);
} }
// Type-checks the instruction: the object on top of the stack must be a
// subtype of the class whose field is referenced, then the computed field
// offset (an intptr) is pushed.
void CreateFieldReferenceInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
// Objects still under construction are exempt from the subtype check.
if (stack->Top() != TypeOracle::GetUninitializedHeapObjectType()) {
ExpectSubtype(stack->Top(), type);
}
stack->Push(TypeOracle::GetIntPtrType());
}
void LoadReferenceInstruction::TypeInstruction(Stack<const Type*>* stack, void LoadReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const { ControlFlowGraph* cfg) const {
ExpectType(TypeOracle::GetIntPtrType(), stack->Pop()); ExpectType(TypeOracle::GetIntPtrType(), stack->Pop());
......
...@@ -30,7 +30,6 @@ class RuntimeFunction; ...@@ -30,7 +30,6 @@ class RuntimeFunction;
V(DeleteRangeInstruction) \ V(DeleteRangeInstruction) \
V(PushUninitializedInstruction) \ V(PushUninitializedInstruction) \
V(PushBuiltinPointerInstruction) \ V(PushBuiltinPointerInstruction) \
V(CreateFieldReferenceInstruction) \
V(LoadReferenceInstruction) \ V(LoadReferenceInstruction) \
V(StoreReferenceInstruction) \ V(StoreReferenceInstruction) \
V(LoadBitFieldInstruction) \ V(LoadBitFieldInstruction) \
...@@ -206,17 +205,6 @@ struct NamespaceConstantInstruction : InstructionBase { ...@@ -206,17 +205,6 @@ struct NamespaceConstantInstruction : InstructionBase {
NamespaceConstant* constant; NamespaceConstant* constant;
}; };
// Instruction that produces a reference (object + constant offset) to a
// named field of a class-typed object on the stack.
struct CreateFieldReferenceInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
CreateFieldReferenceInstruction(const ClassType* type, std::string field_name)
: type(type), field_name(std::move(field_name)) {
// Trigger errors early.
this->type->LookupField(this->field_name);
}
// Class declaring the field.
const ClassType* type;
// Name of the field within `type`.
std::string field_name;
};
struct LoadReferenceInstruction : InstructionBase { struct LoadReferenceInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE() TORQUE_INSTRUCTION_BOILERPLATE()
explicit LoadReferenceInstruction(const Type* type) : type(type) {} explicit LoadReferenceInstruction(const Type* type) : type(type) {}
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <algorithm> #include <algorithm>
#include <cctype> #include <cctype>
#include <set> #include <set>
#include <stdexcept>
#include <unordered_map> #include <unordered_map>
#include "src/common/globals.h" #include "src/common/globals.h"
...@@ -1618,7 +1619,15 @@ base::Optional<ParseResult> MakeAssignmentExpression( ...@@ -1618,7 +1619,15 @@ base::Optional<ParseResult> MakeAssignmentExpression(
base::Optional<ParseResult> MakeNumberLiteralExpression( base::Optional<ParseResult> MakeNumberLiteralExpression(
ParseResultIterator* child_results) { ParseResultIterator* child_results) {
auto number = child_results->NextAs<std::string>(); auto number = child_results->NextAs<std::string>();
Expression* result = MakeNode<NumberLiteralExpression>(std::move(number)); // TODO(tebbi): Support 64bit literals.
// Meanwhile, we type it as constexpr float64 when out of int32 range.
double value = 0;
try {
value = std::stod(number);
} catch (const std::out_of_range&) {
Error("double literal out-of-range").Throw();
}
Expression* result = MakeNode<NumberLiteralExpression>(value);
return ParseResult{result}; return ParseResult{result};
} }
......
...@@ -204,8 +204,7 @@ const StructType* TypeVisitor::ComputeType( ...@@ -204,8 +204,7 @@ const StructType* TypeVisitor::ComputeType(
CurrentScope::Scope struct_namespace_scope(struct_type->nspace()); CurrentScope::Scope struct_namespace_scope(struct_type->nspace());
CurrentSourcePosition::Scope position_activator(decl->pos); CurrentSourcePosition::Scope position_activator(decl->pos);
size_t offset = 0; ResidueClass offset = 0;
bool packable = true;
for (auto& field : decl->fields) { for (auto& field : decl->fields) {
CurrentSourcePosition::Scope position_activator( CurrentSourcePosition::Scope position_activator(
field.name_and_type.type->pos); field.name_and_type.type->pos);
...@@ -218,20 +217,11 @@ const StructType* TypeVisitor::ComputeType( ...@@ -218,20 +217,11 @@ const StructType* TypeVisitor::ComputeType(
struct_type, struct_type,
base::nullopt, base::nullopt,
{field.name_and_type.name->value, field_type}, {field.name_and_type.name->value, field_type},
offset, offset.SingleValue(),
false, false,
field.const_qualified, field.const_qualified,
false}; false};
auto optional_size = SizeOf(f.name_and_type.type); auto optional_size = SizeOf(f.name_and_type.type);
// Structs may contain fields that aren't representable in packed form. If
// so, then this field and any subsequent fields should have their offsets
// marked as invalid.
if (!optional_size.has_value()) {
packable = false;
}
if (!packable) {
f.offset = Field::kInvalidOffset;
}
struct_type->RegisterField(f); struct_type->RegisterField(f);
// Offsets are assigned based on an assumption of no space between members. // Offsets are assigned based on an assumption of no space between members.
// This might lead to invalid alignment in some cases, but most structs are // This might lead to invalid alignment in some cases, but most structs are
...@@ -243,6 +233,10 @@ const StructType* TypeVisitor::ComputeType( ...@@ -243,6 +233,10 @@ const StructType* TypeVisitor::ComputeType(
size_t field_size = 0; size_t field_size = 0;
std::tie(field_size, std::ignore) = *optional_size; std::tie(field_size, std::ignore) = *optional_size;
offset += field_size; offset += field_size;
} else {
// Structs may contain fields that aren't representable in packed form. If
// so, the offset of subsequent fields are marked as invalid.
offset = ResidueClass::Unknown();
} }
} }
return struct_type; return struct_type;
...@@ -389,11 +383,13 @@ Signature TypeVisitor::MakeSignature(const CallableDeclaration* declaration) { ...@@ -389,11 +383,13 @@ Signature TypeVisitor::MakeSignature(const CallableDeclaration* declaration) {
void TypeVisitor::VisitClassFieldsAndMethods( void TypeVisitor::VisitClassFieldsAndMethods(
ClassType* class_type, const ClassDeclaration* class_declaration) { ClassType* class_type, const ClassDeclaration* class_declaration) {
const ClassType* super_class = class_type->GetSuperClass(); const ClassType* super_class = class_type->GetSuperClass();
size_t class_offset = super_class ? super_class->header_size() : 0; ResidueClass class_offset = 0;
size_t header_size = class_offset; size_t header_size = 0;
DCHECK_IMPLIES(super_class && !super_class->size(), if (super_class) {
class_declaration->fields.empty()); class_offset = super_class->size();
bool seen_indexed_field = false; header_size = super_class->header_size();
}
for (const ClassFieldExpression& field_expression : for (const ClassFieldExpression& field_expression :
class_declaration->fields) { class_declaration->fields) {
CurrentSourcePosition::Scope position_activator( CurrentSourcePosition::Scope position_activator(
...@@ -436,55 +432,42 @@ void TypeVisitor::VisitClassFieldsAndMethods( ...@@ -436,55 +432,42 @@ void TypeVisitor::VisitClassFieldsAndMethods(
} }
} }
} }
base::Optional<Expression*> array_length; base::Optional<Expression*> array_length = field_expression.index;
if (field_expression.index) {
if (seen_indexed_field ||
(super_class && super_class->HasIndexedField())) {
ReportError(
"only one indexable field is currently supported per class");
}
seen_indexed_field = true;
array_length = *field_expression.index;
} else {
if (seen_indexed_field) {
ReportError("cannot declare non-indexable field \"",
field_expression.name_and_type.name,
"\" after an indexable field "
"declaration");
}
}
const Field& field = class_type->RegisterField( const Field& field = class_type->RegisterField(
{field_expression.name_and_type.name->pos, {field_expression.name_and_type.name->pos,
class_type, class_type,
array_length, array_length,
{field_expression.name_and_type.name->value, field_type}, {field_expression.name_and_type.name->value, field_type},
class_offset, class_offset.SingleValue(),
field_expression.weak, field_expression.weak,
field_expression.const_qualified, field_expression.const_qualified,
field_expression.generate_verify}); field_expression.generate_verify});
size_t field_size; ResidueClass field_size = std::get<0>(field.GetFieldSizeInformation());
std::tie(field_size, std::ignore) = field.GetFieldSizeInformation(); if (field.index) {
// Our allocations don't support alignments beyond kTaggedSize. if (auto literal = NumberLiteralExpression::DynamicCast(*field.index)) {
size_t alignment = std::min( size_t value = static_cast<size_t>(literal->number);
static_cast<size_t>(TargetArchitecture::TaggedSize()), field_size); if (value != literal->number) {
if (alignment > 0 && class_offset % alignment != 0) { Error("non-integral array length").Position(field.pos);
ReportError("field ", field_expression.name_and_type.name, " at offset ", }
class_offset, " is not ", alignment, "-byte aligned."); field_size *= value;
} } else {
if (!field_expression.index) { field_size *= ResidueClass::Unknown();
}
}
field.ValidateAlignment(class_offset);
class_offset += field_size; class_offset += field_size;
// In-object properties are not considered part of the header. // In-object properties are not considered part of the header.
if (!class_type->IsShape()) { if (class_offset.SingleValue() && !class_type->IsShape()) {
header_size = class_offset; header_size = *class_offset.SingleValue();
} }
if (!field.index && !class_offset.SingleValue()) {
Error("Indexed fields have to be at the end of the object")
.Position(field.pos);
} }
} }
DCHECK_GT(header_size, 0); DCHECK_GT(header_size, 0);
class_type->header_size_ = header_size; class_type->header_size_ = header_size;
if ((!super_class || super_class->size()) && !seen_indexed_field) {
DCHECK_GE(class_offset, header_size);
class_type->size_ = class_offset; class_type->size_ = class_offset;
}
class_type->GenerateAccessors(); class_type->GenerateAccessors();
DeclareMethods(class_type, class_declaration->methods); DeclareMethods(class_type, class_declaration->methods);
} }
......
...@@ -4,12 +4,14 @@ ...@@ -4,12 +4,14 @@
#include <iostream> #include <iostream>
#include "src/torque/types.h"
#include "src/base/bits.h"
#include "src/torque/ast.h" #include "src/torque/ast.h"
#include "src/torque/declarable.h" #include "src/torque/declarable.h"
#include "src/torque/global-context.h" #include "src/torque/global-context.h"
#include "src/torque/type-oracle.h" #include "src/torque/type-oracle.h"
#include "src/torque/type-visitor.h" #include "src/torque/type-visitor.h"
#include "src/torque/types.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
...@@ -363,23 +365,8 @@ std::string StructType::GetGeneratedTypeNameImpl() const { ...@@ -363,23 +365,8 @@ std::string StructType::GetGeneratedTypeNameImpl() const {
size_t StructType::PackedSize() const { size_t StructType::PackedSize() const {
size_t result = 0; size_t result = 0;
if (!fields_.empty()) { for (const Field& field : fields()) {
const Field& last = fields_.back(); result += std::get<0>(field.GetFieldSizeInformation());
if (last.offset == Field::kInvalidOffset) {
// This struct can't be packed. Find the first invalid field and use its
// name and position for the error.
for (const Field& field : fields_) {
if (field.offset == Field::kInvalidOffset) {
Error("Cannot compute packed size of ", ToString(), " due to field ",
field.name_and_type.name, " of unknown size")
.Position(field.pos);
return 0;
}
}
}
size_t field_size = 0;
std::tie(field_size, std::ignore) = last.GetFieldSizeInformation();
result = last.offset + field_size;
} }
return result; return result;
} }
...@@ -446,6 +433,7 @@ ClassType::ClassType(const Type* parent, Namespace* nspace, ...@@ -446,6 +433,7 @@ ClassType::ClassType(const Type* parent, Namespace* nspace,
const std::string& generates, const ClassDeclaration* decl, const std::string& generates, const ClassDeclaration* decl,
const TypeAlias* alias) const TypeAlias* alias)
: AggregateType(Kind::kClassType, parent, nspace, name), : AggregateType(Kind::kClassType, parent, nspace, name),
size_(ResidueClass::Unknown()),
flags_(flags & ~(kInternalFlags)), flags_(flags & ~(kInternalFlags)),
generates_(generates), generates_(generates),
decl_(decl), decl_(decl),
...@@ -741,6 +729,71 @@ std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const { ...@@ -741,6 +729,71 @@ std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
return std::make_tuple(0, "#no size"); return std::make_tuple(0, "#no size");
} }
size_t Type::AlignmentLog2() const {
if (parent()) return parent()->AlignmentLog2();
return TargetArchitecture::TaggedSize();
}
size_t AbstractType::AlignmentLog2() const {
size_t alignment;
if (this == TypeOracle::GetTaggedType()) {
alignment = TargetArchitecture::TaggedSize();
} else if (this == TypeOracle::GetRawPtrType()) {
alignment = TargetArchitecture::RawPtrSize();
} else if (this == TypeOracle::GetVoidType()) {
alignment = 1;
} else if (this == TypeOracle::GetInt8Type()) {
alignment = kUInt8Size;
} else if (this == TypeOracle::GetUint8Type()) {
alignment = kUInt8Size;
} else if (this == TypeOracle::GetInt16Type()) {
alignment = kUInt16Size;
} else if (this == TypeOracle::GetUint16Type()) {
alignment = kUInt16Size;
} else if (this == TypeOracle::GetInt32Type()) {
alignment = kInt32Size;
} else if (this == TypeOracle::GetUint32Type()) {
alignment = kInt32Size;
} else if (this == TypeOracle::GetFloat64Type()) {
alignment = kDoubleSize;
} else if (this == TypeOracle::GetIntPtrType()) {
alignment = TargetArchitecture::RawPtrSize();
} else if (this == TypeOracle::GetUIntPtrType()) {
alignment = TargetArchitecture::RawPtrSize();
} else {
return Type::AlignmentLog2();
}
alignment = std::min(alignment, TargetArchitecture::TaggedSize());
return base::bits::WhichPowerOfTwo(alignment);
}
size_t StructType::AlignmentLog2() const {
size_t alignment_log_2 = 0;
for (const Field& field : fields()) {
alignment_log_2 =
std::max(alignment_log_2, field.name_and_type.type->AlignmentLog2());
}
return alignment_log_2;
}
void Field::ValidateAlignment(ResidueClass at_offset) const {
const Type* type = name_and_type.type;
if (const StructType* struct_type = StructType::DynamicCast(type)) {
for (const Field& field : struct_type->fields()) {
field.ValidateAlignment(at_offset);
size_t field_size = std::get<0>(field.GetFieldSizeInformation());
at_offset += field_size;
}
} else {
size_t alignment_log_2 = name_and_type.type->AlignmentLog2();
if (at_offset.AlignmentLog2() < alignment_log_2) {
Error("field ", name_and_type.name, " at offset ", at_offset, " is not ",
size_t{1} << alignment_log_2, "-byte aligned.")
.Position(pos);
}
}
}
base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) { base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) {
std::string size_string; std::string size_string;
size_t size; size_t size;
......
...@@ -137,6 +137,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase { ...@@ -137,6 +137,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
const MaybeSpecializationKey& GetSpecializedFrom() const { const MaybeSpecializationKey& GetSpecializedFrom() const {
return specialized_from_; return specialized_from_;
} }
static base::Optional<const Type*> MatchUnaryGeneric(const Type* type, static base::Optional<const Type*> MatchUnaryGeneric(const Type* type,
GenericType* generic); GenericType* generic);
...@@ -152,6 +153,8 @@ class V8_EXPORT_PRIVATE Type : public TypeBase { ...@@ -152,6 +153,8 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
return nullptr; return nullptr;
} }
virtual size_t AlignmentLog2() const;
protected: protected:
Type(TypeBase::Kind kind, const Type* parent, Type(TypeBase::Kind kind, const Type* parent,
MaybeSpecializationKey specialized_from = base::nullopt); MaybeSpecializationKey specialized_from = base::nullopt);
...@@ -197,6 +200,8 @@ struct Field { ...@@ -197,6 +200,8 @@ struct Field {
// reliance of string types is quite clunky. // reliance of string types is quite clunky.
std::tuple<size_t, std::string> GetFieldSizeInformation() const; std::tuple<size_t, std::string> GetFieldSizeInformation() const;
void ValidateAlignment(ResidueClass at_offset) const;
SourcePosition pos; SourcePosition pos;
const AggregateType* aggregate; const AggregateType* aggregate;
base::Optional<Expression*> index; base::Optional<Expression*> index;
...@@ -205,15 +210,14 @@ struct Field { ...@@ -205,15 +210,14 @@ struct Field {
// The byte offset of this field from the beginning of the containing class or // The byte offset of this field from the beginning of the containing class or
// struct. Most structs are never packed together in memory, and are only used // struct. Most structs are never packed together in memory, and are only used
// to hold a batch of related CSA TNode values, in which case |offset| is // to hold a batch of related CSA TNode values, in which case |offset| is
// irrelevant. In structs, this value can be set to kInvalidOffset to indicate // irrelevant.
// that the struct should never be used in packed form. // The offset may be unknown because the field is after an indexed field or
size_t offset; // because we don't support the struct field for on-heap layouts.
base::Optional<size_t> offset;
bool is_weak; bool is_weak;
bool const_qualified; bool const_qualified;
bool generate_verify; bool generate_verify;
static constexpr size_t kInvalidOffset = SIZE_MAX;
}; };
std::ostream& operator<<(std::ostream& os, const Field& name_and_type); std::ostream& operator<<(std::ostream& os, const Field& name_and_type);
...@@ -269,6 +273,8 @@ class AbstractType final : public Type { ...@@ -269,6 +273,8 @@ class AbstractType final : public Type {
std::vector<RuntimeType> GetRuntimeTypes() const override; std::vector<RuntimeType> GetRuntimeTypes() const override;
size_t AlignmentLog2() const override;
private: private:
friend class TypeOracle; friend class TypeOracle;
AbstractType(const Type* parent, AbstractTypeFlags flags, AbstractType(const Type* parent, AbstractTypeFlags flags,
...@@ -564,9 +570,11 @@ class StructType final : public AggregateType { ...@@ -564,9 +570,11 @@ class StructType final : public AggregateType {
std::string GetGeneratedTypeNameImpl() const override; std::string GetGeneratedTypeNameImpl() const override;
// Returns the sum of the size of all members. Does not validate alignment. // Returns the sum of the size of all members.
size_t PackedSize() const; size_t PackedSize() const;
size_t AlignmentLog2() const override;
private: private:
friend class TypeOracle; friend class TypeOracle;
StructType(Namespace* nspace, const StructDeclaration* decl, StructType(Namespace* nspace, const StructDeclaration* decl,
...@@ -614,7 +622,7 @@ class ClassType final : public AggregateType { ...@@ -614,7 +622,7 @@ class ClassType final : public AggregateType {
if (!is_finalized_) Finalize(); if (!is_finalized_) Finalize();
return header_size_; return header_size_;
} }
base::Optional<size_t> size() const { ResidueClass size() const {
if (!is_finalized_) Finalize(); if (!is_finalized_) Finalize();
return size_; return size_;
} }
...@@ -656,7 +664,7 @@ class ClassType final : public AggregateType { ...@@ -656,7 +664,7 @@ class ClassType final : public AggregateType {
const ClassDeclaration* decl, const TypeAlias* alias); const ClassDeclaration* decl, const TypeAlias* alias);
size_t header_size_; size_t header_size_;
base::Optional<size_t> size_; ResidueClass size_;
mutable ClassFlags flags_; mutable ClassFlags flags_;
const std::string generates_; const std::string generates_;
const ClassDeclaration* decl_; const ClassDeclaration* decl_;
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include <iostream> #include <iostream>
#include <string> #include <string>
#include "src/base/bits.h"
#include "src/base/logging.h" #include "src/base/logging.h"
#include "src/torque/ast.h" #include "src/torque/ast.h"
#include "src/torque/declarable.h" #include "src/torque/declarable.h"
...@@ -357,6 +358,18 @@ IncludeObjectMacrosScope::~IncludeObjectMacrosScope() { ...@@ -357,6 +358,18 @@ IncludeObjectMacrosScope::~IncludeObjectMacrosScope() {
os_ << "\n#include \"src/objects/object-macros-undef.h\"\n"; os_ << "\n#include \"src/objects/object-macros-undef.h\"\n";
} }
size_t ResidueClass::AlignmentLog2() const {
if (value_ == 0) return modulus_log_2_;
return base::bits::CountTrailingZeros(value_);
}
const size_t ResidueClass::kMaxModulusLog2;
std::ostream& operator<<(std::ostream& os, const ResidueClass& a) {
if (a.SingleValue().has_value()) return os << *a.SingleValue();
return os << "[" << a.value_ << " mod 2^" << a.modulus_log_2_ << "]";
}
} // namespace torque } // namespace torque
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -407,6 +407,87 @@ class IncludeObjectMacrosScope { ...@@ -407,6 +407,87 @@ class IncludeObjectMacrosScope {
std::ostream& os_; std::ostream& os_;
}; };
// A value of ResidueClass is a congruence class of integers modulo a power
// of 2.
// In contrast to common modulo arithmetic, we also allow addition and
// multiplication of congruence classes with different modulus. In this case, we
// do an abstract-interpretation style approximation to produce an as small as
// possible congruence class. ResidueClass is used to represent partial
// knowledge about offsets and sizes to validate alignment constraints.
// ResidueClass(x,m) = {y \in Z | x == y mod 2^m} = {x+k2^m | k \in Z} where Z
// is the set of all integers.
// Notation: 2^x is 2 to the power of x.
class ResidueClass {
public:
ResidueClass(size_t value, size_t modulus_log_2 =
kMaxModulusLog2) // NOLINT(runtime/explicit)
: value_(value),
modulus_log_2_(std::min(modulus_log_2, kMaxModulusLog2)) {
if (modulus_log_2_ < kMaxModulusLog2) {
value_ %= size_t{1} << modulus_log_2_;
}
}
// 0 modulo 1, in other words, the class of all integers.
static ResidueClass Unknown() { return ResidueClass{0, 0}; }
// If the modulus corresponds to the size of size_t, it represents a concrete
// value.
base::Optional<size_t> SingleValue() const {
if (modulus_log_2_ == kMaxModulusLog2) return value_;
return base::nullopt;
}
friend ResidueClass operator+(const ResidueClass& a, const ResidueClass& b) {
return ResidueClass{a.value_ + b.value_,
std::min(a.modulus_log_2_, b.modulus_log_2_)};
}
// Reasoning for the choice of the new modulus:
// {x+k2^a | k \in Z} * {y+l2^b | l \in Z}
// = {xy + xl2^b + yk2^a + kl2^(a+b)| k,l \in Z},
// which is a subset of {xy + k2^c | k \in Z}
// if 2^c is a common divisor of x2^b, y2^a and hence also of 2^(a+b) since
// x<2^a and y<2^b.
// So we use the gcd of x2^b and y2^a as the new modulus.
friend ResidueClass operator*(const ResidueClass& a, const ResidueClass& b) {
return ResidueClass{a.value_ * b.value_,
std::min(a.modulus_log_2_ + b.AlignmentLog2(),
b.modulus_log_2_ + a.AlignmentLog2())};
}
friend std::ostream& operator<<(std::ostream& os, const ResidueClass& a);
ResidueClass& operator+=(const ResidueClass& other) {
*this = *this + other;
return *this;
}
ResidueClass& operator*=(const ResidueClass& other) {
*this = *this * other;
return *this;
}
// 2^AlignmentLog2() is the larget power of 2 that divides all elements of the
// congruence class.
size_t AlignmentLog2() const;
size_t Alignment() const {
DCHECK_LT(AlignmentLog2(), kMaxModulusLog2);
return size_t{1} << AlignmentLog2();
}
private:
// The value is the representative of the congruence class. It's always
// smaller than 2^modulus_log_2_.
size_t value_;
// Base 2 logarithm of the modulus.
size_t modulus_log_2_;
// size_t values are modulo 2^kMaxModulusLog2, so we don't consider larger
// modulus.
static const size_t kMaxModulusLog2 = 8 * sizeof(size_t);
};
} // namespace torque } // namespace torque
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -3507,8 +3507,7 @@ TEST(SmallOrderedHashMapAllocate) { ...@@ -3507,8 +3507,7 @@ TEST(SmallOrderedHashMapAllocate) {
{ {
CodeStubAssembler m(asm_tester.state()); CodeStubAssembler m(asm_tester.state());
TNode<Smi> capacity = m.CAST(m.Parameter(0)); TNode<Smi> capacity = m.CAST(m.Parameter(0));
m.Return(m.AllocateSmallOrderedHashTable<SmallOrderedHashMap>( m.Return(m.AllocateSmallOrderedHashMap(m.SmiToIntPtr(capacity)));
m.SmiToIntPtr(capacity)));
} }
FunctionTester ft(asm_tester.GenerateCode(), kNumParams); FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
...@@ -3546,8 +3545,7 @@ TEST(SmallOrderedHashSetAllocate) { ...@@ -3546,8 +3545,7 @@ TEST(SmallOrderedHashSetAllocate) {
{ {
CodeStubAssembler m(asm_tester.state()); CodeStubAssembler m(asm_tester.state());
TNode<Smi> capacity = m.CAST(m.Parameter(0)); TNode<Smi> capacity = m.CAST(m.Parameter(0));
m.Return(m.AllocateSmallOrderedHashTable<SmallOrderedHashSet>( m.Return(m.AllocateSmallOrderedHashSet(m.SmiToIntPtr(capacity)));
m.SmiToIntPtr(capacity)));
} }
FunctionTester ft(asm_tester.GenerateCode(), kNumParams); FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
......
...@@ -80,6 +80,8 @@ extern macro TaggedToSmi(Object): Smi ...@@ -80,6 +80,8 @@ extern macro TaggedToSmi(Object): Smi
extern macro TaggedToHeapObject(Object): HeapObject extern macro TaggedToHeapObject(Object): HeapObject
labels CastError; labels CastError;
extern macro IntPtrConstant(constexpr int31): intptr;
macro FromConstexpr<To: type, From: type>(o: From): To; macro FromConstexpr<To: type, From: type>(o: From): To;
FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi { FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
return SmiConstant(s); return SmiConstant(s);
...@@ -87,6 +89,9 @@ FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi { ...@@ -87,6 +89,9 @@ FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
FromConstexpr<Smi, constexpr int31>(s: constexpr int31): Smi { FromConstexpr<Smi, constexpr int31>(s: constexpr int31): Smi {
return %FromConstexpr<Smi>(s); return %FromConstexpr<Smi>(s);
} }
FromConstexpr<intptr, constexpr int31>(i: constexpr int31): intptr {
return IntPtrConstant(i);
}
macro Cast<A : type extends Object>(implicit context: Context)(o: Object): A macro Cast<A : type extends Object>(implicit context: Context)(o: Object): A
labels CastError { labels CastError {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment