Commit 76e722c1 authored by Daniel Clifford, committed by Commit Bot

[torque] Implement simple automatic index operators

In the process, cleanup the StoreFixedArray* operators
and change most FixedArray element accesses so that
they explicitly use the '.objects' and '.floats'
fields.

Bug: v8:7793
Change-Id: I3e45a9b7536ec76e1413b7e508d79a56b37604ff
Reviewed-on: https://chromium-review.googlesource.com/c/1460948
Commit-Queue: Daniel Clifford <danno@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59649}
parent 6188533d
......@@ -36,7 +36,7 @@ namespace array_join {
context: Context, receiver: JSReceiver, k: Number): Object {
const array: JSArray = UnsafeCast<JSArray>(receiver);
const fixedArray: FixedArray = UnsafeCast<FixedArray>(array.elements);
const element: Object = fixedArray[UnsafeCast<Smi>(k)];
const element: Object = fixedArray.objects[UnsafeCast<Smi>(k)];
return element == Hole ? kEmptyString : element;
}
......@@ -134,7 +134,7 @@ namespace array_join {
const length: intptr = fixedArray.length_intptr;
assert(index <= length);
if (index < length) {
fixedArray[index] = element;
fixedArray.objects[index] = element;
return fixedArray;
} else
deferred {
......@@ -142,7 +142,7 @@ namespace array_join {
assert(index < newLength);
const newfixedArray: FixedArray =
ExtractFixedArray(fixedArray, 0, length, newLength, kFixedArrays);
newfixedArray[index] = element;
newfixedArray.objects[index] = element;
return newfixedArray;
}
}
......@@ -231,7 +231,7 @@ namespace array_join {
// Fast path when there's only one buffer element.
if (buffer.index == 1) {
const fixedArray: FixedArray = buffer.fixedArray;
typeswitch (fixedArray[0]) {
typeswitch (fixedArray.objects[0]) {
// When the element is a string, just return it and completely avoid
// allocating another string.
case (str: String): {
......@@ -442,11 +442,11 @@ namespace array_join {
stack: FixedArray, receiver: JSReceiver): Boolean {
const capacity: intptr = stack.length_intptr;
for (let i: intptr = 0; i < capacity; i++) {
const previouslyVisited: Object = stack[i];
const previouslyVisited: Object = stack.objects[i];
// Add `receiver` to the first open slot
if (previouslyVisited == Hole) {
stack[i] = receiver;
stack.objects[i] = receiver;
return True;
}
......@@ -470,8 +470,8 @@ namespace array_join {
try {
const stack: FixedArray = LoadJoinStack()
otherwise IfUninitialized;
if (stack[0] == Hole) {
stack[0] = receiver;
if (stack.objects[0] == Hole) {
stack.objects[0] = receiver;
} else if (JoinStackPush(stack, receiver) == False)
deferred {
goto ReceiverNotAdded;
......@@ -480,7 +480,7 @@ namespace array_join {
label IfUninitialized {
const stack: FixedArray =
AllocateFixedArrayWithHoles(kMinJoinStackSize, kNone);
stack[0] = receiver;
stack.objects[0] = receiver;
SetJoinStack(stack);
}
goto ReceiverAdded;
......@@ -492,7 +492,7 @@ namespace array_join {
stack: FixedArray, receiver: JSReceiver): Object {
const len: intptr = stack.length_intptr;
for (let i: intptr = 0; i < len; i++) {
if (stack[i] == receiver) {
if (stack.objects[i] == receiver) {
// Shrink the Join Stack if the stack will be empty and is larger than
// the minimum size.
if (i == 0 && len > kMinJoinStackSize) deferred {
......@@ -501,7 +501,7 @@ namespace array_join {
SetJoinStack(newStack);
}
else {
stack[i] = Hole;
stack.objects[i] = Hole;
}
return Undefined;
}
......@@ -517,7 +517,7 @@ namespace array_join {
// Builtin call was not nested (receiver is the first entry) and
// did not contain other nested arrays that expanded the stack.
if (stack[0] == receiver && len == kMinJoinStackSize) {
if (stack.objects[0] == receiver && len == kMinJoinStackSize) {
StoreFixedArrayElement(stack, 0, Hole, SKIP_WRITE_BARRIER);
} else
deferred {
......
......@@ -11,7 +11,7 @@ namespace array_lastindexof {
elements: FixedArrayBase, index: Smi): Object
labels IfHole {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
const element: Object = elements[index];
const element: Object = elements.objects[index];
if (element == Hole) goto IfHole;
return element;
}
......
......@@ -136,9 +136,9 @@ namespace array_map {
SmiUntag(length), kAllowLargeObjectAllocation);
a = new JSArray{map, this.fixedArray};
for (let i: Smi = 0; i < validLength; i++) {
typeswitch (this.fixedArray[i]) {
typeswitch (this.fixedArray.objects[i]) {
case (n: Number): {
elements[i] = Float64SilenceNaN(Convert<float64>(n));
elements.floats[i] = Float64SilenceNaN(Convert<float64>(n));
}
case (h: HeapObject): {
assert(h == Hole);
......@@ -159,16 +159,16 @@ namespace array_map {
StoreResult(implicit context: Context)(index: Smi, result: Object) {
typeswitch (result) {
case (s: Smi): {
this.fixedArray[index] = s;
this.fixedArray.objects[index] = s;
}
case (s: HeapNumber): {
this.onlySmis = false;
this.fixedArray[index] = s;
this.fixedArray.objects[index] = s;
}
case (s: HeapObject): {
this.onlySmis = false;
this.onlyNumbers = false;
this.fixedArray[index] = s;
this.fixedArray.objects[index] = s;
}
}
}
......
......@@ -8,23 +8,24 @@ namespace array_reverse {
LoadElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
elements: FixedArrayBase, index: Smi): Smi {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
return UnsafeCast<Smi>(elems[index]);
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
return UnsafeCast<Smi>(elements.objects[index]);
}
LoadElement<array::FastPackedObjectElements, Object>(
implicit context: Context)(elements: FixedArrayBase, index: Smi): Object {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
return elems[index];
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
return elements.objects[index];
}
LoadElement<array::FastPackedDoubleElements, float64>(
implicit context: Context)(elements: FixedArrayBase, index: Smi):
float64 {
const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
// This macro is only used for PACKED_DOUBLE, loading the hole should
// be impossible.
return LoadDoubleWithHoleCheck(elems, index) otherwise unreachable;
return LoadDoubleWithHoleCheck(elements, index)
otherwise unreachable;
}
macro StoreElement<ElementsAccessor: type, T: type>(
......@@ -40,15 +41,15 @@ namespace array_reverse {
StoreElement<array::FastPackedObjectElements, Object>(
implicit context:
Context)(elements: FixedArrayBase, index: Smi, value: Object) {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
elems[index] = value;
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
elements.objects[index] = value;
}
StoreElement<array::FastPackedDoubleElements, float64>(
implicit context:
Context)(elements: FixedArrayBase, index: Smi, value: float64) {
const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
StoreFixedDoubleArrayElementWithSmiIndex(elems, index, value);
StoreFixedDoubleArrayElementSmi(elems, index, value);
}
// Fast-path for all PACKED_* elements kinds. These do not need to check
......
......@@ -43,13 +43,13 @@ namespace array_slice {
// defined arguments
const end: Smi = start + count;
const unmappedElements: FixedArray =
Cast<FixedArray>(sloppyElements[kSloppyArgumentsArgumentsIndex])
Cast<FixedArray>(sloppyElements.objects[kSloppyArgumentsArgumentsIndex])
otherwise Bailout;
const unmappedElementsLength: Smi = unmappedElements.length;
if (SmiAbove(end, unmappedElementsLength)) goto Bailout;
const argumentsContext: Context =
UnsafeCast<Context>(sloppyElements[kSloppyArgumentsContextIndex]);
const argumentsContext: Context = UnsafeCast<Context>(
sloppyElements.objects[kSloppyArgumentsContextIndex]);
const arrayMap: Map = LoadJSArrayElementsMap(HOLEY_ELEMENTS, context);
const result: JSArray =
......@@ -62,10 +62,10 @@ namespace array_slice {
// Fill in the part of the result that map to context-mapped parameters.
for (let current: Smi = start; current < to; ++current) {
const e: Object =
sloppyElements[current + kSloppyArgumentsParameterMapStart];
sloppyElements.objects[current + kSloppyArgumentsParameterMapStart];
const newElement: Object = e != Hole ?
argumentsContext[UnsafeCast<Smi>(e)] :
unmappedElements[current];
unmappedElements.objects[current];
StoreFixedArrayElementSmi(
resultElements, indexOut++, newElement, SKIP_WRITE_BARRIER);
}
......
......@@ -43,19 +43,10 @@ namespace array {
}
macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object {
const e: Object = a[i];
const e: Object = a.objects[i];
return e == Hole ? Undefined : e;
}
macro LoadElementOrUndefined(a: FixedArray, i: intptr): Object {
const e: Object = a[i];
return e == Hole ? Undefined : e;
}
macro LoadElementOrUndefined(a: FixedArray, i: constexpr int31): Object {
return LoadElementOrUndefined(a, Convert<intptr>(i));
}
macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
try {
const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise IfHole;
......@@ -66,34 +57,18 @@ namespace array {
}
}
macro LoadElementOrUndefined(a: FixedDoubleArray, i: intptr):
NumberOrUndefined {
try {
const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise IfHole;
return AllocateHeapNumberWithValue(f);
}
label IfHole {
return Undefined;
}
}
macro LoadElementOrUndefined(a: FixedDoubleArray, i: constexpr int31):
NumberOrUndefined {
return LoadElementOrUndefined(a, Convert<intptr>(i));
}
macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void {
StoreFixedDoubleArrayHoleSmi(elements, k);
}
macro StoreArrayHole(elements: FixedArray, k: Smi): void {
elements[k] = Hole;
elements.objects[k] = Hole;
}
macro CopyArrayElement(
elements: FixedArray, newElements: FixedArray, from: Smi, to: Smi): void {
const e: Object = elements[from];
newElements[to] = e;
const e: Object = elements.objects[from];
newElements.objects[to] = e;
}
macro CopyArrayElement(
......@@ -102,7 +77,7 @@ namespace array {
try {
const floatValue: float64 = LoadDoubleWithHoleCheck(elements, from)
otherwise FoundHole;
newElements[to] = floatValue;
newElements.floats[to] = floatValue;
}
label FoundHole {
StoreArrayHole(newElements, to);
......
......@@ -67,10 +67,13 @@ type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
type Map extends HeapObject generates 'TNode<Map>';
type FixedArrayBase extends HeapObject generates 'TNode<FixedArrayBase>';
type FixedArray extends FixedArrayBase generates 'TNode<FixedArray>';
type FixedDoubleArray extends FixedArrayBase
generates 'TNode<FixedDoubleArray>';
extern class FixedArrayBase extends HeapObject { length: Smi; }
extern class FixedArray extends FixedArrayBase { objects[length]: Object; }
extern class FixedDoubleArray extends FixedArrayBase {
floats[length]: float64;
}
// These intrinsics should never be called from Torque code. They're used
// internally by the 'new' operator and only declared here because it's simpler
......@@ -176,8 +179,8 @@ type FixedTypedArrayBase extends FixedArrayBase
generates 'TNode<FixedTypedArrayBase>';
type FixedTypedArray extends FixedTypedArrayBase
generates 'TNode<FixedTypedArray>';
type SloppyArgumentsElements extends FixedArray
generates 'TNode<FixedArray>';
extern class SloppyArgumentsElements extends FixedArray
generates 'TNode<FixedArray>' {}
type NumberDictionary extends HeapObject
generates 'TNode<NumberDictionary>';
......@@ -1241,46 +1244,50 @@ extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray):
extern operator '.length' macro LoadJSTypedArrayLength(JSTypedArray): Smi;
extern operator '.length' macro LoadFastJSArrayLength(FastJSArray): Smi;
extern operator '.length' macro LoadFixedArrayBaseLength(FixedArrayBase): Smi;
extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
FixedArrayBase): intptr;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, intptr): Object;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, Smi): Object;
extern operator '[]' macro LoadFixedArrayElement(
extern operator '.objects[]' macro LoadFixedArrayElement(
FixedArray, intptr): Object;
extern operator '.objects[]' macro LoadFixedArrayElement(
FixedArray, Smi): Object;
extern operator '.objects[]' macro LoadFixedArrayElement(
FixedArray, constexpr int31): Object;
extern operator '[]=' macro StoreFixedArrayElement(
extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, intptr, Smi): void;
extern operator '[]=' macro StoreFixedArrayElement(FixedArray, Smi, Smi): void;
extern operator '[]=' macro StoreFixedArrayElement(
extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, Smi, Smi): void;
extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, intptr, HeapObject): void;
extern operator '[]=' macro StoreFixedArrayElement(
extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, Smi): void;
extern operator '[]=' macro StoreFixedArrayElement(
extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, HeapObject): void;
extern operator '[]=' macro StoreFixedArrayElementSmi(
extern operator '.objects[]=' macro StoreFixedArrayElementSmi(
FixedArray, Smi, Object): void;
operator '[]=' macro StoreFixedDoubleArrayNumber(
a: FixedDoubleArray, index: Smi, value: Number): void {
a[index] = Convert<float64>(value);
}
extern macro StoreFixedArrayElementSmi(
extern operator '.objects[]=' macro StoreFixedArrayElementSmi(
FixedArray, Smi, Object, constexpr WriteBarrierMode): void;
extern macro StoreFixedArrayElement(
FixedArray, intptr, Object, constexpr WriteBarrierMode): void;
extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
FixedDoubleArray, intptr, float64): void;
extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
FixedDoubleArray, Smi, float64): void;
operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
a: FixedDoubleArray, i: Smi, n: Number): void {
StoreFixedDoubleArrayElementSmi(a, i, Convert<float64>(n));
}
operator '[]=' macro StoreFixedDoubleArrayDirect(
a: FixedDoubleArray, i: Smi, v: Number) {
a.floats[i] = Convert<float64>(v);
}
operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) {
a.objects[i] = v;
}
extern operator '.instance_type' macro LoadMapInstanceType(Map): int32;
extern macro LoadFixedDoubleArrayElement(FixedDoubleArray, Smi): float64;
extern macro Float64SilenceNaN(float64): float64;
extern macro StoreFixedDoubleArrayElement(
FixedDoubleArray, Object, float64, constexpr ParameterMode);
extern macro StoreFixedArrayElement(
FixedArray, intptr, Object, constexpr WriteBarrierMode): void;
macro StoreFixedDoubleArrayElementWithSmiIndex(
array: FixedDoubleArray, index: Smi, value: float64) {
StoreFixedDoubleArrayElement(array, index, value, SMI_PARAMETERS);
}
extern macro GetNumberDictionaryNumberOfElements(NumberDictionary): Smi;
extern macro GetIteratorMethod(implicit context: Context)(HeapObject): Object
labels IfIteratorUndefined;
......@@ -1348,9 +1355,6 @@ extern macro AllocateJSArray(constexpr ElementsKind, Map, intptr, Smi): JSArray;
extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray;
extern macro AllocateJSObjectFromMap(Map): JSObject;
extern operator '[]=' macro StoreFixedDoubleArrayElementSmi(
FixedDoubleArray, Smi, float64): void;
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
labels IfHole;
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, intptr): float64
......@@ -1437,7 +1441,7 @@ LoadElementNoHole<FixedArray>(implicit context: Context)(
try {
let elements: FixedArray =
Cast<FixedArray>(a.elements) otherwise Unexpected;
let e: Object = elements[index];
let e: Object = elements.objects[index];
if (e == Hole) {
goto IfHole;
}
......
......@@ -126,23 +126,23 @@ namespace typed_array {
if (left < middle && right >= to) {
// If the left run has elements, but the right does not, we take
// from the left.
target[targetIndex] = source[left++];
target.objects[targetIndex] = source.objects[left++];
} else if (left < middle) {
// If both have elements, we need to compare.
const leftElement: Object = source[left];
const rightElement: Object = source[right];
const leftElement: Object = source.objects[left];
const rightElement: Object = source.objects[right];
if (CallCompare(leftElement, rightElement) <= 0) {
target[targetIndex] = leftElement;
target.objects[targetIndex] = leftElement;
left++;
} else {
target[targetIndex] = rightElement;
target.objects[targetIndex] = rightElement;
right++;
}
} else {
// No elements on the left, but the right does, so we take
// from the right.
assert(left == middle);
target[targetIndex] = source[right++];
target.objects[targetIndex] = source.objects[right++];
}
}
}
......@@ -253,14 +253,15 @@ namespace typed_array {
for (let i: Smi = 0; i < len; ++i) {
const element: Object = loadfn(context, array, i);
work1[i] = element;
work2[i] = element;
work1.objects[i] = element;
work2.objects[i] = element;
}
TypedArrayMergeSort(work2, 0, len, work1);
// work1 contains the sorted numbers. Write them back.
for (let i: Smi = 0; i < len; ++i) storefn(context, array, i, work1[i]);
for (let i: Smi = 0; i < len; ++i)
storefn(context, array, i, work1.objects[i]);
return array;
}
......
......@@ -9,6 +9,7 @@
#include "src/objects/instance-type.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
......@@ -97,14 +98,10 @@ class FixedArrayBase : public HeapObject {
#endif // V8_HOST_ARCH_32_BIT
// Layout description.
#define FIXED_ARRAY_BASE_FIELDS(V) \
V(kLengthOffset, kTaggedSize) \
/* Header size. */ \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
FIXED_ARRAY_BASE_FIELDS)
#undef FIXED_ARRAY_BASE_FIELDS
static const int kHeaderSize = kSize;
protected:
// Special-purpose constructor for subclasses that have fast paths where
......
......@@ -694,6 +694,7 @@ struct StructFieldExpression {
struct ClassFieldExpression {
NameAndTypeExpression name_and_type;
base::Optional<std::string> index;
bool weak;
};
......
......@@ -287,6 +287,11 @@ void DeclarationVisitor::DeclareMethods(
if (constructor_this_type->Constructors().size() != 0) return;
// TODO(danno): Currently, default constructors for classes with
// open-ended arrays at the end are not supported. For now, if one is
// encountered, don't actually create the constructor.
if (container_type->HasIndexedField()) return;
// Generate default constructor.
Signature constructor_signature;
constructor_signature.parameter_types.var_args = false;
......@@ -302,6 +307,7 @@ void DeclarationVisitor::DeclareMethods(
std::vector<Expression*> super_arguments;
for (auto current_type : hierarchy) {
for (auto& f : current_type->fields()) {
DCHECK(!f.index);
std::string parameter_name("p" + std::to_string(parameter_number++));
constructor_signature.parameter_names.push_back(parameter_name);
constructor_signature.parameter_types.types.push_back(
......@@ -368,6 +374,7 @@ void DeclarationVisitor::Visit(ClassDeclaration* decl) {
// The generates clause must create a TNode<>
std::string generates = decl->name;
if (decl->generates) {
generates = *decl->generates;
if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
generates.substr(generates.length() - 1, 1) != ">") {
ReportError("generated type \"", generates,
......@@ -533,6 +540,7 @@ void DeclarationVisitor::FinalizeStructFieldsAndMethods(
const Type* field_type = Declarations::GetType(field.name_and_type.type);
struct_type->RegisterField({field.name_and_type.type->pos,
struct_type,
base::nullopt,
{field.name_and_type.name, field_type},
offset,
false});
......@@ -546,6 +554,7 @@ void DeclarationVisitor::FinalizeClassFieldsAndMethods(
ClassType* class_type, ClassDeclaration* class_declaration) {
const ClassType* super_class = class_type->GetSuperClass();
size_t class_offset = super_class ? super_class->size() : 0;
bool seen_indexed_field = false;
for (ClassFieldExpression& field_expression : class_declaration->fields) {
CurrentSourcePosition::Scope position_activator(
field_expression.name_and_type.type->pos);
......@@ -553,30 +562,55 @@ void DeclarationVisitor::FinalizeClassFieldsAndMethods(
Declarations::GetType(field_expression.name_and_type.type);
if (!class_declaration->is_extern) {
if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
ReportError("Non-extern classes do not support untagged fields.");
ReportError("non-extern classes do not support untagged fields");
}
if (field_expression.weak) {
ReportError("Non-extern classes do not support weak fields.");
ReportError("non-extern classes do not support weak fields");
}
}
const Field& field = class_type->RegisterField(
{field_expression.name_and_type.type->pos,
class_type,
{field_expression.name_and_type.name, field_type},
class_offset,
field_expression.weak});
size_t field_size;
std::string size_string;
std::string machine_type;
std::tie(field_size, size_string, machine_type) =
field.GetFieldSizeInformation();
size_t aligned_offset = class_offset & ~(field_size - 1);
if (class_offset != aligned_offset) {
ReportError("field ", field_expression.name_and_type.name,
" is not aligned to its size (", aligned_offset, " vs ",
class_offset, " for field size ", field_size, ")");
if (field_expression.index) {
if (seen_indexed_field ||
(super_class && super_class->HasIndexedField())) {
ReportError(
"only one indexable field is currently supported per class");
}
seen_indexed_field = true;
const Field* index_field =
&(class_type->LookupField(*field_expression.index));
class_type->RegisterField(
{field_expression.name_and_type.type->pos,
class_type,
index_field,
{field_expression.name_and_type.name, field_type},
class_offset,
field_expression.weak});
} else {
if (seen_indexed_field) {
ReportError("cannot declare non-indexable field \"",
field_expression.name_and_type.name,
"\" after an indexable field "
"declaration");
}
const Field& field = class_type->RegisterField(
{field_expression.name_and_type.type->pos,
class_type,
base::nullopt,
{field_expression.name_and_type.name, field_type},
class_offset,
field_expression.weak});
size_t field_size;
std::string size_string;
std::string machine_type;
std::tie(field_size, size_string, machine_type) =
field.GetFieldSizeInformation();
size_t aligned_offset = class_offset & ~(field_size - 1);
if (class_offset != aligned_offset) {
ReportError("field ", field_expression.name_and_type.name,
" is not aligned to its size (", aligned_offset, " vs ",
class_offset, " for field size ", field_size, ")");
}
class_offset += field_size;
}
class_offset += field_size;
}
class_type->SetSize(class_offset);
......@@ -595,15 +629,18 @@ void DeclarationVisitor::FinalizeClassFieldsAndMethods(
this_struct_type->RegisterField(
{CurrentSourcePosition::Get(),
super_struct_type,
base::nullopt,
{kConstructorStructSuperFieldName, super_struct_type},
struct_offset,
false});
struct_offset += LoweredSlotCount(super_struct_type);
}
for (auto& field : class_type->fields()) {
if (field.index) continue;
const Type* field_type = field.name_and_type.type;
this_struct_type->RegisterField({field.pos,
class_type,
base::nullopt,
{field.name_and_type.name, field_type},
struct_offset,
false});
......@@ -616,6 +653,7 @@ void DeclarationVisitor::FinalizeClassFieldsAndMethods(
// function and define a corresponding '.field' operator. The
// implementation iterator will turn the snippits into code.
for (auto& field : class_type->fields()) {
if (field.index) continue;
CurrentSourcePosition::Scope position_activator(field.pos);
IdentifierExpression* parameter =
MakeNode<IdentifierExpression>(std::string{"o"});
......
......@@ -1729,15 +1729,30 @@ LocationReference ImplementationVisitor::GetLocationReference(
ProjectStructField(reference.temporary(), expr->field),
reference.temporary_description());
}
return LocationReference::FieldAccess(GenerateFetchFromLocation(reference),
expr->field);
VisitResult object_result = GenerateFetchFromLocation(reference);
if (const ClassType* class_type =
ClassType::DynamicCast(object_result.type())) {
if (class_type->HasField(expr->field)) {
const Field& field = (class_type->LookupField(expr->field));
if (field.index) {
return LocationReference::IndexedFieldAccess(object_result,
expr->field);
}
}
}
return LocationReference::FieldAccess(object_result, expr->field);
}
LocationReference ImplementationVisitor::GetLocationReference(
ElementAccessExpression* expr) {
VisitResult array = Visit(expr->array);
LocationReference reference = GetLocationReference(expr->array);
VisitResult index = Visit(expr->index);
return LocationReference::ArrayAccess(array, index);
if (reference.IsIndexedFieldAccess()) {
return LocationReference::IndexedFieldIndexedAccess(reference, index);
} else {
return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
index);
}
}
LocationReference ImplementationVisitor::GetLocationReference(
......@@ -1806,6 +1821,10 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
} else if (reference.IsVariableAccess()) {
return GenerateCopy(reference.variable());
} else {
if (reference.IsIndexedFieldAccess()) {
ReportError(
"fetching a value directly from an indexed field isn't allowed");
}
DCHECK(reference.IsCallAccess());
return GenerateCall(reference.eval_function(),
Arguments{reference.call_arguments(), {}});
......@@ -1824,6 +1843,8 @@ void ImplementationVisitor::GenerateAssignToLocation(
GenerateImplicitConvert(variable.type(), assignment_value);
assembler().Poke(variable.stack_range(), converted_value.stack_range(),
variable.type());
} else if (reference.IsIndexedFieldAccess()) {
ReportError("assigning a value directly to an indexed field isn't allowed");
} else {
DCHECK(reference.IsTemporary());
ReportError("cannot assign to temporary ",
......
......@@ -54,6 +54,26 @@ class LocationReference {
result.eval_function_ = "." + fieldname;
result.assign_function_ = "." + fieldname + "=";
result.call_arguments_ = {object};
result.index_field_ = base::nullopt;
return result;
}
static LocationReference IndexedFieldIndexedAccess(
const LocationReference& indexed_field, VisitResult index) {
LocationReference result;
DCHECK(indexed_field.IsIndexedFieldAccess());
std::string fieldname = *indexed_field.index_field_;
result.eval_function_ = "." + fieldname + "[]";
result.assign_function_ = "." + fieldname + "[]=";
result.call_arguments_ = indexed_field.call_arguments_;
result.call_arguments_.push_back(index);
result.index_field_ = fieldname;
return result;
}
static LocationReference IndexedFieldAccess(VisitResult object,
std::string fieldname) {
LocationReference result;
result.call_arguments_ = {object};
result.index_field_ = fieldname;
return result;
}
......@@ -82,6 +102,13 @@ class LocationReference {
return *temporary_description_;
}
bool IsArrayField() const { return index_field_.has_value(); }
bool IsIndexedFieldAccess() const {
return IsArrayField() && !IsCallAccess();
}
bool IsIndexedFieldIndexedAccess() const {
return IsArrayField() && IsCallAccess();
}
bool IsCallAccess() const {
bool is_call_access = eval_function_.has_value();
DCHECK_EQ(is_call_access, assign_function_.has_value());
......@@ -107,6 +134,7 @@ class LocationReference {
base::Optional<std::string> eval_function_;
base::Optional<std::string> assign_function_;
VisitResultVector call_arguments_;
base::Optional<std::string> index_field_;
LocationReference() = default;
};
......@@ -252,8 +280,8 @@ class ImplementationVisitor : public FileVisitor {
VisitResult Visit(CallExpression* expr, bool is_tail = false);
VisitResult Visit(CallMethodExpression* expr);
VisitResult Visit(IntrinsicCallExpression* intrinsic);
VisitResult Visit(LoadObjectFieldExpression* intrinsic);
VisitResult Visit(StoreObjectFieldExpression* intrinsic);
VisitResult Visit(LoadObjectFieldExpression* expr);
VisitResult Visit(StoreObjectFieldExpression* expr);
const Type* Visit(TailCallStatement* stmt);
VisitResult Visit(ConditionalExpression* expr);
......
......@@ -1134,8 +1134,10 @@ base::Optional<ParseResult> MakeNameAndType(
base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
auto weak = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
auto index = child_results->NextAs<base::Optional<std::string>>();
auto type = child_results->NextAs<TypeExpression*>();
return ParseResult{ClassFieldExpression{{std::move(name), type}, weak}};
return ParseResult{
ClassFieldExpression{{std::move(name), type}, index, weak}};
}
base::Optional<ParseResult> MakeStructField(
......@@ -1331,8 +1333,12 @@ struct TorqueGrammar : Grammar {
Symbol nameAndType = {
Rule({&identifier, Token(":"), &type}, MakeNameAndType)};
Symbol* optionalArraySpecifier = {
Optional<std::string>(Sequence({Token("["), &identifier, Token("]")}))};
Symbol classField = {
Rule({CheckIf(Token("weak")), &identifier, Token(":"), &type, Token(";")},
Rule({CheckIf(Token("weak")), &identifier, optionalArraySpecifier,
Token(":"), &type, Token(";")},
MakeClassField)};
Symbol structField = {
......
......@@ -172,6 +172,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(UINT8_TYPE_STRING);
}
static const Type* GetFloat64Type() {
return Get().GetBuiltinType(FLOAT64_TYPE_STRING);
}
static const Type* GetNeverType() {
return Get().GetBuiltinType(NEVER_TYPE_STRING);
}
......
......@@ -226,6 +226,18 @@ std::vector<const AggregateType*> AggregateType::GetHierarchy() {
return hierarchy;
}
bool AggregateType::HasField(const std::string& name) const {
for (auto& field : fields_) {
if (field.name_and_type.name == name) return true;
}
if (parent() != nullptr) {
if (auto parent_class = ClassType::DynamicCast(parent())) {
return parent_class->HasField(name);
}
}
return false;
}
const Field& AggregateType::LookupField(const std::string& name) const {
for (auto& field : fields_) {
if (field.name_and_type.name == name) return field;
......@@ -235,7 +247,7 @@ const Field& AggregateType::LookupField(const std::string& name) const {
return parent_class->LookupField(name);
}
}
ReportError("no field ", name, "found");
ReportError("no field ", name, " found");
}
std::string StructType::GetGeneratedTypeName() const {
......@@ -261,6 +273,33 @@ std::string StructType::ToExplicitString() const {
return result.str();
}
ClassType::ClassType(const Type* parent, Namespace* nspace,
const std::string& name, bool is_extern, bool transient,
const std::string& generates)
: AggregateType(Kind::kClassType, parent, nspace, name),
this_struct_(nullptr),
is_extern_(is_extern),
transient_(transient),
size_(0),
has_indexed_field_(false),
generates_(generates) {
CheckForDuplicateFields();
if (parent) {
if (const ClassType* super_class = ClassType::DynamicCast(parent)) {
if (super_class->HasIndexedField()) {
has_indexed_field_ = true;
}
}
}
}
bool ClassType::HasIndexedField() const {
if (has_indexed_field_) return true;
const ClassType* super_class = GetSuperClass();
if (super_class) return super_class->HasIndexedField();
return false;
}
std::string ClassType::GetGeneratedTNodeTypeName() const {
if (!IsExtern()) return generates_;
std::string prefix = nspace()->IsDefaultNamespace()
......@@ -525,6 +564,10 @@ std::tuple<size_t, std::string, std::string> Field::GetFieldSizeInformation()
field_size = kUInt8Size;
size_string = "kUInt8Size";
machine_type = "MachineType::Uint8()";
} else if (field_type == TypeOracle::GetFloat64Type()) {
field_size = kDoubleSize;
size_string = "kDoubleSize";
machine_type = "MachineType::Float64()";
} else if (field_type == TypeOracle::GetIntPtrType()) {
field_size = kIntptrSize;
size_string = "kIntptrSize";
......
......@@ -45,6 +45,7 @@ static const char* const INT16_TYPE_STRING = "int16";
static const char* const UINT16_TYPE_STRING = "uint16";
static const char* const INT8_TYPE_STRING = "int8";
static const char* const UINT8_TYPE_STRING = "uint8";
static const char* const FLOAT64_TYPE_STRING = "float64";
static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
......@@ -168,6 +169,7 @@ struct Field {
SourcePosition pos;
const AggregateType* aggregate;
base::Optional<const Field*> index;
NameAndType name_and_type;
size_t offset;
bool is_weak;
......@@ -404,9 +406,11 @@ class AggregateType : public Type {
const Type* NonConstexprVersion() const override { return this; }
bool IsConstexpr() const override { return false; }
virtual bool HasIndexedField() const { return false; }
void SetFields(std::vector<Field> fields) { fields_ = std::move(fields); }
const std::vector<Field>& fields() const { return fields_; }
bool HasField(const std::string& name) const;
const Field& LookupField(const std::string& name) const;
const std::string& name() const { return name_; }
Namespace* nspace() const { return namespace_; }
......@@ -415,7 +419,7 @@ class AggregateType : public Type {
return "_method_" + name_ + "_" + name;
}
const Field& RegisterField(Field field) {
virtual const Field& RegisterField(Field field) {
fields_.push_back(field);
return fields_.back();
}
......@@ -474,6 +478,7 @@ class ClassType final : public AggregateType {
std::string GetGeneratedTNodeTypeName() const override;
bool IsExtern() const { return is_extern_; }
bool IsTransient() const override { return transient_; }
bool HasIndexedField() const override;
size_t size() const { return size_; }
StructType* struct_type() const { return this_struct_; }
const ClassType* GetSuperClass() const {
......@@ -483,24 +488,23 @@ class ClassType final : public AggregateType {
void SetSize(size_t size) { size_ = size; }
void SetThisStruct(StructType* this_struct) { this_struct_ = this_struct; }
bool AllowInstantiation() const;
// Registers a field on this class, recording whether it is indexed so that
// HasIndexedField() can answer without rescanning the field list.
const Field& RegisterField(Field field) override {
  if (field.index) has_indexed_field_ = true;
  return AggregateType::RegisterField(field);
}
private:
friend class TypeOracle;
ClassType(const Type* parent, Namespace* nspace, const std::string& name,
bool is_extern, bool transient, const std::string& generates)
: AggregateType(Kind::kClassType, parent, nspace, name),
this_struct_(nullptr),
is_extern_(is_extern),
transient_(transient),
size_(0),
generates_(generates) {
CheckForDuplicateFields();
}
bool is_extern, bool transient, const std::string& generates);
StructType* this_struct_;
bool is_extern_;
bool transient_;
size_t size_;
bool has_indexed_field_;
const std::string generates_;
};
......
......@@ -191,14 +191,14 @@ namespace array {
context: Context, sortState: SortState, index: Smi): Object {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
return elements[index];
return elements.objects[index];
}
Load<FastSmiOrObjectElements>(
context: Context, sortState: SortState, index: Smi): Object {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
const result: Object = elements[index];
const result: Object = elements.objects[index];
if (IsTheHole(result)) {
// The pre-processing step removed all holes by compacting all elements
// at the start of the array. Finding a hole means the cmp function or
......@@ -242,7 +242,7 @@ namespace array {
Object {
const elements = sortState.tempArray;
assert(IsFixedArray(elements));
return elements[index];
return elements.objects[index];
}
transitioning builtin Store<ElementsAccessor: type>(
......@@ -263,7 +263,7 @@ namespace array {
context: Context, sortState: SortState, index: Smi, value: Object): Smi {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
elements[index] = value;
elements.objects[index] = value;
return kSuccess;
}
......@@ -274,7 +274,7 @@ namespace array {
const heapVal = UnsafeCast<HeapNumber>(value);
// Make sure we do not store signalling NaNs into double arrays.
const val = Float64SilenceNaN(Convert<float64>(heapVal));
StoreFixedDoubleArrayElementWithSmiIndex(elements, index, val);
StoreFixedDoubleArrayElementSmi(elements, index, val);
return kSuccess;
}
......@@ -303,7 +303,7 @@ namespace array {
Store<TempArrayElements>(
context: Context, sortState: SortState, index: Smi, value: Object): Smi {
const elements = sortState.tempArray;
elements[index] = value;
elements.objects[index] = value;
return kSuccess;
}
......@@ -424,20 +424,20 @@ namespace array {
macro GetPendingRunBase(implicit context:
Context)(pendingRuns: FixedArray, run: Smi): Smi {
return UnsafeCast<Smi>(pendingRuns[run << 1]);
return UnsafeCast<Smi>(pendingRuns.objects[run << 1]);
}
macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi) {
pendingRuns[run << 1] = value;
pendingRuns.objects[run << 1] = value;
}
macro GetPendingRunLength(implicit context: Context)(
pendingRuns: FixedArray, run: Smi): Smi {
return UnsafeCast<Smi>(pendingRuns[(run << 1) + 1]);
return UnsafeCast<Smi>(pendingRuns.objects[(run << 1) + 1]);
}
macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi) {
pendingRuns[(run << 1) + 1] = value;
pendingRuns.objects[(run << 1) + 1] = value;
}
macro PushRun(implicit context:
......@@ -558,7 +558,7 @@ namespace array {
while (srcIdx < to) {
const element = CallLoad(load, srcIdx++) otherwise Bailout;
tempArray[dstIdx++] = element;
tempArray.objects[dstIdx++] = element;
}
}
......@@ -578,7 +578,7 @@ namespace array {
let to: Smi = srcPos + length;
try {
while (srcIdx < to) {
CallStore(store, dstIdx++, tempArray[srcIdx++]) otherwise Bailout;
CallStore(store, dstIdx++, tempArray.objects[srcIdx++]) otherwise Bailout;
}
return kSuccess;
}
......@@ -1107,7 +1107,7 @@ namespace array {
let elementB = CallLoad(load, cursorB) otherwise Bailout;
let order =
CallCompareFn(elementB, tempArray[cursorTemp]) otherwise Bailout;
CallCompareFn(elementB, tempArray.objects[cursorTemp]) otherwise Bailout;
if (order < 0) {
CopyElement(load, store, cursorB, dest) otherwise Bailout;
......@@ -1121,7 +1121,7 @@ namespace array {
if (lengthB == 0) goto Succeed;
if (nofWinsB >= minGallop) break;
} else {
CallStore(store, dest, tempArray[cursorTemp]) otherwise Bailout;
CallStore(store, dest, tempArray.objects[cursorTemp]) otherwise Bailout;
++cursorTemp;
++dest;
......@@ -1169,7 +1169,7 @@ namespace array {
if (--lengthB == 0) goto Succeed;
nofWinsB =
CallGallopLeft(load, tempArray[cursorTemp], cursorB, lengthB, 0)
CallGallopLeft(load, tempArray.objects[cursorTemp], cursorB, lengthB, 0)
otherwise Bailout;
assert(nofWinsB >= 0);
if (nofWinsB > 0) {
......@@ -1181,7 +1181,7 @@ namespace array {
if (lengthB == 0) goto Succeed;
}
CallStore(store, dest++, tempArray[cursorTemp++]) otherwise Bailout;
CallStore(store, dest++, tempArray.objects[cursorTemp++]) otherwise Bailout;
if (--lengthA == 1) goto CopyB;
}
++minGallop; // Penalize it for leaving galloping mode
......@@ -1197,7 +1197,7 @@ namespace array {
assert(lengthA == 1 && lengthB > 0);
// The last element of run A belongs at the end of the merge.
CallCopyWithinSortArray(cursorB, dest, lengthB) otherwise Bailout;
CallStore(store, dest + lengthB, tempArray[cursorTemp]) otherwise Bailout;
CallStore(store, dest + lengthB, tempArray.objects[cursorTemp]) otherwise Bailout;
}
}
......@@ -1250,7 +1250,7 @@ namespace array {
let elementA = CallLoad(load, cursorA) otherwise Bailout;
let order =
CallCompareFn(tempArray[cursorTemp], elementA) otherwise Bailout;
CallCompareFn(tempArray.objects[cursorTemp], elementA) otherwise Bailout;
if (order < 0) {
CopyElement(load, store, cursorA, dest) otherwise Bailout;
......@@ -1264,7 +1264,7 @@ namespace array {
if (lengthA == 0) goto Succeed;
if (nofWinsA >= minGallop) break;
} else {
CallStore(store, dest, tempArray[cursorTemp]) otherwise Bailout;
CallStore(store, dest, tempArray.objects[cursorTemp]) otherwise Bailout;
--cursorTemp;
--dest;
......@@ -1292,7 +1292,7 @@ namespace array {
sortState.minGallop = minGallop;
let k: Smi = CallGallopRight(
load, tempArray[cursorTemp], baseA, lengthA, lengthA - 1)
load, tempArray.objects[cursorTemp], baseA, lengthA, lengthA - 1)
otherwise Bailout;
assert(k >= 0);
nofWinsA = lengthA - k;
......@@ -1306,7 +1306,7 @@ namespace array {
lengthA = lengthA - nofWinsA;
if (lengthA == 0) goto Succeed;
}
CallStore(store, dest--, tempArray[cursorTemp--]) otherwise Bailout;
CallStore(store, dest--, tempArray.objects[cursorTemp--]) otherwise Bailout;
if (--lengthB == 1) goto CopyA;
let key = CallLoad(load, cursorA) otherwise Bailout;
......@@ -1350,7 +1350,7 @@ namespace array {
dest = dest - lengthA;
cursorA = cursorA - lengthA;
CallCopyWithinSortArray(cursorA + 1, dest + 1, lengthA) otherwise Bailout;
CallStore(store, dest, tempArray[cursorTemp]) otherwise Bailout;
CallStore(store, dest, tempArray.objects[cursorTemp]) otherwise Bailout;
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment