Commit 1c3085e2 authored by Toon Verwaest, committed by V8 LUCI CQ

[cleanup] Resolve -Wshadow warnings in code-stub-assembler.h

By changing AllocationFlag from enum to enum class

Bug: v8:12244, v8:12245
Change-Id: Ifdd04bb12026619f6422a98ee0890bd557f0e4e1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3181536
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Auto-Submit: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77188}
parent 749e41d4
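Why this resolves the warnings: a plain enum injects its enumerators (kNone, kPretenured, ...) into the enclosing class scope, where they can shadow or collide with identically named constants; an enum class keeps them scoped, at the price of losing implicit conversion to integers. The sketch below illustrates the pattern. It compiles standalone, but the AllocationFlags wrapper is a simplified stand-in for V8's base::Flags<AllocationFlag> (whose operators are generated by DEFINE_OPERATORS_FOR_FLAGS), not the actual implementation:

#include <cstdint>

enum class AllocationFlag : uint8_t {
  kNone = 0,
  kDoubleAlignment = 1,
  kPretenured = 1 << 1,
  kAllowLargeObjectAllocation = 1 << 2,
};

// Simplified stand-in for V8's base::Flags<AllocationFlag>: keeps bitwise
// combination working now that the enumerators no longer convert to int.
class AllocationFlags {
 public:
  constexpr AllocationFlags(AllocationFlag f)
      : bits_(static_cast<uint8_t>(f)) {}

  constexpr AllocationFlags operator|(AllocationFlag f) const {
    return AllocationFlags(
        static_cast<uint8_t>(bits_ | static_cast<uint8_t>(f)));
  }
  constexpr bool operator&(AllocationFlag f) const {
    return (bits_ & static_cast<uint8_t>(f)) != 0;
  }

 private:
  constexpr explicit AllocationFlags(uint8_t bits) : bits_(bits) {}
  uint8_t bits_;
};

int main() {
  // Call sites must now qualify the enumerator, which is exactly what the
  // diff below does everywhere: bare kPretenured no longer names anything
  // in the enclosing scope, so it cannot shadow other kPretenured constants.
  AllocationFlags flags = AllocationFlag::kPretenured;
  flags = flags | AllocationFlag::kDoubleAlignment;
  return (flags & AllocationFlag::kDoubleAlignment) ? 0 : 1;
}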
@@ -97,7 +97,7 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
// Allocate and initialize the register file.
TNode<FixedArrayBase> parameters_and_registers =
AllocateFixedArray(HOLEY_ELEMENTS, parameters_and_register_length,
- kAllowLargeObjectAllocation);
+ AllocationFlag::kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), parameters_and_register_length,
RootIndex::kUndefinedValue);
......
@@ -334,7 +334,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
// Allocate a new FixedArray of Objects.
TNode<FixedArray> new_elements = CAST(AllocateFixedArray(
- new_kind, intptr_length, CodeStubAssembler::kAllowLargeObjectAllocation));
+ new_kind, intptr_length, AllocationFlag::kAllowLargeObjectAllocation));
// CopyFixedArrayElements does not distinguish between holey and packed for
// its first argument, so we don't need to dispatch on {kind} here.
CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
......
@@ -1089,8 +1089,9 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
const ElementsKind kind = PACKED_ELEMENTS;
TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- TNode<JSArray> array = AllocateJSArray(kind, array_map, size, SmiTag(size),
- kAllowLargeObjectAllocation);
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, size, SmiTag(size),
+ AllocationFlag::kAllowLargeObjectAllocation);
TNode<FixedArray> elements = CAST(LoadElements(array));
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
@@ -1200,8 +1201,9 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
const ElementsKind kind = PACKED_ELEMENTS;
TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- TNode<JSArray> array = AllocateJSArray(kind, array_map, size, SmiTag(size),
- kAllowLargeObjectAllocation);
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, size, SmiTag(size),
+ AllocationFlag::kAllowLargeObjectAllocation);
TNode<FixedArray> elements = CAST(LoadElements(array));
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
@@ -2502,8 +2504,8 @@ TNode<HeapObject> WeakCollectionsBuiltinsAssembler::AllocateTable(
// See HashTable::NewInternal().
TNode<IntPtrT> length = KeyIndexFromEntry(capacity);
- TNode<FixedArray> table = CAST(
- AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation));
+ TNode<FixedArray> table = CAST(AllocateFixedArray(
+ HOLEY_ELEMENTS, length, AllocationFlag::kAllowLargeObjectAllocation));
TNode<Map> map =
HeapConstant(EphemeronHashTable::GetMap(ReadOnlyRoots(isolate())));
......
@@ -338,7 +338,7 @@ TNode<JSObject> ConstructorBuiltinsAssembler::FastNewObject(
BIND(&instantiate_map);
return AllocateJSObjectFromMap(initial_map, properties.value(), base::nullopt,
- kNone, kWithSlackTracking);
+ AllocationFlag::kNone, kWithSlackTracking);
}
TNode<Context> ConstructorBuiltinsAssembler::FastNewFunctionContext(
......
@@ -249,8 +249,9 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
BIND(&if_has_enum_cache);
{
GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
- TNode<FixedArray> values_or_entries = CAST(AllocateFixedArray(
- PACKED_ELEMENTS, object_enum_length, kAllowLargeObjectAllocation));
+ TNode<FixedArray> values_or_entries =
+ CAST(AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ AllocationFlag::kAllowLargeObjectAllocation));
// If in case we have enum_cache,
// we can't detect accessor of object until loop through descriptors.
@@ -1252,13 +1253,14 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
TNode<IntPtrT> size =
IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count);
- TNode<FixedArrayBase> parameters_and_registers =
- AllocateFixedArray(HOLEY_ELEMENTS, size, kAllowLargeObjectAllocation);
+ TNode<FixedArrayBase> parameters_and_registers = AllocateFixedArray(
+ HOLEY_ELEMENTS, size, AllocationFlag::kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), size, RootIndex::kUndefinedValue);
// TODO(cbruni): support start_offset to avoid double initialization.
- TNode<JSObject> result = AllocateJSObjectFromMap(
- map, base::nullopt, base::nullopt, kNone, kWithSlackTracking);
+ TNode<JSObject> result =
+ AllocateJSObjectFromMap(map, base::nullopt, base::nullopt,
+ AllocationFlag::kNone, kWithSlackTracking);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kFunctionOffset,
closure);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContextOffset,
......
@@ -112,7 +112,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
std::tie(var_array, var_elements) =
AllocateUninitializedJSArrayWithElements(
elements_kind, map, length, no_gc_site, length_intptr,
- kAllowLargeObjectAllocation, JSRegExpResult::kSize);
+ AllocationFlag::kAllowLargeObjectAllocation, JSRegExpResult::kSize);
Goto(&allocated);
}
@@ -124,7 +124,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
std::tie(var_array, var_elements) =
AllocateUninitializedJSArrayWithElements(
elements_kind, map, length, no_gc_site, length_intptr,
- kAllowLargeObjectAllocation, JSRegExpResultWithIndices::kSize);
+ AllocationFlag::kAllowLargeObjectAllocation,
+ JSRegExpResultWithIndices::kSize);
Goto(&allocated);
}
@@ -329,8 +330,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
properties = AllocateSwissNameDictionary(num_properties);
} else {
- properties =
- AllocateNameDictionary(num_properties, kAllowLargeObjectAllocation);
+ properties = AllocateNameDictionary(
+ num_properties, AllocationFlag::kAllowLargeObjectAllocation);
}
TNode<JSObject> group_object = AllocateJSObjectFromMap(map, properties);
......
@@ -236,7 +236,8 @@ macro AllocateFromNew(
return Allocate(
sizeInBytes,
%RawConstexprCast<constexpr AllocationFlag>(
- kAllocateBaseFlags | AllocationFlag::kPretenured));
+ %RawConstexprCast<constexpr int32>(kAllocateBaseFlags) |
+ %RawConstexprCast<constexpr int32>(AllocationFlag::kPretenured)));
} else {
return Allocate(sizeInBytes, kAllocateBaseFlags);
}
......
@@ -1234,8 +1234,9 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TVARIABLE(Object, result);
Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this);
- bool needs_double_alignment = flags & kDoubleAlignment;
- bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation;
+ bool needs_double_alignment = flags & AllocationFlag::kDoubleAlignment;
+ bool allow_large_object_allocation =
+ flags & AllocationFlag::kAllowLargeObjectAllocation;
if (allow_large_object_allocation) {
Label next(this);
@@ -1281,7 +1282,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
- if (flags & kPretenured) {
+ if (flags & AllocationFlag::kPretenured) {
result =
CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
@@ -1333,7 +1334,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TNode<HeapObject> CodeStubAssembler::AllocateRawUnaligned(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
- DCHECK_EQ(flags & kDoubleAlignment, 0);
+ DCHECK_EQ(flags & AllocationFlag::kDoubleAlignment, 0);
return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
}
@@ -1341,8 +1342,8 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
#if defined(V8_HOST_ARCH_32_BIT)
- return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
- limit_address);
+ return AllocateRaw(size_in_bytes, flags | AllocationFlag::kDoubleAlignment,
+ top_address, limit_address);
#elif defined(V8_HOST_ARCH_64_BIT)
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
@@ -1351,8 +1352,8 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
// Allocation on 64 bit machine is naturally double aligned
- return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
- limit_address);
+ return AllocateRaw(size_in_bytes, flags & ~AllocationFlag::kDoubleAlignment,
+ top_address, limit_address);
#else
#error Architecture not supported
#endif
@@ -1360,7 +1361,8 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags) {
- DCHECK(flags == kNone || flags == kDoubleAlignment);
+ DCHECK(flags == AllocationFlag::kNone ||
+ flags == AllocationFlag::kDoubleAlignment);
CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes));
return Allocate(size_in_bytes, flags);
}
@@ -1368,9 +1370,10 @@ TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(
TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags) {
Comment("Allocate");
- if (FLAG_single_generation) flags |= kPretenured;
- bool const new_space = !(flags & kPretenured);
- bool const allow_large_objects = flags & kAllowLargeObjectAllocation;
+ if (FLAG_single_generation) flags |= AllocationFlag::kPretenured;
+ bool const new_space = !(flags & AllocationFlag::kPretenured);
+ bool const allow_large_objects =
+ flags & AllocationFlag::kAllowLargeObjectAllocation;
// For optimized allocations, we don't allow the allocation to happen in a
// different generation than requested.
bool const always_allocated_in_requested_space =
@@ -1383,7 +1386,8 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes));
}
}
- if (!(flags & kDoubleAlignment) && always_allocated_in_requested_space) {
+ if (!(flags & AllocationFlag::kDoubleAlignment) &&
+ always_allocated_in_requested_space) {
return OptimizedAllocate(
size_in_bytes,
new_space ? AllocationType::kYoung : AllocationType::kOld,
@@ -1421,7 +1425,7 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
IntPtrAdd(ReinterpretCast<IntPtrT>(top_address),
IntPtrConstant(kSystemPointerSize));
- if (flags & kDoubleAlignment) {
+ if (flags & AllocationFlag::kDoubleAlignment) {
return AllocateRawDoubleAligned(size_in_bytes, flags,
ReinterpretCast<RawPtrT>(top_address),
ReinterpretCast<RawPtrT>(limit_address));
@@ -1434,7 +1438,8 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
AllocationFlags flags) {
- CHECK(flags == kNone || flags == kDoubleAlignment);
+ CHECK(flags == AllocationFlag::kNone ||
+ flags == AllocationFlag::kDoubleAlignment);
DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize);
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
@@ -3275,7 +3280,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
TNode<Cell> CodeStubAssembler::AllocateCellWithValue(TNode<Object> value,
WriteBarrierMode mode) {
- TNode<HeapObject> result = Allocate(Cell::kSize, kNone);
+ TNode<HeapObject> result = Allocate(Cell::kSize, AllocationFlag::kNone);
StoreMapNoWriteBarrier(result, RootIndex::kCellMap);
TNode<Cell> cell = CAST(result);
StoreCellValue(cell, value, mode);
@@ -3298,7 +3303,7 @@ void CodeStubAssembler::StoreCellValue(TNode<Cell> cell, TNode<Object> value,
}
TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber() {
- TNode<HeapObject> result = Allocate(HeapNumber::kSize, kNone);
+ TNode<HeapObject> result = Allocate(HeapNumber::kSize, AllocationFlag::kNone);
RootIndex heap_map_index = RootIndex::kHeapNumberMap;
StoreMapNoWriteBarrier(result, heap_map_index);
return UncheckedCast<HeapNumber>(result);
@@ -3343,7 +3348,8 @@ TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
TNode<IntPtrT> size =
IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
Signed(WordShl(length, kSystemPointerSizeLog2)));
- TNode<HeapObject> raw_result = Allocate(size, kAllowLargeObjectAllocation);
+ TNode<HeapObject> raw_result =
+ Allocate(size, AllocationFlag::kAllowLargeObjectAllocation);
StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset));
@@ -3665,9 +3671,9 @@ TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTableWithCapacity(
const ElementsKind elements_kind = HOLEY_ELEMENTS;
TNode<Map> fixed_array_map =
HeapConstant(CollectionType::GetMap(ReadOnlyRoots(isolate())));
- TNode<CollectionType> table =
- CAST(AllocateFixedArray(elements_kind, fixed_array_length,
- kAllowLargeObjectAllocation, fixed_array_map));
+ TNode<CollectionType> table = CAST(AllocateFixedArray(
+ elements_kind, fixed_array_length,
+ AllocationFlag::kAllowLargeObjectAllocation, fixed_array_map));
Comment("Initialize the OrderedHashTable fields.");
const WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER;
@@ -3968,7 +3974,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
TNode<IntPtrT> capacity, AllocationFlags allocation_flags,
int array_header_size) {
Comment("begin allocation of JSArray with elements");
- CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
+ CHECK_EQ(allocation_flags & ~AllocationFlag::kAllowLargeObjectAllocation, 0);
CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
TVARIABLE(JSArray, array);
@@ -4018,7 +4024,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
// folding trick. Instead, we first allocate the elements in large object
// space, and then allocate the JSArray (and possibly the allocation
// memento) in new space.
- if (allocation_flags & kAllowLargeObjectAllocation) {
+ if (allocation_flags & AllocationFlag::kAllowLargeObjectAllocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size), &next);
@@ -4258,7 +4264,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
TNode<IntPtrT> total_size = GetFixedArrayAllocationSize(capacity, kind);
- if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
+ if (IsDoubleElementsKind(kind)) flags |= AllocationFlag::kDoubleAlignment;
// Allocate both array and elements object, and initialize the JSArray.
TNode<HeapObject> array = Allocate(total_size, flags);
if (fixed_array_map) {
@@ -4268,7 +4274,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
// need the write barrier even in LOS, but it's better to not take chances
// in case this invariant changes later, since it's difficult to enforce
// locally here.
- if (flags == CodeStubAssembler::kNone) {
+ if (flags == AllocationFlag::kNone) {
StoreMapNoWriteBarrier(array, *fixed_array_map);
} else {
StoreMap(array, *fixed_array_map);
@@ -4556,7 +4562,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
var_holes_converted != nullptr ? HoleConversionMode::kConvertToUndefined
: HoleConversionMode::kDontConvert;
TVARIABLE(FixedArrayBase, var_result);
- auto allocation_flags = CodeStubAssembler::kAllowLargeObjectAllocation;
+ auto allocation_flags = AllocationFlag::kAllowLargeObjectAllocation;
if (!first) {
first = IntPtrOrSmiConstant<TIndex>(0);
}
@@ -4662,7 +4668,7 @@ TNode<PropertyArray> CodeStubAssembler::AllocatePropertyArray(
CSA_DCHECK(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
TNode<IntPtrT> total_size = GetPropertyArrayAllocationSize(capacity);
- TNode<HeapObject> array = Allocate(total_size, kNone);
+ TNode<HeapObject> array = Allocate(total_size, AllocationFlag::kNone);
RootIndex map_index = RootIndex::kPropertyArrayMap;
DCHECK(RootsTable::IsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
@@ -11344,7 +11350,7 @@ TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot) {
TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext);
- TNode<HeapObject> site = Allocate(size, CodeStubAssembler::kPretenured);
+ TNode<HeapObject> site = Allocate(size, AllocationFlag::kPretenured);
StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap);
// Should match AllocationSite::Initialize.
TNode<WordT> field = UpdateWord<AllocationSite::ElementsKindBits>(
@@ -15191,7 +15197,7 @@ CodeStubAssembler::AllocateSwissNameDictionaryWithCapacity(
TNode<IntPtrT> total_size = SwissNameDictionarySizeFor(capacity);
TNode<SwissNameDictionary> table = UncheckedCast<SwissNameDictionary>(
- Allocate(total_size, kAllowLargeObjectAllocation));
+ Allocate(total_size, AllocationFlag::kAllowLargeObjectAllocation));
StoreMapNoWriteBarrier(table, RootIndex::kSwissNameDictionaryMap);
@@ -15291,7 +15297,7 @@ TNode<SwissNameDictionary> CodeStubAssembler::CopySwissNameDictionary(
TNode<IntPtrT> total_size = SwissNameDictionarySizeFor(capacity);
TNode<SwissNameDictionary> table = UncheckedCast<SwissNameDictionary>(
- Allocate(total_size, kAllowLargeObjectAllocation));
+ Allocate(total_size, AllocationFlag::kAllowLargeObjectAllocation));
StoreMapNoWriteBarrier(table, RootIndex::kSwissNameDictionaryMap);
......
@@ -322,7 +322,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
explicit CodeStubAssembler(compiler::CodeAssemblerState* state);
- enum AllocationFlag : uint8_t {
+ enum class AllocationFlag : uint8_t {
kNone = 0,
kDoubleAlignment = 1,
kPretenured = 1 << 1,
@@ -753,13 +753,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Operation bitwise_op);
// Allocate an object of the given size.
- TNode<HeapObject> AllocateInNewSpace(TNode<IntPtrT> size,
- AllocationFlags flags = kNone);
- TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> AllocateInNewSpace(
+ TNode<IntPtrT> size, AllocationFlags flags = AllocationFlag::kNone);
+ TNode<HeapObject> AllocateInNewSpace(
+ int size, AllocationFlags flags = AllocationFlag::kNone);
TNode<HeapObject> Allocate(TNode<IntPtrT> size,
- AllocationFlags flags = kNone);
+ AllocationFlags flags = AllocationFlag::kNone);
- TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> Allocate(int size,
+ AllocationFlags flags = AllocationFlag::kNone);
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
@@ -1809,17 +1811,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocationFlags flags);
// Allocate a ByteArray with the given length.
- TNode<ByteArray> AllocateByteArray(TNode<UintPtrT> length,
- AllocationFlags flags = kNone);
+ TNode<ByteArray> AllocateByteArray(
+ TNode<UintPtrT> length, AllocationFlags flags = AllocationFlag::kNone);
// Allocate a SeqOneByteString with the given length.
- TNode<String> AllocateSeqOneByteString(uint32_t length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqOneByteString(
+ uint32_t length, AllocationFlags flags = AllocationFlag::kNone);
using TorqueGeneratedExportedMacrosAssembler::AllocateSeqOneByteString;
// Allocate a SeqTwoByteString with the given length.
- TNode<String> AllocateSeqTwoByteString(uint32_t length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqTwoByteString(
+ uint32_t length, AllocationFlags flags = AllocationFlag::kNone);
using TorqueGeneratedExportedMacrosAssembler::AllocateSeqTwoByteString;
// Allocate a SlicedOneByteString with the given length, parent and offset.
@@ -1836,9 +1838,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
- TNode<IntPtrT> at_least_space_for, AllocationFlags = kNone);
+ TNode<IntPtrT> at_least_space_for,
+ AllocationFlags = AllocationFlag::kNone);
TNode<NameDictionary> AllocateNameDictionaryWithCapacity(
- TNode<IntPtrT> capacity, AllocationFlags = kNone);
+ TNode<IntPtrT> capacity, AllocationFlags = AllocationFlag::kNone);
TNode<NameDictionary> CopyNameDictionary(TNode<NameDictionary> dictionary,
Label* large_object_fallback);
@@ -1856,7 +1859,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Map> map,
base::Optional<TNode<HeapObject>> properties = base::nullopt,
base::Optional<TNode<FixedArray>> elements = base::nullopt,
- AllocationFlags flags = kNone,
+ AllocationFlags flags = AllocationFlag::kNone,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
@@ -1881,30 +1884,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
base::Optional<TNode<AllocationSite>> allocation_site,
- TNode<IntPtrT> capacity, AllocationFlags allocation_flags = kNone,
+ TNode<IntPtrT> capacity,
+ AllocationFlags allocation_flags = AllocationFlag::kNone,
int array_header_size = JSArray::kHeaderSize);
// Allocate a JSArray and fill elements with the hole.
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
- AllocationFlags allocation_flags = kNone);
+ AllocationFlags allocation_flags = AllocationFlag::kNone);
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
- AllocationFlags allocation_flags = kNone) {
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
allocation_site, allocation_flags);
}
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<Smi> capacity, TNode<Smi> length,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
+ TNode<Smi> length,
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
base::nullopt, allocation_flags);
}
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<IntPtrT> capacity, TNode<Smi> length,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
+ TNode<Smi> length,
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, capacity, length, base::nullopt,
allocation_flags);
}
@@ -1937,7 +1943,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <typename TIndex>
TNode<FixedArrayBase> AllocateFixedArray(
- ElementsKind kind, TNode<TIndex> capacity, AllocationFlags flags = kNone,
+ ElementsKind kind, TNode<TIndex> capacity,
+ AllocationFlags flags = AllocationFlag::kNone,
base::Optional<TNode<Map>> fixed_array_map = base::nullopt);
TNode<NativeContext> GetCreationContext(TNode<JSReceiver> receiver,
......
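One hunk above is not purely mechanical: the Torque macro AllocateFromNew can no longer OR kAllocateBaseFlags with AllocationFlag::kPretenured directly, because a scoped enum has no implicit integer conversion; both operands are therefore cast to int32 with %RawConstexprCast and the result cast back. A self-contained C++ analogue of that round-trip (the kAllocateBaseFlags value here is assumed purely for illustration):

#include <cstdint>

enum class AllocationFlag : uint8_t { kNone = 0, kPretenured = 1 << 1 };

// Assumed base-flags value, for illustration only.
constexpr int32_t kAllocateBaseFlags =
    static_cast<int32_t>(AllocationFlag::kNone);

// Mirror of the %RawConstexprCast round-trip: OR on the underlying integer
// type, then cast the combined bits back to the scoped enum.
constexpr AllocationFlag kCombined = static_cast<AllocationFlag>(
    kAllocateBaseFlags | static_cast<int32_t>(AllocationFlag::kPretenured));

static_assert(static_cast<int32_t>(kCombined) == 2,
              "the pretenured bit is set");

int main() { return 0; }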