Commit 27cfcf56 authored by Igor Sheludko, committed by Commit Bot

[cleanup] Fix kPointerSize usages in platform-independent src/builtins/

Bug: v8:8477, v8:8562
Change-Id: Iebb60551a461304539d943a080ce107eecf6fdbf
Reviewed-on: https://chromium-review.googlesource.com/c/1384264
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58371}
parent 0382ca40
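For context: this cleanup splits the old kPointerSize into two constants that can diverge once pointer compression (v8:8477) lands. Heap offsets and tagged-value slots scale by kTaggedSize; raw machine words (stack slots, the store buffer, the builtins table) scale by kSystemPointerSize. A minimal sketch of the distinction, where the compressed value is an assumption rather than part of this CL:

    // Sketch of the constants this CL substitutes for kPointerSize.
    constexpr int kSystemPointerSize = static_cast<int>(sizeof(void*));
    #ifdef V8_COMPRESS_POINTERS
    constexpr int kTaggedSize = 4;  // compressed tagged value (assumed)
    #else
    constexpr int kTaggedSize = kSystemPointerSize;  // today: identical sizes
    #endif
    constexpr int kTaggedSizeLog2 = (kTaggedSize == 8) ? 3 : 2;

Since kTaggedSize == kSystemPointerSize until compression ships, the hunks below are behavior-preserving; they only record which unit each offset is really measured in.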
@@ -101,7 +101,7 @@ Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
[this, elements, &offset](Node* arg) {
StoreNoWriteBarrier(MachineRepresentation::kTagged,
elements, offset.value(), arg);
Increment(&offset, kPointerSize);
Increment(&offset, kSystemPointerSize);
},
first_arg, nullptr, param_mode);
return result;
@@ -268,12 +268,12 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
mapped_offset = BuildFastLoop(
var_list1, argument_offset, mapped_offset,
[this, elements, &current_argument](Node* offset) {
Increment(&current_argument, kPointerSize);
Increment(&current_argument, kSystemPointerSize);
Node* arg = LoadBufferObject(current_argument.value(), 0);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
arg);
},
-kPointerSize, INTPTR_PARAMETERS);
-kTaggedSize, INTPTR_PARAMETERS);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_count slots. They index the context,
@@ -291,24 +291,23 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
mapped_count, mode));
Node* the_hole = TheHoleConstant();
VariableList var_list2({&context_index}, zone());
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
const int kParameterMapHeaderSize = FixedArray::OffsetOfElementAt(2);
Node* adjusted_map_array = IntPtrAdd(
BitcastTaggedToWord(map_array),
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
Node* zero_offset = ElementOffsetFromIndex(
zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
BuildFastLoop(var_list2, mapped_offset, zero_offset,
[this, the_hole, elements, adjusted_map_array, &context_index,
mode](Node* offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged,
elements, offset, the_hole);
StoreNoWriteBarrier(
MachineRepresentation::kTagged, adjusted_map_array,
offset, ParameterToTagged(context_index.value(), mode));
Increment(&context_index, 1, mode);
},
-kPointerSize, INTPTR_PARAMETERS);
BuildFastLoop(
var_list2, mapped_offset, zero_offset,
[=, &context_index](Node* offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
the_hole);
StoreNoWriteBarrier(MachineRepresentation::kTagged,
adjusted_map_array, offset,
ParameterToTagged(context_index.value(), mode));
Increment(&context_index, 1, mode);
},
-kTaggedSize, INTPTR_PARAMETERS);
result.Bind(argument_object);
Goto(&done);
......
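The kParameterMapHeaderSize change above expresses the same arithmetic through a helper: FixedArray::OffsetOfElementAt(2) is the array header plus two element slots. A hedged standalone sketch of that helper's arithmetic (the two-slot map-plus-length header is the assumption):

    // Hypothetical mirror of FixedArray::OffsetOfElementAt().
    constexpr int kTaggedSize = 8;  // assumed: 64-bit, no pointer compression
    constexpr int kFixedArrayHeaderSize = 2 * kTaggedSize;  // map + length
    constexpr int OffsetOfElementAt(int index) {
      // Elements are tagged values, so offsets scale by kTaggedSize.
      return kFixedArrayHeaderSize + index * kTaggedSize;
    }

Using the helper keeps the "elements are tagged" assumption in one place instead of re-deriving it with kPointerSize at each call site.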
@@ -3731,7 +3731,7 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
TailCallRuntime(Runtime::kAbort, context, reason);
} else {
int element_size =
IsDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
IsDoubleElementsKind(elements_kind) ? kDoubleSize : kTaggedSize;
int max_fast_elements =
(kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
AllocationMemento::kSize) /
......
@@ -77,7 +77,7 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
TNode<IntPtrT> parameters_and_register_length =
Signed(IntPtrAdd(WordSar(frame_size, IntPtrConstant(kPointerSizeLog2)),
Signed(IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count));
// Allocate space for the promise, the async function object
@@ -86,7 +86,7 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
IntPtrConstant(JSPromise::kSizeWithEmbedderFields +
JSAsyncFunctionObject::kSize + FixedArray::kHeaderSize),
Signed(WordShl(parameters_and_register_length,
IntPtrConstant(kPointerSizeLog2))));
IntPtrConstant(kTaggedSizeLog2))));
TNode<HeapObject> base = AllocateInNewSpace(size);
// Initialize the register file.
......
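Both async-function hunks convert between bytes and tagged slots: the bytecode array's frame size is a byte count, so shifting right by kTaggedSizeLog2 gives the number of register slots, and shifting the combined slot count left again gives the byte size to allocate. A standalone sketch of that round trip (function names are hypothetical):

    constexpr int kTaggedSizeLog2 = 3;  // assumed: 64-bit, no compression

    // Hypothetical mirror of the AsyncFunctionEnter size computation above.
    int RegisterFileSlots(int frame_size_bytes, int formal_parameter_count) {
      return (frame_size_bytes >> kTaggedSizeLog2) + formal_parameter_count;
    }
    int RegisterFileByteSize(int slots) { return slots << kTaggedSizeLog2; }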
@@ -68,7 +68,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
// JSPromise::kSizeWithEmbedderFields.
CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
kPointerSize)));
kTaggedSize)));
TNode<HeapObject> wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
{
// Initialize Promise
@@ -255,8 +255,8 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
// JSFunction.
CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
IntPtrConstant(JSFunction::kSizeWithoutPrototype /
kPointerSize)));
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
kTaggedSize)));
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
StoreMapNoWriteBarrier(function, function_map);
StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
......
@@ -1005,7 +1005,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
LoadFixedArrayElement(table, entry_start_position,
(OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kValueOffset) *
kPointerSize);
kTaggedSize);
Store(elements, var_offset.value(), entry_value);
Goto(&continue_loop);
@@ -1016,7 +1016,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
// Increment the array offset and continue the loop to the next entry.
var_index = cur_index;
var_offset.Bind(
IntPtrAdd(var_offset.value(), IntPtrConstant(kPointerSize)));
IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)));
Goto(&loop);
}
}
@@ -1101,8 +1101,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
Store(elements, var_offset.value(), entry_key);
var_index = cur_index;
var_offset.Bind(
IntPtrAdd(var_offset.value(), IntPtrConstant(kPointerSize)));
var_offset.Bind(IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)));
Goto(&loop);
}
@@ -1313,7 +1312,7 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
STATIC_ASSERT(OrderedHashMap::RemovedHolesIndex() ==
OrderedHashSet::RemovedHolesIndex());
TNode<Smi> removed_index = CAST(LoadFixedArrayElement(
CAST(table), i, OrderedHashMap::RemovedHolesIndex() * kPointerSize));
CAST(table), i, OrderedHashMap::RemovedHolesIndex() * kTaggedSize));
GotoIf(SmiGreaterThanOrEqual(removed_index, index), &return_index);
Decrement(&var_index, 1, SMI_PARAMETERS);
Increment(&var_i);
@@ -1412,7 +1411,7 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
number_of_buckets);
entry_key =
LoadFixedArrayElement(table, entry_start_position,
TableType::HashTableStartIndex() * kPointerSize);
TableType::HashTableStartIndex() * kTaggedSize);
Increment(&var_index);
Branch(IsTheHole(entry_key), &loop, &done_loop);
}
@@ -1441,7 +1440,7 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
Return(LoadFixedArrayElement(
CAST(table), SmiUntag(index),
(OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) *
kPointerSize));
kTaggedSize));
BIND(&if_not_found);
Return(UndefinedConstant());
@@ -1511,8 +1510,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
// If we found the entry, we just store the value there.
StoreFixedArrayElement(table, entry_start_position_or_hash.value(), value,
UPDATE_WRITE_BARRIER,
kPointerSize * (OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kValueOffset));
kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kValueOffset));
Return(receiver);
Label no_hash(this), add_entry(this), store_new_entry(this);
@@ -1573,24 +1572,24 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
Node* const bucket_entry = LoadFixedArrayElement(
table, bucket, OrderedHashMap::HashTableStartIndex() * kPointerSize);
table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize);
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
IntPtrMul(occupancy, IntPtrConstant(OrderedHashMap::kEntrySize)),
number_of_buckets);
StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER,
kPointerSize * OrderedHashMap::HashTableStartIndex());
kTaggedSize * OrderedHashMap::HashTableStartIndex());
StoreFixedArrayElement(table, entry_start, value, UPDATE_WRITE_BARRIER,
kPointerSize * (OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kValueOffset));
kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kValueOffset));
StoreFixedArrayElement(table, entry_start, bucket_entry, SKIP_WRITE_BARRIER,
kPointerSize * (OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kChainOffset));
kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kChainOffset));
// Update the bucket head.
StoreFixedArrayElement(table, bucket, SmiTag(occupancy), SKIP_WRITE_BARRIER,
OrderedHashMap::HashTableStartIndex() * kPointerSize);
OrderedHashMap::HashTableStartIndex() * kTaggedSize);
// Bump the elements count.
TNode<Smi> const number_of_elements =
@@ -1626,11 +1625,11 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
// If we found the entry, mark the entry as deleted.
StoreFixedArrayElement(table, entry_start_position_or_hash.value(),
TheHoleConstant(), UPDATE_WRITE_BARRIER,
kPointerSize * OrderedHashMap::HashTableStartIndex());
kTaggedSize * OrderedHashMap::HashTableStartIndex());
StoreFixedArrayElement(table, entry_start_position_or_hash.value(),
TheHoleConstant(), UPDATE_WRITE_BARRIER,
kPointerSize * (OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kValueOffset));
kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
OrderedHashMap::kValueOffset));
// Decrement the number of elements, increment the number of deleted elements.
TNode<Smi> const number_of_elements = SmiSub(
@@ -1743,21 +1742,21 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
Node* const bucket_entry = LoadFixedArrayElement(
table, bucket, OrderedHashSet::HashTableStartIndex() * kPointerSize);
table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize);
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
IntPtrMul(occupancy, IntPtrConstant(OrderedHashSet::kEntrySize)),
number_of_buckets);
StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER,
kPointerSize * OrderedHashSet::HashTableStartIndex());
kTaggedSize * OrderedHashSet::HashTableStartIndex());
StoreFixedArrayElement(table, entry_start, bucket_entry, SKIP_WRITE_BARRIER,
kPointerSize * (OrderedHashSet::HashTableStartIndex() +
OrderedHashSet::kChainOffset));
kTaggedSize * (OrderedHashSet::HashTableStartIndex() +
OrderedHashSet::kChainOffset));
// Update the bucket head.
StoreFixedArrayElement(table, bucket, SmiTag(occupancy), SKIP_WRITE_BARRIER,
OrderedHashSet::HashTableStartIndex() * kPointerSize);
OrderedHashSet::HashTableStartIndex() * kTaggedSize);
// Bump the elements count.
TNode<Smi> const number_of_elements =
@@ -1793,7 +1792,7 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
// If we found the entry, mark the entry as deleted.
StoreFixedArrayElement(table, entry_start_position_or_hash.value(),
TheHoleConstant(), UPDATE_WRITE_BARRIER,
kPointerSize * OrderedHashSet::HashTableStartIndex());
kTaggedSize * OrderedHashSet::HashTableStartIndex());
// Decrement the number of elements, increment the number of deleted elements.
TNode<Smi> const number_of_elements = SmiSub(
@@ -1882,7 +1881,7 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
Node* entry_value = LoadFixedArrayElement(
table, entry_start_position,
(OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) *
kPointerSize);
kTaggedSize);
// Invoke the {callback} passing the {entry_key}, {entry_value} and the
// {receiver}.
@@ -1971,7 +1970,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
var_value.Bind(LoadFixedArrayElement(
table, entry_start_position,
(OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) *
kPointerSize));
kTaggedSize));
Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
&return_value, &return_entry);
......
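All of the collections hunks scale FixedArray slot indices by kTaggedSize because an OrderedHashMap or OrderedHashSet is backed by a single FixedArray laid out as header fields, then buckets, then fixed-size entries. A sketch of the slot arithmetic behind expressions like kTaggedSize * (HashTableStartIndex() + kValueOffset); the three-slot entry layout is an assumption inferred from the offsets used above:

    // Hypothetical mirror of the OrderedHashMap slot arithmetic.
    constexpr int kTaggedSize = 8;   // assumed: 64-bit, no compression
    constexpr int kEntrySize = 3;    // key, value, chain link (assumed)
    constexpr int kValueOffset = 1;  // value slot within an entry (assumed)

    // First slot of entry |occupancy|, relative to HashTableStartIndex().
    int EntryStartSlot(int occupancy, int number_of_buckets) {
      return number_of_buckets + occupancy * kEntrySize;
    }
    // Additional byte offset handed to Load/StoreFixedArrayElement so the
    // entry-start index addresses the entry's value field.
    int ValueFieldExtraOffset(int hash_table_start_index) {
      return kTaggedSize * (hash_table_start_index + kValueOffset);
    }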
@@ -134,7 +134,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
BIND(&done);
}
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackCellOffset,
feedback_cell);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
@@ -304,9 +304,9 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
{
Node* boilerplate = literal_site;
CSA_ASSERT(this, IsJSRegExp(boilerplate));
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize;
Node* copy = Allocate(size);
for (int offset = 0; offset < size; offset += kPointerSize) {
for (int offset = 0; offset < size; offset += kTaggedSize) {
Node* value = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, value);
}
@@ -545,7 +545,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
LoadObjectField<IntPtrT>(boilerplate, offset.value());
StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
}
offset = IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize));
offset = IntPtrAdd(offset.value(), IntPtrConstant(kTaggedSize));
Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
&done_init);
}
@@ -561,33 +561,36 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
BIND(&continue_with_write_barrier);
{
Comment("Copy in-object properties slow");
BuildFastLoop(offset.value(), instance_size,
[=](Node* offset) {
Node* field = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, field);
},
kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
BuildFastLoop(
offset.value(), instance_size,
[=](Node* offset) {
// TODO(ishell): value decompression is not necessary here.
Node* field = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, field);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Comment("Copy mutable HeapNumber values");
BuildFastLoop(offset.value(), instance_size,
[=](Node* offset) {
Node* field = LoadObjectField(copy, offset);
Label copy_mutable_heap_number(this, Label::kDeferred),
continue_loop(this);
// We only have to clone complex field values.
GotoIf(TaggedIsSmi(field), &continue_loop);
Branch(IsMutableHeapNumber(field),
&copy_mutable_heap_number, &continue_loop);
BIND(&copy_mutable_heap_number);
{
Node* double_value = LoadHeapNumberValue(field);
Node* mutable_heap_number =
AllocateMutableHeapNumberWithValue(double_value);
StoreObjectField(copy, offset, mutable_heap_number);
Goto(&continue_loop);
}
BIND(&continue_loop);
},
kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
BuildFastLoop(
offset.value(), instance_size,
[=](Node* offset) {
Node* field = LoadObjectField(copy, offset);
Label copy_mutable_heap_number(this, Label::kDeferred),
continue_loop(this);
// We only have to clone complex field values.
GotoIf(TaggedIsSmi(field), &continue_loop);
Branch(IsMutableHeapNumber(field), &copy_mutable_heap_number,
&continue_loop);
BIND(&copy_mutable_heap_number);
{
Node* double_value = LoadHeapNumberValue(field);
Node* mutable_heap_number =
AllocateMutableHeapNumberWithValue(double_value);
StoreObjectField(copy, offset, mutable_heap_number);
Goto(&continue_loop);
}
BIND(&continue_loop);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Goto(&done_init);
}
BIND(&done_init);
......
@@ -44,8 +44,8 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
Node* cache_stamp = LoadObjectField(receiver, JSDate::kCacheStampOffset);
GotoIf(WordNotEqual(date_cache_stamp, cache_stamp), &stamp_mismatch);
Return(LoadObjectField(
receiver, JSDate::kValueOffset + field_index * kPointerSize));
Return(LoadObjectField(receiver,
JSDate::kValueOffset + field_index * kTaggedSize));
BIND(&stamp_mismatch);
}
......
@@ -249,7 +249,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
{
// Temp variable to calculate cell offset in bitmap.
Node* r0;
int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 -
int shift = Bitmap::kBitsPerCellLog2 + kSystemPointerSizeLog2 -
Bitmap::kBytesPerCellLog2;
r0 = WordShr(object, IntPtrConstant(shift));
r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
@@ -259,7 +259,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
{
// Temp variable to calculate bit offset in cell.
Node* r1;
r1 = WordShr(object, IntPtrConstant(kPointerSizeLog2));
r1 = WordShr(object, IntPtrConstant(kSystemPointerSizeLog2));
r1 = WordAnd(r1, IntPtrConstant((1 << Bitmap::kBitsPerCellLog2) - 1));
// It seems that the LSB (e.g. cl) is automatically used, so no manual masking
// is needed. Uncomment the following line otherwise.
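The RecordWrite hunks keep a pointer-sized unit because the marking bitmap tracks one bit per system word, regardless of how tagged values are stored. A worked sketch of the two shifts above (the cell-size constants are assumptions for illustration):

    #include <cstdint>
    constexpr int kBitsPerCellLog2 = 5;        // 32 mark bits per cell (assumed)
    constexpr int kBytesPerCellLog2 = 2;       // 4-byte cells (assumed)
    constexpr int kSystemPointerSizeLog2 = 3;  // 64-bit target (assumed)

    // Byte offset of the bitmap cell covering |addr|; the caller masks the
    // result to the page. One cell covers 2^(5+3) = 256 heap bytes and
    // occupies 2^2 = 4 bitmap bytes, hence the combined shift of 5 + 3 - 2.
    uintptr_t CellByteOffset(uintptr_t addr) {
      return addr >>
             (kBitsPerCellLog2 + kSystemPointerSizeLog2 - kBytesPerCellLog2);
    }
    // Mark-bit index of |addr| within its cell.
    int BitInCell(uintptr_t addr) {
      return (addr >> kSystemPointerSizeLog2) & ((1 << kBitsPerCellLog2) - 1);
    }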
@@ -329,7 +329,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
slot);
Node* new_store_buffer_top =
IntPtrAdd(store_buffer_top, IntPtrConstant(kPointerSize));
IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize));
StoreNoWriteBarrier(MachineType::PointerRepresentation(),
store_buffer_top_addr, new_store_buffer_top);
......
@@ -1395,7 +1395,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
MachineType::Uint16()));
Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
Node* size = IntPtrAdd(WordSar(frame_size, IntPtrConstant(kPointerSizeLog2)),
Node* size = IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count);
Node* parameters_and_registers = AllocateFixedArray(HOLEY_ELEMENTS, size);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
......
@@ -81,24 +81,24 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
kAllowLargeObjectAllocation);
elements.Bind(allocated_elements);
VARIABLE(index, MachineType::PointerRepresentation(),
IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
VariableList list({&index}, zone());
TVARIABLE(IntPtrT, offset,
IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
VariableList list({&offset}, zone());
GotoIf(SmiGreaterThan(length, SmiConstant(FixedArray::kMaxRegularLength)),
&if_large_object);
args.ForEach(list, [=, &index](Node* arg) {
args.ForEach(list, [=, &offset](Node* arg) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements,
index.value(), arg);
Increment(&index, kPointerSize);
offset.value(), arg);
Increment(&offset, kTaggedSize);
});
Goto(&allocate_js_array);
BIND(&if_large_object);
{
args.ForEach(list, [=, &index](Node* arg) {
Store(allocated_elements, index.value(), arg);
Increment(&index, kPointerSize);
args.ForEach(list, [=, &offset](Node* arg) {
Store(allocated_elements, offset.value(), arg);
Increment(&offset, kTaggedSize);
});
Goto(&allocate_js_array);
}
......
@@ -118,7 +118,7 @@ TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndex(
TNode<JSRegExp> regexp) {
// Load the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kTaggedSize;
return LoadObjectField(regexp, field_offset);
}
@@ -139,7 +139,7 @@ TNode<Object> RegExpBuiltinsAssembler::LoadLastIndex(TNode<Context> context,
void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) {
// Store the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kTaggedSize;
StoreObjectField(regexp, field_offset, value);
}
@@ -592,7 +592,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TNode<Smi> smi_value = SmiFromInt32(value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, match_info,
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kPointerSize);
Increment(&var_to_offset, kTaggedSize);
},
kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
@@ -2497,10 +2497,10 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const reg = var_reg.value();
Node* const from = LoadFixedArrayElement(
match_indices, reg,
RegExpMatchInfo::kFirstCaptureIndex * kPointerSize, mode);
RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize, mode);
TNode<Smi> const to = CAST(LoadFixedArrayElement(
match_indices, reg,
(RegExpMatchInfo::kFirstCaptureIndex + 1) * kPointerSize, mode));
(RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize, mode));
Label select_capture(this), select_undefined(this), store_value(this);
VARIABLE(var_value, MachineRepresentation::kTagged);
......
@@ -2528,14 +2528,13 @@ TNode<JSArray> StringBuiltinsAssembler::StringToList(TNode<Context> context,
TNode<IntPtrT> ch_length = LoadStringLengthAsWord(value);
var_position = IntPtrAdd(var_position.value(), ch_length);
// Increment the array offset and continue the loop.
var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kPointerSize));
var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize));
Goto(&next_codepoint);
}
BIND(&done);
TNode<IntPtrT> new_length =
IntPtrDiv(IntPtrSub(var_offset.value(), first_offset),
IntPtrConstant(kPointerSize));
TNode<IntPtrT> new_length = IntPtrDiv(
IntPtrSub(var_offset.value(), first_offset), IntPtrConstant(kTaggedSize));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_length, IntPtrConstant(0)));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, new_length));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset,
......
@@ -74,7 +74,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
byte_length,
MachineType::PointerRepresentation());
for (int offset = JSTypedArray::kHeaderSize;
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kPointerSize) {
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kTaggedSize) {
StoreObjectField(holder, offset, SmiConstant(0));
}
}
......
@@ -31,30 +31,22 @@ namespace {
struct BuiltinMetadata {
const char* name;
Builtins::Kind kind;
union {
Address cpp_entry; // For CPP and API builtins.
int8_t parameter_count; // For TFJ builtins.
} kind_specific_data;
// For CPP and API builtins this is the cpp_entry address; for TFJ builtins
// it is the parameter count.
Address cpp_entry_or_parameter_count;
};
// clang-format off
#define DECL_CPP(Name, ...) { #Name, Builtins::CPP, \
{ FUNCTION_ADDR(Builtin_##Name) }},
#define DECL_API(Name, ...) { #Name, Builtins::API, \
{ FUNCTION_ADDR(Builtin_##Name) }},
#ifdef V8_TARGET_BIG_ENDIAN
#define DECL_TFJ(Name, Count, ...) { #Name, Builtins::TFJ, \
{ static_cast<Address>(static_cast<uintptr_t>( \
Count) << (kBitsPerByte * (kPointerSize - 1))) }},
#else
#define DECL_TFJ(Name, Count, ...) { #Name, Builtins::TFJ, \
{ static_cast<Address>(Count) }},
#endif
#define DECL_TFC(Name, ...) { #Name, Builtins::TFC, {} },
#define DECL_TFS(Name, ...) { #Name, Builtins::TFS, {} },
#define DECL_TFH(Name, ...) { #Name, Builtins::TFH, {} },
#define DECL_BCH(Name, ...) { #Name, Builtins::BCH, {} },
#define DECL_ASM(Name, ...) { #Name, Builtins::ASM, {} },
#define DECL_CPP(Name, ...) \
{#Name, Builtins::CPP, FUNCTION_ADDR(Builtin_##Name)},
#define DECL_API(Name, ...) \
{#Name, Builtins::API, FUNCTION_ADDR(Builtin_##Name)},
#define DECL_TFJ(Name, Count, ...) \
{#Name, Builtins::TFJ, static_cast<Address>(Count)},
#define DECL_TFC(Name, ...) {#Name, Builtins::TFC, kNullAddress},
#define DECL_TFS(Name, ...) {#Name, Builtins::TFS, kNullAddress},
#define DECL_TFH(Name, ...) {#Name, Builtins::TFH, kNullAddress},
#define DECL_BCH(Name, ...) {#Name, Builtins::BCH, kNullAddress},
#define DECL_ASM(Name, ...) {#Name, Builtins::ASM, kNullAddress},
const BuiltinMetadata builtin_metadata[] = {
BUILTIN_LIST(DECL_CPP, DECL_API, DECL_TFJ, DECL_TFC, DECL_TFS, DECL_TFH,
DECL_BCH, DECL_ASM)
@@ -67,7 +59,6 @@ const BuiltinMetadata builtin_metadata[] = {
#undef DECL_TFH
#undef DECL_BCH
#undef DECL_ASM
// clang-format on
} // namespace
@@ -138,7 +129,7 @@ Handle<Code> Builtins::builtin_handle(int index) {
// static
int Builtins::GetStackParameterCount(Name name) {
DCHECK(Builtins::KindOf(name) == TFJ);
return builtin_metadata[name].kind_specific_data.parameter_count;
return static_cast<int>(builtin_metadata[name].cpp_entry_or_parameter_count);
}
// static
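Collapsing the union into one Address-typed field is also what lets the big-endian special case in DECL_TFJ disappear: the old union was read back through its int8_t-sized parameter_count member, so big-endian targets had to pre-shift the count into the most significant byte, whereas the new code casts the whole word back. A hedged sketch of the round trip (standalone names, not V8's):

    #include <cstdint>
    using Address = uintptr_t;

    // Pack a TFJ parameter count into the Address field and read it back.
    // Casting the full word works on either endianness, unlike reading one
    // byte of a union.
    Address PackParameterCount(int count) { return static_cast<Address>(count); }
    int UnpackParameterCount(Address field) { return static_cast<int>(field); }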
@@ -205,7 +196,7 @@ void Builtins::PrintBuiltinSize() {
// static
Address Builtins::CppEntryOf(int index) {
DCHECK(Builtins::HasCppImplementation(index));
return builtin_metadata[index].kind_specific_data.cpp_entry;
return builtin_metadata[index].cpp_entry_or_parameter_count;
}
// static
@@ -221,7 +212,7 @@ bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
Address end = heap->builtin_address(Builtins::builtin_count);
if (handle_location >= end) return false;
if (handle_location < start) return false;
*index = static_cast<int>(handle_location - start) >> kPointerSizeLog2;
*index = static_cast<int>(handle_location - start) >> kSystemPointerSizeLog2;
DCHECK(Builtins::IsBuiltinId(*index));
return true;
}
......
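The IsBuiltinHandle hunk is the complementary case to the tagged-offset changes: the builtins table is a contiguous array of raw Address slots, so a handle's index is recovered with kSystemPointerSizeLog2. A standalone sketch of that arithmetic (names assumed):

    #include <cstdint>
    using Address = uintptr_t;
    constexpr int kSystemPointerSizeLog2 = 3;  // assumed: 64-bit target

    // Index of |handle_location| within a table of Address-sized slots that
    // starts at |start|; mirrors the shift in IsBuiltinHandle above.
    int BuiltinIndexOf(Address start, Address handle_location) {
      return static_cast<int>(handle_location - start) >> kSystemPointerSizeLog2;
    }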