Commit 46d19868 authored by Igor Sheludko, committed by Commit Bot

[cleanup] Fix kPointerSize in tests

Bug: v8:8477, v8:8834
Change-Id: I9213cca077a2758b87a6cb95bcb01d0186c32098
Reviewed-on: https://chromium-review.googlesource.com/c/1472633
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59602}
parent 1354ff93
......@@ -4381,10 +4381,10 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
// Load segment's base pointer from WasmInstanceObject::data_segment_starts.
Node* seg_start_array =
LOAD_INSTANCE_FIELD(DataSegmentStarts, MachineType::Pointer());
STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >>
kPointerSizeLog2);
STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <=
kMaxUInt32 / kSystemPointerSize);
Node* scaled_index = Uint32ToUintptr(graph()->NewNode(
m->Word32Shl(), seg_index, Int32Constant(kPointerSizeLog2)));
m->Word32Shl(), seg_index, Int32Constant(kSystemPointerSizeLog2)));
Node* seg_start = SetEffect(
graph()->NewNode(m->Load(MachineType::Pointer()), seg_start_array,
scaled_index, Effect(), Control()));
......
......@@ -1124,7 +1124,7 @@ class DebugInfoSection : public DebugSection {
uint32_t ty_offset = static_cast<uint32_t>(w->position() - cu_start);
w->WriteULEB128(3);
w->Write<uint8_t>(kPointerSize);
w->Write<uint8_t>(kSystemPointerSize);
w->WriteString("v8value");
if (desc_->has_scope_info()) {
......@@ -1172,9 +1172,8 @@ class DebugInfoSection : public DebugSection {
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
w->Write<uint8_t>(DW_OP_fbreg);
w->WriteSLEB128(
JavaScriptFrameConstants::kLastParameterOffset +
kPointerSize * (params - param - 1));
w->WriteSLEB128(JavaScriptFrameConstants::kLastParameterOffset +
kSystemPointerSize * (params - param - 1));
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
......@@ -1633,15 +1632,15 @@ class UnwindInfoSection : public DebugSection {
void UnwindInfoSection::WriteLength(Writer* w,
Writer::Slot<uint32_t>* length_slot,
int initial_position) {
uint32_t align = (w->position() - initial_position) % kPointerSize;
uint32_t align = (w->position() - initial_position) % kSystemPointerSize;
if (align != 0) {
for (uint32_t i = 0; i < (kPointerSize - align); i++) {
for (uint32_t i = 0; i < (kSystemPointerSize - align); i++) {
w->Write<uint8_t>(DW_CFA_NOP);
}
}
DCHECK_EQ((w->position() - initial_position) % kPointerSize, 0);
DCHECK_EQ((w->position() - initial_position) % kSystemPointerSize, 0);
length_slot->set(static_cast<uint32_t>(w->position() - initial_position));
}
......@@ -1701,7 +1700,7 @@ void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
// for the previous function. The previous RBP has not been pushed yet.
w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
w->WriteULEB128(AMD64_RSP);
w->WriteSLEB128(-kPointerSize);
w->WriteSLEB128(-kSystemPointerSize);
// The RA is stored at location CFA + kCallerPCOffset. This is an invariant,
// and hence omitted from the next states.
......@@ -1763,7 +1762,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
// The CFA can is now calculated in the same way as in the first state.
w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
w->WriteULEB128(AMD64_RSP);
w->WriteSLEB128(-kPointerSize);
w->WriteSLEB128(-kSystemPointerSize);
// The RBP
w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
......
......@@ -220,7 +220,7 @@ class WeakCell::BodyDescriptor final : public BodyDescriptorBase {
class JSWeakRef::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return JSObject::BodyDescriptor::IsValidSlot(map, obj, offset);
return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
......@@ -228,7 +228,8 @@ class JSWeakRef::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {
IteratePointers(obj, JSReceiver::kPropertiesOrHashOffset, kTargetOffset, v);
IterateCustomWeakPointer(obj, kTargetOffset, v);
IteratePointers(obj, kTargetOffset + kPointerSize, object_size, v);
IterateJSObjectBodyImpl(map, obj, kTargetOffset + kTaggedSize, object_size,
v);
}
static inline int SizeOf(Map map, HeapObject object) {
......
......@@ -30,7 +30,7 @@ FreeSpace FreeSpace::next() {
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
#endif
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
return FreeSpace::unchecked_cast(*ObjectSlot(address() + kNextOffset));
}
......@@ -42,7 +42,7 @@ void FreeSpace::set_next(FreeSpace next) {
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
#endif
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
ObjectSlot(address() + kNextOffset).Relaxed_Store(next);
}
......
......@@ -146,8 +146,14 @@ class JSWeakRef : public JSObject {
DECL_ACCESSORS(target, HeapObject)
static const int kTargetOffset = JSObject::kHeaderSize;
static const int kSize = kTargetOffset + kPointerSize;
// Layout description.
#define JS_WEAK_REF_FIELDS(V) \
V(kTargetOffset, kTaggedSize) \
/* Header size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_REF_FIELDS)
#undef JS_WEAK_REF_FIELDS
class BodyDescriptor;
......
......@@ -35,7 +35,7 @@ CAST_ACCESSOR(ClassPositions)
void Struct::InitializeBody(int object_size) {
Object value = GetReadOnlyRoots().undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
for (int offset = kHeaderSize; offset < object_size; offset += kTaggedSize) {
WRITE_FIELD(*this, offset, value);
}
}
......
......@@ -41,8 +41,8 @@ class RelocInfo {
static const char* const kFillerCommentString;
// The minimum size of a comment is equal to two bytes for the extra tagged
// pc and kPointerSize for the actual pointer to the comment.
static const int kMinRelocCommentSize = 2 + kPointerSize;
// pc and kSystemPointerSize for the actual pointer to the comment.
static const int kMinRelocCommentSize = 2 + kSystemPointerSize;
// The maximum size for a call instruction including pc-jump.
static const int kMaxCallSize = 6;
......@@ -352,7 +352,7 @@ class RelocInfoWriter {
// Max size (bytes) of a written RelocInfo. Longest encoding is
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kPointerSize;
static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kSystemPointerSize;
private:
inline uint32_t WriteLongPCJump(uint32_t pc_delta);
......
......@@ -104,9 +104,10 @@ void Deserializer::DeserializeDeferredObjects() {
DCHECK_LE(space, kNumberOfSpaces);
DCHECK_EQ(code - space, kNewObject);
HeapObject object = GetBackReferencedObject(space);
int size = source_.GetInt() << kPointerSizeLog2;
int size = source_.GetInt() << kTaggedSizeLog2;
Address obj_address = object->address();
MaybeObjectSlot start(obj_address + kPointerSize);
// Object's map is already initialized, now read the rest.
MaybeObjectSlot start(obj_address + kTaggedSize);
MaybeObjectSlot end(obj_address + size);
bool filled = ReadData(start, end, space, obj_address);
CHECK(filled);
......
......@@ -29,8 +29,10 @@ namespace {
int GetSlotSizeInBytes(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kTagged:
// Spill slots for tagged values are always uncompressed.
return kSystemPointerSize;
case MachineRepresentation::kFloat32:
return kPointerSize;
return kSystemPointerSize;
case MachineRepresentation::kFloat64:
return kDoubleSize;
case MachineRepresentation::kSimd128:
......@@ -521,7 +523,7 @@ class TestEnvironment : public HandleAndZoneScope {
// Keep a map of (MachineRepresentation . std::vector<int>) with
// allocated slots to pick from for each representation.
int slot = slot_parameter_n;
slot_parameter_n -= (GetSlotSizeInBytes(rep) / kPointerSize);
slot_parameter_n -= (GetSlotSizeInBytes(rep) / kSystemPointerSize);
AddStackSlot(&test_signature, rep, slot);
entry->second--;
}
......@@ -535,7 +537,7 @@ class TestEnvironment : public HandleAndZoneScope {
for (int i = 0; i < kSmiConstantCount; i++) {
intptr_t smi_value = static_cast<intptr_t>(
Smi::FromInt(rng_->NextInt(Smi::kMaxValue)).ptr());
Constant constant = kPointerSize == 8
Constant constant = kSystemPointerSize == 8
? Constant(static_cast<int64_t>(smi_value))
: Constant(static_cast<int32_t>(smi_value));
AddConstant(MachineRepresentation::kTagged, AllocateConstant(constant));
......
......@@ -267,7 +267,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
// When pointer compression is enabled then we need to access only
// the lower 32-bit of the tagged value while the buffer contains
// full 64-bit values.
base_pointer = LSB(base_pointer, kPointerSize / 2);
base_pointer = LSB(base_pointer, kSystemPointerSize / 2);
}
#endif
Node* base = m.PointerConstant(base_pointer);
......@@ -589,7 +589,7 @@ void RunLoadStoreSignExtend64(TestAlignment t) {
}
void RunLoadStoreZeroExtend64(TestAlignment t) {
if (kPointerSize < 8) return;
if (kSystemPointerSize < 8) return;
uint64_t buffer[5];
RawMachineAssemblerTester<uint64_t> m;
Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
......
......@@ -5592,7 +5592,7 @@ TEST(RunWord64EqualInBranchP) {
TEST(RunChangeInt32ToInt64P) {
if (kPointerSize < 8) return;
if (kSystemPointerSize < 8) return;
int64_t actual = -1;
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.StoreToPointer(&actual, MachineRepresentation::kWord64,
......@@ -5607,7 +5607,7 @@ TEST(RunChangeInt32ToInt64P) {
TEST(RunChangeUint32ToUint64P) {
if (kPointerSize < 8) return;
if (kSystemPointerSize < 8) return;
int64_t actual = -1;
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
m.StoreToPointer(&actual, MachineRepresentation::kWord64,
......@@ -5622,7 +5622,7 @@ TEST(RunChangeUint32ToUint64P) {
TEST(RunTruncateInt64ToInt32P) {
if (kPointerSize < 8) return;
if (kSystemPointerSize < 8) return;
int64_t expected = -1;
RawMachineAssemblerTester<int32_t> m;
m.Return(m.TruncateInt64ToInt32(
......
......@@ -143,7 +143,7 @@ class Allocator {
}
int StackWords(MachineType type) {
int size = 1 << ElementSizeLog2Of(type.representation());
return size <= kPointerSize ? 1 : size / kPointerSize;
return size <= kSystemPointerSize ? 1 : size / kSystemPointerSize;
}
void Reset() {
stack_offset_ = 0;
......@@ -831,7 +831,8 @@ TEST_INT32_SELECT(63)
TEST(Int64Select_registers) {
if (GetRegConfig()->num_allocatable_general_registers() < 2) return;
if (kPointerSize < 8) return; // TODO(titzer): int64 on 32-bit platforms
// TODO(titzer): int64 on 32-bit platforms
if (kSystemPointerSize < 8) return;
int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
ArgsBuffer<int64_t>::Sig sig(2);
......
......@@ -307,11 +307,10 @@ void BytecodeExpectationsPrinter::PrintConstant(
void BytecodeExpectationsPrinter::PrintFrameSize(
std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
const int kPointerSize = sizeof(void*);
int frame_size = bytecode_array->frame_size();
DCHECK_EQ(frame_size % kPointerSize, 0);
stream << "frame size: " << frame_size / kPointerSize
DCHECK(IsAligned(frame_size, kSystemPointerSize));
stream << "frame size: " << frame_size / kSystemPointerSize
<< "\nparameter count: " << bytecode_array->parameter_count() << '\n';
}
......
......@@ -1710,14 +1710,13 @@ TEST(AllocateNameDictionary) {
{
for (int i = 0; i < 256; i = i * 1.1 + 1) {
Handle<Object> result =
ft.Call(handle(Smi::FromInt(i), isolate)).ToHandleChecked();
Handle<HeapObject> result = Handle<HeapObject>::cast(
ft.Call(handle(Smi::FromInt(i), isolate)).ToHandleChecked());
Handle<NameDictionary> dict = NameDictionary::New(isolate, i);
// Both dictionaries should be memory equal.
int size =
FixedArrayBase::kHeaderSize + (dict->length() - 1) * kPointerSize;
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(dict->ptr()),
reinterpret_cast<void*>(result->ptr()), size));
int size = dict->Size();
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(dict->address()),
reinterpret_cast<void*>(result->address()), size));
}
}
}
......
......@@ -4022,7 +4022,7 @@ UNINITIALIZED_TEST(DebugSetOutOfMemoryListener) {
CHECK(!near_heap_limit_callback_called);
// The following allocation fails unless the out-of-memory callback
// increases the heap limit.
int length = 10 * i::MB / i::kPointerSize;
int length = 10 * i::MB / i::kTaggedSize;
i_isolate->factory()->NewFixedArray(length, i::TENURED);
CHECK(near_heap_limit_callback_called);
isolate->RemoveNearHeapLimitCallback(NearHeapLimitCallback, 0);
......
......@@ -24,7 +24,7 @@ static constexpr int kBufferSize = 8 * KB;
static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
MacroAssembler masm(isolate, CodeObjectRequired::kYes, buffer->CreateView());
#if V8_TARGET_ARCH_IA32
__ mov(eax, Operand(esp, kPointerSize));
__ mov(eax, Operand(esp, kSystemPointerSize));
for (int i = 0; i < kNumInstr; ++i) {
__ add(eax, Immediate(1));
}
......@@ -71,7 +71,7 @@ static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
static void FloodWithNop(Isolate* isolate, TestingAssemblerBuffer* buffer) {
MacroAssembler masm(isolate, CodeObjectRequired::kYes, buffer->CreateView());
#if V8_TARGET_ARCH_IA32
__ mov(eax, Operand(esp, kPointerSize));
__ mov(eax, Operand(esp, kSystemPointerSize));
#elif V8_TARGET_ARCH_X64
__ movl(rax, arg_reg_1);
#elif V8_TARGET_ARCH_MIPS
......
......@@ -1114,7 +1114,7 @@ TEST(DoScavenge) {
// Construct a double value that looks like a pointer to the new space object
// and store it into the obj.
Address fake_object = temp->ptr() + kPointerSize;
Address fake_object = temp->ptr() + kSystemPointerSize;
double boom_value = bit_cast<double>(fake_object);
FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
......@@ -1261,11 +1261,11 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
CHECK_EQ(expected_tagged, helper.IsTagged(index.offset(), instance_size,
&end_of_region_offset));
CHECK_GT(end_of_region_offset, 0);
CHECK_EQ(end_of_region_offset % kPointerSize, 0);
CHECK_EQ(end_of_region_offset % kTaggedSize, 0);
CHECK(end_of_region_offset <= instance_size);
for (int offset = index.offset(); offset < end_of_region_offset;
offset += kPointerSize) {
offset += kTaggedSize) {
CHECK_EQ(expected_tagged, helper.IsTagged(index.offset()));
}
if (end_of_region_offset < instance_size) {
......@@ -1275,7 +1275,7 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
}
}
for (int offset = 0; offset < JSObject::kHeaderSize; offset += kPointerSize) {
for (int offset = 0; offset < JSObject::kHeaderSize; offset += kTaggedSize) {
// Header queries
CHECK(helper.IsTagged(offset));
int end_of_region_offset;
......@@ -1468,7 +1468,7 @@ static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// |boom_value| to the slot that was earlier recorded by write barrier.
JSObject::MigrateToMap(obj, new_map);
Address fake_object = obj_value->ptr() + kPointerSize;
Address fake_object = obj_value->ptr() + kTaggedSize;
uint64_t boom_value = bit_cast<uint64_t>(fake_object);
FieldIndex double_field_index =
......
......@@ -42,7 +42,7 @@ TEST_F(LoadEliminationTest, LoadElementAndLoadElement) {
Node* effect = graph()->start();
Node* control = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
......@@ -68,7 +68,7 @@ TEST_F(LoadEliminationTest, StoreElementAndLoadElement) {
Node* control = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
Node* value = Parameter(Type::Any(), 2);
ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
......@@ -95,7 +95,7 @@ TEST_F(LoadEliminationTest, StoreElementAndStoreFieldAndLoadElement) {
Node* control = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
Node* value = Parameter(Type::Any(), 2);
ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
......@@ -125,7 +125,7 @@ TEST_F(LoadEliminationTest, LoadFieldAndLoadField) {
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess const access = {kTaggedBase, kPointerSize,
FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
......@@ -152,7 +152,7 @@ TEST_F(LoadEliminationTest, StoreFieldAndLoadField) {
Node* value = Parameter(Type::Any(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess access = {kTaggedBase, kPointerSize,
FieldAccess access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
......@@ -180,16 +180,16 @@ TEST_F(LoadEliminationTest, StoreFieldAndKillFields) {
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess access1 = {kTaggedBase, kPointerSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
FieldAccess access1 = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
// Offset that out of field cache size.
FieldAccess access2 = {kTaggedBase, 2048 * kPointerSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
FieldAccess access2 = {kTaggedBase, 2048 * kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
LoadElimination load_elimination(&editor, jsgraph(), zone());
......@@ -220,7 +220,7 @@ TEST_F(LoadEliminationTest, StoreFieldAndStoreElementAndLoadField) {
Node* index = Parameter(Type::UnsignedSmall(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess access = {kTaggedBase, kPointerSize,
FieldAccess access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
......@@ -253,7 +253,7 @@ TEST_F(LoadEliminationTest, LoadElementOnTrueBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
......@@ -288,7 +288,7 @@ TEST_F(LoadEliminationTest, LoadElementOnFalseBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
......@@ -322,7 +322,7 @@ TEST_F(LoadEliminationTest, LoadFieldOnFalseBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess const access = {kTaggedBase, kPointerSize,
FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
......@@ -358,7 +358,7 @@ TEST_F(LoadEliminationTest, LoadFieldOnTrueBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess const access = {kTaggedBase, kPointerSize,
FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
......@@ -394,7 +394,7 @@ TEST_F(LoadEliminationTest, LoadFieldWithTypeMismatch) {
Node* value = Parameter(Type::Signed32(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess const access = {kTaggedBase, kPointerSize,
FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Unsigned31(), MachineType::AnyTagged(),
kNoWriteBarrier};
......@@ -422,7 +422,7 @@ TEST_F(LoadEliminationTest, LoadElementWithTypeMismatch) {
Node* value = Parameter(Type::Signed32(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
ElementAccess const access = {kTaggedBase, kPointerSize, Type::Unsigned31(),
ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Unsigned31(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
......@@ -446,7 +446,7 @@ TEST_F(LoadEliminationTest, AliasAnalysisForFinishRegion) {
Node* value1 = Parameter(Type::Signed32(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess const access = {kTaggedBase, kPointerSize,
FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Signed32(), MachineType::AnyTagged(),
kNoWriteBarrier};
......
......@@ -499,83 +499,83 @@ Matcher<Node*> IsSpeculativeToNumber(const Matcher<Node*>& value_matcher);
// Helpers
static inline Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
: IsInt32Constant(static_cast<int32_t>(value));
return kSystemPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
: IsInt32Constant(static_cast<int32_t>(value));
}
static inline Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
: IsInt32Add(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
: IsInt32Add(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
: IsInt32Sub(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
: IsInt32Sub(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsIntPtrMul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Mul(lhs_matcher, rhs_matcher)
: IsInt32Mul(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsInt64Mul(lhs_matcher, rhs_matcher)
: IsInt32Mul(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsIntPtrDiv(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Div(lhs_matcher, rhs_matcher)
: IsInt32Div(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsInt64Div(lhs_matcher, rhs_matcher)
: IsInt32Div(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
: IsWord32Shl(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
: IsWord32Shl(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordShr(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Shr(lhs_matcher, rhs_matcher)
: IsWord32Shr(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsWord64Shr(lhs_matcher, rhs_matcher)
: IsWord32Shr(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordSar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
: IsWord32Sar(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
: IsWord32Sar(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordAnd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64And(lhs_matcher, rhs_matcher)
: IsWord32And(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsWord64And(lhs_matcher, rhs_matcher)
: IsWord32And(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
: IsWord32Or(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
: IsWord32Or(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordXor(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Xor(lhs_matcher, rhs_matcher)
: IsWord32Xor(lhs_matcher, rhs_matcher);
return kSystemPointerSize == 8 ? IsWord64Xor(lhs_matcher, rhs_matcher)
: IsWord32Xor(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsChangeInt32ToIntPtr(
const Matcher<Node*>& matcher) {
return kPointerSize == 8 ? IsChangeInt32ToInt64(matcher) : matcher;
return kSystemPointerSize == 8 ? IsChangeInt32ToInt64(matcher) : matcher;
}
static inline Matcher<Node*> IsChangeUint32ToWord(
const Matcher<Node*>& matcher) {
return kPointerSize == 8 ? IsChangeUint32ToUint64(matcher) : matcher;
return kSystemPointerSize == 8 ? IsChangeUint32ToUint64(matcher) : matcher;
}
static inline Matcher<Node*> IsTruncateIntPtrToInt32(
const Matcher<Node*>& matcher) {
return kPointerSize == 8 ? IsTruncateInt64ToInt32(matcher) : matcher;
return kSystemPointerSize == 8 ? IsTruncateInt64ToInt32(matcher) : matcher;
}
} // namespace compiler
......
......@@ -43,7 +43,7 @@ TEST_F(EhFrameWriterTest, Alignment) {
ASSERT_EQ(0, EhFrameConstants::kEhFrameTerminatorSize % 4);
EXPECT_EQ(0, (iterator.GetBufferSize() - EhFrameConstants::kEhFrameHdrSize -
EhFrameConstants::kEhFrameTerminatorSize) %
kPointerSize);
kSystemPointerSize);
}
TEST_F(EhFrameWriterTest, FDEHeader) {
......
......@@ -439,7 +439,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
ast_factory.Internalize(isolate());
Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
CHECK_EQ(the_array->frame_size(),
builder.total_register_count() * kPointerSize);
builder.total_register_count() * kSystemPointerSize);
// Build scorecard of bytecodes encountered in the BytecodeArray.
std::vector<int> scorecard(Bytecodes::ToByte(Bytecode::kLast) + 1);
......@@ -504,7 +504,7 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
int total_registers = locals + temps;
CHECK_EQ(the_array->frame_size(), total_registers * kPointerSize);
CHECK_EQ(the_array->frame_size(), total_registers * kSystemPointerSize);
}
}
}
......
......@@ -33,7 +33,7 @@ namespace interpreter {
#define U8(i) static_cast<uint8_t>(i)
#define REG_OPERAND(i) \
(InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize - (i))
(InterpreterFrameConstants::kRegisterFileFromFp / kSystemPointerSize - (i))
#define R8(i) static_cast<uint8_t>(REG_OPERAND(i))
#define R16(i) U16(REG_OPERAND(i))
#define R32(i) U32(REG_OPERAND(i))
......
......@@ -72,8 +72,9 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
const Matcher<Node*>& value_matcher) {
return kPointerSize == 8 ? IsWord64Xor(value_matcher, c::IsInt64Constant(-1))
: IsWord32Xor(value_matcher, c::IsInt32Constant(-1));
return kSystemPointerSize == 8
? IsWord64Xor(value_matcher, c::IsInt64Constant(-1))
: IsWord32Xor(value_matcher, c::IsInt32Constant(-1));
}
Matcher<Node*>
......@@ -321,7 +322,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
MachineType::Pointer(),
c::IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
c::IsWordShl(target_bytecode_matcher,
c::IsIntPtrConstant(kPointerSizeLog2)));
c::IsIntPtrConstant(kSystemPointerSizeLog2)));
EXPECT_THAT(
tail_call_node,
......@@ -458,7 +459,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
MachineType::AnyTagged(), constant_pool_matcher,
c::IsIntPtrAdd(
c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
c::IsWordShl(index, c::IsIntPtrConstant(kPointerSizeLog2))),
c::IsWordShl(index,
c::IsIntPtrConstant(kSystemPointerSizeLog2))),
LoadSensitivity::kCritical));
}
}
......
......@@ -82,7 +82,6 @@ consts_misc = [
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
{ 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' },
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment