Commit 0ff813c5 authored by Santiago Aboy Solanes, committed by Commit Bot

[cleanup] MachineType 'rep' variables renamed to 'type'

Bug: v8:9183
Change-Id: Idb1910ae30984f548996651e8b2f153531b8cdb0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1605729
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61519}
parent 078cf26a
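
Note: for context, a MachineType in V8 pairs a MachineRepresentation with a MachineSemantic, so "rep" properly names only the representation half. A minimal sketch of the distinction (illustrative, not part of this commit):

// Sketch only: "type" carries both halves; "rep" is just one accessor of it.
MachineType type = MachineType::AnyTagged();
MachineRepresentation rep = type.representation();  // machine-level layout
MachineSemantic sem = type.semantic();              // value interpretation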
@@ -1362,27 +1362,27 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
   }
 }
 
-Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
+Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType type) {
   Node* frame_pointer = LoadParentFramePointer();
-  return Load(rep, frame_pointer, IntPtrConstant(offset));
+  return Load(type, frame_pointer, IntPtrConstant(offset));
 }
 
 Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
-                                          MachineType rep) {
-  return Load(rep, buffer, IntPtrConstant(offset));
+                                          MachineType type) {
+  return Load(type, buffer, IntPtrConstant(offset));
 }
 
 Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
-                                         int offset, MachineType rep) {
+                                         int offset, MachineType type) {
   CSA_ASSERT(this, IsStrong(object));
-  return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
+  return Load(type, object, IntPtrConstant(offset - kHeapObjectTag));
 }
 
 Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
                                          SloppyTNode<IntPtrT> offset,
-                                         MachineType rep) {
+                                         MachineType type) {
   CSA_ASSERT(this, IsStrong(object));
-  return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+  return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
 }
 
 TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
......
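
Note: LoadObjectField subtracts kHeapObjectTag because tagged HeapObject pointers point one byte past the real object start. A hedged call-site sketch (the field offset is illustrative, not taken from this commit):

// Illustrative only: load the elements pointer of a JSObject.
TNode<Object> elements = UncheckedCast<Object>(
    LoadObjectField(object, JSObject::kElementsOffset,
                    MachineType::AnyTagged()));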
@@ -804,11 +804,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Load value from current parent frame by given offset in bytes.
   Node* LoadFromParentFrame(int offset,
-                            MachineType rep = MachineType::AnyTagged());
+                            MachineType type = MachineType::AnyTagged());
 
   // Load an object pointer from a buffer that isn't in the heap.
   Node* LoadBufferObject(Node* buffer, int offset,
-                         MachineType rep = MachineType::AnyTagged());
+                         MachineType type = MachineType::AnyTagged());
   TNode<RawPtrT> LoadBufferPointer(TNode<RawPtrT> buffer, int offset) {
     return UncheckedCast<RawPtrT>(
         LoadBufferObject(buffer, offset, MachineType::Pointer()));
@@ -818,7 +818,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   }
   // Load a field from an object on the heap.
   Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
-                        MachineType rep);
+                        MachineType type);
   template <class T, typename std::enable_if<
                          std::is_convertible<TNode<T>, TNode<Object>>::value,
                          int>::type = 0>
@@ -837,7 +837,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
         LoadObjectField(object, offset, MachineType::AnyTagged()));
   }
   Node* LoadObjectField(SloppyTNode<HeapObject> object,
-                        SloppyTNode<IntPtrT> offset, MachineType rep);
+                        SloppyTNode<IntPtrT> offset, MachineType type);
   TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object,
                                 SloppyTNode<IntPtrT> offset) {
     return UncheckedCast<Object>(
......
@@ -941,14 +941,14 @@ Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
 CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
 #undef DEFINE_CODE_ASSEMBLER_UNARY_OP
 
-Node* CodeAssembler::Load(MachineType rep, Node* base,
+Node* CodeAssembler::Load(MachineType type, Node* base,
                           LoadSensitivity needs_poisoning) {
-  return raw_assembler()->Load(rep, base, needs_poisoning);
+  return raw_assembler()->Load(type, base, needs_poisoning);
 }
 
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset,
+Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
                           LoadSensitivity needs_poisoning) {
-  return raw_assembler()->Load(rep, base, offset, needs_poisoning);
+  return raw_assembler()->Load(type, base, offset, needs_poisoning);
 }
 
 Node* CodeAssembler::LoadFullTagged(Node* base,
@@ -963,8 +963,8 @@ Node* CodeAssembler::LoadFullTagged(Node* base, Node* offset,
       Load(MachineType::Pointer(), base, offset, needs_poisoning));
 }
 
-Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
-  return raw_assembler()->AtomicLoad(rep, base, offset);
+Node* CodeAssembler::AtomicLoad(MachineType type, Node* base, Node* offset) {
+  return raw_assembler()->AtomicLoad(type, base, offset);
 }
 
 TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
......
@@ -936,17 +936,17 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   TNode<WordT> WordPoisonOnSpeculation(SloppyTNode<WordT> value);
 
   // Load raw memory location.
-  Node* Load(MachineType rep, Node* base,
+  Node* Load(MachineType type, Node* base,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
   template <class Type>
-  TNode<Type> Load(MachineType rep, TNode<RawPtr<Type>> base) {
+  TNode<Type> Load(MachineType type, TNode<RawPtr<Type>> base) {
     DCHECK(
-        IsSubtype(rep.representation(), MachineRepresentationOf<Type>::value));
-    return UncheckedCast<Type>(Load(rep, static_cast<Node*>(base)));
+        IsSubtype(type.representation(), MachineRepresentationOf<Type>::value));
+    return UncheckedCast<Type>(Load(type, static_cast<Node*>(base)));
   }
-  Node* Load(MachineType rep, Node* base, Node* offset,
+  Node* Load(MachineType type, Node* base, Node* offset,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
-  Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
+  Node* AtomicLoad(MachineType type, Node* base, Node* offset);
   // Load uncompressed tagged value from (most likely off JS heap) memory
   // location.
   Node* LoadFullTagged(
......
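
Note: the templated Load overload deduces its result type from the RawPtr parameter and DCHECKs that the requested MachineType's representation is a subtype of it. A hedged usage sketch (assuming a CodeAssembler "m" and a TNode<RawPtr<Int32T>> "base" in scope):

// Illustrative only: the template argument is deduced from "base".
TNode<Int32T> value = m.Load(MachineType::Int32(), base);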
@@ -645,9 +645,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   // atomic-or [base + index], value
   const Operator* Word32AtomicOr(MachineType type);
   // atomic-xor [base + index], value
-  const Operator* Word32AtomicXor(MachineType rep);
+  const Operator* Word32AtomicXor(MachineType type);
   // atomic-add [base + index], value
-  const Operator* Word64AtomicAdd(MachineType rep);
+  const Operator* Word64AtomicAdd(MachineType type);
   // atomic-sub [base + index], value
   const Operator* Word64AtomicSub(MachineType type);
   // atomic-and [base + index], value
@@ -655,7 +655,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   // atomic-or [base + index], value
   const Operator* Word64AtomicOr(MachineType type);
   // atomic-xor [base + index], value
-  const Operator* Word64AtomicXor(MachineType rep);
+  const Operator* Word64AtomicXor(MachineType type);
   // atomic-pair-load [base + index]
   const Operator* Word32AtomicPairLoad();
   // atomic-pair-sub [base + index], value_high, value-low
......
@@ -127,37 +127,37 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   }
 
   // Memory Operations.
-  Node* Load(MachineType rep, Node* base,
+  Node* Load(MachineType type, Node* base,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
-    return Load(rep, base, IntPtrConstant(0), needs_poisoning);
+    return Load(type, base, IntPtrConstant(0), needs_poisoning);
   }
-  Node* Load(MachineType rep, Node* base, Node* index,
+  Node* Load(MachineType type, Node* base, Node* index,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
     // change_op is used below to change to the correct Tagged representation
     const Operator* change_op = nullptr;
     if (COMPRESS_POINTERS_BOOL) {
-      switch (rep.representation()) {
+      switch (type.representation()) {
         case MachineRepresentation::kTaggedPointer:
-          rep = MachineType::CompressedPointer();
+          type = MachineType::CompressedPointer();
          change_op = machine()->ChangeCompressedPointerToTaggedPointer();
           break;
         case MachineRepresentation::kTaggedSigned:
-          rep = MachineType::CompressedSigned();
+          type = MachineType::CompressedSigned();
           change_op = machine()->ChangeCompressedSignedToTaggedSigned();
           break;
         case MachineRepresentation::kTagged:
-          rep = MachineType::AnyCompressed();
+          type = MachineType::AnyCompressed();
           change_op = machine()->ChangeCompressedToTagged();
           break;
         default:
           break;
       }
     }
-    const Operator* op = machine()->Load(rep);
+    const Operator* op = machine()->Load(type);
     CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level_);
     if (needs_poisoning == LoadSensitivity::kCritical &&
         poisoning_level_ == PoisoningMitigationLevel::kPoisonCriticalOnly) {
-      op = machine()->PoisonedLoad(rep);
+      op = machine()->PoisonedLoad(type);
     }
     Node* load = AddNode(op, base, index);
@@ -286,21 +286,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
     DCHECK_NULL(value_high);
     return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
   }
 
 #define ATOMIC_FUNCTION(name) \
-  Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value, \
+  Node* Atomic##name(MachineType type, Node* base, Node* index, Node* value, \
                      Node* value_high) { \
-    if (rep.representation() == MachineRepresentation::kWord64) { \
+    if (type.representation() == MachineRepresentation::kWord64) { \
       if (machine()->Is64()) { \
         DCHECK_NULL(value_high); \
-        return AddNode(machine()->Word64Atomic##name(rep), base, index, \
+        return AddNode(machine()->Word64Atomic##name(type), base, index, \
                        value); \
       } else { \
         return AddNode(machine()->Word32AtomicPair##name(), base, index, \
                        VALUE_HALVES); \
       } \
     } \
     DCHECK_NULL(value_high); \
-    return AddNode(machine()->Word32Atomic##name(rep), base, index, value); \
+    return AddNode(machine()->Word32Atomic##name(type), base, index, value); \
   }
 
   ATOMIC_FUNCTION(Exchange)
   ATOMIC_FUNCTION(Add)
@@ -311,15 +311,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
 #undef ATOMIC_FUNCTION
 #undef VALUE_HALVES
 
-  Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
+  Node* AtomicCompareExchange(MachineType type, Node* base, Node* index,
                               Node* old_value, Node* old_value_high,
                               Node* new_value, Node* new_value_high) {
-    if (rep.representation() == MachineRepresentation::kWord64) {
+    if (type.representation() == MachineRepresentation::kWord64) {
       if (machine()->Is64()) {
         DCHECK_NULL(old_value_high);
         DCHECK_NULL(new_value_high);
-        return AddNode(machine()->Word64AtomicCompareExchange(rep), base, index,
-                       old_value, new_value);
+        return AddNode(machine()->Word64AtomicCompareExchange(type), base,
+                       index, old_value, new_value);
       } else {
         return AddNode(machine()->Word32AtomicPairCompareExchange(), base,
                        index, old_value, old_value_high, new_value,
@@ -328,7 +328,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
     }
     DCHECK_NULL(old_value_high);
     DCHECK_NULL(new_value_high);
-    return AddNode(machine()->Word32AtomicCompareExchange(rep), base, index,
+    return AddNode(machine()->Word32AtomicCompareExchange(type), base, index,
                    old_value, new_value);
   }
@@ -889,15 +889,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Node* Parameter(size_t index);
 
   // Pointer utilities.
-  Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
-    return Load(rep, PointerConstant(address), Int32Constant(offset));
+  Node* LoadFromPointer(void* address, MachineType type, int32_t offset = 0) {
+    return Load(type, PointerConstant(address), Int32Constant(offset));
   }
   Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
     return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
   }
-  Node* UnalignedLoadFromPointer(void* address, MachineType rep,
+  Node* UnalignedLoadFromPointer(void* address, MachineType type,
                                  int32_t offset = 0) {
-    return UnalignedLoad(rep, PointerConstant(address), Int32Constant(offset));
+    return UnalignedLoad(type, PointerConstant(address), Int32Constant(offset));
   }
   Node* UnalignedStoreToPointer(void* address, MachineRepresentation rep,
                                 Node* node) {
......
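
Note: VALUE_HALVES (undefined just above) presumably expands to the value/value_high pair used by the 32-bit fallback. A hedged sketch of what ATOMIC_FUNCTION(Add) expands to after this rename, with the macro backslashes dropped for readability:

// Sketch only, assuming VALUE_HALVES == value, value_high.
Node* AtomicAdd(MachineType type, Node* base, Node* index, Node* value,
                Node* value_high) {
  if (type.representation() == MachineRepresentation::kWord64) {
    if (machine()->Is64()) {
      DCHECK_NULL(value_high);
      return AddNode(machine()->Word64AtomicAdd(type), base, index, value);
    } else {
      // 32-bit platforms lower 64-bit atomics to pair operations.
      return AddNode(machine()->Word32AtomicPairAdd(), base, index, value,
                     value_high);
    }
  }
  DCHECK_NULL(value_high);
  return AddNode(machine()->Word32AtomicAdd(type), base, index, value);
}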
@@ -216,11 +216,11 @@ template <typename CType, bool use_result_buffer>
 class BinopTester {
  public:
   explicit BinopTester(RawMachineAssemblerTester<int32_t>* tester,
-                       MachineType rep)
+                       MachineType type)
       : T(tester),
-        param0(T->LoadFromPointer(&p0, rep)),
-        param1(T->LoadFromPointer(&p1, rep)),
-        rep(rep),
+        param0(T->LoadFromPointer(&p0, type)),
+        param1(T->LoadFromPointer(&p1, type)),
+        type(type),
         p0(static_cast<CType>(0)),
         p1(static_cast<CType>(0)),
         result(static_cast<CType>(0)) {}
@@ -242,7 +242,7 @@ class BinopTester {
   void AddReturn(Node* val) {
     if (use_result_buffer) {
-      T->Store(rep.representation(), T->PointerConstant(&result),
+      T->Store(type.representation(), T->PointerConstant(&result),
                T->Int32Constant(0), val, kNoWriteBarrier);
       T->Return(T->Int32Constant(CHECK_VALUE));
     } else {
@@ -262,7 +262,7 @@ class BinopTester {
   }
 
  protected:
-  MachineType rep;
+  MachineType type;
   CType p0;
   CType p1;
   CType result;
......
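
Note: concrete testers pass a fixed MachineType to the renamed constructor parameter. A hedged sketch of such a subclass (patterned on this header; the exact base-template arguments here are assumptions):

// Illustrative only: a return-register-based 32-bit integer tester.
class Int32BinopTester : public BinopTester<int32_t, false> {
 public:
  explicit Int32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
      : BinopTester<int32_t, false>(tester, MachineType::Int32()) {}
};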
@@ -41,8 +41,8 @@ Node* SmiFromInt32(CodeAssembler& m, Node* value) {
 }
 
 Node* LoadObjectField(CodeAssembler& m, Node* object, int offset,
-                      MachineType rep = MachineType::AnyTagged()) {
-  return m.Load(rep, object, m.IntPtrConstant(offset - kHeapObjectTag));
+                      MachineType type = MachineType::AnyTagged()) {
+  return m.Load(type, object, m.IntPtrConstant(offset - kHeapObjectTag));
 }
 
 Node* LoadMap(CodeAssembler& m, Node* object) {
......
@@ -218,9 +218,9 @@ void CheckEq<Smi>(Smi in_value, Smi out_value) {
 // Initializes the buffer with some raw data respecting requested representation
 // of the values.
 template <typename CType>
-void InitBuffer(CType* buffer, size_t length, MachineType rep) {
+void InitBuffer(CType* buffer, size_t length, MachineType type) {
   const size_t kBufferSize = sizeof(CType) * length;
-  if (!rep.IsTagged()) {
+  if (!type.IsTagged()) {
     byte* raw = reinterpret_cast<byte*>(buffer);
     for (size_t i = 0; i < kBufferSize; i++) {
       raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
@@ -232,13 +232,13 @@ void InitBuffer(CType* buffer, size_t length, MachineType rep) {
   // pointer decompression that may be happenning during load.
   Isolate* isolate = CcTest::InitIsolateOnce();
   Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
-  if (rep.IsTaggedSigned()) {
+  if (type.IsTaggedSigned()) {
     for (size_t i = 0; i < length; i++) {
       smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
     }
   } else {
     memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
-    if (!rep.IsTaggedPointer()) {
+    if (!type.IsTaggedPointer()) {
       // Also add some Smis if we are checking AnyTagged case.
       for (size_t i = 0; i < length / 2; i++) {
         smi_view[i] =
@@ -249,11 +249,11 @@ void InitBuffer(CType* buffer, size_t length, MachineType rep) {
 }
 
 template <typename CType>
-void RunLoadImmIndex(MachineType rep, TestAlignment t) {
+void RunLoadImmIndex(MachineType type, TestAlignment t) {
   const int kNumElems = 16;
   CType buffer[kNumElems];
 
-  InitBuffer(buffer, kNumElems, rep);
+  InitBuffer(buffer, kNumElems, type);
 
   // Test with various large and small offsets.
   for (int offset = -1; offset <= 200000; offset *= -5) {
@@ -261,7 +261,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
     BufferedRawMachineAssemblerTester<CType> m;
     void* base_pointer = &buffer[0] - offset;
 #ifdef V8_COMPRESS_POINTERS
-    if (rep.IsTagged()) {
+    if (type.IsTagged()) {
       // When pointer compression is enabled then we need to access only
       // the lower 32-bit of the tagged value while the buffer contains
       // full 64-bit values.
@@ -271,9 +271,9 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
     Node* base = m.PointerConstant(base_pointer);
     Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
     if (t == TestAlignment::kAligned) {
-      m.Return(m.Load(rep, base, index));
+      m.Return(m.Load(type, base, index));
     } else if (t == TestAlignment::kUnaligned) {
-      m.Return(m.UnalignedLoad(rep, base, index));
+      m.Return(m.UnalignedLoad(type, base, index));
     } else {
       UNREACHABLE();
     }
@@ -284,7 +284,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
 }
 
 template <typename CType>
-void RunLoadStore(MachineType rep, TestAlignment t) {
+void RunLoadStore(MachineType type, TestAlignment t) {
   const int kNumElems = 16;
   CType in_buffer[kNumElems];
   CType out_buffer[kNumElems];
@@ -293,7 +293,7 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
   STATIC_ASSERT(sizeof(CType) <= sizeof(zap_data));
   MemCopy(&zap_value, &zap_data, sizeof(CType));
 
-  InitBuffer(in_buffer, kNumElems, rep);
+  InitBuffer(in_buffer, kNumElems, type);
 
   for (int32_t x = 0; x < kNumElems; x++) {
     int32_t y = kNumElems - x - 1;
@@ -305,11 +305,12 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
     Node* out_base = m.PointerConstant(out_buffer);
     Node* out_index = m.IntPtrConstant(y * sizeof(CType));
     if (t == TestAlignment::kAligned) {
-      Node* load = m.Load(rep, in_base, in_index);
-      m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
+      Node* load = m.Load(type, in_base, in_index);
+      m.Store(type.representation(), out_base, out_index, load,
+              kNoWriteBarrier);
     } else if (t == TestAlignment::kUnaligned) {
-      Node* load = m.UnalignedLoad(rep, in_base, in_index);
-      m.UnalignedStore(rep.representation(), out_base, out_index, load);
+      Node* load = m.UnalignedLoad(type, in_base, in_index);
+      m.UnalignedStore(type.representation(), out_base, out_index, load);
     }
 
     m.Return(m.Int32Constant(OK));
@@ -328,12 +329,12 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
 }
 
 template <typename CType>
-void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
+void RunUnalignedLoadStoreUnalignedAccess(MachineType type) {
   CType in, out;
   byte in_buffer[2 * sizeof(CType)];
   byte out_buffer[2 * sizeof(CType)];
 
-  InitBuffer(&in, 1, rep);
+  InitBuffer(&in, 1, type);
 
   for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
     // Direct write to &in_buffer[x] may cause unaligned access in C++ code so
@@ -346,11 +347,11 @@ void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
     Node* in_base = m.PointerConstant(in_buffer);
     Node* in_index = m.IntPtrConstant(x);
-    Node* load = m.UnalignedLoad(rep, in_base, in_index);
+    Node* load = m.UnalignedLoad(type, in_base, in_index);
 
     Node* out_base = m.PointerConstant(out_buffer);
     Node* out_index = m.IntPtrConstant(y);
-    m.UnalignedStore(rep.representation(), out_base, out_index, load);
+    m.UnalignedStore(type.representation(), out_base, out_index, load);
 
     m.Return(m.Int32Constant(OK));
......
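
Note: a hedged example of how these helpers are typically driven from cctest entry points (the test name and type choice here are illustrative, not part of this diff):

TEST(RunLoadStoreRenamedParam) {
  // Instantiate the helper for one representative machine type.
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
}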