Commit b8e8b0de authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Fix incorrectly used machine types

in TurboFan, CSA, Wasm, and compiler tests. Tagged value decompression
logic will depend on the machine type of the value being loaded, so it
must be correct.
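
To make the dependency concrete, a minimal sketch of what a
machine-type-aware load has to do (illustrative helper only, not V8's
actual decompression code):

    #include <cstdint>
    #include <cstring>

    // Under pointer compression a tagged slot keeps only the lower 32 bits
    // of the value, so the load must know whether the slot is tagged (read
    // 32 bits, then decompress) or untagged (read the full word). Passing
    // the wrong machine type reads the wrong bits.
    uint64_t LoadSlot(const uint8_t* base, int offset, bool is_tagged,
                      uint64_t heap_base) {
      if (is_tagged) {
        uint32_t compressed;
        std::memcpy(&compressed, base + offset, sizeof(compressed));
        return heap_base + compressed;  // decompress to a full pointer
      }
      uint64_t raw;
      std::memcpy(&raw, base + offset, sizeof(raw));
      return raw;
    }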

Bug: v8:7703
Change-Id: Ia9e7cc1e273e5a458d9de8aaa4adb0c970413b8b
Reviewed-on: https://chromium-review.googlesource.com/c/1319573
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57280}
parent 1444bebe
@@ -524,8 +524,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
   {
     // Copy over in-object properties.
     Label continue_with_write_barrier(this), done_init(this);
-    VARIABLE(offset, MachineType::PointerRepresentation(),
-             IntPtrConstant(JSObject::kHeaderSize));
+    TVARIABLE(IntPtrT, offset, IntPtrConstant(JSObject::kHeaderSize));
     // Mutable heap numbers only occur on 32-bit platforms.
     bool may_use_mutable_heap_numbers =
         FLAG_track_double_fields && !FLAG_unbox_double_fields;
@@ -535,16 +534,21 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
     Branch(WordEqual(offset.value(), instance_size), &done_init,
            &continue_fast);
     BIND(&continue_fast);
-    Node* field = LoadObjectField(boilerplate, offset.value());
     if (may_use_mutable_heap_numbers) {
+      TNode<Object> field = LoadObjectField(boilerplate, offset.value());
       Label store_field(this);
       GotoIf(TaggedIsSmi(field), &store_field);
-      GotoIf(IsMutableHeapNumber(field), &continue_with_write_barrier);
+      GotoIf(IsMutableHeapNumber(CAST(field)), &continue_with_write_barrier);
       Goto(&store_field);
       BIND(&store_field);
+      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
+    } else {
+      // Copy fields as raw data.
+      TNode<IntPtrT> field =
+          LoadObjectField<IntPtrT>(boilerplate, offset.value());
+      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
     }
-    StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
-    offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
+    offset = IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize));
     Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
            &done_init);
   }
...
@@ -864,6 +864,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
     return UncheckedCast<Object>(
         LoadObjectField(object, offset, MachineType::AnyTagged()));
   }
+  template <class T, typename std::enable_if<
+                         std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
+                         int>::type = 0>
+  TNode<T> LoadObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset) {
+    return UncheckedCast<T>(
+        LoadObjectField(object, offset, MachineTypeOf<T>::value));
+  }
   // Load a SMI field and untag it.
   TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
                                          int offset);
@@ -1231,6 +1238,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
   Node* StoreObjectFieldNoWriteBarrier(
       Node* object, Node* offset, Node* value,
       MachineRepresentation rep = MachineRepresentation::kTagged);
+  template <class T = Object>
+  TNode<T> StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
+                                          TNode<IntPtrT> offset,
+                                          TNode<T> value) {
+    return UncheckedCast<T>(StoreObjectFieldNoWriteBarrier(
+        object, offset, value, MachineRepresentationOf<T>::value));
+  }
   // Store the Map of an HeapObject.
   Node* StoreMap(Node* object, Node* map);
   Node* StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
...
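
The new LoadObjectField overload above derives the machine type from its
template argument, and the std::enable_if guard limits it to untagged
result types, so tagged loads still go through the explicit MachineType
path. A call-site sketch, taken from the builtins hunk above:

    // MachineTypeOf<IntPtrT>::value is an untagged word type, so this load
    // copies the field as raw data with no decompression applied.
    TNode<IntPtrT> field =
        LoadObjectField<IntPtrT>(boilerplate, offset.value());
    StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);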
@@ -2947,7 +2947,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
   Node* frame = __ LoadFramePointer();
   Node* parent_frame =
-      __ Load(MachineType::AnyTagged(), frame,
+      __ Load(MachineType::Pointer(), frame,
               __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
 
   Node* parent_frame_type = __ Load(
       MachineType::AnyTagged(), parent_frame,
...
@@ -81,22 +81,33 @@ MachineType assert_size(int expected_size, MachineType type) {
 #define WASM_INSTANCE_OBJECT_OFFSET(name) \
   wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
 
-#define LOAD_INSTANCE_FIELD(name, type)                                      \
-  SetEffect(graph()->NewNode(                                                \
-      mcgraph()->machine()->Load(                                            \
-          assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type)),               \
-      instance_node_.get(),                                                  \
-      mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), Effect(), \
-      Control()))
+#define LOAD_RAW(base_pointer, byte_offset, type)                             \
+  SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), base_pointer,  \
+                             mcgraph()->Int32Constant(byte_offset), Effect(), \
+                             Control()))
+
+#define LOAD_INSTANCE_FIELD(name, type)                             \
+  LOAD_RAW(instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name), \
+           assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type))
 
-#define LOAD_TAGGED_POINTER(base_pointer, byte_offset)                        \
-  SetEffect(graph()->NewNode(                                                 \
-      mcgraph()->machine()->Load(MachineType::TaggedPointer()), base_pointer, \
-      mcgraph()->Int32Constant(byte_offset), Effect(), Control()))
+#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
+  LOAD_RAW(base_pointer, byte_offset, MachineType::TaggedPointer())
+
+#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
+  LOAD_RAW(base_pointer, byte_offset, MachineType::AnyTagged())
 
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
-  LOAD_TAGGED_POINTER(                           \
-      array_node, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type) \
+  LOAD_RAW(array_node,                                 \
+           wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), type)
+
+#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
+  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
+
+#define LOAD_FIXED_ARRAY_SLOT_PTR(array_node, index) \
+  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedPointer())
+
+#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
+  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
 
 // This can be used to store tagged Smi values only.
 #define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
@@ -2179,11 +2190,11 @@ Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
                                                        uint32_t* index) {
   MachineOperatorBuilder* machine = mcgraph()->machine();
   Node* upper =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT(values_array, *index));
+      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
   (*index)++;
   upper = graph()->NewNode(machine->Word32Shl(), upper, Int32Constant(16));
   Node* lower =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT(values_array, *index));
+      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
   (*index)++;
   Node* value = graph()->NewNode(machine->Word32Or(), upper, lower);
   return value;
@@ -2223,7 +2234,7 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
 
 Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
   Node* exceptions_table =
       LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
-  Node* tag = LOAD_FIXED_ARRAY_SLOT(exceptions_table, exception_index);
+  Node* tag = LOAD_FIXED_ARRAY_SLOT_PTR(exceptions_table, exception_index);
   return tag;
 }
@@ -2259,7 +2270,7 @@ Node** WasmGraphBuilder::GetExceptionValues(
       break;
     }
     case wasm::kWasmAnyRef:
-      value = LOAD_FIXED_ARRAY_SLOT(values_array, index);
+      value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
       ++index;
       break;
     default:
@@ -2659,7 +2670,8 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
   // Load the imported function refs array from the instance.
   Node* imported_function_refs =
       LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
-  Node* ref_node = LOAD_FIXED_ARRAY_SLOT(imported_function_refs, func_index);
+  Node* ref_node =
+      LOAD_FIXED_ARRAY_SLOT_PTR(imported_function_refs, func_index);
 
   // Load the target from the imported_targets array at a known offset.
   Node* imported_targets =
@@ -4615,7 +4627,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       args[pos++] = callable_node;  // target callable.
       // Receiver.
       if (sloppy_receiver) {
-        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
             native_context, Context::GLOBAL_PROXY_INDEX);
         args[pos++] = global_proxy;
       } else {
@@ -4678,7 +4690,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
 
       // Receiver.
       if (sloppy_receiver) {
-        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
            native_context, Context::GLOBAL_PROXY_INDEX);
         args[pos++] = global_proxy;
       } else {
@@ -5539,9 +5551,14 @@ AssemblerOptions WasmAssemblerOptions() {
 #undef FATAL_UNSUPPORTED_OPCODE
 #undef WASM_INSTANCE_OBJECT_SIZE
 #undef WASM_INSTANCE_OBJECT_OFFSET
+#undef LOAD_RAW
 #undef LOAD_INSTANCE_FIELD
 #undef LOAD_TAGGED_POINTER
+#undef LOAD_TAGGED_ANY
 #undef LOAD_FIXED_ARRAY_SLOT
+#undef LOAD_FIXED_ARRAY_SLOT_SMI
+#undef LOAD_FIXED_ARRAY_SLOT_PTR
+#undef LOAD_FIXED_ARRAY_SLOT_ANY
 #undef STORE_FIXED_ARRAY_SLOT_SMI
 #undef STORE_FIXED_ARRAY_SLOT_ANY
...
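
Each specialized macro above pins the machine type at the call site
instead of defaulting every slot load to a tagged pointer. Expanding
LOAD_FIXED_ARRAY_SLOT_SMI through LOAD_FIXED_ARRAY_SLOT and LOAD_RAW
yields roughly:

    // LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index) expands to:
    SetEffect(graph()->NewNode(
        mcgraph()->machine()->Load(MachineType::TaggedSigned()), values_array,
        mcgraph()->Int32Constant(
            wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index)),
        Effect(), Control()))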
@@ -2336,7 +2336,7 @@ void AccessorAssembler::TryProbeStubCacheTable(
   DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
                               stub_cache->key_reference(table).address());
   TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
-      Load(MachineType::TaggedPointer(), key_base,
+      Load(MachineType::AnyTagged(), key_base,
           IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize))));
 
   // We found the handler.
...
@@ -94,6 +94,12 @@ class MachineType {
            representation() == MachineRepresentation::kTaggedSigned ||
            representation() == MachineRepresentation::kTagged;
   }
+  constexpr bool IsTaggedSigned() const {
+    return representation() == MachineRepresentation::kTaggedSigned;
+  }
+  constexpr bool IsTaggedPointer() const {
+    return representation() == MachineRepresentation::kTaggedPointer;
+  }
   constexpr static MachineRepresentation PointerRepresentation() {
     return (kPointerSize == 4) ? MachineRepresentation::kWord32
                                : MachineRepresentation::kWord64;
...
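
The new predicates refine MachineType::IsTagged() so callers can branch
on the exact tagged representation, as the compiler tests below do. A
minimal usage sketch:

    MachineType rep = MachineType::TaggedSigned();
    CHECK(rep.IsTagged());        // kTaggedSigned is a tagged representation.
    CHECK(rep.IsTaggedSigned());
    CHECK(!rep.IsTaggedPointer());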
@@ -25,6 +25,14 @@ enum TestAlignment {
   kUnaligned,
 };
 
+#if V8_TARGET_LITTLE_ENDIAN
+#define LSB(addr, bytes) addr
+#elif V8_TARGET_BIG_ENDIAN
+#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
+#else
+#error "Unknown Architecture"
+#endif
+
 // This is a America!
 #define A_BILLION 1000000000ULL
 #define A_GIG (1024ULL * 1024ULL * 1024ULL)
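
The LSB macro above compensates for byte order: on big-endian targets the
least significant bytes of a word sit at the higher address, which matters
when a test reads only the compressed low 32 bits of a 64-bit tagged slot.
A worked example (illustrative value):

    uint64_t slot = 0x0000000012345678;  // full 64-bit tagged word
    uint32_t* low =
        reinterpret_cast<uint32_t*>(LSB(&slot, sizeof(uint32_t)));
    // Little-endian: LSB(&slot, 4) == &slot; big-endian: four bytes past
    // &slot. Either way, *low reads 0x12345678, the compressed value.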
@@ -178,22 +186,61 @@ TEST(RunUnalignedLoadStoreFloat64Offset) {
 }
 
 namespace {
-template <typename Type>
-void RunLoadImmIndex(MachineType rep, TestAlignment t) {
-  const int kNumElems = 3;
-  Type buffer[kNumElems];
 
-  // initialize the buffer with some raw data.
-  byte* raw = reinterpret_cast<byte*>(buffer);
-  for (size_t i = 0; i < sizeof(buffer); i++) {
-    raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+// Initializes the buffer with some raw data respecting requested representation
+// of the values.
+template <typename CType>
+void InitBuffer(CType* buffer, size_t length, MachineType rep) {
+  const size_t kBufferSize = sizeof(CType) * length;
+  if (!rep.IsTagged()) {
+    byte* raw = reinterpret_cast<byte*>(buffer);
+    for (size_t i = 0; i < kBufferSize; i++) {
+      raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
+    }
+    return;
+  }
+
+  // Tagged field loads require values to be properly tagged because of
+  // pointer decompression that may be happenning during load.
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
+  if (rep.IsTaggedSigned()) {
+    for (size_t i = 0; i < length; i++) {
+      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
+    }
+  } else {
+    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
+    if (!rep.IsTaggedPointer()) {
+      // Also add some Smis if we are checking AnyTagged case.
+      for (size_t i = 0; i < length / 2; i++) {
+        smi_view[i] =
+            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
+      }
+    }
   }
+}
+
+template <typename CType>
+void RunLoadImmIndex(MachineType rep, TestAlignment t) {
+  const int kNumElems = 16;
+  CType buffer[kNumElems];
+  InitBuffer(buffer, kNumElems, rep);
 
   // Test with various large and small offsets.
   for (int offset = -1; offset <= 200000; offset *= -5) {
     for (int i = 0; i < kNumElems; i++) {
-      BufferedRawMachineAssemblerTester<Type> m;
-      Node* base = m.PointerConstant(buffer - offset);
+      BufferedRawMachineAssemblerTester<CType> m;
+      void* base_pointer = &buffer[0] - offset;
+#if V8_POINTER_COMPRESSION
+      if (rep.IsTagged()) {
+        // When pointer compression is enabled then we need to access only
+        // the lower 32-bit of the tagged value while the buffer contains
+        // full 64-bit values.
+        base_pointer = LSB(base_pointer, kPointerSize / 2);
+      }
+#endif
+      Node* base = m.PointerConstant(base_pointer);
       Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
       if (t == TestAlignment::kAligned) {
         m.Return(m.Load(rep, base, index));
@@ -203,82 +250,76 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
         UNREACHABLE();
       }
 
-      volatile Type expected = buffer[i];
-      volatile Type actual = m.Call();
-      CHECK_EQ(expected, actual);
+      CHECK_EQ(buffer[i], m.Call());
     }
   }
 }
 
 template <typename CType>
 void RunLoadStore(MachineType rep, TestAlignment t) {
-  const int kNumElems = 4;
-  CType buffer[kNumElems];
+  const int kNumElems = 16;
+  CType in_buffer[kNumElems];
+  CType out_buffer[kNumElems];
+  InitBuffer(in_buffer, kNumElems, rep);
 
   for (int32_t x = 0; x < kNumElems; x++) {
     int32_t y = kNumElems - x - 1;
-    // initialize the buffer with raw data.
-    byte* raw = reinterpret_cast<byte*>(buffer);
-    for (size_t i = 0; i < sizeof(buffer); i++) {
-      raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
-    }
 
     RawMachineAssemblerTester<int32_t> m;
     int32_t OK = 0x29000 + x;
-    Node* base = m.PointerConstant(buffer);
-    Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
-    Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
+    Node* in_base = m.PointerConstant(in_buffer);
+    Node* in_index = m.IntPtrConstant(x * sizeof(CType));
+    Node* out_base = m.PointerConstant(out_buffer);
+    Node* out_index = m.IntPtrConstant(y * sizeof(CType));
 
     if (t == TestAlignment::kAligned) {
-      Node* load = m.Load(rep, base, index0);
-      m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
+      Node* load = m.Load(rep, in_base, in_index);
+      m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
     } else if (t == TestAlignment::kUnaligned) {
-      Node* load = m.UnalignedLoad(rep, base, index0);
-      m.UnalignedStore(rep.representation(), base, index1, load);
+      Node* load = m.UnalignedLoad(rep, in_base, in_index);
+      m.UnalignedStore(rep.representation(), out_base, out_index, load);
     }
 
     m.Return(m.Int32Constant(OK));
 
-    CHECK(buffer[x] != buffer[y]);
+    memset(out_buffer, 0, sizeof(out_buffer));
+    CHECK_NE(in_buffer[x], out_buffer[y]);
     CHECK_EQ(OK, m.Call());
-    CHECK(buffer[x] == buffer[y]);
+    CHECK_EQ(in_buffer[x], out_buffer[y]);
+    for (int32_t z = 0; z < kNumElems; z++) {
+      if (z != y) CHECK_EQ(CType{0}, out_buffer[z]);
+    }
   }
 }
 
 template <typename CType>
 void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
-  CType in, out;
-  CType in_buffer[2];
-  CType out_buffer[2];
-  byte* raw;
+  byte in_buffer[2 * sizeof(CType)];
+  byte out_buffer[2 * sizeof(CType)];
 
   for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
-    int y = sizeof(CType) - x;
-
-    raw = reinterpret_cast<byte*>(&in);
-    for (size_t i = 0; i < sizeof(CType); i++) {
-      raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
-    }
-
-    raw = reinterpret_cast<byte*>(in_buffer);
-    MemCopy(raw + x, &in, sizeof(CType));
-
-    RawMachineAssemblerTester<int32_t> m;
-    int32_t OK = 0x29000 + x;
-
-    Node* base0 = m.PointerConstant(in_buffer);
-    Node* base1 = m.PointerConstant(out_buffer);
-    Node* index0 = m.IntPtrConstant(x);
-    Node* index1 = m.IntPtrConstant(y);
-    Node* load = m.UnalignedLoad(rep, base0, index0);
-    m.UnalignedStore(rep.representation(), base1, index1, load);
-
-    m.Return(m.Int32Constant(OK));
-
-    CHECK_EQ(OK, m.Call());
-
-    raw = reinterpret_cast<byte*>(&out_buffer);
-    MemCopy(&out, raw + y, sizeof(CType));
-    CHECK(in == out);
+    CType* in = reinterpret_cast<CType*>(&in_buffer[x]);
+    InitBuffer(in, 1, rep);
+
+    for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
+      CType* out = reinterpret_cast<CType*>(&out_buffer[y]);
+
+      RawMachineAssemblerTester<int32_t> m;
+      int32_t OK = 0x29000 + x;
+
+      Node* in_base = m.PointerConstant(in_buffer);
+      Node* in_index = m.IntPtrConstant(x);
+      Node* load = m.UnalignedLoad(rep, in_base, in_index);
+
+      Node* out_base = m.PointerConstant(out_buffer);
+      Node* out_index = m.IntPtrConstant(y);
+      m.UnalignedStore(rep.representation(), out_base, out_index, load);
+
+      m.Return(m.Int32Constant(OK));
+
+      CHECK_EQ(OK, m.Call());
+      CHECK_EQ(in[0], out[0]);
+    }
   }
 }
 
 }  // namespace
@@ -290,7 +331,11 @@ TEST(RunLoadImmIndex) {
   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
-  RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
+  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
+  RunLoadImmIndex<HeapObject*>(MachineType::TaggedPointer(),
+                               TestAlignment::kAligned);
+  RunLoadImmIndex<Object*>(MachineType::AnyTagged(), TestAlignment::kAligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -304,8 +349,11 @@ TEST(RunUnalignedLoadImmIndex) {
   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
-                            TestAlignment::kUnaligned);
+  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
+  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
+  RunLoadImmIndex<HeapObject*>(MachineType::TaggedPointer(),
+                               TestAlignment::kUnaligned);
+  RunLoadImmIndex<Object*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -321,7 +369,11 @@ TEST(RunLoadStore) {
   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
-  RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
+  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
+  RunLoadStore<HeapObject*>(MachineType::TaggedPointer(),
+                            TestAlignment::kAligned);
+  RunLoadStore<Object*>(MachineType::AnyTagged(), TestAlignment::kAligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -334,7 +386,11 @@ TEST(RunUnalignedLoadStore) {
   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
-  RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
+  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
+  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
+  RunLoadStore<HeapObject*>(MachineType::TaggedPointer(),
+                            TestAlignment::kUnaligned);
+  RunLoadStore<Object*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -347,7 +403,11 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
   RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
   RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
   RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
-  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged());
+  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
+  RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
+  RunUnalignedLoadStoreUnalignedAccess<HeapObject*>(
+      MachineType::TaggedPointer());
+  RunUnalignedLoadStoreUnalignedAccess<Object*>(MachineType::AnyTagged());
   RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
   RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
 #if V8_TARGET_ARCH_64_BIT
@@ -355,14 +415,6 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
 #endif
 }
 
-#if V8_TARGET_LITTLE_ENDIAN
-#define LSB(addr, bytes) addr
-#elif V8_TARGET_BIG_ENDIAN
-#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
-#else
-#error "Unknown Architecture"
-#endif
-
 namespace {
 void RunLoadStoreSignExtend32(TestAlignment t) {
   int32_t buffer[4];
@@ -608,6 +660,10 @@ TEST(RunUnalignedLoadStoreTruncation) {
   LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
 }
 
+#undef LSB
+#undef A_BILLION
+#undef A_GIG
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8