Commit ff2bd0fd authored by ivica.bogosavljevic, committed by Commit bot

Implement UnalignedLoad and UnalignedStore in WASM using a
combination of LoadByte/Shift/Or and StoreByte/Shift/And.

BUG=

Review-Url: https://codereview.chromium.org/1928513002
Cr-Commit-Position: refs/heads/master@{#36422}
parent b3bfc0bd
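In outline, the new lowering computes the same thing as this minimal standalone C++ sketch (illustration only, not code from this change; assumes a little-endian target and a 4-byte access — the graph code below also handles big-endian targets, 64-bit values, floats, and wider strides):

#include <cstdint>
#include <cstdio>

// Unaligned load: assemble the value from single-byte loads, OR-ing the
// bytes most-significant-first, as the Load/Shl/Or node sequence below does.
uint32_t UnalignedLoadU32(const uint8_t* p) {
  uint32_t result = 0;
  for (int i = 3; i >= 0; i--) {  // little-endian: byte 3 is the MSB
    result = (result << 8) | p[i];
  }
  return result;
}

// Unaligned store: scatter the value with single-byte stores, shifting it
// right between stores, as the Store/ShrU node sequence below does.
void UnalignedStoreU32(uint8_t* p, uint32_t value) {
  for (int i = 0; i < 4; i++) {  // little-endian: byte 0 is the LSB
    p[i] = static_cast<uint8_t>(value);
    value >>= 8;
  }
}

int main() {
  uint8_t buffer[5] = {0};
  UnalignedStoreU32(buffer + 1, 0x11223344u);           // deliberately misaligned
  std::printf("%08x\n", UnalignedLoadU32(buffer + 1));  // prints 11223344
  return 0;
}

When the access is wider than the guaranteed alignment but not byte-aligned, the graph builder uses the widest safe stride instead of single bytes, e.g. two 16-bit loads for a 2-byte-aligned i32 load.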
......@@ -1968,6 +1968,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -2417,6 +2417,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kWord64ReverseBits;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -1646,6 +1646,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -139,6 +139,8 @@ class InstructionSelector final {
// TODO(sigurds) This should take a CpuFeatures argument.
static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
......
......@@ -539,11 +539,13 @@ struct MachineOperatorGlobalCache {
static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
LAZY_INSTANCE_INITIALIZER;
-MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone,
-                                               MachineRepresentation word,
-                                               Flags flags)
-    : cache_(kCache.Get()), word_(word), flags_(flags) {
MachineOperatorBuilder::MachineOperatorBuilder(
Zone* zone, MachineRepresentation word, Flags flags,
AlignmentRequirements alignmentRequirements)
: cache_(kCache.Get()),
word_(word),
flags_(flags),
alignment_requirements_(alignmentRequirements) {
DCHECK(word == MachineRepresentation::kWord32 ||
word == MachineRepresentation::kWord64);
}
......
......@@ -123,10 +123,74 @@ class MachineOperatorBuilder final : public ZoneObject {
};
typedef base::Flags<Flag, unsigned> Flags;
class AlignmentRequirements {
public:
enum UnalignedAccessSupport { kNoSupport, kSomeSupport, kFullSupport };
bool IsUnalignedLoadSupported(const MachineType& machineType,
uint8_t alignment) const {
return IsUnalignedSupported(unalignedLoadSupportedTypes_, machineType,
alignment);
}
bool IsUnalignedStoreSupported(const MachineType& machineType,
uint8_t alignment) const {
return IsUnalignedSupported(unalignedStoreSupportedTypes_, machineType,
alignment);
}
static AlignmentRequirements FullUnalignedAccessSupport() {
return AlignmentRequirements(kFullSupport);
}
static AlignmentRequirements NoUnalignedAccessSupport() {
return AlignmentRequirements(kNoSupport);
}
static AlignmentRequirements SomeUnalignedAccessSupport(
const Vector<MachineType>& unalignedLoadSupportedTypes,
const Vector<MachineType>& unalignedStoreSupportedTypes) {
return AlignmentRequirements(kSomeSupport, unalignedLoadSupportedTypes,
unalignedStoreSupportedTypes);
}
private:
explicit AlignmentRequirements(
AlignmentRequirements::UnalignedAccessSupport unalignedAccessSupport,
Vector<MachineType> unalignedLoadSupportedTypes =
Vector<MachineType>(NULL, 0),
Vector<MachineType> unalignedStoreSupportedTypes =
Vector<MachineType>(NULL, 0))
: unalignedSupport_(unalignedAccessSupport),
unalignedLoadSupportedTypes_(unalignedLoadSupportedTypes),
unalignedStoreSupportedTypes_(unalignedStoreSupportedTypes) {}
bool IsUnalignedSupported(const Vector<MachineType>& supported,
const MachineType& machineType,
uint8_t alignment) const {
if (unalignedSupport_ == kFullSupport) {
return true;
} else if (unalignedSupport_ == kNoSupport) {
return false;
} else {
for (MachineType m : supported) {
if (m == machineType) {
return true;
}
}
return false;
}
}
const AlignmentRequirements::UnalignedAccessSupport unalignedSupport_;
const Vector<MachineType> unalignedLoadSupportedTypes_;
const Vector<MachineType> unalignedStoreSupportedTypes_;
};
explicit MachineOperatorBuilder(
Zone* zone,
MachineRepresentation word = MachineType::PointerRepresentation(),
-    Flags supportedOperators = kNoFlags);
Flags supportedOperators = kNoFlags,
AlignmentRequirements alignmentRequirements =
AlignmentRequirements::NoUnalignedAccessSupport());
const Operator* DebugBreak();
......@@ -515,6 +579,18 @@ class MachineOperatorBuilder final : public ZoneObject {
bool Is64() const { return word() == MachineRepresentation::kWord64; }
MachineRepresentation word() const { return word_; }
bool UnalignedLoadSupported(const MachineType& machineType,
uint8_t alignment) {
return alignment_requirements_.IsUnalignedLoadSupported(machineType,
alignment);
}
bool UnalignedStoreSupported(const MachineType& machineType,
uint8_t alignment) {
return alignment_requirements_.IsUnalignedStoreSupported(machineType,
alignment);
}
// Pseudo operators that translate to 32/64-bit operators depending on the
// word-size of the target machine assumed by this builder.
#define PSEUDO_OP_LIST(V) \
......@@ -549,6 +625,7 @@ class MachineOperatorBuilder final : public ZoneObject {
MachineOperatorGlobalCache const& cache_;
MachineRepresentation const word_;
Flags const flags_;
AlignmentRequirements const alignment_requirements_;
DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
......
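For targets that can handle only some unaligned accesses, a backend can instead report per-type support through SomeUnalignedAccessSupport. A hypothetical sketch (the type lists are invented for illustration; no backend in this change uses this variant yet):

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  // Hypothetical target: unaligned 32-bit loads are fine, but only
  // unaligned 32-bit integer stores are safe.
  static MachineType kUnalignedLoads[] = {
      MachineType::Int32(), MachineType::Uint32(), MachineType::Float32()};
  static MachineType kUnalignedStores[] = {MachineType::Int32()};
  return MachineOperatorBuilder::AlignmentRequirements::
      SomeUnalignedAccessSupport(
          Vector<MachineType>(kUnalignedLoads, arraysize(kUnalignedLoads)),
          Vector<MachineType>(kUnalignedStores, arraysize(kUnalignedStores)));
}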
......@@ -1548,6 +1548,20 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat32RoundTiesEven;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
if (IsMipsArchVariant(kMips32r6)) {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
} else {
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
return MachineOperatorBuilder::AlignmentRequirements::
NoUnalignedAccessSupport();
}
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -2057,6 +2057,19 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat32RoundTiesEven;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
if (kArchVariant == kMips64r6) {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
} else {
DCHECK(kArchVariant == kMips64r2);
return MachineOperatorBuilder::AlignmentRequirements::
NoUnalignedAccessSupport();
}
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -1991,6 +1991,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -1822,6 +1822,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kWord64Popcnt;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -2614,16 +2614,148 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
MachineType WasmGraphBuilder::GetTypeForUnalignedAccess(uint32_t alignment,
bool signExtend) {
switch (alignment) {
case 0:
return signExtend ? MachineType::Int8() : MachineType::Uint8();
case 1:
return signExtend ? MachineType::Int16() : MachineType::Uint16();
case 2:
return signExtend ? MachineType::Int32() : MachineType::Uint32();
default:
UNREACHABLE();
return MachineType::None();
}
}
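// (Above: "alignment" is the WASM alignment exponent, i.e. log2 of the
// guaranteed byte alignment — 0 -> 1-byte, 1 -> 2-byte, 2 -> 4-byte — so it
// selects the widest machine type that is safe to access directly.)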
Node* WasmGraphBuilder::GetUnalignedLoadOffsetNode(Node* baseOffset,
int numberOfBytes,
int stride, int current) {
int offset;
wasm::WasmOpcode addOpcode;
#if defined(V8_TARGET_LITTLE_ENDIAN)
offset = numberOfBytes - stride - current;
#elif defined(V8_TARGET_BIG_ENDIAN)
offset = current;
#else
#error Unsupported endianness
#endif
#if WASM_64
addOpcode = wasm::kExprI64Add;
#else
addOpcode = wasm::kExprI32Add;
#endif
if (offset == 0) {
return baseOffset;
} else {
return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
}
}
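// (Above: for a little-endian 4-byte load with stride 1, current = 0, 1, 2, 3
// yields offsets 3, 2, 1, 0, so the most significant byte is loaded first,
// matching the shift-left/or accumulation in BuildUnalignedLoad.)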
Node* WasmGraphBuilder::BuildUnalignedLoad(wasm::LocalType type,
MachineType memtype, Node* index,
uint32_t offset,
uint32_t alignment) {
Node* result;
Node* load;
bool extendTo64Bit = false;
wasm::WasmOpcode shiftOpcode;
wasm::WasmOpcode orOpcode;
Node* shiftConst;
bool signExtend = memtype.IsSigned();
bool isFloat = IsFloatingPoint(memtype.representation());
int stride =
1 << ElementSizeLog2Of(
GetTypeForUnalignedAccess(alignment, false).representation());
int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
DCHECK(numberOfBytes % stride == 0);
switch (type) {
case wasm::kAstI64:
case wasm::kAstF64:
shiftOpcode = wasm::kExprI64Shl;
orOpcode = wasm::kExprI64Ior;
result = jsgraph()->Int64Constant(0);
shiftConst = jsgraph()->Int64Constant(8 * stride);
extendTo64Bit = true;
break;
case wasm::kAstI32:
case wasm::kAstF32:
shiftOpcode = wasm::kExprI32Shl;
orOpcode = wasm::kExprI32Ior;
result = jsgraph()->Int32Constant(0);
shiftConst = jsgraph()->Int32Constant(8 * stride);
break;
default:
UNREACHABLE();
}
Node* baseOffset = MemBuffer(offset);
for (int i = 0; i < numberOfBytes; i += stride) {
result = Binop(shiftOpcode, result, shiftConst);
load = graph()->NewNode(
jsgraph()->machine()->Load(
GetTypeForUnalignedAccess(alignment, signExtend)),
GetUnalignedLoadOffsetNode(baseOffset, numberOfBytes, stride, i), index,
*effect_, *control_);
*effect_ = load;
if (extendTo64Bit) {
if (signExtend) {
load =
graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
} else {
load = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
load);
}
}
signExtend = false;
result = Binop(orOpcode, result, load);
}
// Convert to float
if (isFloat) {
switch (type) {
case wasm::kAstF32:
result = Unop(wasm::kExprF32ReinterpretI32, result);
break;
case wasm::kAstF64:
result = Unop(wasm::kExprF64ReinterpretI64, result);
break;
default:
UNREACHABLE();
}
}
return result;
}
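// The loop above, for a little-endian i32 load with alignment 0 (stride 1):
//   result = SignOrZeroExt(mem[base + 3])             // MSB; sign-extended
//                                                     // only if memtype is
//                                                     // signed
//   result = (result << 8) | ZeroExt(mem[base + 2])
//   result = (result << 8) | ZeroExt(mem[base + 1])
//   result = (result << 8) | ZeroExt(mem[base + 0])   // LSB
// Surplus sign bits are shifted out of a 32-bit result, and they supply the
// upper bits when a narrow signed value is widened to an i64 result.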
Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
Node* index, uint32_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
// WASM semantics throw on OOB. Introduce explicit bounds check.
BoundsCheckMem(memtype, index, offset, position);
-  load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
-                          MemBuffer(offset), index, *effect_, *control_);
-  *effect_ = load;
bool aligned = static_cast<int>(alignment) >=
ElementSizeLog2Of(memtype.representation());
if (aligned ||
jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
MemBuffer(offset), index, *effect_, *control_);
*effect_ = load;
} else {
load = BuildUnalignedLoad(type, memtype, index, offset, alignment);
}
if (type == wasm::kAstI64 &&
ElementSizeLog2Of(memtype.representation()) < 3) {
......@@ -2641,16 +2773,120 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
return load;
}
Node* WasmGraphBuilder::GetUnalignedStoreOffsetNode(Node* baseOffset,
int numberOfBytes,
int stride, int current) {
int offset;
wasm::WasmOpcode addOpcode;
#if defined(V8_TARGET_LITTLE_ENDIAN)
offset = current;
#elif defined(V8_TARGET_BIG_ENDIAN)
offset = numberOfBytes - stride - current;
#else
#error Unsupported endianness
#endif
#if WASM_64
addOpcode = wasm::kExprI64Add;
#else
addOpcode = wasm::kExprI32Add;
#endif
if (offset == 0) {
return baseOffset;
} else {
return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
}
}
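// (Above: store offsets run in the opposite direction — on little-endian
// targets current = 0 maps to offset 0, so the least significant part is
// written first while the value is shifted right between stores.)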
Node* WasmGraphBuilder::BuildUnalignedStore(MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
Node* val) {
Node* store;
Node* newValue;
wasm::WasmOpcode shiftOpcode;
Node* shiftConst;
bool extendTo64Bit = false;
bool isFloat = IsFloatingPoint(memtype.representation());
int stride = 1 << ElementSizeLog2Of(
GetTypeForUnalignedAccess(alignment).representation());
int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
DCHECK(numberOfBytes % stride == 0);
StoreRepresentation rep(GetTypeForUnalignedAccess(alignment).representation(),
kNoWriteBarrier);
if (ElementSizeLog2Of(memtype.representation()) <= 2) {
shiftOpcode = wasm::kExprI32ShrU;
shiftConst = jsgraph()->Int32Constant(8 * stride);
} else {
shiftOpcode = wasm::kExprI64ShrU;
shiftConst = jsgraph()->Int64Constant(8 * stride);
extendTo64Bit = true;
}
newValue = val;
if (isFloat) {
switch (memtype.representation()) {
case MachineRepresentation::kFloat64:
newValue = Unop(wasm::kExprI64ReinterpretF64, val);
break;
case MachineRepresentation::kFloat32:
newValue = Unop(wasm::kExprI32ReinterpretF32, val);
break;
default:
UNREACHABLE();
}
}
Node* baseOffset = MemBuffer(offset);
for (int i = 0; i < numberOfBytes - stride; i += stride) {
store = graph()->NewNode(
jsgraph()->machine()->Store(rep),
GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride, i),
index,
extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
*effect_, *control_);
newValue = Binop(shiftOpcode, newValue, shiftConst);
*effect_ = store;
}
store = graph()->NewNode(
jsgraph()->machine()->Store(rep),
GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride,
numberOfBytes - stride),
index,
extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
*effect_, *control_);
*effect_ = store;
return val;
}
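// The sequence above, for a little-endian i32 store with alignment 0
// (stride 1):
//   mem[base + 0] = val & 0xff;  val >>= 8;   // LSB first
//   mem[base + 1] = val & 0xff;  val >>= 8;
//   mem[base + 2] = val & 0xff;  val >>= 8;
//   mem[base + 3] = val & 0xff;               // MSB last, no trailing shift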
Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
-                                 uint32_t offset, Node* val,
uint32_t offset, uint32_t alignment, Node* val,
wasm::WasmCodePosition position) {
Node* store;
// WASM semantics throw on OOB. Introduce explicit bounds check.
BoundsCheckMem(memtype, index, offset, position);
-  StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
-  store = graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
-                           index, val, *effect_, *control_);
-  *effect_ = store;
bool aligned = static_cast<int>(alignment) >=
ElementSizeLog2Of(memtype.representation());
if (aligned ||
jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
store =
graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
index, val, *effect_, *control_);
*effect_ = store;
} else {
store = BuildUnalignedStore(memtype, index, offset, alignment, val);
}
return store;
}
......@@ -2896,6 +3132,7 @@ std::pair<JSGraph*, SourcePositionTable*> BuildGraphForWasmFunction(
decode_timer.Start();
}
// Create a TF graph during decoding.
Graph* graph = jsgraph->graph();
CommonOperatorBuilder* common = jsgraph->common();
MachineOperatorBuilder* machine = jsgraph->machine();
......
......@@ -162,8 +162,10 @@ class WasmGraphBuilder {
Node* LoadGlobal(uint32_t index);
Node* StoreGlobal(uint32_t index, Node* val);
Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
-                uint32_t offset, wasm::WasmCodePosition position);
-  Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
Node* StoreMem(MachineType type, Node* index, uint32_t offset,
uint32_t alignment, Node* val,
wasm::WasmCodePosition position);
static void PrintDebugName(Node* node);
......@@ -214,6 +216,19 @@ class WasmGraphBuilder {
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
MachineType GetTypeForUnalignedAccess(uint32_t alignment,
bool signExtend = false);
Node* GetUnalignedLoadOffsetNode(Node* baseOffset, int numberOfBytes,
int stride, int current);
Node* BuildUnalignedLoad(wasm::LocalType type, MachineType memtype,
Node* index, uint32_t offset, uint32_t alignment);
Node* GetUnalignedStoreOffsetNode(Node* baseOffset, int numberOfBytes,
int stride, int current);
Node* BuildUnalignedStore(MachineType memtype, Node* index, uint32_t offset,
uint32_t alignment, Node* val);
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
......
......@@ -2100,6 +2100,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -1718,6 +1718,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -1104,8 +1104,8 @@ class SR_WasmDecoder : public WasmDecoder {
int DecodeLoadMem(LocalType type, MachineType mem_type) {
MemoryAccessOperand operand(this, pc_);
Value index = Pop(0, kAstI32);
-    TFNode* node =
-        BUILD(LoadMem, type, mem_type, index.node, operand.offset, position());
TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
operand.alignment, position());
Push(type, node);
return 1 + operand.length;
}
......@@ -1114,7 +1114,8 @@ class SR_WasmDecoder : public WasmDecoder {
MemoryAccessOperand operand(this, pc_);
Value val = Pop(1, type);
Value index = Pop(0, kAstI32);
-    BUILD(StoreMem, mem_type, index.node, operand.offset, val.node, position());
BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
val.node, position());
Push(type, val.node);
return 1 + operand.length;
}
......
......@@ -364,6 +364,15 @@ class LocalDeclEncoder {
static_cast<byte>( \
v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
ZERO_ALIGNMENT, static_cast<byte>(offset)
#define WASM_LOAD_MEM_ALIGNMENT(type, index, alignment) \
index, static_cast<byte>( \
v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
alignment, ZERO_OFFSET
#define WASM_STORE_MEM_ALIGNMENT(type, index, alignment, val) \
index, val, \
static_cast<byte>( \
v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
alignment, ZERO_OFFSET
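// (In the two macros above, the immediates after the opcode follow the WASM
//  encoding of memory access operands: alignment exponent first, then offset.)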
#define WASM_CALL_FUNCTION0(index) \
kExprCallFunction, 0, static_cast<byte>(index)
......
......@@ -1247,7 +1247,29 @@ WASM_EXEC_TEST(LoadMemI64) {
CHECK_EQ(77777777, r.Call());
}
-WASM_EXEC_TEST(MemI64_Sum) {
WASM_EXEC_TEST(Run_Wasm_LoadMemI64_alignment) {
REQUIRE(I64LoadStore);
TestingModule module;
int64_t* memory = module.AddMemoryElems<int64_t>(8);
for (byte alignment = 0; alignment <= 3; alignment++) {
module.RandomizeMemory(1111);
WasmRunner<int64_t> r(&module);
BUILD(r,
WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_I8(0), alignment));
memory[0] = 0xaabbccdd00112233LL;
CHECK_EQ(0xaabbccdd00112233LL, r.Call());
memory[0] = 0x33aabbccdd001122LL;
CHECK_EQ(0x33aabbccdd001122LL, r.Call());
memory[0] = 77777777;
CHECK_EQ(77777777, r.Call());
}
}
WASM_EXEC_TEST(Run_Wasm_MemI64_Sum) {
REQUIRE(I64LoadStore);
REQUIRE(I64Add);
REQUIRE(I64Sub);
......@@ -1283,7 +1305,24 @@ WASM_EXEC_TEST(MemI64_Sum) {
}
}
-WASM_EXEC_TEST(I64Global) {
WASM_EXEC_TEST(Run_Wasm_StoreMemI64_alignment) {
TestingModule module;
int64_t* memory = module.AddMemoryElems<int64_t>(4);
const int64_t kWritten = 0x12345678abcd0011ll;
for (byte i = 0; i <= 3; i++) {
WasmRunner<int64_t> r(&module, MachineType::Int64());
BUILD(r, WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, i,
WASM_GET_LOCAL(0)));
module.RandomizeMemory(1111);
memory[0] = 0;
CHECK_EQ(kWritten, r.Call(kWritten));
CHECK_EQ(kWritten, memory[0]);
}
}
WASM_EXEC_TEST(Run_Wasm_I64Global) {
REQUIRE(I64LoadStore);
REQUIRE(I64SConvertI32);
REQUIRE(I64And);
......
......@@ -1287,7 +1287,28 @@ WASM_EXEC_TEST(LoadMemI32) {
CHECK_EQ(77777777, r.Call(0));
}
-WASM_EXEC_TEST(LoadMemI32_oob) {
WASM_EXEC_TEST(Run_Wasm_LoadMemI32_alignment) {
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
for (byte alignment = 0; alignment <= 2; alignment++) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
module.RandomizeMemory(1111);
BUILD(r,
WASM_LOAD_MEM_ALIGNMENT(MachineType::Int32(), WASM_I8(0), alignment));
memory[0] = 0x1a2b3c4d;
CHECK_EQ(0x1a2b3c4d, r.Call(0));
memory[0] = 0x5e6f7a8b;
CHECK_EQ(0x5e6f7a8b, r.Call(0));
memory[0] = 0x9ca0b1c2;
CHECK_EQ(0x9ca0b1c2, r.Call(0));
}
}
WASM_EXEC_TEST(Run_Wasm_LoadMemI32_oob) {
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Uint32());
......@@ -1357,9 +1378,7 @@ WASM_EXEC_TEST(LoadMemI32_offset) {
CHECK_EQ(44444444, r.Call(8));
}
-#if !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
WASM_EXEC_TEST(Run_Wasm_LoadMemI32_const_oob_misaligned) {
const int kMemSize = 12;
-// TODO(titzer): Fix misaligned accesses on MIPS and re-enable.
for (int offset = 0; offset < kMemSize + 5; offset++) {
......@@ -1382,9 +1401,7 @@ WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
}
}
-#endif
-WASM_EXEC_TEST(LoadMemI32_const_oob) {
WASM_EXEC_TEST(Run_Wasm_LoadMemI32_const_oob) {
const int kMemSize = 24;
for (int offset = 0; offset < kMemSize + 5; offset += 4) {
for (int index = 0; index < kMemSize + 5; index += 4) {
......@@ -1406,7 +1423,24 @@ WASM_EXEC_TEST(LoadMemI32_const_oob) {
}
}
-WASM_EXEC_TEST(StoreMemI32_offset) {
WASM_EXEC_TEST(Run_Wasm_StoreMemI32_alignment) {
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(4);
const int32_t kWritten = 0x12345678;
for (byte i = 0; i <= 2; i++) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, i,
WASM_GET_LOCAL(0)));
module.RandomizeMemory(1111);
memory[0] = 0;
CHECK_EQ(kWritten, r.Call(kWritten));
CHECK_EQ(kWritten, memory[0]);
}
}
WASM_EXEC_TEST(Run_Wasm_StoreMemI32_offset) {
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(4);
WasmRunner<int32_t> r(&module, MachineType::Int32());
......