// Copyright 2016 the V8 project authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.

#include <cmath>
#include <functional>
#include <limits>

#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/test-codegen.h"
#include "test/cctest/compiler/value-helper.h"


namespace v8 {
namespace internal {
namespace compiler {

enum TestAlignment {
  kAligned,
  kUnaligned,
};

#if V8_TARGET_LITTLE_ENDIAN
#define LSB(addr, bytes) addr
#elif V8_TARGET_BIG_ENDIAN
#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
#else
#error "Unknown Architecture"
#endif

// This is America!
#define A_BILLION 1000000000ULL
#define A_GIG (1024ULL * 1024ULL * 1024ULL)

namespace {
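// Computes a pointer such that (result + offset) points back at real_address,
// so the generated code can exercise base+offset addressing while still
// touching the C++ variable at real_address.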
byte* ComputeOffset(void* real_address, int32_t offset) {
  return reinterpret_cast<byte*>(reinterpret_cast<Address>(real_address) -
                                 offset);
}

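// Generates a graph that loads an int32 directly from a C++ variable
// (aligned or unaligned, depending on |t|) and checks the loaded value for a
// range of inputs.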
void RunLoadInt32(const TestAlignment t) {
  RawMachineAssemblerTester<int32_t> m;

  int32_t p1 = 0;  // loads directly from this location.

  if (t == TestAlignment::kAligned) {
    m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
  } else if (t == TestAlignment::kUnaligned) {
    m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32()));
  } else {
    UNREACHABLE();
  }

  FOR_INT32_INPUTS(i) {
    p1 = i;
    CHECK_EQ(p1, m.Call());
  }
}

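// Same as RunLoadInt32, but the load goes through a displaced base pointer
// plus a constant offset, exercising base+offset addressing.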
void RunLoadInt32Offset(TestAlignment t) {
  int32_t p1 = 0;  // loads directly from this location.

  int32_t offsets[] = {-2000000, -100, -101, 1,          3,
                       7,        120,  2000, 2000000000, 0xFF};

  for (size_t i = 0; i < arraysize(offsets); i++) {
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = offsets[i];
    byte* pointer = ComputeOffset(&p1, offset);

    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
    } else if (t == TestAlignment::kUnaligned) {
      m.Return(
          m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset));
    } else {
      UNREACHABLE();
    }

    FOR_INT32_INPUTS(j) {
      p1 = j;
      CHECK_EQ(p1, m.Call());
    }
  }
}

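// Copies a float32 from one C++ variable to another through a load/store
// pair that uses base+offset addressing, and checks the value arrives intact.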
void RunLoadStoreFloat32Offset(TestAlignment t) {
  float p1 = 0.0f;  // loads directly from this location.
  float p2 = 0.0f;  // and stores directly into this location.

  FOR_INT32_INPUTS(i) {
    int32_t magic =
        base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = i;
    byte* from = ComputeOffset(&p1, offset);
    byte* to = ComputeOffset(&p2, offset);
    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
              m.IntPtrConstant(offset), load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load =
          m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to),
                       m.IntPtrConstant(offset), load);

    } else {
      UNREACHABLE();
    }
    m.Return(m.Int32Constant(magic));

    FOR_FLOAT32_INPUTS(j) {
      p1 = j;
      p2 = j - 5;
      CHECK_EQ(magic, m.Call());
      CHECK_DOUBLE_EQ(p1, p2);
    }
  }
}

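// Same as RunLoadStoreFloat32Offset, but for float64 values.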
void RunLoadStoreFloat64Offset(TestAlignment t) {
  double p1 = 0;  // loads directly from this location.
  double p2 = 0;  // and stores directly into this location.

  FOR_INT32_INPUTS(i) {
    int32_t magic =
        base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = i;
    byte* from = ComputeOffset(&p1, offset);
    byte* to = ComputeOffset(&p2, offset);
    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
              m.IntPtrConstant(offset), load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load =
          m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to),
                       m.IntPtrConstant(offset), load);
    } else {
      UNREACHABLE();
    }
    m.Return(m.Int32Constant(magic));

    FOR_FLOAT64_INPUTS(j) {
      p1 = j;
      p2 = j - 5;
      CHECK_EQ(magic, m.Call());
      CHECK_DOUBLE_EQ(p1, p2);
    }
  }
}
}  // namespace

TEST(RunLoadInt32) { RunLoadInt32(TestAlignment::kAligned); }

TEST(RunUnalignedLoadInt32) { RunLoadInt32(TestAlignment::kUnaligned); }

TEST(RunLoadInt32Offset) { RunLoadInt32Offset(TestAlignment::kAligned); }

TEST(RunUnalignedLoadInt32Offset) {
  RunLoadInt32Offset(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreFloat32Offset) {
  RunLoadStoreFloat32Offset(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreFloat32Offset) {
  RunLoadStoreFloat32Offset(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreFloat64Offset) {
  RunLoadStoreFloat64Offset(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreFloat64Offset) {
  RunLoadStoreFloat64Offset(TestAlignment::kUnaligned);
}

namespace {

// Mostly the same as CHECK_EQ(), but customized for compressed tagged values.
template <typename CType>
void CheckEq(CType in_value, CType out_value) {
  CHECK_EQ(in_value, out_value);
}

#ifdef V8_COMPRESS_POINTERS
// Specializations for checking the result of a compressing store.
template <>
void CheckEq<Object>(Object in_value, Object out_value) {
  // Compare only the lower 32 bits of the value because tagged loads/stores
  // are 32-bit operations anyway.
  CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
           static_cast<Tagged_t>(out_value.ptr()));
}

template <>
void CheckEq<HeapObject>(HeapObject in_value, HeapObject out_value) {
  return CheckEq<Object>(in_value, out_value);
}

template <>
void CheckEq<Smi>(Smi in_value, Smi out_value) {
  return CheckEq<Object>(in_value, out_value);
}
#endif

// Initializes the buffer with raw data that respects the requested
// representation of the values.
template <typename CType>
void InitBuffer(CType* buffer, size_t length, MachineType type) {
  const size_t kBufferSize = sizeof(CType) * length;
  if (!type.IsTagged()) {
    byte* raw = reinterpret_cast<byte*>(buffer);
    for (size_t i = 0; i < kBufferSize; i++) {
      raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
    }
    return;
  }

  // Tagged field loads require values to be properly tagged because of the
  // pointer decompression that may be happening during the load.
  Isolate* isolate = CcTest::InitIsolateOnce();
  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
  if (type.IsTaggedSigned()) {
    for (size_t i = 0; i < length; i++) {
      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
    }
  } else {
    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
    if (!type.IsTaggedPointer()) {
      // Also add some Smis if we are checking the AnyTagged case.
      for (size_t i = 0; i < length / 2; i++) {
        smi_view[i] =
            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
      }
    }
  }
}

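// Loads each buffer element through a constant base and index, using various
// positive and negative displacements of the base pointer.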
template <typename CType>
void RunLoadImmIndex(MachineType type, TestAlignment t) {
  const int kNumElems = 16;
  CType buffer[kNumElems];

  InitBuffer(buffer, kNumElems, type);

  // Test with various large and small offsets.
  for (int offset = -1; offset <= 200000; offset *= -5) {
    for (int i = 0; i < kNumElems; i++) {
      BufferedRawMachineAssemblerTester<CType> m;
      CType* base_pointer = reinterpret_cast<CType*>(
          ComputeOffset(&buffer[0], offset * sizeof(CType)));
#ifdef V8_COMPRESS_POINTERS
      if (type.IsTagged()) {
        // When pointer compression is enabled, we need to access only the
        // lower 32 bits of the tagged value, while the buffer contains full
        // 64-bit values.
        base_pointer = reinterpret_cast<CType*>(LSB(base_pointer, kTaggedSize));
      }
#endif

      Node* base = m.PointerConstant(base_pointer);
      Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
      if (t == TestAlignment::kAligned) {
        m.Return(m.Load(type, base, index));
      } else if (t == TestAlignment::kUnaligned) {
        m.Return(m.UnalignedLoad(type, base, index));
      } else {
        UNREACHABLE();
      }

      CheckEq<CType>(buffer[i], m.Call());
    }
  }
}

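// For each index x, loads in_buffer[x] and stores it into the zapped
// out_buffer at index kNumElems - x - 1, then checks that only that output
// element changed.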
template <typename CType>
void RunLoadStore(MachineType type, TestAlignment t) {
  const int kNumElems = 16;
  CType in_buffer[kNumElems];
  CType out_buffer[kNumElems];
  uintptr_t zap_data[] = {kZapValue, kZapValue};
  CType zap_value;

  STATIC_ASSERT(sizeof(CType) <= sizeof(zap_data));
  MemCopy(&zap_value, &zap_data, sizeof(CType));
  InitBuffer(in_buffer, kNumElems, type);

#ifdef V8_TARGET_BIG_ENDIAN
  int offset = sizeof(CType) - ElementSizeInBytes(type.representation());
#else
  int offset = 0;
#endif

  for (int32_t x = 0; x < kNumElems; x++) {
    int32_t y = kNumElems - x - 1;

    RawMachineAssemblerTester<int32_t> m;
    int32_t OK = 0x29000 + x;
    Node* in_base = m.PointerConstant(in_buffer);
    Node* in_index = m.IntPtrConstant(x * sizeof(CType) + offset);
    Node* out_base = m.PointerConstant(out_buffer);
    Node* out_index = m.IntPtrConstant(y * sizeof(CType) + offset);
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(type, in_base, in_index);
      m.Store(type.representation(), out_base, out_index, load,
              kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load = m.UnalignedLoad(type, in_base, in_index);
      m.UnalignedStore(type.representation(), out_base, out_index, load);
    }

    m.Return(m.Int32Constant(OK));

    for (int32_t z = 0; z < kNumElems; z++) {
      out_buffer[z] = zap_value;
    }
    CHECK_NE(in_buffer[x], out_buffer[y]);
    CHECK_EQ(OK, m.Call());
    // Mostly the same as CHECK_EQ(), but customized for compressed tagged
    // values.
    CheckEq<CType>(in_buffer[x], out_buffer[y]);
    for (int32_t z = 0; z < kNumElems; z++) {
      if (z != y) CHECK_EQ(zap_value, out_buffer[z]);
    }
  }
}

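// Copies a single value between two byte buffers at every possible
// misalignment of the source and destination, using unaligned loads and
// stores.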
template <typename CType>
void RunUnalignedLoadStoreUnalignedAccess(MachineType type) {
  CType in, out;
  byte in_buffer[2 * sizeof(CType)];
  byte out_buffer[2 * sizeof(CType)];

  InitBuffer(&in, 1, type);

  for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
    // Direct write to &in_buffer[x] may cause unaligned access in C++ code so
    // we use MemCopy() to handle that.
    MemCopy(&in_buffer[x], &in, sizeof(CType));

    for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
      RawMachineAssemblerTester<int32_t> m;
      int32_t OK = 0x29000 + x;

      Node* in_base = m.PointerConstant(in_buffer);
      Node* in_index = m.IntPtrConstant(x);
      Node* load = m.UnalignedLoad(type, in_base, in_index);

      Node* out_base = m.PointerConstant(out_buffer);
      Node* out_index = m.IntPtrConstant(y);
      m.UnalignedStore(type.representation(), out_base, out_index, load);

      m.Return(m.Int32Constant(OK));

      CHECK_EQ(OK, m.Call());
      // Direct read of &out_buffer[y] may cause unaligned access in C++ code
      // so we use MemCopy() to handle that.
      MemCopy(&out, &out_buffer[y], sizeof(CType));
      // Mostly the same as CHECK_EQ(), but customized for compressed tagged
      // values.
      CheckEq<CType>(in, out);
    }
  }
}
}  // namespace

TEST(RunLoadImmIndex) {
  RunLoadImmIndex<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  RunLoadImmIndex<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
  RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
  RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
  RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
  RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
                              TestAlignment::kAligned);
  RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
  RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
  RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
  // TODO(titzer): test various indexing modes.
}

TEST(RunUnalignedLoadImmIndex) {
  RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
  RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
  RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
  RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
  // TODO(titzer): test various indexing modes.
}

TEST(RunLoadStore) {
  RunLoadStore<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  RunLoadStore<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
  RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
  RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
  RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
                           TestAlignment::kAligned);
  RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
  RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
  RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
}

TEST(RunUnalignedLoadStore) {
  RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
  RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
  RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
  RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
  RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
}

TEST(RunUnalignedLoadStoreUnalignedAccess) {
  RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16());
  RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
  RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
  RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
  RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
  RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
#if V8_TARGET_ARCH_64_BIT
  RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64());
#endif
}

namespace {
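// Loads 8-, 16- and 32-bit values from the low bytes of buffer[0] and stores
// them as word32 into buffer[1..3], checking that the narrow loads are
// sign-extended.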
void RunLoadStoreSignExtend32(TestAlignment t) {
  int32_t buffer[4];
  RawMachineAssemblerTester<int32_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
                              load32);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_INT32_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
    CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
    CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
    CHECK_EQ(i, buffer[3]);
  }
}

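// Same as RunLoadStoreSignExtend32, but with unsigned loads, checking zero
// extension.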
void RunLoadStoreZeroExtend32(TestAlignment t) {
  uint32_t buffer[4];
  RawMachineAssemblerTester<uint32_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
                              load32);
  }
  m.Return(load8);

  FOR_UINT32_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ((i & 0xFF), m.Call());
    CHECK_EQ((i & 0xFF), buffer[1]);
    CHECK_EQ((i & 0xFFFF), buffer[2]);
    CHECK_EQ(i, buffer[3]);
  }
}
}  // namespace

TEST(RunLoadStoreSignExtend32) {
  RunLoadStoreSignExtend32(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreSignExtend32) {
  RunLoadStoreSignExtend32(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreZeroExtend32) {
  RunLoadStoreZeroExtend32(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreZeroExtend32) {
  RunLoadStoreZeroExtend32(TestAlignment::kUnaligned);
}

#if V8_TARGET_ARCH_64_BIT

namespace {
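// 64-bit variant of RunLoadStoreSignExtend32: narrow loads are stored as
// word64 into buffer[1..4]. Currently disabled by the early return below.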
void RunLoadStoreSignExtend64(TestAlignment t) {
  if ((true)) return;  // TODO(titzer): sign extension of loads to 64-bit.
  int64_t buffer[5];
  RawMachineAssemblerTester<int64_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
    Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
    m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
    Node* load64 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
                              load32);
    m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
                              load64);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_INT64_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
    CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
    CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
    CHECK_EQ(static_cast<int32_t>(i & 0xFFFFFFFF), buffer[3]);
    CHECK_EQ(i, buffer[4]);
  }
}

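// 64-bit variant of RunLoadStoreZeroExtend32: narrow unsigned loads are
// stored as word64 into buffer[1..4] and checked for zero extension.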
void RunLoadStoreZeroExtend64(TestAlignment t) {
  if (kSystemPointerSize < 8) return;
  uint64_t buffer[5];
  RawMachineAssemblerTester<uint64_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
    Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
    m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
    Node* load64 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
                              load32);
    m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
                              load64);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_UINT64_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ((i & 0xFF), m.Call());
    CHECK_EQ((i & 0xFF), buffer[1]);
    CHECK_EQ((i & 0xFFFF), buffer[2]);
    CHECK_EQ((i & 0xFFFFFFFF), buffer[3]);
    CHECK_EQ(i, buffer[4]);
  }
}

}  // namespace

TEST(RunLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(TestAlignment::kUnaligned);
}

#endif

namespace {
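// Loads an IntType value, adds 1 as a word32, and stores the result back with
// the narrow representation; checks that the stored value is truncated to
// IntType's range.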
template <typename IntType>
void LoadStoreTruncation(MachineType kRepresentation, TestAlignment t) {
  IntType input;

  RawMachineAssemblerTester<int32_t> m;
  Node* ap1;
  if (t == TestAlignment::kAligned) {
    Node* a = m.LoadFromPointer(&input, kRepresentation);
    ap1 = m.Int32Add(a, m.Int32Constant(1));
    m.StoreToPointer(&input, kRepresentation.representation(), ap1);
  } else if (t == TestAlignment::kUnaligned) {
    Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation);
    ap1 = m.Int32Add(a, m.Int32Constant(1));
    m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1);
  } else {
    UNREACHABLE();
  }
  m.Return(ap1);

  const IntType max = std::numeric_limits<IntType>::max();
  const IntType min = std::numeric_limits<IntType>::min();

  // Test upper bound.
  input = max;
  CHECK_EQ(max + 1, m.Call());
  CHECK_EQ(min, input);

  // Test lower bound.
  input = min;
  CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
  CHECK_EQ(min + 1, input);

  // Test all one byte values that are not one byte bounds.
  for (int i = -127; i < 127; i++) {
    input = i;
    int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
    CHECK_EQ(static_cast<IntType>(expected), m.Call());
    CHECK_EQ(static_cast<IntType>(i + 1), input);
  }
}
}  // namespace

TEST(RunLoadStoreTruncation) {
  LoadStoreTruncation<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreTruncation) {
  LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
}

#undef LSB
#undef A_BILLION
#undef A_GIG

}  // namespace compiler
}  // namespace internal
}  // namespace v8