Commit 0854d353 authored by Clemens Backes, committed by Commit Bot

Reland: [wasm][memory64] Decode memory offset as 64-bit LEB

After preparing Liftoff, TurboFan, and the interpreter for this change,
we now store the memory offset as uint64_t. {LoadLane} and {StoreLane}
were added after the TurboFan refactoring, so those two are adapted
similarly to the other memory operations.
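
A minimal standalone sketch of the LEB128 decoding involved, assuming only the standard length limits (at most 5 bytes for a 32-bit LEB, 10 for a 64-bit one); the helper name ReadLEB and the example bytes are illustrative, whereas the actual decoding goes through Decoder::read_u32v / read_u64v as in the diff below:

#include <cstdint>
#include <cstdio>

// Decodes an unsigned LEB128 value of at most {max_bytes} bytes starting at
// {pc}. The number of bytes consumed is stored in {*length}; 0 signals an
// over-long (invalid) encoding.
uint64_t ReadLEB(const uint8_t* pc, int max_bytes, int* length) {
  uint64_t result = 0;
  int shift = 0;
  for (int i = 0; i < max_bytes; ++i) {
    uint64_t payload = pc[i] & 0x7F;
    result |= payload << shift;
    shift += 7;
    if ((pc[i] & 0x80) == 0) {  // Continuation bit clear: last byte.
      *length = i + 1;
      return result;
    }
  }
  *length = 0;
  return 0;
}

int main() {
  // Offset 1 << 35 (32 GiB) needs six LEB bytes, so it can only appear as a
  // memory access immediate once offsets are decoded as 64-bit LEBs.
  const uint8_t offset_32gib[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x01};
  int length = 0;
  uint64_t offset = ReadLEB(offset_32gib, 10, &length);  // 64-bit limit: ok.
  printf("u64 LEB: offset=%llu, length=%d\n",
         static_cast<unsigned long long>(offset), length);
  ReadLEB(offset_32gib, 5, &length);  // 32-bit limit: over-long, rejected.
  printf("u32 LEB: length=%d (0 means decode error)\n", length);
  return 0;
}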

TBR=manoskouk@chromium.org

Bug: v8:10949
Cq-Include-Trybots: luci.v8.try:v8_win64_msvc_rel_ng
Change-Id: I8f3084c21a7d99f72df1bc18c2b507c4e84570cd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2560720
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71411}
parent 5053751b
@@ -4082,7 +4082,7 @@ Node* WasmGraphBuilder::LoadTransformBigEndian(
#endif
Node* WasmGraphBuilder::LoadLane(MachineType memtype, Node* value, Node* index,
uint32_t offset, uint8_t laneidx,
uint64_t offset, uint8_t laneidx,
wasm::WasmCodePosition position) {
has_simd_ = true;
Node* load;
@@ -4223,7 +4223,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
}
Node* WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
uint32_t offset, uint32_t alignment,
uint64_t offset, uint32_t alignment,
Node* val, uint8_t laneidx,
wasm::WasmCodePosition position,
wasm::ValueType type) {
......
@@ -316,12 +316,12 @@ class WasmGraphBuilder {
wasm::LoadTransformationKind transform, Node* index,
uint64_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
Node* LoadLane(MachineType memtype, Node* value, Node* index, uint32_t offset,
Node* LoadLane(MachineType memtype, Node* value, Node* index, uint64_t offset,
uint8_t laneidx, wasm::WasmCodePosition position);
Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint64_t offset,
uint32_t alignment, Node* val, wasm::WasmCodePosition position,
wasm::ValueType type);
Node* StoreLane(MachineRepresentation mem_rep, Node* index, uint32_t offset,
Node* StoreLane(MachineRepresentation mem_rep, Node* index, uint64_t offset,
uint32_t alignment, Node* val, uint8_t laneidx,
wasm::WasmCodePosition position, wasm::ValueType type);
static void PrintDebugName(Node* node);
......
@@ -689,13 +689,16 @@ class BranchTableIterator {
const uint32_t table_count_; // the count of entries, not including default.
};
template <Decoder::ValidateFlag validate>
class WasmDecoder;
template <Decoder::ValidateFlag validate>
struct MemoryAccessImmediate {
uint32_t alignment;
uint32_t offset;
uint64_t offset;
uint32_t length = 0;
inline MemoryAccessImmediate(Decoder* decoder, const byte* pc,
uint32_t max_alignment) {
uint32_t max_alignment, bool is_memory64) {
uint32_t alignment_length;
alignment =
decoder->read_u32v<validate>(pc, &alignment_length, "alignment");
@@ -707,10 +710,15 @@ struct MemoryAccessImmediate {
max_alignment, alignment);
}
uint32_t offset_length;
offset = decoder->read_u32v<validate>(pc + alignment_length, &offset_length,
"offset");
offset = is_memory64 ? decoder->read_u64v<validate>(
pc + alignment_length, &offset_length, "offset")
: decoder->read_u32v<validate>(
pc + alignment_length, &offset_length, "offset");
length = alignment_length + offset_length;
}
// Defined below, after the definition of WasmDecoder.
inline MemoryAccessImmediate(WasmDecoder<validate>* decoder, const byte* pc,
uint32_t max_alignment);
};
// Immediate for SIMD lane operations.
@@ -1587,6 +1595,11 @@ class WasmDecoder : public Decoder {
// Returns the length of the opcode under {pc}.
static uint32_t OpcodeLength(WasmDecoder* decoder, const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
// We don't have information about the module here, so we just assume that
// memory64 is enabled when parsing memory access immediates. This is
// backwards-compatible; decode errors will be detected at another time when
// actually decoding that opcode.
constexpr bool kConservativelyAssumeMemory64 = true;
switch (opcode) {
/********** Control opcodes **********/
case kExprUnreachable:
@@ -1704,7 +1717,8 @@ class WasmDecoder : public Decoder {
return 1;
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE) {
MemoryAccessImmediate<validate> imm(decoder, pc + 1, UINT32_MAX);
MemoryAccessImmediate<validate> imm(decoder, pc + 1, UINT32_MAX,
kConservativelyAssumeMemory64);
return 1 + imm.length;
}
// clang-format on
@@ -1783,7 +1797,8 @@ class WasmDecoder : public Decoder {
case kExprPrefetchT:
case kExprPrefetchNT: {
MemoryAccessImmediate<validate> imm(decoder, pc + length,
UINT32_MAX);
UINT32_MAX,
kConservativelyAssumeMemory64);
return length + imm.length;
}
case kExprS128Load8Lane:
@@ -1795,7 +1810,8 @@ class WasmDecoder : public Decoder {
case kExprS128Store32Lane:
case kExprS128Store64Lane: {
MemoryAccessImmediate<validate> imm(decoder, pc + length,
UINT32_MAX);
UINT32_MAX,
kConservativelyAssumeMemory64);
// 1 more byte for lane index immediate.
return length + imm.length + 1;
}
@@ -1817,7 +1833,8 @@ class WasmDecoder : public Decoder {
switch (opcode) {
FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE) {
MemoryAccessImmediate<validate> imm(decoder, pc + length,
UINT32_MAX);
UINT32_MAX,
kConservativelyAssumeMemory64);
return length + imm.length;
}
FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE) {
@@ -2075,6 +2092,12 @@ class WasmDecoder : public Decoder {
const FunctionSig* sig_;
};
template <Decoder::ValidateFlag validate>
MemoryAccessImmediate<validate>::MemoryAccessImmediate(
WasmDecoder<validate>* decoder, const byte* pc, uint32_t max_alignment)
: MemoryAccessImmediate(decoder, pc, max_alignment,
decoder->module_->is_memory64) {}
#define CALL_INTERFACE(name, ...) interface_.name(this, ##__VA_ARGS__)
#define CALL_INTERFACE_IF_REACHABLE(name, ...) \
do { \
......
@@ -1534,7 +1534,8 @@ class WasmInterpreterInternals {
// the operation to keep trap reporting and tracing accurate, otherwise
// those will report at the middle of an opcode.
MemoryAccessImmediate<Decoder::kNoValidation> imm(
decoder, code->at(pc + prefix_len), sizeof(ctype));
decoder, code->at(pc + prefix_len), sizeof(ctype),
module()->is_memory64);
uint64_t index = ToMemType(Pop());
Address addr = BoundsCheckMem<mtype>(imm.offset, index);
if (!addr) {
@@ -1566,7 +1567,8 @@ class WasmInterpreterInternals {
// the operation to keep trap reporting and tracing accurate, otherwise
// those will report at the middle of an opcode.
MemoryAccessImmediate<Decoder::kNoValidation> imm(
decoder, code->at(pc + prefix_len), sizeof(ctype));
decoder, code->at(pc + prefix_len), sizeof(ctype),
module()->is_memory64);
ctype val = Pop().to<ctype>();
uint64_t index = ToMemType(Pop());
@@ -1593,7 +1595,7 @@ class WasmInterpreterInternals {
Address* address, pc_t pc, int* const len,
type* val = nullptr, type* val2 = nullptr) {
MemoryAccessImmediate<Decoder::kNoValidation> imm(
decoder, code->at(pc + *len), sizeof(type));
decoder, code->at(pc + *len), sizeof(type), module()->is_memory64);
if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
if (val) *val = static_cast<type>(Pop().to<op_type>());
uint64_t index = ToMemType(Pop());
@@ -1617,7 +1619,7 @@ class WasmInterpreterInternals {
int64_t* timeout = nullptr) {
// TODO(manoskouk): Introduce test which exposes wrong pc offset below.
MemoryAccessImmediate<Decoder::kFullValidation> imm(
decoder, code->at(pc + *len), sizeof(type));
decoder, code->at(pc + *len), sizeof(type), module()->is_memory64);
if (timeout) {
*timeout = Pop().to<int64_t>();
}
@@ -2806,7 +2808,7 @@ class WasmInterpreterInternals {
case kExprPrefetchNT: {
// Max alignment doesn't matter, use an arbitrary value.
MemoryAccessImmediate<Decoder::kNoValidation> imm(
decoder, code->at(pc + *len), 4);
decoder, code->at(pc + *len), 4, module()->is_memory64);
// Pop address and do nothing.
Pop().to<uint32_t>();
*len += imm.length;
@@ -2897,7 +2899,7 @@ class WasmInterpreterInternals {
s_type value = Pop().to_s128().to<s_type>();
MemoryAccessImmediate<Decoder::kNoValidation> imm(
decoder, code->at(pc + *len), sizeof(load_type));
decoder, code->at(pc + *len), sizeof(load_type), module()->is_memory64);
SimdLaneImmediate<Decoder::kNoValidation> lane_imm(
decoder, code->at(pc + *len + imm.length));
......
@@ -67,6 +67,23 @@
static_cast<byte>((((x) >> 21) & MASK_7) | 0x80), \
static_cast<byte>((((x) >> 28) & MASK_7))
#define U64V_1(x) U32V_1(static_cast<uint32_t>(x))
#define U64V_2(x) U32V_2(static_cast<uint32_t>(x))
#define U64V_3(x) U32V_3(static_cast<uint32_t>(x))
#define U64V_4(x) U32V_4(static_cast<uint32_t>(x))
#define U64V_5(x) \
static_cast<uint8_t>((uint64_t{x} & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 7) & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 14) & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 21) & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 28) & MASK_7))
#define U64V_6(x) \
static_cast<uint8_t>((uint64_t{x} & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 7) & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 14) & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 21) & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 28) & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 35) & MASK_7))
#define U64V_10(x) \
static_cast<uint8_t>((uint64_t{x} & MASK_7) | 0x80), \
static_cast<uint8_t>(((uint64_t{x} >> 7) & MASK_7) | 0x80), \
......
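
Worked out from the macro definitions above (the array names below are illustrative, not from the sources): U64V_5(0) and U64V_6(0) are padded LEB128 encodings of zero, where every byte except the last carries the 0x80 continuation bit.

// Byte sequences produced by the macros above for an offset of zero.
const uint8_t kOffsetZeroIn5Bytes[] = {0x80, 0x80, 0x80, 0x80, 0x00};        // U64V_5(0)
const uint8_t kOffsetZeroIn6Bytes[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x00};  // U64V_6(0)

Since a 32-bit LEB may occupy at most five bytes, the six-byte form only decodes once the offset is read as a 64-bit LEB, which is what the 64BitOffset test further below relies on.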
@@ -4945,8 +4945,11 @@ TEST_F(BytecodeIteratorTest, WithLocalDecls) {
* Memory64 tests
******************************************************************************/
using FunctionBodyDecoderTestOnBothMemoryTypes =
FunctionBodyDecoderTestBase<::testing::TestWithParam<MemoryType>>;
class FunctionBodyDecoderTestOnBothMemoryTypes
: public FunctionBodyDecoderTestBase<::testing::TestWithParam<MemoryType>> {
public:
bool is_memory64() const { return GetParam() == kMemory64; }
};
std::string PrintMemoryType(::testing::TestParamInfo<MemoryType> info) {
switch (info.param) {
@@ -4964,17 +4967,40 @@ INSTANTIATE_TEST_SUITE_P(MemoryTypes, FunctionBodyDecoderTestOnBothMemoryTypes,
TEST_P(FunctionBodyDecoderTestOnBothMemoryTypes, IndexTypes) {
builder.InitializeMemory(GetParam());
const bool is_memory64 = GetParam() == kMemory64;
Validate(!is_memory64, sigs.i_v(),
Validate(!is_memory64(), sigs.i_v(),
{WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)});
Validate(is_memory64, sigs.i_v(),
Validate(is_memory64(), sigs.i_v(),
{WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO64)});
Validate(!is_memory64, sigs.v_v(),
Validate(!is_memory64(), sigs.v_v(),
{WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO)});
Validate(is_memory64, sigs.v_v(),
Validate(is_memory64(), sigs.v_v(),
{WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO64, WASM_ZERO)});
}
TEST_P(FunctionBodyDecoderTestOnBothMemoryTypes, 64BitOffset) {
builder.InitializeMemory(GetParam());
// Macro for defining a zero constant of the right type. Explicitly use
// {uint8_t} to make MSVC happy.
#define ZERO_FOR_TYPE \
WASM_SEQ(is_memory64() ? uint8_t{kExprI64Const} : uint8_t{kExprI32Const}, 0)
// Offset is zero encoded in 5 bytes (works always).
Validate(
true, sigs.i_v(),
{WASM_LOAD_MEM_OFFSET(MachineType::Int32(), U64V_5(0), ZERO_FOR_TYPE)});
// Offset is zero encoded in 6 bytes (works only in memory64).
Validate(
is_memory64(), sigs.i_v(),
{WASM_LOAD_MEM_OFFSET(MachineType::Int32(), U64V_6(0), ZERO_FOR_TYPE)});
// Same with store.
Validate(true, sigs.v_v(),
{WASM_STORE_MEM_OFFSET(MachineType::Int32(), U64V_5(0),
ZERO_FOR_TYPE, WASM_ZERO)});
Validate(is_memory64(), sigs.v_v(),
{WASM_STORE_MEM_OFFSET(MachineType::Int32(), U64V_6(0),
ZERO_FOR_TYPE, WASM_ZERO)});
#undef ZERO_FOR_TYPE
}
#undef B1
#undef B2
#undef B3
......