Commit e48b5c6d authored by Clemens Backes, committed by Commit Bot

[wasm][memory64] Fix Liftoff on 32-bit and add cctest

This adds a first execution test for memory64 in the form of a cctest.
Several things are still not working correctly, hence this test only
checks TurboFan on 64-bit systems, and Liftoff.

Bounds checks in Liftoff are fixed to work correctly on 32-bit.
Follow-up CLs will extend the test to also test TurboFan on 32-bit, the
interpreter, and traps. All of those features still have issues.
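
To illustrate the fixed check (a minimal sketch only; the helper name and signature below are made up for illustration and are not part of this CL), the dynamic bounds check that Liftoff now emits for a 64-bit index on a 32-bit host is equivalent to:

#include <cstdint>

// Sketch of a memory64 bounds check on a 32-bit host (the static shortcut for
// small end offsets is omitted). Memory can never exceed 4GB on such a host,
// so any index with a non-zero high word is out of bounds; the remaining
// comparisons only need the low, pointer-sized word of the index.
bool IndexInBounds(uint64_t index, uintptr_t offset, uint32_t access_size,
                   uintptr_t mem_size) {
  if (static_cast<uint32_t>(index >> 32) != 0) return false;  // would trap
  uintptr_t end_offset = offset + access_size - 1;
  if (end_offset >= mem_size) return false;                   // would trap
  // Effective size is mem_size - end_offset; the access is in bounds iff the
  // low word of the index is below it.
  return static_cast<uint32_t>(index) < mem_size - end_offset;
}

BoundsCheckMem below emits machine code for these comparisons, jumping to the trap label instead of returning false.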

R=manoskouk@chromium.org

Bug: v8:10949
Change-Id: Ic7edcf3783421634fe2ec99eac6f257c557a29b5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2610968
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72014}
parent d5d45c61
@@ -2206,11 +2206,12 @@ class LiftoffCompiler {
enum ForceCheck : bool { kDoForceCheck = true, kDontForceCheck = false };
// Returns true if the memory access is statically known to be out of bounds
// (a jump to the trap was generated then); return false otherwise.
bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
uint64_t offset, Register index, LiftoffRegList pinned,
ForceCheck force_check) {
// Returns {no_reg} if the memory access is statically known to be out of
// bounds (a jump to the trap was generated then); return the GP {index}
// register otherwise (holding the ptrsized index).
Register BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
uint64_t offset, LiftoffRegister index,
LiftoffRegList pinned, ForceCheck force_check) {
// If the offset does not fit in a uintptr_t, this can never succeed on this
// machine.
const bool statically_oob =
@@ -2218,11 +2219,22 @@ class LiftoffCompiler {
!base::IsInBounds<uintptr_t>(offset, access_size,
env_->max_memory_size);
// After bounds checking, we know that the index must be ptrsize, hence only
// look at the lower word on 32-bit systems (the high word is bounds-checked
// further down).
Register index_ptrsize =
kNeedI64RegPair && index.is_gp_pair() ? index.low_gp() : index.gp();
if (!force_check && !statically_oob &&
(!FLAG_wasm_bounds_checks || env_->use_trap_handler)) {
return false;
// With trap handlers we should not have a register pair as input (we
// would only return the lower half).
DCHECK_IMPLIES(env_->use_trap_handler, index.is_gp());
return index_ptrsize;
}
DEBUG_CODE_COMMENT("bounds check memory");
// TODO(wasm): This adds protected instruction information for the jump
// instruction we are about to generate. It would be better to just not add
// protected instruction info when the pc is 0.
@@ -2233,38 +2245,46 @@ class LiftoffCompiler {
if (statically_oob) {
__ emit_jump(trap_label);
decoder->SetSucceedingCodeDynamicallyUnreachable();
return true;
return no_reg;
}
// Convert the index to ptrsize, bounds-checking the high word on 32-bit
// systems for memory64.
if (!env_->module->is_memory64) {
__ emit_u32_to_intptr(index_ptrsize, index_ptrsize);
} else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size);
// Unary "unequal" means "not equals zero".
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, index.high_gp());
}
uintptr_t end_offset = offset + access_size - 1u;
// If the end offset is larger than the smallest memory, dynamically check
// the end offset against the actual memory size, which is not known at
// compile time. Otherwise, only one check is required (see below).
pinned.set(index_ptrsize);
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
Register mem_size = __ GetUnusedRegister(kGpReg, pinned).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
LOAD_INSTANCE_FIELD(mem_size.gp(), MemorySize, kSystemPointerSize);
__ LoadConstant(end_offset_reg, WasmValue::ForUintPtr(end_offset));
// If the end offset is larger than the smallest memory, dynamically check
// the end offset against the actual memory size, which is not known at
// compile time. Otherwise, only one check is required (see below).
if (end_offset > env_->min_memory_size) {
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
mem_size);
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerValueType,
end_offset_reg.gp(), mem_size.gp());
}
// Just reuse the end_offset register for computing the effective size
// (which is >= 0 because of the check above).
LiftoffRegister effective_size_reg = end_offset_reg;
__ emit_ptrsize_sub(effective_size_reg.gp(), mem_size, end_offset_reg.gp());
__ emit_u32_to_intptr(index, index);
__ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
end_offset_reg.gp());
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
LiftoffAssembler::kWasmIntPtr, index,
effective_size_reg.gp());
return false;
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerValueType,
index_ptrsize, effective_size_reg.gp());
return index_ptrsize;
}
void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size,
@@ -2358,17 +2378,16 @@ class LiftoffCompiler {
// Make sure that we can overwrite {index}.
if (__ cache_state()->is_used(LiftoffRegister(index))) {
Register old_index = index;
pinned->clear(LiftoffRegister(old_index));
pinned->clear(LiftoffRegister{old_index});
index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
// TODO(clemensb): Use kWasmI64 if memory64 is used.
if (index != old_index) __ Move(index, old_index, kWasmI32);
if (index != old_index) {
__ Move(index, old_index, kPointerValueType);
}
}
Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
// TODO(clemensb): Use 64-bit operations if memory64 is used.
DCHECK_GE(kMaxUInt32, *offset);
if (*offset) __ emit_i32_addi(index, index, static_cast<uint32_t>(*offset));
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_i32_and(index, index, tmp);
if (*offset) __ emit_ptrsize_addi(index, index, *offset);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
return index;
}
@@ -2384,13 +2403,13 @@ class LiftoffCompiler {
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
if (!CheckSupportedType(decoder, value_type, "load")) return;
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister()).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
kDontForceCheck)) {
return;
}
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDontForceCheck);
if (index == no_reg) return;
uintptr_t offset = imm.offset;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -2422,19 +2441,18 @@ class LiftoffCompiler {
return;
}
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister()).gp();
LiftoffRegister full_index = __ PopToRegister();
// For load splats and load zero, LoadType is the size of the load, and for
// load extends, LoadType is the size of the lane, and it always loads 8
// bytes.
uint32_t access_size =
transform == LoadTransformationKind::kExtend ? 8 : type.size();
if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned,
kDontForceCheck)) {
return;
}
Register index = BoundsCheckMem(decoder, access_size, imm.offset,
full_index, {}, kDontForceCheck);
if (index == no_reg) return;
uintptr_t offset = imm.offset;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load with transformation");
Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
@@ -2470,13 +2488,13 @@ class LiftoffCompiler {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
Register index = pinned.set(__ PopToRegister()).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
kDontForceCheck)) {
return;
}
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDontForceCheck);
if (index == no_reg) return;
uintptr_t offset = imm.offset;
pinned.set(index);
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load lane");
Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
@@ -2507,12 +2525,13 @@ class LiftoffCompiler {
if (!CheckSupportedType(decoder, value_type, "store")) return;
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
Register index = pinned.set(__ PopToRegister(pinned)).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
kDontForceCheck)) {
return;
}
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDontForceCheck);
if (index == no_reg) return;
uintptr_t offset = imm.offset;
pinned.set(index);
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -3282,11 +3301,12 @@ class LiftoffCompiler {
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
Register index = pinned.set(__ PopToRegister(pinned)).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
kDoForceCheck)) {
return;
}
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDoForceCheck);
if (index == no_reg) return;
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
@@ -3305,12 +3325,12 @@ class LiftoffCompiler {
void AtomicLoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm) {
ValueType value_type = type.value_type();
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister()).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
kDoForceCheck)) {
return;
}
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
if (index == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
@@ -3353,11 +3373,12 @@ class LiftoffCompiler {
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
#endif
Register index = pinned.set(__ PopToRegister(pinned)).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
kDoForceCheck)) {
return;
}
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDoForceCheck);
if (index == no_reg) return;
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
@@ -3377,20 +3398,19 @@ class LiftoffCompiler {
// complete address calculation first, so that the address only needs a
// single register. Afterwards we load all remaining values into the
// other registers.
LiftoffRegList pinned;
Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index_reg, pinned,
kDoForceCheck)) {
return;
}
AlignmentCheckMem(decoder, type.size(), imm.offset, index_reg, pinned);
LiftoffRegister full_index = __ PeekToRegister(2, {});
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
if (index == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
__ emit_i32_add(addr, addr, index_reg);
pinned.clear(LiftoffRegister(index_reg));
__ emit_i32_add(addr, addr, index);
pinned.clear(LiftoffRegister(index));
LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
@@ -3410,11 +3430,11 @@ class LiftoffCompiler {
LiftoffRegList pinned;
LiftoffRegister new_value = pinned.set(__ PopToRegister());
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
Register index = pinned.set(__ PopToRegister(pinned)).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
kDoForceCheck)) {
return;
}
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDoForceCheck);
if (index == no_reg) return;
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
@@ -3444,12 +3464,12 @@ class LiftoffCompiler {
void AtomicWait(FullDecoder* decoder, ValueType type,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegList pinned;
Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp();
if (BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset,
index_reg, pinned, kDoForceCheck)) {
return;
}
LiftoffRegister full_index = __ PeekToRegister(2, {});
Register index_reg =
BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset,
full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
AlignmentCheckMem(decoder, type.element_size_bytes(), imm.offset, index_reg,
pinned);
@@ -3520,12 +3540,12 @@ class LiftoffCompiler {
void AtomicNotify(FullDecoder* decoder,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegList pinned;
Register index_reg = pinned.set(__ PeekToRegister(1, pinned)).gp();
if (BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
index_reg, pinned, kDoForceCheck)) {
return;
}
LiftoffRegister full_index = __ PeekToRegister(1, {});
Register index_reg =
BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
AlignmentCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
index_reg, pinned);
......
@@ -307,6 +307,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-exceptions.cc",
"wasm/test-run-wasm-interpreter.cc",
"wasm/test-run-wasm-js.cc",
"wasm/test-run-wasm-memory64.cc",
"wasm/test-run-wasm-module.cc",
"wasm/test-run-wasm-sign-extension.cc",
"wasm/test-run-wasm-simd-liftoff.cc",
......
@@ -491,21 +491,7 @@
'test-gc/*': [SKIP],
'test-grow-memory/*': [SKIP],
'test-liftoff-inspection/*': [SKIP],
'test-run-wasm-64/*': [SKIP],
'test-run-wasm-asmjs/*': [SKIP],
'test-run-wasm-atomics64/*': [SKIP],
'test-run-wasm-atomics/*': [SKIP],
'test-run-wasm-bulk-memory/*': [SKIP],
'test-run-wasm/*': [SKIP],
'test-run-wasm-exceptions/*': [SKIP],
'test-run-wasm-interpreter/*': [SKIP],
'test-run-wasm-js/*': [SKIP],
'test-run-wasm-module/*': [SKIP],
'test-run-wasm-sign-extension/*': [SKIP],
'test-run-wasm-simd-liftoff/*': [SKIP],
'test-run-wasm-simd-scalar-lowering/*': [SKIP],
'test-run-wasm-simd/*': [SKIP],
'test-run-wasm-wrappers/*': [SKIP],
'test-run-wasm*': [SKIP],
'test-streaming-compilation/*': [SKIP],
'test-wasm-breakpoints/*': [SKIP],
'test-wasm-codegen/*': [SKIP],
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/wasm/wasm-opcodes-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
namespace wasm {
template <typename ReturnType, typename... ParamTypes>
class Memory64Runner : public WasmRunner<ReturnType, ParamTypes...> {
public:
explicit Memory64Runner(TestExecutionTier execution_tier)
: WasmRunner<ReturnType, ParamTypes...>(execution_tier) {
this->builder().EnableFeature(kFeature_memory64);
this->builder().SetMemory64();
}
};
WASM_EXEC_TEST(Load) {
// TODO(clemensb): Implement memory64 in the interpreter.
if (execution_tier == TestExecutionTier::kInterpreter) return;
// TODO(clemensb): Fix memory64 in TurboFan on 32-bit systems.
if (execution_tier == TestExecutionTier::kTurbofan &&
kSystemPointerSize == 4) {
return;
}
Memory64Runner<uint32_t, uint64_t> r(execution_tier);
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(int32_t));
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0)));
CHECK_EQ(0, r.Call(0));
memory[0] = 0x12345678;
CHECK_EQ(0x12345678, r.Call(0));
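// Wasm memory is little-endian, so a 4-byte load at offset 1 of the bytes
// {78 56 34 12 00 00 00 00} yields 0x00123456, at offset 2 yields 0x1234, and
// so on; the load at offset 4 reads only zeros.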
CHECK_EQ(0x123456, r.Call(1));
CHECK_EQ(0x1234, r.Call(2));
CHECK_EQ(0x12, r.Call(3));
CHECK_EQ(0x0, r.Call(4));
// TODO(clemensb): Check traps.
}
// TODO(clemensb): Test atomic instructions.
} // namespace wasm
} // namespace internal
} // namespace v8
@@ -187,6 +187,8 @@ class TestingModuleBuilder {
void SetHasSharedMemory() { test_module_->has_shared_memory = true; }
void SetMemory64() { test_module_->is_memory64 = true; }
enum FunctionType { kImport, kWasm };
uint32_t AddFunction(const FunctionSig* sig, const char* name,
FunctionType type);
@@ -254,6 +256,8 @@ class TestingModuleBuilder {
return runtime_exception_support_;
}
void EnableFeature(WasmFeature feature) { enabled_features_.Add(feature); }
private:
std::shared_ptr<WasmModule> test_module_;
Isolate* isolate_;
......