Commit 118dff9d authored by Ilya Rezvov, committed by V8 LUCI CQ

[wasm-atomics] Use traps for atomic Load and Store OOB handling

Bug: v8:12946
Change-Id: I3d9037a6dd940fe25f737efca49835b098d55081
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3691129
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Ilya Rezvov <irezvov@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81336}
parent 54c69fc5
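
With this change, atomic loads and stores compiled for wasm can be marked as protected memory accesses, so an out-of-bounds access is caught by the trap handler instead of an explicit bounds check emitted in front of every atomic operation. The diff below threads a MemoryAccessKind through the machine operators and instruction selectors so the backends know which accesses to mark. As a rough illustration (plain C++, not V8 code; the function is invented), this is the kind of explicit check that the trap-handler path makes unnecessary:

  // Illustrative only: the explicit bounds check previously emitted before an
  // atomic access. With trap handling, the access is emitted directly, its pc
  // is recorded as protected, and a hardware fault there is translated into a
  // wasm kTrapMemOutOfBounds trap by the signal handler.
  #include <cstdint>

  bool AtomicAccessInBounds(uint64_t index, uint64_t offset,
                            uint64_t access_size, uint64_t memory_size) {
    // Equivalent to index + offset + access_size <= memory_size, written so
    // that the additions cannot overflow.
    return offset <= memory_size && access_size <= memory_size - offset &&
           index <= memory_size - offset - access_size;
  }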
@@ -488,12 +488,14 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
 #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, reg)                   \
   do {                                                                 \
     __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+    EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());  \
     __ asm_instr(i.Output##reg(), i.TempRegister(0));                  \
   } while (0)
 #define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, reg)                  \
   do {                                                                 \
     __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+    EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());  \
     __ asm_instr(i.Input##reg(2), i.TempRegister(0));                  \
   } while (0)
...
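
EmitOOLTrapIfNeeded is the helper this backend already uses for ordinary protected loads and stores; calling it from the atomic macros extends the same mechanism to atomics. Roughly, its job is the following (a self-contained sketch, not the real arm64 implementation; the bit position and the container are assumptions):

  // Sketch only: if the selected instruction carries the protected access
  // mode, remember the pc of the memory instruction about to be emitted, so
  // the trap handler can attribute a fault at that pc to wasm and raise
  // kTrapMemOutOfBounds instead of crashing the process.
  #include <cstdint>
  #include <vector>

  enum MemoryAccessMode : uint32_t { kMemoryAccessDirect, kMemoryAccessProtected };
  constexpr int kAccessModeShift = 20;  // assumed bit position, for illustration

  MemoryAccessMode DecodeAccessMode(uint32_t opcode) {
    return static_cast<MemoryAccessMode>((opcode >> kAccessModeShift) & 1);
  }

  void EmitOOLTrapIfNeededSketch(uint32_t opcode, int pc_offset,
                                 std::vector<int>* protected_pc_offsets) {
    if (DecodeAccessMode(opcode) == kMemoryAccessProtected) {
      // Stand-in for recording an out-of-line trap for this pc.
      protected_pc_offsets->push_back(pc_offset);
    }
  }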
@@ -41,7 +41,9 @@ namespace compiler {
   V(Arm64Strh)                    \
   V(Arm64StrQ)                    \
   V(Arm64StrS)                    \
-  V(Arm64StrW)
+  V(Arm64StrW)                    \
+  V(Arm64Word64AtomicLoadUint64)  \
+  V(Arm64Word64AtomicStoreWord64)
 #define TARGET_ARCH_OPCODE_LIST(V)                   \
   TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
@@ -345,8 +347,6 @@ namespace compiler {
   V(Arm64I32x4AllTrue)            \
   V(Arm64I16x8AllTrue)            \
   V(Arm64I8x16AllTrue)            \
-  V(Arm64Word64AtomicLoadUint64)  \
-  V(Arm64Word64AtomicStoreWord64) \
   V(Arm64Word64AtomicAddUint64)   \
   V(Arm64Word64AtomicSubUint64)   \
   V(Arm64Word64AtomicAndUint64)   \
...
@@ -2680,6 +2680,11 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
     default:
       UNREACHABLE();
   }
+  if (atomic_load_params.kind() == MemoryAccessKind::kProtected) {
+    code |= AccessModeField::encode(kMemoryAccessProtected);
+  }
   code |=
       AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
   selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
@@ -2751,6 +2756,10 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
     code |= AtomicWidthField::encode(width);
   }
+  if (store_params.kind() == MemoryAccessKind::kProtected) {
+    code |= AccessModeField::encode(kMemoryAccessProtected);
+  }
   code |= AddressingModeField::encode(kMode_MRR);
   selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps),
                  temps);
...
@@ -66,6 +66,18 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
   UNREACHABLE();
 }
+#define COMMON_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+  V(ArchStoreWithWriteBarrier)                             \
+  V(ArchAtomicStoreWithWriteBarrier)                       \
+  V(AtomicLoadInt8)                                        \
+  V(AtomicLoadUint8)                                       \
+  V(AtomicLoadInt16)                                       \
+  V(AtomicLoadUint16)                                      \
+  V(AtomicLoadWord32)                                      \
+  V(AtomicStoreWord8)                                      \
+  V(AtomicStoreWord16)                                     \
+  V(AtomicStoreWord32)
 // Target-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
 #define COMMON_ARCH_OPCODE_LIST(V) \
@@ -101,19 +113,9 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
   V(ArchFramePointer)                \
   V(ArchParentFramePointer)          \
   V(ArchTruncateDoubleToI)           \
-  V(ArchStoreWithWriteBarrier)       \
-  V(ArchAtomicStoreWithWriteBarrier) \
   V(ArchStackSlot)                   \
   V(ArchStackPointerGreaterThan)     \
   V(ArchStackCheckOffset)            \
-  V(AtomicLoadInt8)                  \
-  V(AtomicLoadUint8)                 \
-  V(AtomicLoadInt16)                 \
-  V(AtomicLoadUint16)                \
-  V(AtomicLoadWord32)                \
-  V(AtomicStoreWord8)                \
-  V(AtomicStoreWord16)               \
-  V(AtomicStoreWord32)               \
   V(AtomicExchangeInt8)              \
   V(AtomicExchangeUint8)             \
   V(AtomicExchangeInt16)             \
@@ -169,7 +171,8 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
   V(Ieee754Float64Sin)               \
   V(Ieee754Float64Sinh)              \
   V(Ieee754Float64Tan)               \
-  V(Ieee754Float64Tanh)
+  V(Ieee754Float64Tanh)              \
+  COMMON_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V)
 #define ARCH_OPCODE_LIST(V)  \
   COMMON_ARCH_OPCODE_LIST(V) \
@@ -320,6 +323,7 @@ inline bool HasMemoryAccessMode(ArchOpcode opcode) {
 #define CASE(Name) \
   case k##Name:    \
     return true;
+    COMMON_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
     TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
 #undef CASE
   default:
...
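
Moving these opcodes into COMMON_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST lets the shared HasMemoryAccessMode predicate return true for them on every architecture without each backend repeating the list. The X-macro pattern behind this is sketched below (a standalone illustration with made-up opcode names, not V8's actual lists):

  // Standalone illustration of the X-macro pattern used above.
  #define MY_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
    V(AtomicLoadWord32)                             \
    V(AtomicStoreWord32)

  #define MY_OPCODE_LIST(V)                   \
    V(Nop)                                    \
    MY_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V)

  enum MyOpcode {
  #define DECLARE_OPCODE(Name) k##Name,
    MY_OPCODE_LIST(DECLARE_OPCODE)
  #undef DECLARE_OPCODE
  };

  // Opcodes in the "with memory access mode" sublist can carry an access-mode
  // field (and thus be protected); everything else cannot.
  bool HasMemoryAccessModeSketch(MyOpcode opcode) {
    switch (opcode) {
  #define CASE(Name) \
    case k##Name:    \
      return true;
      MY_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
  #undef CASE
      default:
        return false;
    }
  }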
@@ -48,7 +48,8 @@ namespace compiler {
   V(X64S128Load8x8S)            \
   V(X64S128Load8x8U)            \
   V(X64S128Store32Lane)         \
-  V(X64S128Store64Lane)
+  V(X64S128Store64Lane)         \
+  V(X64Word64AtomicStoreWord64)
 #define TARGET_ARCH_OPCODE_LIST(V)                   \
   TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
@@ -403,7 +404,6 @@ namespace compiler {
   V(X64Word64AtomicAndUint64)              \
   V(X64Word64AtomicOrUint64)               \
   V(X64Word64AtomicXorUint64)              \
-  V(X64Word64AtomicStoreWord64)            \
   V(X64Word64AtomicExchangeUint64)         \
   V(X64Word64AtomicCompareExchangeUint64)
...
@@ -502,7 +502,9 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
   AddressingMode mode =
       g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count, reg_kind);
   InstructionCode code = opcode | AddressingModeField::encode(mode);
-  if (node->opcode() == IrOpcode::kProtectedLoad) {
+  if (node->opcode() == IrOpcode::kProtectedLoad ||
+      node->opcode() == IrOpcode::kWord32AtomicLoad ||
+      node->opcode() == IrOpcode::kWord64AtomicLoad) {
     code |= AccessModeField::encode(kMemoryAccessProtected);
   }
   Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
@@ -537,7 +539,8 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
 void VisitStoreCommon(InstructionSelector* selector, Node* node,
                       StoreRepresentation store_rep,
-                      base::Optional<AtomicMemoryOrder> atomic_order) {
+                      base::Optional<AtomicMemoryOrder> atomic_order,
+                      MemoryAccessKind acs_kind = MemoryAccessKind::kNormal) {
   X64OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
@@ -553,6 +556,10 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
     write_barrier_kind = kFullWriteBarrier;
   }
+  const auto access_mode = acs_kind == MemoryAccessKind::kProtected
+                               ? MemoryAccessMode::kMemoryAccessProtected
+                               : MemoryAccessMode::kMemoryAccessDirect;
   if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
     DCHECK(CanBeTaggedOrCompressedPointer(store_rep.representation()));
     AddressingMode addressing_mode;
@@ -567,6 +574,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
                : kArchStoreWithWriteBarrier;
     code |= AddressingModeField::encode(addressing_mode);
     code |= MiscField::encode(static_cast<int>(record_write_mode));
+    code |= AccessModeField::encode(access_mode);
     selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
                    arraysize(temps), temps);
   } else {
@@ -617,8 +625,9 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
       opcode = GetStoreOpcode(store_rep);
     }
-    InstructionCode code =
-        opcode | AddressingModeField::encode(addressing_mode);
+    InstructionCode code = opcode
+                           | AddressingModeField::encode(addressing_mode)
+                           | AccessModeField::encode(access_mode);
     selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
                    input_count, inputs, temp_count, temps);
   }
@@ -2901,14 +2910,16 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
   DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
   DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
                  kTaggedSize == 4);
-  VisitStoreCommon(this, node, params.store_representation(), params.order());
+  VisitStoreCommon(this, node, params.store_representation(), params.order(),
+                   params.kind());
 }
 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
   AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
   DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
                  kTaggedSize == 8);
-  VisitStoreCommon(this, node, params.store_representation(), params.order());
+  VisitStoreCommon(this, node, params.store_representation(), params.order(),
+                   params.kind());
 }
 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
...
This diff is collapsed.
@@ -44,6 +44,15 @@ class OptionalOperator final {
   const Operator* const op_;
 };
+enum class MemoryAccessKind {
+  kNormal,
+  kUnaligned,
+  kProtected,
+};
+size_t hash_value(MemoryAccessKind);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, MemoryAccessKind);
 // A Load needs a MachineType.
 using LoadRepresentation = MachineType;
@@ -56,15 +65,18 @@ V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
 class AtomicLoadParameters final {
  public:
   AtomicLoadParameters(LoadRepresentation representation,
-                       AtomicMemoryOrder order)
-      : representation_(representation), order_(order) {}
+                       AtomicMemoryOrder order,
+                       MemoryAccessKind kind = MemoryAccessKind::kNormal)
+      : representation_(representation), order_(order), kind_(kind) {}
   LoadRepresentation representation() const { return representation_; }
   AtomicMemoryOrder order() const { return order_; }
+  MemoryAccessKind kind() const { return kind_; }
  private:
   LoadRepresentation representation_;
   AtomicMemoryOrder order_;
+  MemoryAccessKind kind_;
 };
 V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
@@ -77,16 +89,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
 V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
     V8_WARN_UNUSED_RESULT;
-enum class MemoryAccessKind {
-  kNormal,
-  kUnaligned,
-  kProtected,
-};
-size_t hash_value(MemoryAccessKind);
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, MemoryAccessKind);
 enum class LoadTransformation {
   kS128Load8Splat,
   kS128Load16Splat,
@@ -167,9 +169,10 @@ class AtomicStoreParameters final {
  public:
   AtomicStoreParameters(MachineRepresentation representation,
                         WriteBarrierKind write_barrier_kind,
-                        AtomicMemoryOrder order)
+                        AtomicMemoryOrder order,
+                        MemoryAccessKind kind = MemoryAccessKind::kNormal)
       : store_representation_(representation, write_barrier_kind),
-        order_(order) {}
+        order_(order), kind_(kind) {}
   MachineRepresentation representation() const {
     return store_representation_.representation();
@@ -178,6 +181,7 @@ class AtomicStoreParameters final {
     return store_representation_.write_barrier_kind();
   }
   AtomicMemoryOrder order() const { return order_; }
+  MemoryAccessKind kind() const { return kind_; }
   StoreRepresentation store_representation() const {
     return store_representation_;
@@ -186,6 +190,7 @@ class AtomicStoreParameters final {
  private:
   StoreRepresentation store_representation_;
   AtomicMemoryOrder order_;
+  MemoryAccessKind kind_;
 };
 V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
...
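
Both parameter classes default the new kind to MemoryAccessKind::kNormal, so existing call sites keep compiling unchanged; the wasm compiler is the caller that passes a different kind in this change (see the wasm-compiler.cc hunks below). A usage sketch with illustrative argument values:

  // Illustrative values; this mirrors how wasm-compiler.cc constructs the
  // parameters once the bounds check has been delegated to the trap handler.
  AtomicLoadParameters load_params(MachineType::Uint64(),
                                   AtomicMemoryOrder::kSeqCst,
                                   MemoryAccessKind::kProtected);
  AtomicStoreParameters store_params(MachineRepresentation::kWord64,
                                     WriteBarrierKind::kNoWriteBarrier,
                                     AtomicMemoryOrder::kSeqCst,
                                     MemoryAccessKind::kProtected);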
@@ -3334,14 +3334,16 @@ void WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
                               gasm_->IntPtrConstant(table_index), index, val);
 }
-Node* WasmGraphBuilder::CheckBoundsAndAlignment(
-    int8_t access_size, Node* index, uint64_t offset,
-    wasm::WasmCodePosition position) {
+std::pair<Node*, WasmGraphBuilder::BoundsCheckResult>
+WasmGraphBuilder::CheckBoundsAndAlignment(int8_t access_size, Node* index,
+                                          uint64_t offset,
+                                          wasm::WasmCodePosition position,
+                                          EnforceBoundsCheck enforce_check) {
   // Atomic operations need bounds checks until the backend can emit protected
   // loads.
-  index =
-      BoundsCheckMem(access_size, index, offset, position, kNeedsBoundsCheck)
-          .first;
+  BoundsCheckResult bounds_check_result;
+  std::tie(index, bounds_check_result) =
+      BoundsCheckMem(access_size, index, offset, position, enforce_check);
   const uintptr_t align_mask = access_size - 1;
@@ -3356,7 +3358,7 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
     // statically known to be unaligned; trap.
     TrapIfEq32(wasm::kTrapUnalignedAccess, Int32Constant(0), 0, position);
   }
-  return index;
+  return {index, bounds_check_result};
 }
 // Unlike regular memory accesses, atomic memory accesses should trap if
@@ -3368,7 +3370,7 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
       gasm_->WordAnd(effective_offset, gasm_->IntPtrConstant(align_mask));
   TrapIfFalse(wasm::kTrapUnalignedAccess,
               gasm_->Word32Equal(cond, Int32Constant(0)), position);
-  return index;
+  return {index, bounds_check_result};
 }
 // Insert code to bounds check a memory access if necessary. Return the
@@ -4750,6 +4752,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
     const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
     const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
     const wasm::ValueType wasm_type;
+    const EnforceBoundsCheck enforce_bounds_check =
+        EnforceBoundsCheck::kNeedsBoundsCheck;
     constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
         : type(t), machine_type(m), operator_by_type(o) {}
@@ -4760,13 +4764,15 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
         : type(t),
           machine_type(m),
           operator_by_atomic_load_params(o),
-          wasm_type(v) {}
+          wasm_type(v),
+          enforce_bounds_check(EnforceBoundsCheck::kCanOmitBoundsCheck) {}
     constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o,
                            wasm::ValueType v)
         : type(t),
          machine_type(m),
          operator_by_atomic_store_rep(o),
-          wasm_type(v) {}
+          wasm_type(v),
+          enforce_bounds_check(EnforceBoundsCheck::kCanOmitBoundsCheck) {}
     // Constexpr, hence just a table lookup in most compilers.
     static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@@ -4888,8 +4894,16 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
   AtomicOpInfo info = AtomicOpInfo::Get(opcode);
-  Node* index = CheckBoundsAndAlignment(info.machine_type.MemSize(), inputs[0],
-                                        offset, position);
+  Node* index;
+  BoundsCheckResult bounds_check_result;
+  std::tie(index, bounds_check_result) =
+      CheckBoundsAndAlignment(info.machine_type.MemSize(), inputs[0], offset,
+                              position, info.enforce_bounds_check);
+  // MemoryAccessKind::kUnaligned is impossible due to the explicit alignment
+  // check.
+  MemoryAccessKind access_kind =
+      bounds_check_result == WasmGraphBuilder::kTrapHandler
+          ? MemoryAccessKind::kProtected
+          : MemoryAccessKind::kNormal;
   // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
   uintptr_t capped_offset = static_cast<uintptr_t>(offset);
@@ -4902,12 +4916,14 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
         info.machine_type.representation());
   } else if (info.operator_by_atomic_load_params) {
     op = (mcgraph()->machine()->*info.operator_by_atomic_load_params)(
-        AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst));
+        AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst,
+                             access_kind));
   } else {
     op = (mcgraph()->machine()->*info.operator_by_atomic_store_rep)(
         AtomicStoreParameters(info.machine_type.representation(),
                               WriteBarrierKind::kNoWriteBarrier,
-                              AtomicMemoryOrder::kSeqCst));
+                              AtomicMemoryOrder::kSeqCst,
+                              access_kind));
   }
   Node* input_nodes[6] = {MemBuffer(capped_offset), index};
@@ -4928,6 +4944,10 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
   Node* result = gasm_->AddNode(
       graph()->NewNode(op, num_actual_inputs + 4, input_nodes));
+  if (access_kind == MemoryAccessKind::kProtected) {
+    SetSourcePosition(result, position);
+  }
 #ifdef V8_TARGET_BIG_ENDIAN
   // Reverse the value bytes after load.
   if (info.operator_by_atomic_load_params) {
...
@@ -616,8 +616,9 @@ class WasmGraphBuilder {
                  wasm::WasmCodePosition,
                  EnforceBoundsCheck);
-  Node* CheckBoundsAndAlignment(int8_t access_size, Node* index,
-                                uint64_t offset, wasm::WasmCodePosition);
+  std::pair<Node*, BoundsCheckResult> CheckBoundsAndAlignment(
+      int8_t access_size, Node* index, uint64_t offset, wasm::WasmCodePosition,
+      EnforceBoundsCheck);
   const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
   const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
...
@@ -2401,6 +2401,8 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
   unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
   uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
   DCHECK_EQ(address % access_size, 0);
+  // First, check whether the memory is accessible (for wasm trap handling).
+  if (!ProbeMemory(address, access_size)) return;
   base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
   if (is_load != 0) {
     if (is_exclusive) {
...
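
Since atomic accesses may now reach the guard region instead of being rejected by an explicit check, the arm64 simulator has to reproduce the trap: it probes the address before taking the global monitor lock and before simulating any part of the acquire/release access, so an out-of-bounds atomic becomes a wasm trap and the access is never simulated. A minimal sketch of that ordering (illustrative; the real ProbeMemory cooperates with the trap handler rather than taking callbacks):

  #include <cstdint>
  #include <functional>
  #include <mutex>

  // Sketch only, not the simulator's real code: probe first, then lock, then
  // perform the simulated access.
  bool SimulateAcquireReleaseAccess(
      uintptr_t address, unsigned access_size, std::mutex& global_monitor_mutex,
      const std::function<bool(uintptr_t, unsigned)>& probe_memory,
      const std::function<void(uintptr_t, unsigned)>& do_access) {
    if (!probe_memory(address, access_size)) return false;  // wasm trap path
    std::lock_guard<std::mutex> lock(global_monitor_mutex);
    do_access(address, access_size);
    return true;
  }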
@@ -368,6 +368,50 @@ WASM_EXEC_TEST(AtomicCompareExchangeNoConsideredEffectful) {
   CHECK_EQ(1, r.Call());
 }
+
+WASM_EXEC_TEST(I32AtomicLoad_trap) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  r.builder().AddMemory(kWasmPageSize);
+  BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_I32V_3(kWasmPageSize),
+                                MachineRepresentation::kWord32));
+  CHECK_TRAP(r.Call());
+}
+
+WASM_EXEC_TEST(I64AtomicLoad_trap) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint64_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  r.builder().AddMemory(kWasmPageSize);
+  BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_I32V_3(kWasmPageSize),
+                                MachineRepresentation::kWord64));
+  CHECK_TRAP64(r.Call());
+}
+
+WASM_EXEC_TEST(I32AtomicStore_trap) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  r.builder().AddMemory(kWasmPageSize);
+  BUILD(r,
+        WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_I32V_3(kWasmPageSize),
+                              WASM_ZERO, MachineRepresentation::kWord32),
+        WASM_ZERO);
+  CHECK_TRAP(r.Call());
+}
+
+WASM_EXEC_TEST(I64AtomicStore_trap) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  r.builder().AddMemory(kWasmPageSize);
+  BUILD(r,
+        WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_I32V_3(kWasmPageSize),
+                              WASM_ZERO64, MachineRepresentation::kWord64),
+        WASM_ZERO);
+  CHECK_TRAP(r.Call());
+}
 }  // namespace test_run_wasm_atomics
 }  // namespace wasm
 }  // namespace internal
...
@@ -97,7 +97,13 @@ function VerifyBoundsCheck(func, memtype_size) {
   // Test out of bounds at boundary
   for (let i = memory.buffer.byteLength - memtype_size + 1;
        i < memory.buffer.byteLength + memtype_size + 4; i++) {
-    assertTraps(kTrapMemOutOfBounds, () => func(i, 5, 10));
+    assertTrapsOneOf(
+        // If the underlying platform uses traps for the bounds check,
+        // kTrapUnalignedAccess will be thrown before kTrapMemOutOfBounds.
+        // Otherwise, kTrapMemOutOfBounds comes first.
+        [kTrapMemOutOfBounds, kTrapUnalignedAccess],
+        () => func(i, 5, 10)
+    );
   }
   // Test out of bounds at maximum + 1
   assertTraps(kTrapMemOutOfBounds, () => func((maxSize + 1) * kPageSize, 5, 1));
...
@@ -343,7 +343,10 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
   assertEquals(0xACED, instance.exports.atomic_load(0));
   assertEquals(0xACED, instance.exports.atomic_load(5 * kPageSize - 4));
   // Verify bounds.
-  assertTraps(kTrapMemOutOfBounds,
+  // If the underlying platform uses traps for the bounds check,
+  // kTrapUnalignedAccess will be thrown before kTrapMemOutOfBounds.
+  // Otherwise, kTrapMemOutOfBounds comes first.
+  assertTrapsOneOf([kTrapMemOutOfBounds, kTrapUnalignedAccess],
               () => instance.exports.atomic_load(5 * kPageSize - 3));
   let obj = {memory: memory, module: module};
   assertEquals(obj.memory.buffer.byteLength, 5 * kPageSize);
@@ -358,11 +361,11 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
   assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
   assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
   assertTrue(0xACED === instance.exports.atomic_load(17 * kPageSize - 4));
-  assertTraps(kTrapMemOutOfBounds,
+  assertTrapsOneOf([kTrapMemOutOfBounds, kTrapUnalignedAccess],
               () => instance.exports.atomic_load(19 * kPageSize - 3));
   assertEquals(19, memory.grow(6));
   assertEquals(obj.memory.buffer.byteLength, 25 * kPageSize);
-  assertTraps(kTrapMemOutOfBounds,
+  assertTrapsOneOf([kTrapMemOutOfBounds, kTrapUnalignedAccess],
               () => instance.exports.atomic_load(25 * kPageSize - 3));
 })();
...
@@ -934,6 +934,13 @@ function assertTraps(trap, code) {
   assertThrows(code, WebAssembly.RuntimeError, kTrapMsgs[trap]);
 }
+function assertTrapsOneOf(traps, code) {
+  const errorChecker = new RegExp(
+      '(' + traps.map(trap => kTrapMsgs[trap]).join('|') + ')'
+  );
+  assertThrows(code, WebAssembly.RuntimeError, errorChecker);
+}
 class Binary {
   constructor() {
     this.length = 0;
...