Commit 7dcfe1a7 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Additional masking to memory accesses

Add additional protection against OOB accesses by masking the
access index with a mask precomputed from the memory size.

R=clemensh@chromium.org, bradnelson@chromium.org

Change-Id: I1d5875121e1904074b115a2c88ca773b6c1c1a66
Reviewed-on: https://chromium-review.googlesource.com/830394
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50162}
parent ce77c8cb
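
The core idea of the patch, as one self-contained sketch (illustrative C++ only; the struct and function names below are invented, not V8 APIs): every memory access is bounds-checked as before, and the index is then additionally ANDed with a mask precomputed from the memory size, so that even an access which slips past the check cannot reach far outside the memory.

#include <cstdint>

// Sketch of the masking scheme (names are illustrative, not V8 code).
struct MemSketch {
  uint8_t* mem_start;
  uint32_t mem_size;
  uint32_t mem_mask;  // next power of two of mem_size, minus one
};

// Returns the effective address of an 'access_size'-byte access at
// 'offset + index', or nullptr when out of bounds. The mask is applied
// even on the in-bounds path, mirroring the interpreter change below.
inline uint8_t* CheckedAddr(const MemSketch& m, uint32_t offset,
                            uint32_t index, uint32_t access_size) {
  if (access_size > m.mem_size) return nullptr;
  if (offset > m.mem_size - access_size) return nullptr;
  if (index > m.mem_size - access_size - offset) return nullptr;
  return m.mem_start + offset + (index & m.mem_mask);
}

Because the mask is one less than a power of two at least as large as mem_size, in-bounds indices pass through unchanged; the masking only matters on the path that should never be taken.
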
@@ -3228,6 +3228,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
DCHECK_NOT_NULL(wasm_context_);
DCHECK_NOT_NULL(*control_);
DCHECK_NOT_NULL(*effect_);
// Load the memory start.
Node* mem_start = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
@@ -3235,7 +3236,6 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
static_cast<int32_t>(offsetof(WasmContext, mem_start))),
*effect_, *control_);
*effect_ = mem_start;
context_cache->mem_start = mem_start;
// Load the memory size.
@@ -3245,34 +3245,44 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
static_cast<int32_t>(offsetof(WasmContext, mem_size))),
*effect_, *control_);
*effect_ = mem_size;
if (jsgraph()->machine()->Is64()) {
mem_size = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
mem_size);
}
context_cache->mem_size = mem_size;
// Load the memory mask.
Node* mem_mask = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_mask))),
*effect_, *control_);
*effect_ = mem_mask;
context_cache->mem_mask = mem_mask;
}
void WasmGraphBuilder::PrepareContextCacheForLoop(
WasmContextCacheNodes* context_cache, Node* control) {
// Introduce phis for mem_size and mem_start.
context_cache->mem_size =
Phi(MachineRepresentation::kWord32, 1, &context_cache->mem_size, control);
context_cache->mem_start = Phi(MachineType::PointerRepresentation(), 1,
&context_cache->mem_start, control);
#define INTRODUCE_PHI(field, rep) \
context_cache->field = Phi(rep, 1, &context_cache->field, control);
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
#undef INTRODUCE_PHI
}
void WasmGraphBuilder::NewContextCacheMerge(WasmContextCacheNodes* to,
WasmContextCacheNodes* from,
Node* merge) {
if (to->mem_size != from->mem_size) {
Node* vals[] = {to->mem_size, from->mem_size};
to->mem_size = Phi(MachineRepresentation::kWord32, 2, vals, merge);
}
if (to->mem_start != from->mem_start) {
Node* vals[] = {to->mem_start, from->mem_start};
to->mem_start = Phi(MachineType::PointerRepresentation(), 2, vals, merge);
#define INTRODUCE_PHI(field, rep) \
if (to->field != from->field) { \
Node* vals[] = {to->field, from->field}; \
to->field = Phi(rep, 2, vals, merge); \
}
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
#undef INTRODUCE_PHI
}
void WasmGraphBuilder::MergeContextCacheInto(WasmContextCacheNodes* to,
@@ -3282,6 +3292,8 @@ void WasmGraphBuilder::MergeContextCacheInto(WasmContextCacheNodes* to,
to->mem_size, from->mem_size);
to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
merge, to->mem_start, from->mem_start);
to->mem_mask = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
to->mem_mask, from->mem_mask);
}
Node* WasmGraphBuilder::CreateOrMergeIntoPhi(wasm::ValueType type, Node* merge,
@@ -3483,13 +3495,24 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
return node;
}
void WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
uint32_t offset,
wasm::WasmCodePosition position) {
if (FLAG_wasm_no_bounds_checks) return;
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
if (FLAG_wasm_no_bounds_checks) return index;
DCHECK_NOT_NULL(context_cache_);
Node* mem_size = context_cache_->mem_size;
Node* mem_mask = context_cache_->mem_mask;
DCHECK_NOT_NULL(mem_size);
DCHECK_NOT_NULL(mem_mask);
auto m = jsgraph()->machine();
if (use_trap_handler() && enforce_check == kCanOmitBoundsCheck) {
// Simply zero out the 32-bits on 64-bit targets and let the trap handler
// do its job.
return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
: index;
}
uint32_t min_size = env_->module->initial_pages * wasm::WasmModule::kPageSize;
uint32_t max_size =
@@ -3501,7 +3524,7 @@ void WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0), 0,
position);
return;
return jsgraph()->IntPtrConstant(0);
}
uint32_t end_offset = offset + access_size;
@@ -3522,17 +3545,21 @@ void WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
} else {
// The end offset is within the bounds of the smallest memory, so only
// one check is required. Check to see if the index is also a constant.
UintPtrMatcher m(index);
if (m.HasValue()) {
uint64_t index_val = m.Value();
UintPtrMatcher match(index);
if (match.HasValue()) {
uint64_t index_val = match.Value();
if ((index_val + offset + access_size) <= min_size) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
return;
return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
: index;
}
}
}
// Compute the effective size of the memory, which is the size of the memory
// minus the statically known offset, minus the byte size of the access minus
// one.
Node* effective_size;
if (jsgraph()->machine()->Is32()) {
effective_size =
@@ -3544,12 +3571,14 @@ void WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
jsgraph()->Int64Constant(static_cast<int64_t>(end_offset - 1)));
}
const Operator* less = jsgraph()->machine()->Is32()
? jsgraph()->machine()->Uint32LessThan()
: jsgraph()->machine()->Uint64LessThan();
Node* cond = graph()->NewNode(less, index, effective_size);
// Introduce the actual bounds check.
Node* cond = graph()->NewNode(m->Uint32LessThan(), index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
// In the fallthrough case, condition the index with the memory mask.
Node* masked_index = graph()->NewNode(m->Word32And(), index, mem_mask);
return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), masked_index)
: masked_index;
}
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
@@ -3602,15 +3631,10 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
wasm::WasmCodePosition position) {
Node* load;
if (jsgraph()->machine()->Is64()) {
index =
graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
}
// Wasm semantics throw on OOB. Introduce explicit bounds check.
if (!use_trap_handler()) {
BoundsCheckMem(wasm::WasmOpcodes::MemSize(memtype), index, offset,
position);
}
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(memtype), index, offset,
position, kCanOmitBoundsCheck);
if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
@@ -3662,15 +3686,8 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
wasm::ValueType type) {
Node* store;
if (jsgraph()->machine()->Is64()) {
index =
graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
}
// Wasm semantics throw on OOB. Introduce explicit bounds check.
if (!use_trap_handler()) {
BoundsCheckMem(wasm::WasmOpcodes::MemSize(mem_rep), index, offset,
position);
}
index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(mem_rep), index, offset,
position, kCanOmitBoundsCheck);
#if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndiannessStore(val, mem_rep, type);
@@ -4235,11 +4252,12 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
switch (opcode) {
#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
case wasm::kExpr##Name: { \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), inputs[0], \
offset, position); \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), inputs[0], inputs[1], *effect_, *control_); \
MemBuffer(offset), index, inputs[1], *effect_, *control_); \
break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
@@ -4247,12 +4265,12 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
case wasm::kExpr##Name: { \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), inputs[0], \
offset, position); \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), inputs[0], inputs[1], inputs[2], *effect_, \
*control_); \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
break; \
}
ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP)
@@ -4260,11 +4278,12 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
case wasm::kExpr##Name: { \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), inputs[0], \
offset, position); \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
MemBuffer(offset), inputs[0], *effect_, *control_); \
MemBuffer(offset), index, *effect_, *control_); \
break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
@@ -4272,11 +4291,12 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
case wasm::kExpr##Name: { \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), inputs[0], \
offset, position); \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
MemBuffer(offset), inputs[0], inputs[1], *effect_, *control_); \
MemBuffer(offset), index, inputs[1], *effect_, *control_); \
break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
......
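
A worked instance of the effective_size condition introduced above, with illustrative numbers: for mem_size = 65536, a 4-byte access at static offset 4 has end_offset = 8, so the check requires index < 65536 - 8 + 1 = 65529; the largest passing index, 65528, touches bytes 65532..65535, ending exactly at the last byte of the memory.

// The bounds condition, checked at compile time on example values:
//   index < mem_size - (offset + access_size) + 1
static_assert(65536u - (4u + 4u) + 1u == 65529u,
              "effective size for a 4-byte access at static offset 4");
static_assert(65528u + 4u + 4u - 1u == 65535u,
              "largest passing index touches exactly the last byte");
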
@@ -218,6 +218,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
struct WasmContextCacheNodes {
Node* mem_start;
Node* mem_size;
Node* mem_mask;
};
// Abstracts details of building TurboFan graph nodes for wasm to separate
@@ -225,6 +226,8 @@ struct WasmContextCacheNodes {
typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
Handle<Code> centry_stub, wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt = nullptr,
@@ -370,8 +373,6 @@ class WasmGraphBuilder {
void set_effect_ptr(Node** effect) { this->effect_ = effect; }
Node* LoadMemSize();
Node* LoadMemStart();
void GetGlobalBaseAndOffset(MachineType mem_type, uint32_t offset,
Node** base_node, Node** offset_node);
@@ -458,8 +459,9 @@ class WasmGraphBuilder {
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
Node* BuildChangeEndiannessStore(Node* node, MachineRepresentation rep,
......
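
The new EnforceBoundsCheck parameter encodes when the explicit check may be dropped: with a trap handler available, plain loads and stores pass kCanOmitBoundsCheck and rely on the hardware fault, while atomic operations always pass kNeedsBoundsCheck. A standalone model of that decision (sketch only; use_trap_handler stands in for the builder's predicate of the same name):

enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };

// Mirrors the early-out at the top of BoundsCheckMem: the explicit check
// is skipped only when a trap handler is active and the caller allows it.
inline bool NeedsExplicitCheck(bool use_trap_handler,
                               EnforceBoundsCheck enforce) {
  return !(use_trap_handler && enforce == kCanOmitBoundsCheck);
}
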
@@ -1478,9 +1478,15 @@ class ThreadImpl {
}
template <typename mtype>
inline bool BoundsCheck(uint32_t mem_size, uint32_t offset, uint32_t index) {
return sizeof(mtype) <= mem_size && offset <= mem_size - sizeof(mtype) &&
index <= mem_size - sizeof(mtype) - offset;
inline byte* BoundsCheckMem(uint32_t offset, uint32_t index) {
uint32_t mem_size = wasm_context_->mem_size;
if (sizeof(mtype) > mem_size) return nullptr;
if (offset > (mem_size - sizeof(mtype))) return nullptr;
if (index > (mem_size - sizeof(mtype) - offset)) return nullptr;
// Compute the effective address of the access, making sure to condition
// the index even in the in-bounds case.
return wasm_context_->mem_start + offset +
(index & wasm_context_->mem_mask);
}
template <typename ctype, typename mtype>
@@ -1489,11 +1495,11 @@ class ThreadImpl {
MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc),
sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<mtype>(wasm_context_->mem_size, operand.offset, index)) {
byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
if (!addr) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
byte* addr = wasm_context_->mem_start + operand.offset + index;
WasmValue result(
converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
@@ -1518,11 +1524,11 @@ class ThreadImpl {
ctype val = Pop().to<ctype>();
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<mtype>(wasm_context_->mem_size, operand.offset, index)) {
byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
if (!addr) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
byte* addr = wasm_context_->mem_start + operand.offset + index;
WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
len = 1 + operand.length;
@@ -1544,11 +1550,11 @@ class ThreadImpl {
sizeof(type));
if (val) *val = Pop().to<uint32_t>();
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<type>(wasm_context_->mem_size, operand.offset, index)) {
address = BoundsCheckMem<type>(operand.offset, index);
if (!address) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
address = wasm_context_->mem_start + operand.offset + index;
len = 2 + operand.length;
return true;
}
@@ -2022,10 +2028,10 @@ class ThreadImpl {
case kExpr##name: { \
uint32_t index = Pop().to<uint32_t>(); \
ctype result; \
if (!BoundsCheck<mtype>(wasm_context_->mem_size, 0, index)) { \
byte* addr = BoundsCheckMem<mtype>(0, index); \
if (!addr) { \
result = defval; \
} else { \
byte* addr = wasm_context_->mem_start + index; \
/* TODO(titzer): alignment for asmjs load mem? */ \
result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
} \
@@ -2047,9 +2053,8 @@ class ThreadImpl {
case kExpr##name: { \
WasmValue val = Pop(); \
uint32_t index = Pop().to<uint32_t>(); \
if (BoundsCheck<mtype>(wasm_context_->mem_size, 0, index)) { \
byte* addr = wasm_context_->mem_start + index; \
/* TODO(titzer): alignment for asmjs store mem? */ \
byte* addr = BoundsCheckMem<mtype>(0, index); \
if (addr) { \
*(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
} \
Push(val); \
......
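
The interpreter's staged comparisons above are not just style: a single combined comparison can wrap around in uint32 arithmetic and wrongly accept an access. A minimal demonstration with illustrative values (offset = 0xFFFFFFF0, index = 0x20, a 4-byte access, mem_size = 0x10000): offset + index + 4 wraps to 0x14, which passes the naive check, while the staged checks reject it at the offset comparison.

#include <cstdint>

// BUG on purpose: the sum can overflow uint32 and pass the comparison.
inline bool NaiveCheck(uint32_t mem_size, uint32_t offset, uint32_t index,
                       uint32_t size) {
  return offset + index + size <= mem_size;
}

// Overflow-safe version, shaped like the interpreter's BoundsCheckMem:
// each subtraction is performed only after it is known not to underflow.
inline bool StagedCheck(uint32_t mem_size, uint32_t offset, uint32_t index,
                        uint32_t size) {
  if (size > mem_size) return false;
  if (offset > mem_size - size) return false;
  return index <= mem_size - size - offset;
}
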
@@ -5,6 +5,7 @@
#ifndef V8_WASM_OBJECTS_H_
#define V8_WASM_OBJECTS_H_
#include "src/base/bits.h"
#include "src/debug/debug.h"
#include "src/debug/interface-types.h"
#include "src/managed.h"
@@ -63,6 +64,7 @@ class WasmInstanceObject;
struct WasmContext {
byte* mem_start = nullptr;
uint32_t mem_size = 0; // TODO(titzer): uintptr_t?
uint32_t mem_mask = 0; // TODO(titzer): uintptr_t?
byte* globals_start = nullptr;
inline void SetRawMemory(void* mem_start, size_t mem_size) {
@@ -70,6 +72,8 @@ struct WasmContext {
wasm::kV8MaxWasmMemoryPages * wasm::kSpecMaxWasmMemoryPages);
this->mem_start = static_cast<byte*>(mem_start);
this->mem_size = static_cast<uint32_t>(mem_size);
this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
DCHECK_LE(mem_size, this->mem_mask + 1);
}
};
......
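
To make the new mem_mask concrete, a small sketch of the computation with two example sizes (the helper mirrors what base::bits::RoundUpToPowerOfTwo32 computes; illustrative, not the V8 implementation):

#include <cassert>
#include <cstdint>

// Smear the highest set bit downwards, then add one: the classic
// round-up-to-power-of-two trick for 32-bit values.
static uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
  value--;
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  return value + 1;
}

int main() {
  const uint32_t kPageSize = 65536;
  // One page: the mask is exact.
  assert(RoundUpToPowerOfTwo32(1 * kPageSize) - 1 == 65535u);
  // Three pages: the mask (262143) exceeds mem_size - 1 (196607).
  assert(RoundUpToPowerOfTwo32(3 * kPageSize) - 1 == 262143u);
  return 0;
}

For non-power-of-two sizes the mask covers more than the memory, which is why masking is only a second line of defense behind the explicit bounds check or trap handler, exactly what the DCHECK_LE above asserts.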