Commit 7dcfe1a7 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Additional masking to memory accesses

Add additional protection against OOB accesses by masking the
index to access by a mask precomputed from the memory size.

R=clemensh@chromium.org, bradnelson@chromium.org

Change-Id: I1d5875121e1904074b115a2c88ca773b6c1c1a66
Reviewed-on: https://chromium-review.googlesource.com/830394
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50162}
parent ce77c8cb
This diff is collapsed.
......@@ -218,6 +218,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
// Graph nodes cached while building a wasm function's TurboFan graph: the
// start address, size, and index mask of the instance's linear memory.
// {mem_mask} mirrors WasmContext::mem_mask (see wasm-objects.h in this CL);
// presumably these nodes are reloaded whenever the memory may have changed —
// TODO(review): confirm against the context-cache loading code (not shown).
struct WasmContextCacheNodes {
Node* mem_start;
Node* mem_size;
Node* mem_mask;
};
// Abstracts details of building TurboFan graph nodes for wasm to separate
......@@ -225,6 +226,8 @@ struct WasmContextCacheNodes {
typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
Handle<Code> centry_stub, wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt = nullptr,
......@@ -370,8 +373,6 @@ class WasmGraphBuilder {
void set_effect_ptr(Node** effect) { this->effect_ = effect; }
Node* LoadMemSize();
Node* LoadMemStart();
void GetGlobalBaseAndOffset(MachineType mem_type, uint32_t offset,
Node** base_node, Node** offset_node);
......@@ -458,8 +459,9 @@ class WasmGraphBuilder {
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
Node* BuildChangeEndiannessStore(Node* node, MachineRepresentation rep,
......
......@@ -1478,9 +1478,15 @@ class ThreadImpl {
}
// Bounds-checks an interpreter memory access of sizeof(mtype) bytes at
// effective address {offset + index} against the current memory size.
//
// NOTE(review): the surrounding diff lost its +/- markers, leaving the old
// BoundsCheck() lines interleaved here; this is the coherent post-change
// function, matching the added lines and all call sites in this CL.
//
// Returns the host address of the access, or nullptr if it would be out of
// bounds (callers trap on nullptr). The three guards are ordered so each
// uint32 subtraction is only evaluated once the previous check proves it
// cannot underflow.
template <typename mtype>
inline byte* BoundsCheckMem(uint32_t offset, uint32_t index) {
  uint32_t mem_size = wasm_context_->mem_size;
  if (sizeof(mtype) > mem_size) return nullptr;
  if (offset > (mem_size - sizeof(mtype))) return nullptr;
  if (index > (mem_size - sizeof(mtype) - offset)) return nullptr;
  // Compute the effective address of the access, making sure to condition
  // the index even in the in-bounds case. {mem_mask} is
  // RoundUpToPowerOfTwo32(mem_size) - 1, so any in-bounds index passes
  // through unchanged; the mask is pure defense in depth against
  // speculative/OOB reads.
  return wasm_context_->mem_start + offset +
         (index & wasm_context_->mem_mask);
}
template <typename ctype, typename mtype>
......@@ -1489,11 +1495,11 @@ class ThreadImpl {
MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc),
sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<mtype>(wasm_context_->mem_size, operand.offset, index)) {
byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
if (!addr) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
byte* addr = wasm_context_->mem_start + operand.offset + index;
WasmValue result(
converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
......@@ -1518,11 +1524,11 @@ class ThreadImpl {
ctype val = Pop().to<ctype>();
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<mtype>(wasm_context_->mem_size, operand.offset, index)) {
byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
if (!addr) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
byte* addr = wasm_context_->mem_start + operand.offset + index;
WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
len = 1 + operand.length;
......@@ -1544,11 +1550,11 @@ class ThreadImpl {
sizeof(type));
if (val) *val = Pop().to<uint32_t>();
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<type>(wasm_context_->mem_size, operand.offset, index)) {
address = BoundsCheckMem<type>(operand.offset, index);
if (!address) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
address = wasm_context_->mem_start + operand.offset + index;
len = 2 + operand.length;
return true;
}
......@@ -2022,10 +2028,10 @@ class ThreadImpl {
case kExpr##name: { \
uint32_t index = Pop().to<uint32_t>(); \
ctype result; \
if (!BoundsCheck<mtype>(wasm_context_->mem_size, 0, index)) { \
byte* addr = BoundsCheckMem<mtype>(0, index); \
if (!addr) { \
result = defval; \
} else { \
byte* addr = wasm_context_->mem_start + index; \
/* TODO(titzer): alignment for asmjs load mem? */ \
result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
} \
......@@ -2047,9 +2053,8 @@ class ThreadImpl {
case kExpr##name: { \
WasmValue val = Pop(); \
uint32_t index = Pop().to<uint32_t>(); \
if (BoundsCheck<mtype>(wasm_context_->mem_size, 0, index)) { \
byte* addr = wasm_context_->mem_start + index; \
/* TODO(titzer): alignment for asmjs store mem? */ \
byte* addr = BoundsCheckMem<mtype>(0, index); \
if (addr) { \
*(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
} \
Push(val); \
......
......@@ -5,6 +5,7 @@
#ifndef V8_WASM_OBJECTS_H_
#define V8_WASM_OBJECTS_H_
#include "src/base/bits.h"
#include "src/debug/debug.h"
#include "src/debug/interface-types.h"
#include "src/managed.h"
......@@ -63,6 +64,7 @@ class WasmInstanceObject;
// Raw view of a wasm instance's linear memory and globals, handed to
// generated code / the interpreter. {mem_mask} is kept in sync with
// {mem_size} by SetRawMemory() below and is used to mask memory indices as
// additional OOB protection (see BoundsCheckMem in this CL).
struct WasmContext {
byte* mem_start = nullptr;
uint32_t mem_size = 0; // TODO(titzer): uintptr_t?
uint32_t mem_mask = 0; // TODO(titzer): uintptr_t?
byte* globals_start = nullptr;
// Updates all three memory fields together. NOTE(review): the first line(s)
// of this body — presumably the opening of a DCHECK_LE on mem_size — are
// hidden by the hunk break below; confirm against the full file.
inline void SetRawMemory(void* mem_start, size_t mem_size) {
......@@ -70,6 +72,8 @@ struct WasmContext {
wasm::kV8MaxWasmMemoryPages * wasm::kSpecMaxWasmMemoryPages);
this->mem_start = static_cast<byte*>(mem_start);
this->mem_size = static_cast<uint32_t>(mem_size);
// Smallest power-of-two-minus-one covering [0, mem_size): in-bounds
// indices are unchanged by {index & mem_mask}.
this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
DCHECK_LE(mem_size, this->mem_mask + 1);
}
};
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment