Commit 4ab4bbe9 authored by eholk, committed by Commit bot

[wasm] Add ProtectedStore instruction

This is necessary for signal-based out-of-bounds handling in WebAssembly.

Adds a ProtectedStore instruction analogous to the previously added
ProtectedLoad instruction. Rather than relying on explicit bounds checks,
ProtectedStore emits an out-of-line block of code that throws a JavaScript
exception, plus the metadata a signal handler needs to locate that
out-of-line code.
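
For orientation, a minimal sketch of the decision this change introduces when
lowering a wasm store, condensed from the WasmGraphBuilder::StoreMem changes
further down in this CL. Alignment handling and the unaligned-store path are
omitted, and the name StoreMemSketch is purely illustrative.

// Sketch only, condensed from WasmGraphBuilder::StoreMem in this CL.
// When trap handlers are usable (currently x64 Linux), the explicit bounds
// check is skipped and a ProtectedStore node is emitted; a faulting store
// then reaches the signal handler, which uses the recorded metadata to jump
// to the out-of-line code that throws the JavaScript exception.
Node* WasmGraphBuilder::StoreMemSketch(MachineType memtype, Node* index,
                                       uint32_t offset, Node* val,
                                       wasm::WasmCodePosition position) {
  if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
    // Protected path: no bounds check; the guard page catches OOB stores.
    // The context and source position feed the out-of-line trap block.
    Node* context = HeapConstant(module_->instance->context);
    Node* position_node = jsgraph()->Int32Constant(position);
    return graph()->NewNode(
        jsgraph()->machine()->ProtectedStore(memtype.representation()),
        MemBuffer(offset), index, val, context, position_node, *effect_,
        *control_);
  }
  // Legacy path: explicit bounds check followed by an ordinary store.
  BoundsCheckMem(memtype, index, offset, position);
  StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
  return graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
                          index, val, *effect_, *control_);
}

On x64 the instruction selector then tags the store with
MiscField::encode(X64MemoryProtection::kProtected), and the code generator's
EmitOOLTrapIfNeeded allocates a WasmOutOfLineTrap for the store's pc, which
supplies the per-instruction metadata the paragraph above refers to.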

BUG=https://bugs.chromium.org/p/v8/issues/detail?id=5277

Review-Url: https://codereview.chromium.org/2516413003
Cr-Commit-Position: refs/heads/master@{#41398}
parent 9e3feeff
......@@ -501,6 +501,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
void InstructionSelector::VisitUnalignedLoad(Node* node) {
UnalignedLoadRepresentation load_rep =
UnalignedLoadRepresentationOf(node->op());
......
......@@ -708,6 +708,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
......
......@@ -351,6 +351,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
......
......@@ -1033,6 +1033,8 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
return VisitProtectedStore(node);
case IrOpcode::kWord32And:
return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
......
......@@ -43,7 +43,8 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStore, op->opcode());
DCHECK(IrOpcode::kStore == op->opcode() ||
IrOpcode::kProtectedStore == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
......@@ -510,9 +511,9 @@ struct MachineOperatorGlobalCache {
"CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
struct ProtectedLoad##Type##Operator final \
: public Operator1<ProtectedLoadRepresentation> { \
: public Operator1<LoadRepresentation> { \
ProtectedLoad##Type##Operator() \
: Operator1<ProtectedLoadRepresentation>( \
: Operator1<LoadRepresentation>( \
IrOpcode::kProtectedLoad, \
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"ProtectedLoad", 4, 1, 1, 1, 1, 0, MachineType::Type()) {} \
......@@ -585,13 +586,24 @@ struct MachineOperatorGlobalCache {
"CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
} \
}; \
struct ProtectedStore##Type##Operator \
: public Operator1<StoreRepresentation> { \
explicit ProtectedStore##Type##Operator() \
: Operator1<StoreRepresentation>( \
IrOpcode::kProtectedStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Store", 5, 1, 1, 0, 1, 0, \
StoreRepresentation(MachineRepresentation::Type, \
kNoWriteBarrier)) {} \
}; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
Store##Type##PointerWriteBarrier##Operator \
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
UnalignedStore##Type##Operator kUnalignedStore##Type; \
CheckedStore##Type##Operator kCheckedStore##Type;
CheckedStore##Type##Operator kCheckedStore##Type; \
ProtectedStore##Type##Operator kProtectedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
......@@ -762,6 +774,23 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
return nullptr;
}
const Operator* MachineOperatorBuilder::ProtectedStore(
MachineRepresentation rep) {
switch (rep) {
#define STORE(kRep) \
case MachineRepresentation::kRep: \
return &cache_.kProtectedStore##kRep; \
break;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
return nullptr;
}
const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
return &cache_.kUnsafePointerAdd;
}
......
......@@ -43,7 +43,6 @@ class OptionalOperator final {
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
typedef LoadRepresentation ProtectedLoadRepresentation;
LoadRepresentation LoadRepresentationOf(Operator const*);
......@@ -615,6 +614,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
const Operator* ProtectedStore(MachineRepresentation rep);
// unaligned load [base + index]
const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
......
......@@ -368,6 +368,10 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32And(Node* node) {
MipsOperandGenerator g(this);
......
......@@ -464,6 +464,10 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32And(Node* node) {
Mips64OperandGenerator g(this);
......
......@@ -528,6 +528,7 @@
V(Word32PairShr) \
V(Word32PairSar) \
V(ProtectedLoad) \
V(ProtectedStore) \
V(AtomicLoad) \
V(AtomicStore) \
V(UnsafePointerAdd)
......
......@@ -339,6 +339,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
......
......@@ -440,6 +440,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
......
......@@ -1163,6 +1163,7 @@ void Verifier::Visitor::Check(Node* node) {
// -----------------------
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
case IrOpcode::kStackSlot:
case IrOpcode::kWord32And:
......
......@@ -100,6 +100,13 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
} // namespace
// TODO(eholk): Support trap handlers on other platforms.
#if V8_TARGET_ARCH_X64 && V8_OS_LINUX
const bool kTrapHandlerSupported = true;
#else
const bool kTrapHandlerSupported = false;
#endif
// A helper that handles building graph fragments for trapping.
// To avoid generating a ton of redundant code that just calls the runtime
// to trap, we generate a per-trap-reason block of code that all trap sites
......@@ -2948,7 +2955,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
Node* load;
// WASM semantics throw on OOB. Introduce explicit bounds check.
if (!FLAG_wasm_trap_handler) {
if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
BoundsCheckMem(memtype, index, offset, position);
}
bool aligned = static_cast<int>(alignment) >=
......@@ -2956,7 +2963,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
if (aligned ||
jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
if (FLAG_wasm_trap_handler) {
if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
DCHECK(FLAG_wasm_guard_pages);
Node* context = HeapConstant(module_->instance->context);
Node* position_node = jsgraph()->Int32Constant(position);
......@@ -2968,7 +2975,8 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
MemBuffer(offset), index, *effect_, *control_);
}
} else {
DCHECK(!FLAG_wasm_trap_handler);
// TODO(eholk): Support unaligned loads with trap handlers.
DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
......@@ -3002,7 +3010,9 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
Node* store;
// WASM semantics throw on OOB. Introduce explicit bounds check.
BoundsCheckMem(memtype, index, offset, position);
if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
BoundsCheckMem(memtype, index, offset, position);
}
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
bool aligned = static_cast<int>(alignment) >=
......@@ -3014,11 +3024,22 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
if (aligned ||
jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
store =
graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
index, val, *effect_, *control_);
if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
Node* context = HeapConstant(module_->instance->context);
Node* position_node = jsgraph()->Int32Constant(position);
store = graph()->NewNode(
jsgraph()->machine()->ProtectedStore(memtype.representation()),
MemBuffer(offset), index, val, context, position_node, *effect_,
*control_);
} else {
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
store =
graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
index, val, *effect_, *control_);
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
UnalignedStoreRepresentation rep(memtype.representation());
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
......
......@@ -302,6 +302,17 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
int32_t position_;
};
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, X64OperandConverter& i,
Address pc) {
X64MemoryProtection protection =
static_cast<X64MemoryProtection>(MiscField::decode(opcode));
if (protection == X64MemoryProtection::kProtected) {
bool frame_elided = !codegen->frame_access_state()->has_frame();
new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, i.InputRegister(2),
i.InputInt32(3));
}
}
} // namespace
......@@ -1839,17 +1850,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kX64Movsxbl:
ASSEMBLE_MOVX(movsxbl);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxbl:
ASSEMBLE_MOVX(movzxbl);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxbq:
ASSEMBLE_MOVX(movsxbq);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
break;
case kX64Movzxbq:
ASSEMBLE_MOVX(movzxbq);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movb: {
......@@ -1860,21 +1875,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ movb(operand, i.InputRegister(index));
}
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
break;
}
case kX64Movsxwl:
ASSEMBLE_MOVX(movsxwl);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxwl:
ASSEMBLE_MOVX(movzxwl);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxwq:
ASSEMBLE_MOVX(movsxwq);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
break;
case kX64Movzxwq:
ASSEMBLE_MOVX(movzxwq);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
......@@ -1885,10 +1905,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ movw(operand, i.InputRegister(index));
}
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
break;
}
case kX64Movl:
case kX64TrapMovl:
if (instr->HasOutput()) {
if (instr->addressing_mode() == kMode_None) {
if (instr->InputAt(0)->IsRegister()) {
......@@ -1897,15 +1917,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(i.OutputRegister(), i.InputOperand(0));
}
} else {
Address pc = __ pc();
__ movl(i.OutputRegister(), i.MemoryOperand());
if (arch_opcode == kX64TrapMovl) {
bool frame_elided = !frame_access_state()->has_frame();
new (zone()) WasmOutOfLineTrap(this, pc, frame_elided,
i.InputRegister(2), i.InputInt32(3));
}
}
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
__ AssertZeroExtended(i.OutputRegister());
} else {
size_t index = 0;
......@@ -1915,10 +1929,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ movl(operand, i.InputRegister(index));
}
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
}
break;
case kX64Movsxlq:
ASSEMBLE_MOVX(movsxlq);
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
break;
case kX64Movq:
if (instr->HasOutput()) {
......@@ -1932,6 +1948,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movq(operand, i.InputRegister(index));
}
}
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
break;
case kX64Movss:
if (instr->HasOutput()) {
......@@ -1941,6 +1958,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
__ movss(operand, i.InputDoubleRegister(index));
}
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
break;
case kX64Movsd:
if (instr->HasOutput()) {
......@@ -1950,6 +1968,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
__ Movsd(operand, i.InputDoubleRegister(index));
}
EmitOOLTrapIfNeeded(zone(), this, opcode, i, __ pc());
break;
case kX64BitcastFI:
if (instr->InputAt(0)->IsFPStackSlot()) {
......
......@@ -128,7 +128,6 @@ namespace compiler {
V(X64Movzxwq) \
V(X64Movw) \
V(X64Movl) \
V(X64TrapMovl) \
V(X64Movsxlq) \
V(X64Movq) \
V(X64Movsd) \
......@@ -183,6 +182,8 @@ namespace compiler {
V(M8I) /* [ %r2*8 + K] */ \
V(Root) /* [%root + K] */
enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };
} // namespace compiler
} // namespace internal
} // namespace v8
......
......@@ -155,7 +155,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
case kX64Movl:
case kX64TrapMovl:
if (instr->HasOutput()) {
DCHECK(instr->InputCount() >= 1);
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
......
......@@ -213,6 +213,39 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
return opcode;
}
ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
case MachineRepresentation::kFloat32:
return kX64Movss;
break;
case MachineRepresentation::kFloat64:
return kX64Movsd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
return kX64Movb;
break;
case MachineRepresentation::kWord16:
return kX64Movw;
break;
case MachineRepresentation::kWord32:
return kX64Movl;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
return kX64Movq;
break;
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return kArchNop;
}
UNREACHABLE();
return kArchNop;
}
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
......@@ -245,7 +278,8 @@ void InstructionSelector::VisitProtectedLoad(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(node->InputAt(2));
// Add the source position as an input
inputs[input_count++] = g.UseImmediate(node->InputAt(3));
InstructionCode code = opcode | AddressingModeField::encode(mode);
InstructionCode code = opcode | AddressingModeField::encode(mode) |
MiscField::encode(X64MemoryProtection::kProtected);
Emit(code, 1, outputs, input_count, inputs);
}
......@@ -257,10 +291,9 @@ void InstructionSelector::VisitStore(Node* node) {
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK(CanBeTaggedPointer(rep));
DCHECK(CanBeTaggedPointer(store_rep.representation()));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
......@@ -295,35 +328,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kX64Movss;
break;
case MachineRepresentation::kFloat64:
opcode = kX64Movsd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = kX64Movb;
break;
case MachineRepresentation::kWord16:
opcode = kX64Movw;
break;
case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode addressing_mode =
......@@ -338,6 +343,29 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(2);
Node* context = node->InputAt(3);
Node* position = node->InputAt(4);
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionOperand inputs[6];
size_t input_count = 0;
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
MiscField::encode(X64MemoryProtection::kProtected);
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
inputs[input_count++] = g.UseRegister(context);
inputs[input_count++] = g.UseImmediate(position);
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
......
......@@ -312,6 +312,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
......