Commit 798c51cd authored by Manos Koukoutos, committed by V8 LUCI CQ

[turboshaft] Split out ProtectedLoad and ProtectedStore

Those operators are not eliminable, so they need different properties from
the rest of the loads/stores (see the sketch below the commit metadata).

Bug: v8:12783
Change-Id: I7cd478fa827589612ca5d7628c628c09f3f4a3a8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3909361
Commit-Queue: Manos Koukoutos <manoskouk@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83437}
parent 39985773
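For context, a minimal standalone sketch (not part of this change, and not V8 code) of why a protected access must be kept: it assumes the OpProperties flag order {can_read, can_write, can_abort, is_block_terminator} suggested by the helpers added to operations.h below, and models the usual rule that an operation whose result is unused may only be dropped when it neither writes nor can abort.

#include <cstdio>

// Simplified stand-in for turboshaft's OpProperties (flag order assumed).
struct OpProperties {
  bool can_read;
  bool can_write;
  bool can_abort;
  bool is_block_terminator;
};

// An unused operation may only be removed if it has no observable effect,
// i.e. it neither writes memory nor can abort (trap).
constexpr bool IsEliminable(OpProperties p) {
  return !p.can_write && !p.can_abort;
}

int main() {
  constexpr OpProperties plain_load = {true, false, false, false};
  constexpr OpProperties protected_load = {true, false, true, false};  // ReadingAndCanAbort()
  std::printf("plain load eliminable: %d\n", IsEliminable(plain_load));          // prints 1
  std::printf("protected load eliminable: %d\n", IsEliminable(protected_load));  // prints 0
}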
@@ -590,7 +590,6 @@ OpIndex GraphBuilder::Process(
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
case IrOpcode::kUnalignedLoad: {
MemoryRepresentation loaded_rep =
MemoryRepresentation::FromMachineType(LoadRepresentationOf(op));
@@ -598,10 +597,9 @@ OpIndex GraphBuilder::Process(
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
// It's ok to merge LoadImmutable into Load after scheduling.
LoadOp::Kind kind =
opcode == IrOpcode::kUnalignedLoad ? LoadOp::Kind::kRawUnaligned
: opcode == IrOpcode::kProtectedLoad ? LoadOp::Kind::kProtected
: LoadOp::Kind::kRawAligned;
LoadOp::Kind kind = opcode == IrOpcode::kUnalignedLoad
? LoadOp::Kind::kRawUnaligned
: LoadOp::Kind::kRawAligned;
if (index->opcode() == IrOpcode::kInt32Constant) {
int32_t offset = OpParameter<int32_t>(index->op());
return assembler.Load(Map(base), kind, loaded_rep, result_rep, offset);
@@ -618,19 +616,25 @@ OpIndex GraphBuilder::Process(
return assembler.IndexedLoad(Map(base), Map(index), kind, loaded_rep,
result_rep, offset, element_size_log2);
}
case IrOpcode::kProtectedLoad: {
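// Protected loads get a dedicated case: they lower to ProtectedLoadOp and
// skip the constant-offset folding applied to the raw loads above.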
MemoryRepresentation loaded_rep =
MemoryRepresentation::FromMachineType(LoadRepresentationOf(op));
RegisterRepresentation result_rep = loaded_rep.ToRegisterRepresentation();
return assembler.ProtectedLoad(
Map(node->InputAt(0)), Map(node->InputAt(1)), loaded_rep, result_rep);
}
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore:
case IrOpcode::kProtectedStore: {
case IrOpcode::kUnalignedStore: {
bool aligned = opcode != IrOpcode::kUnalignedStore;
StoreRepresentation store_rep =
aligned ? StoreRepresentationOf(op)
: StoreRepresentation(UnalignedStoreRepresentationOf(op),
WriteBarrierKind::kNoWriteBarrier);
StoreOp::Kind kind =
opcode == IrOpcode::kStore ? StoreOp::Kind::kRawAligned
: opcode == IrOpcode::kUnalignedStore ? StoreOp::Kind::kRawAligned
: StoreOp::Kind::kProtected;
StoreOp::Kind kind = opcode == IrOpcode::kStore
? StoreOp::Kind::kRawAligned
: StoreOp::Kind::kRawUnaligned;
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
@@ -659,6 +663,12 @@ OpIndex GraphBuilder::Process(
store_rep.representation()),
store_rep.write_barrier_kind(), offset, element_size_log2);
}
case IrOpcode::kProtectedStore: {
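// Protected stores likewise lower to a dedicated ProtectedStoreOp; they carry
// no write barrier, only the stored representation.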
return assembler.ProtectedStore(
Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
MemoryRepresentation::FromMachineRepresentation(
OpParameter<MachineRepresentation>(node->op())));
}
case IrOpcode::kRetain:
return assembler.Retain(Map(node->InputAt(0)));
......
@@ -77,8 +77,10 @@ class Graph;
V(Constant) \
V(Load) \
V(IndexedLoad) \
V(ProtectedLoad) \
V(Store) \
V(IndexedStore) \
V(ProtectedStore) \
V(Retain) \
V(Parameter) \
V(OsrValue) \
@@ -264,6 +266,12 @@ struct OpProperties {
static constexpr OpProperties BlockTerminatorWithAnySideEffect() {
return {true, true, true, true};
}
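// Properties of protected memory accesses: they may trap (abort) on an
// out-of-bounds access, so they are never eliminable.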
static constexpr OpProperties ReadingAndCanAbort() {
return {true, false, true, false};
}
static constexpr OpProperties WritingAndCanAbort() {
return {false, true, true, false};
}
bool operator==(const OpProperties& other) const {
return can_read == other.can_read && can_write == other.can_write &&
can_abort == other.can_abort &&
@@ -1265,12 +1273,7 @@ struct ConstantOp : FixedArityOperationT<0, ConstantOp> {
// When result_rep is RegisterRepresentation::Compressed(), then the load does
// not decompress the value.
struct LoadOp : FixedArityOperationT<1, LoadOp> {
enum class Kind : uint8_t {
kTaggedBase,
kRawAligned,
kRawUnaligned,
kProtected
};
enum class Kind : uint8_t { kTaggedBase, kRawAligned, kRawUnaligned };
Kind kind;
MemoryRepresentation loaded_rep;
RegisterRepresentation result_rep;
@@ -1303,7 +1306,6 @@ inline bool IsAlignedAccess(LoadOp::Kind kind) {
case LoadOp::Kind::kRawAligned:
return true;
case LoadOp::Kind::kRawUnaligned:
case LoadOp::Kind::kProtected:
return false;
}
}
@@ -1347,6 +1349,28 @@ struct IndexedLoadOp : FixedArityOperationT<2, IndexedLoadOp> {
}
};
// A protected load registers a trap handler which handles out-of-bounds memory
// accesses.
struct ProtectedLoadOp : FixedArityOperationT<2, ProtectedLoadOp> {
MemoryRepresentation loaded_rep;
RegisterRepresentation result_rep;
static constexpr OpProperties properties = OpProperties::ReadingAndCanAbort();
OpIndex base() const { return input(0); }
OpIndex index() const { return input(1); }
ProtectedLoadOp(OpIndex base, OpIndex index, MemoryRepresentation loaded_rep,
RegisterRepresentation result_rep)
: Base(base, index), loaded_rep(loaded_rep), result_rep(result_rep) {
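// As with LoadOp, a tagged value may be loaded in its compressed form
// without being decompressed.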
DCHECK(loaded_rep.ToRegisterRepresentation() == result_rep ||
(loaded_rep.IsTagged() &&
result_rep == RegisterRepresentation::Compressed()));
}
auto options() const { return std::tuple{loaded_rep, result_rep}; }
};
// Store `value` to: base + offset.
// For Kind::tagged_base: subtract kHeapObjectTag,
// `base` has to be the object start.
@@ -1410,6 +1434,23 @@ struct IndexedStoreOp : FixedArityOperationT<3, IndexedStoreOp> {
}
};
// A protected store registers a trap handler which handles out-of-bounds memory
// accesses.
struct ProtectedStoreOp : FixedArityOperationT<3, ProtectedStoreOp> {
MemoryRepresentation stored_rep;
static constexpr OpProperties properties = OpProperties::WritingAndCanAbort();
OpIndex base() const { return input(0); }
OpIndex index() const { return input(1); }
OpIndex value() const { return input(2); }
ProtectedStoreOp(OpIndex base, OpIndex index, OpIndex value,
MemoryRepresentation stored_rep)
: Base(base, index, value), stored_rep(stored_rep) {}
auto options() const { return std::tuple{stored_rep}; }
};
// Retain a HeapObject to prevent it from being garbage collected too early.
struct RetainOp : FixedArityOperationT<1, RetainOp> {
OpIndex retained() const { return input(0); }
......
@@ -463,6 +463,11 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
MapToNewGraph(op.base()), MapToNewGraph(op.index()), op.kind,
op.loaded_rep, op.result_rep, op.offset, op.element_size_log2);
}
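// Protected loads are not eliminable, so the default reduction copies the
// operation into the new graph unchanged.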
OpIndex ReduceProtectedLoad(const ProtectedLoadOp& op) {
return assembler.ProtectedLoad(MapToNewGraph(op.base()),
MapToNewGraph(op.index()), op.loaded_rep,
op.result_rep);
}
OpIndex ReduceStore(const StoreOp& op) {
return assembler.Store(MapToNewGraph(op.base()), MapToNewGraph(op.value()),
op.kind, op.stored_rep, op.write_barrier, op.offset);
@@ -473,6 +478,11 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
MapToNewGraph(op.value()), op.kind, op.stored_rep, op.write_barrier,
op.offset, op.element_size_log2);
}
OpIndex ReduceProtectedStore(const ProtectedStoreOp& op) {
return assembler.ProtectedStore(MapToNewGraph(op.base()),
MapToNewGraph(op.index()),
MapToNewGraph(op.value()), op.stored_rep);
}
OpIndex ReduceRetain(const RetainOp& op) {
return assembler.Retain(MapToNewGraph(op.retained()));
}
......
@@ -930,6 +930,10 @@ Node* ScheduleBuilder::ProcessOperation(const IndexedLoadOp& op) {
: machine.ProtectedLoad(loaded_rep),
{base, index});
}
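// When rebuilding the schedule, protected accesses map back to the machine
// graph's ProtectedLoad/ProtectedStore operators, which register a trap
// handler for out-of-bounds accesses.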
Node* ScheduleBuilder::ProcessOperation(const ProtectedLoadOp& op) {
return AddNode(machine.ProtectedLoad(op.loaded_rep.ToMachineType()),
{GetNode(op.base()), GetNode(op.index())});
}
Node* ScheduleBuilder::ProcessOperation(const StoreOp& op) {
intptr_t offset = op.offset;
if (op.kind == StoreOp::Kind::kTaggedBase) {
@@ -942,9 +946,6 @@ Node* ScheduleBuilder::ProcessOperation(const StoreOp& op) {
if (IsAlignedAccess(op.kind)) {
o = machine.Store(StoreRepresentation(
op.stored_rep.ToMachineType().representation(), op.write_barrier));
} else if (op.kind == LoadOp::Kind::kProtected) {
DCHECK_EQ(op.write_barrier, WriteBarrierKind::kNoWriteBarrier);
o = machine.ProtectedStore(op.stored_rep.ToMachineType().representation());
} else {
DCHECK_EQ(op.write_barrier, WriteBarrierKind::kNoWriteBarrier);
o = machine.UnalignedStore(op.stored_rep.ToMachineType().representation());
@@ -970,15 +971,17 @@ Node* ScheduleBuilder::ProcessOperation(const IndexedStoreOp& op) {
if (IsAlignedAccess(op.kind)) {
o = machine.Store(StoreRepresentation(
op.stored_rep.ToMachineType().representation(), op.write_barrier));
} else if (op.kind == LoadOp::Kind::kProtected) {
DCHECK_EQ(op.write_barrier, WriteBarrierKind::kNoWriteBarrier);
o = machine.ProtectedStore(op.stored_rep.ToMachineType().representation());
} else {
DCHECK_EQ(op.write_barrier, WriteBarrierKind::kNoWriteBarrier);
o = machine.UnalignedStore(op.stored_rep.ToMachineType().representation());
}
return AddNode(o, {base, index, value});
}
Node* ScheduleBuilder::ProcessOperation(const ProtectedStoreOp& op) {
return AddNode(
machine.ProtectedStore(op.stored_rep.ToMachineType().representation()),
{GetNode(op.base()), GetNode(op.index()), GetNode(op.value())});
}
Node* ScheduleBuilder::ProcessOperation(const RetainOp& op) {
return AddNode(common.Retain(), {GetNode(op.retained())});
}
......