Commit 6c6deee7 authored by Shu-yu Guo, committed by V8 LUCI CQ

[compiler] Support AcqRel in MemoryBarrier and expose to CodeAssembler

AcqRel barriers are currently unused and will be used by the shared
value barrier in a future CL.

Bug: v8:12547
Change-Id: I8ae40b9e17f007441125dfa5d0a04f46565785fd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3827319
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82568}
parent ee16640e
......@@ -2274,6 +2274,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
}
void InstructionSelector::VisitMemoryBarrier(Node* node) {
  // A single DMB ISH is strong enough for both acquire-release and
  // sequentially consistent barriers, so the requested order is not
  // inspected here.
  ArmOperandGenerator generator(this);
  Emit(kArmDmbIsh, generator.NoOutput());
}
......
......@@ -3387,6 +3387,7 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
}
void InstructionSelector::VisitMemoryBarrier(Node* node) {
  // A single DMB ISH is strong enough for both acquire-release and
  // sequentially consistent barriers, so the requested order is not
  // inspected here.
  Arm64OperandGenerator generator(this);
  Emit(kArm64DmbIsh, generator.NoOutput());
}
......
......@@ -2018,8 +2018,15 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
}
void InstructionSelector::VisitMemoryBarrier(Node* node) {
  // ia32 is no weaker than release-acquire and only needs to emit an
  // instruction for SeqCst memory barriers.
  AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
  if (order == AtomicMemoryOrder::kSeqCst) {
    IA32OperandGenerator g(this);
    Emit(kIA32MFence, g.NoOutput());
    return;
  }
  // AcqRel barriers are no-ops on ia32; any other order is unexpected.
  DCHECK_EQ(AtomicMemoryOrder::kAcqRel, order);
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
......
......@@ -3055,8 +3055,15 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
}
void InstructionSelector::VisitMemoryBarrier(Node* node) {
  // x64 is no weaker than release-acquire and only needs to emit an
  // instruction for SeqCst memory barriers.
  AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
  if (order == AtomicMemoryOrder::kSeqCst) {
    X64OperandGenerator g(this);
    Emit(kX64MFence, g.NoOutput());
    return;
  }
  // AcqRel barriers are no-ops on x64; any other order is unexpected.
  DCHECK_EQ(AtomicMemoryOrder::kAcqRel, order);
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
......
......@@ -944,6 +944,10 @@ CodeAssembler::AtomicCompareExchange64<AtomicUint64>(
TNode<UintPtrT> new_value, TNode<UintPtrT> old_value_high,
TNode<UintPtrT> new_value_high);
// Emits a memory barrier node with the given memory order by delegating to
// the underlying RawMachineAssembler.
void CodeAssembler::MemoryBarrier(AtomicMemoryOrder order) {
raw_assembler()->MemoryBarrier(order);
}
void CodeAssembler::StoreRoot(RootIndex root_index, TNode<Object> value) {
DCHECK(!RootsTable::IsImmortalImmovable(root_index));
TNode<ExternalReference> isolate_root =
......
......@@ -862,6 +862,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<UintPtrT> old_value_high,
TNode<UintPtrT> new_value_high);
// Emit a memory barrier with the given memory order (the machine operator
// builder supports SeqCst and AcqRel orders).
void MemoryBarrier(AtomicMemoryOrder order);
// Store a value to the root array.
void StoreRoot(RootIndex root_index, TNode<Object> value);
......
......@@ -1293,13 +1293,15 @@ struct MachineOperatorGlobalCache {
};
Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
struct MemoryBarrierOperator : public Operator {
template <AtomicMemoryOrder order>
struct MemoryBarrierOperator : public Operator1<AtomicMemoryOrder> {
MemoryBarrierOperator()
: Operator(IrOpcode::kMemoryBarrier,
Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0,
1, 1, 0, 1, 0) {}
: Operator1<AtomicMemoryOrder>(
IrOpcode::kMemoryBarrier, Operator::kNoDeopt | Operator::kNoThrow,
"SeqCstMemoryBarrier", 0, 1, 1, 0, 1, 0, order) {}
};
MemoryBarrierOperator kMemoryBarrier;
MemoryBarrierOperator<AtomicMemoryOrder::kSeqCst> kSeqCstMemoryBarrier;
MemoryBarrierOperator<AtomicMemoryOrder::kAcqRel> kAcqRelMemoryBarrier;
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
......@@ -1734,8 +1736,15 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return zone_->New<CommentOperator>(msg);
}
// Returns the cached memory barrier operator for the requested memory order.
// Only SeqCst and AcqRel barriers are supported.
const Operator* MachineOperatorBuilder::MemoryBarrier(AtomicMemoryOrder order) {
  switch (order) {
    case AtomicMemoryOrder::kSeqCst:
      return &cache_.kSeqCstMemoryBarrier;
    case AtomicMemoryOrder::kAcqRel:
      return &cache_.kAcqRelMemoryBarrier;
    default:
      UNREACHABLE();
  }
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
......
......@@ -1003,8 +1003,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// Runtime::kStackGuardWithGap call.
const Operator* LoadStackCheckOffset();
// Memory barrier, parameterized by memory order (SeqCst or AcqRel).
const Operator* MemoryBarrier(AtomicMemoryOrder order);
// atomic-load [base + index]
const Operator* Word32AtomicLoad(AtomicLoadParameters params);
......
......@@ -335,6 +335,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
}
// Adds a memory barrier node with the given memory order to the graph.
Node* MemoryBarrier(AtomicMemoryOrder order) {
  const Operator* barrier_op = machine()->MemoryBarrier(order);
  return AddNode(barrier_op);
}
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return AddNode(machine()->WordAnd(), a, b);
......
......@@ -5009,8 +5009,9 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
}
// Emits a sequentially consistent memory barrier, implementing the wasm
// atomic.fence instruction.
void WasmGraphBuilder::AtomicFence() {
  SetEffect(graph()->NewNode(
      mcgraph()->machine()->MemoryBarrier(AtomicMemoryOrder::kSeqCst), effect(),
      control()));
}
void WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.