Commit 5eb08ce2 authored by Yu Yin, committed by V8 LUCI CQ

[mips64][loong64] Support acq/rel accesses and atomic accesses on tagged

Port commit 6a487504

Change-Id: I6fd1a15f1f35fd3b8cd3cfb72d7c428d018273c8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3109087
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#76417}
parent 3c81b4d8
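
Note (illustrative, not part of the commit message): "acq/rel accesses" are the acquire/release memory orders. The selector changes below comment that the concrete memory order is ignored, because the emitted sequences are at least as strong as any requested order. A minimal sketch of the contract being implemented, assuming an ordinary std::atomic field stands in for a tagged heap slot:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint64_t> tagged_slot{0};  // stand-in for a tagged heap slot

    // An acquire load paired with a release store is the ordering contract
    // the new kAtomicLoad*/kAtomicStore* lowerings have to honor.
    uint64_t LoadAcquire() {
      return tagged_slot.load(std::memory_order_acquire);
    }

    void StoreRelease(uint64_t value) {
      tagged_slot.store(value, std::memory_order_release);
    }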
@@ -807,7 +807,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                            i.InputDoubleRegister(0), DetermineStubCallMode());
       break;
-    case kArchStoreWithWriteBarrier: {
+    case kArchStoreWithWriteBarrier:  // Fall through.
+    case kArchAtomicStoreWithWriteBarrier: {
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
       AddressingMode addressing_mode =
@@ -824,11 +825,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       auto ool = zone()->New<OutOfLineRecordWrite>(
           this, object, offset, value, mode, DetermineStubCallMode());
-      if (addressing_mode == kMode_MRI) {
-        __ St_d(value, MemOperand(object, i.InputInt64(1)));
+      if (arch_opcode == kArchStoreWithWriteBarrier) {
+        if (addressing_mode == kMode_MRI) {
+          __ St_d(value, MemOperand(object, i.InputInt64(1)));
+        } else {
+          DCHECK_EQ(addressing_mode, kMode_MRR);
+          __ St_d(value, MemOperand(object, i.InputRegister(1)));
+        }
       } else {
-        DCHECK_EQ(addressing_mode, kMode_MRR);
-        __ St_d(value, MemOperand(object, i.InputRegister(1)));
+        DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+        DCHECK_EQ(addressing_mode, kMode_MRI);
+        UseScratchRegisterScope temps(tasm());
+        Register scratch = temps.Acquire();
+        __ Add_d(scratch, object, Operand(i.InputInt64(1)));
+        __ amswap_db_d(zero_reg, value, scratch);
       }
       if (mode > RecordWriteMode::kValueIsPointer) {
         __ JumpIfSmi(value, ool->exit());
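
Side note (illustrative, not part of the commit): amswap_db_d is LoongArch's atomic swap with full-barrier ("db") semantics, and directing its result to zero_reg discards the old value, so the swap degenerates into a barriered atomic store. A minimal portable sketch of the same trick, assuming a plain std::atomic slot stands in for the tagged field:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint64_t> slot{0};  // stand-in for a tagged heap slot

    // An atomic exchange whose result is thrown away behaves as an atomic
    // store with the same ordering; this is the C++ analogue of
    // amswap_db_d writing the old value into zero_reg.
    void StoreViaSwap(uint64_t value) {
      (void)slot.exchange(value, std::memory_order_seq_cst);
    }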
@@ -1604,6 +1614,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kAtomicStoreWord32:
       ASSEMBLE_ATOMIC_STORE_INTEGER(St_w);
       break;
+    case kLoong64StoreCompressTagged:
     case kLoong64Word64AtomicStoreWord64:
       ASSEMBLE_ATOMIC_STORE_INTEGER(St_d);
       break;
@@ -360,6 +360,7 @@ namespace compiler {
   V(Loong64I16x8UConvertI8x16High)  \
   V(Loong64I8x16SConvertI16x8)      \
   V(Loong64I8x16UConvertI16x8)      \
+  V(Loong64StoreCompressTagged)     \
   V(Loong64Word64AtomicLoadUint32)  \
   V(Loong64Word64AtomicLoadUint64)  \
   V(Loong64Word64AtomicStoreWord64) \
@@ -1919,12 +1919,43 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
 }

 void VisitAtomicLoad(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode, AtomicWidth width) {
+                     AtomicWidth width) {
   Loong64OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  if (g.CanBeImmediate(index, opcode)) {
-    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+
+  // The memory order is ignored.
+  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+  LoadRepresentation load_rep = atomic_load_params.representation();
+  InstructionCode code;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+      code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+      code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      code = (width == AtomicWidth::kWord32) ? kAtomicLoadWord32
+                                             : kLoong64Word64AtomicLoadUint32;
+      break;
+    case MachineRepresentation::kWord64:
+      code = kLoong64Word64AtomicLoadUint64;
+      break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:
+      DCHECK_EQ(kTaggedSize, 8);
+      code = kLoong64Word64AtomicLoadUint64;
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  if (g.CanBeImmediate(index, code)) {
+    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                    AtomicWidthField::encode(width),
                    g.DefineAsRegister(node), g.UseRegister(base),
                    g.UseImmediate(index));
@@ -1933,20 +1964,67 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
     selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
                    addr_reg, g.UseRegister(index), g.UseRegister(base));
     // Emit desired load opcode, using temp addr_reg.
-    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+                   AtomicWidthField::encode(width),
                    g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
   }
 }

 void VisitAtomicStore(InstructionSelector* selector, Node* node,
-                      ArchOpcode opcode, AtomicWidth width) {
+                      AtomicWidth width) {
   Loong64OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
-  if (g.CanBeImmediate(index, opcode)) {
-    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+
+  // The memory order is ignored.
+  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+  WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+  MachineRepresentation rep = store_params.representation();
+
+  if (FLAG_enable_unconditional_write_barriers &&
+      CanBeTaggedOrCompressedPointer(rep)) {
+    write_barrier_kind = kFullWriteBarrier;
+  }
+
+  InstructionCode code;
+
+  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+    DCHECK(CanBeTaggedPointer(rep));
+    DCHECK_EQ(kTaggedSize, 8);
+
+    RecordWriteMode record_write_mode =
+        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+    code = kArchAtomicStoreWithWriteBarrier;
+    code |= MiscField::encode(static_cast<int>(record_write_mode));
+  } else {
+    switch (rep) {
+      case MachineRepresentation::kWord8:
+        code = kAtomicStoreWord8;
+        break;
+      case MachineRepresentation::kWord16:
+        code = kAtomicStoreWord16;
+        break;
+      case MachineRepresentation::kWord32:
+        code = kAtomicStoreWord32;
+        break;
+      case MachineRepresentation::kWord64:
+        DCHECK_EQ(width, AtomicWidth::kWord64);
+        code = kLoong64Word64AtomicStoreWord64;
+        break;
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
+      case MachineRepresentation::kTagged:
+        DCHECK_EQ(kTaggedSize, 8);
+        code = kLoong64StoreCompressTagged;
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  if (g.CanBeImmediate(index, code)) {
+    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                    AtomicWidthField::encode(width),
                    g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
                    g.UseRegisterOrImmediateZero(value));
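
Note (illustrative, not from the commit): the selector above routes any tagged atomic store that still needs a write barrier to the architecture-independent kArchAtomicStoreWithWriteBarrier opcode and lets everything else fall through to a width-specific plain store. A condensed sketch of that decision, with hypothetical names standing in for the V8 types:

    // Hypothetical stand-ins for the opcode values chosen above.
    enum class StoreKind { kAtomicWithWriteBarrier, kPlainAtomic };

    // Tagged stores that need a write barrier take the generic opcode;
    // everything else picks a width-specific plain atomic store.
    StoreKind ClassifyAtomicStore(bool needs_write_barrier,
                                  bool write_barriers_disabled) {
      if (needs_write_barrier && !write_barriers_disabled) {
        return StoreKind::kAtomicWithWriteBarrier;
      }
      return StoreKind::kPlainAtomic;
    }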
@@ -1955,7 +2033,7 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
     selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
                    addr_reg, g.UseRegister(index), g.UseRegister(base));
     // Emit desired store opcode, using temp addr_reg.
-    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                    AtomicWidthField::encode(width),
                    g.NoOutput(), addr_reg, g.TempImmediate(0),
                    g.UseRegisterOrImmediateZero(value));
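
Note (illustrative): both helpers share the same addressing fallback. If the index fits the instruction's immediate field, a single MRI-mode access is emitted; otherwise base + index is first materialized into a temporary register and the access uses offset 0. A self-contained sketch of the pattern, assuming a 12-bit signed immediate field (the exact range depends on the instruction, and the print calls stand in for InstructionSelector::Emit):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical immediate check; not a V8 API.
    bool FitsImmediate(int64_t index) {
      return index >= -2048 && index < 2048;
    }

    void SelectAtomicAccess(int64_t index) {
      if (FitsImmediate(index)) {
        // One instruction: access [base + index] in MRI mode.
        std::printf("access MRI base, #%lld\n", (long long)index);
      } else {
        // Materialize the address first, then access [addr_reg + 0].
        std::printf("add addr_reg, base, index\n");
        std::printf("access MRI addr_reg, #0\n");
      }
    }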
@@ -2396,87 +2474,19 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
 }

 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  ArchOpcode opcode;
-  switch (load_rep.representation()) {
-    case MachineRepresentation::kWord8:
-      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
-      break;
-    case MachineRepresentation::kWord32:
-      opcode = kAtomicLoadWord32;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
+  VisitAtomicLoad(this, node, AtomicWidth::kWord32);
 }

 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
-  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  ArchOpcode opcode;
-  switch (rep) {
-    case MachineRepresentation::kWord8:
-      opcode = kAtomicStoreWord8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = kAtomicStoreWord16;
-      break;
-    case MachineRepresentation::kWord32:
-      opcode = kAtomicStoreWord32;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
+  VisitAtomicStore(this, node, AtomicWidth::kWord32);
 }

 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  ArchOpcode opcode;
-  switch (load_rep.representation()) {
-    case MachineRepresentation::kWord8:
-      opcode = kAtomicLoadUint8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = kAtomicLoadUint16;
-      break;
-    case MachineRepresentation::kWord32:
-      opcode = kLoong64Word64AtomicLoadUint32;
-      break;
-    case MachineRepresentation::kWord64:
-      opcode = kLoong64Word64AtomicLoadUint64;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
+  VisitAtomicLoad(this, node, AtomicWidth::kWord64);
 }

 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
-  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  ArchOpcode opcode;
-  switch (rep) {
-    case MachineRepresentation::kWord8:
-      opcode = kAtomicStoreWord8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = kAtomicStoreWord16;
-      break;
-    case MachineRepresentation::kWord32:
-      opcode = kAtomicStoreWord32;
-      break;
-    case MachineRepresentation::kWord64:
-      opcode = kLoong64Word64AtomicStoreWord64;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64);
+  VisitAtomicStore(this, node, AtomicWidth::kWord64);
 }

 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -829,7 +829,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                            i.InputDoubleRegister(0), DetermineStubCallMode());
       break;
-    case kArchStoreWithWriteBarrier: {
+    case kArchStoreWithWriteBarrier:  // Fall through.
+    case kArchAtomicStoreWithWriteBarrier: {
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
       Register object = i.InputRegister(0);
@@ -841,7 +842,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                                                    scratch0, scratch1, mode,
                                                    DetermineStubCallMode());
       __ Daddu(kScratchReg, object, index);
-      __ Sd(value, MemOperand(kScratchReg));
+      if (arch_opcode == kArchStoreWithWriteBarrier) {
+        __ Sd(value, MemOperand(kScratchReg));
+      } else {
+        DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+        __ sync();
+        __ Sd(value, MemOperand(kScratchReg));
+        __ sync();
+      }
       if (mode > RecordWriteMode::kValueIsPointer) {
         __ JumpIfSmi(value, ool->exit());
       }
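
Note (illustrative, not from the commit): sync; store; sync is the conventional MIPS lowering of a sequentially consistent store. The leading barrier orders the store against earlier accesses, the trailing one against later accesses, which is why the selector can ignore the exact memory order requested. A minimal C++ sketch of the semantics, assuming a plain std::atomic slot stands in for the tagged field:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint64_t> slot{0};  // stand-in for a tagged heap slot

    // A seq_cst store is what the sync/Sd/sync sequence above provides.
    void StoreSeqCst(uint64_t value) {
      slot.store(value, std::memory_order_seq_cst);
    }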
@@ -1900,6 +1908,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kAtomicStoreWord32:
       ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
       break;
+    case kMips64StoreCompressTagged:
     case kMips64Word64AtomicStoreWord64:
       ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
       break;
@@ -397,6 +397,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kMips64Uswc1:
     case kMips64Sync:
     case kMips64S128StoreLane:
+    case kMips64StoreCompressTagged:
    case kMips64Word64AtomicStoreWord64:
    case kMips64Word64AtomicAddUint64:
    case kMips64Word64AtomicSubUint64: