Commit 5eb08ce2 authored by Yu Yin, committed by V8 LUCI CQ

[mips64][loong64] Support acq/rel accesses and atomic accesses on tagged

Port commit 6a487504

Change-Id: I6fd1a15f1f35fd3b8cd3cfb72d7c428d018273c8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3109087
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#76417}
parent 3c81b4d8
@@ -807,7 +807,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
case kArchStoreWithWriteBarrier: // Fall through.
case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
AddressingMode addressing_mode =
@@ -824,11 +825,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode());
if (addressing_mode == kMode_MRI) {
__ St_d(value, MemOperand(object, i.InputInt64(1)));
if (arch_opcode == kArchStoreWithWriteBarrier) {
if (addressing_mode == kMode_MRI) {
__ St_d(value, MemOperand(object, i.InputInt64(1)));
} else {
DCHECK_EQ(addressing_mode, kMode_MRR);
__ St_d(value, MemOperand(object, i.InputRegister(1)));
}
} else {
DCHECK_EQ(addressing_mode, kMode_MRR);
__ St_d(value, MemOperand(object, i.InputRegister(1)));
DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
DCHECK_EQ(addressing_mode, kMode_MRI);
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
__ Add_d(scratch, object, Operand(i.InputInt64(1)));
__ amswap_db_d(zero_reg, value, scratch);
}
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
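On LOONG64 there is no ordinary store instruction with sequentially consistent semantics, so the atomic path above materializes the address in a scratch register and issues amswap_db.d with zero_reg as the result register: an atomic swap whose old value is discarded, with the _db variant supplying the barrier. A rough C++ analogue of the guarantee (an illustrative sketch, not V8 code):

#include <atomic>
#include <cstdint>

// Illustrative analogue of the amswap_db.d sequence above: an atomic
// exchange whose result is ignored behaves like a seq_cst store.
void SeqCstTaggedStore(uint64_t* slot, uint64_t value) {
  std::atomic_ref<uint64_t> ref(*slot);
  ref.exchange(value, std::memory_order_seq_cst);  // old value discarded
}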
@@ -1604,6 +1614,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(St_w);
break;
case kLoong64StoreCompressTagged:
case kLoong64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(St_d);
break;
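The new kLoong64StoreCompressTagged case simply shares the 64-bit atomic store macro. Assuming ASSEMBLE_ATOMIC_STORE_INTEGER follows the usual barrier/store/barrier pattern, which the MIPS64 port below spells out explicitly with sync(), its expansion would look roughly like this (a hypothetical sketch; the real macro lives in this file's prologue and may differ in detail):

// Hypothetical expansion of ASSEMBLE_ATOMIC_STORE_INTEGER(St_d).
{
  __ dbar(0);                          // full barrier before the store
  __ St_d(i.InputOrZeroRegister(2),    // value, or zero_reg for 0
          i.MemoryOperand());          // [base + offset] from inputs 0-1
  __ dbar(0);                          // full barrier after the store
}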
......
@@ -360,6 +360,7 @@ namespace compiler {
V(Loong64I16x8UConvertI8x16High) \
V(Loong64I8x16SConvertI16x8) \
V(Loong64I8x16UConvertI16x8) \
V(Loong64StoreCompressTagged) \
V(Loong64Word64AtomicLoadUint32) \
V(Loong64Word64AtomicLoadUint64) \
V(Loong64Word64AtomicStoreWord64) \
......
@@ -1919,12 +1919,43 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
ArchOpcode opcode, AtomicWidth width) {
AtomicWidth width) {
Loong64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
// The memory order is ignored.
AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
LoadRepresentation load_rep = atomic_load_params.representation();
InstructionCode code;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
code = (width == AtomicWidth::kWord32) ? kAtomicLoadWord32
: kLoong64Word64AtomicLoadUint32;
break;
case MachineRepresentation::kWord64:
code = kLoong64Word64AtomicLoadUint64;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
DCHECK_EQ(kTaggedSize, 8);
code = kLoong64Word64AtomicLoadUint64;
break;
default:
UNREACHABLE();
}
if (g.CanBeImmediate(index, code)) {
selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
@@ -1933,20 +1964,67 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
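The tagged cases can reuse kLoong64Word64AtomicLoadUint64 only because this port runs without pointer compression, so a tagged slot occupies a full 8 bytes; DCHECK_EQ(kTaggedSize, 8) pins that assumption. A compile-time sketch of the invariant (illustrative):

// With pointer compression, kTaggedSize would be 4 and an atomic tagged
// load would need a 32-bit opcode plus a decompression step instead.
static_assert(kTaggedSize == kInt64Size,
              "atomic tagged loads may reuse the 64-bit atomic load");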
void VisitAtomicStore(InstructionSelector* selector, Node* node,
ArchOpcode opcode, AtomicWidth width) {
AtomicWidth width) {
Loong64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
// The memory order is ignored.
AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
MachineRepresentation rep = store_params.representation();
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
InstructionCode code;
if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
DCHECK(CanBeTaggedPointer(rep));
DCHECK_EQ(kTaggedSize, 8);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
code = kArchAtomicStoreWithWriteBarrier;
code |= MiscField::encode(static_cast<int>(record_write_mode));
} else {
switch (rep) {
case MachineRepresentation::kWord8:
code = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
code = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
code = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
DCHECK_EQ(width, AtomicWidth::kWord64);
code = kLoong64Word64AtomicStoreWord64;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
DCHECK_EQ(kTaggedSize, 8);
code = kLoong64StoreCompressTagged;
break;
default:
UNREACHABLE();
}
}
if (g.CanBeImmediate(index, code)) {
selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
g.UseRegisterOrImmediateZero(value));
@@ -1955,7 +2033,7 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.NoOutput(), addr_reg, g.TempImmediate(0),
g.UseRegisterOrImmediateZero(value));
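An InstructionCode is an ArchOpcode packed together with bit fields, which is how the write-barrier path carries the RecordWriteMode through to the back end: MiscField::encode here, MiscField::decode(instr->opcode()) in the code-generator hunks above. A reduced sketch of that round trip (field positions are illustrative, not the real layout):

#include <cstdint>

// Reduced sketch of the bit-field round trip; the real field layout is
// defined in src/compiler/backend/instruction-codes.h.
template <typename T, int kShift, int kBits>
struct FieldSketch {
  static uint32_t encode(T v) { return static_cast<uint32_t>(v) << kShift; }
  static T decode(uint32_t code) {
    return static_cast<T>((code >> kShift) & ((1u << kBits) - 1));
  }
};
using MiscFieldSketch = FieldSketch<int, 22, 10>;  // illustrative layout

// Selector:       code |= MiscFieldSketch::encode(static_cast<int>(mode));
// Code generator: mode = static_cast<RecordWriteMode>(MiscFieldSketch::decode(code));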
@@ -2396,87 +2474,19 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
}
VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
}
VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
opcode = kLoong64Word64AtomicLoadUint32;
break;
case MachineRepresentation::kWord64:
opcode = kLoong64Word64AtomicLoadUint64;
break;
default:
UNREACHABLE();
}
VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
opcode = kLoong64Word64AtomicStoreWord64;
break;
default:
UNREACHABLE();
}
VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64);
VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
......
@@ -829,7 +829,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
case kArchStoreWithWriteBarrier: // Fall through.
case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -841,7 +842,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Daddu(kScratchReg, object, index);
__ Sd(value, MemOperand(kScratchReg));
if (arch_opcode == kArchStoreWithWriteBarrier) {
__ Sd(value, MemOperand(kScratchReg));
} else {
DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
__ sync();
__ Sd(value, MemOperand(kScratchReg));
__ sync();
}
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
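On MIPS64 the sequentially consistent store is the classic sync; sd; sync mapping, and the write barrier itself is skipped when the stored value is a Smi, since a Smi carries no heap pointer. Conceptually (an illustrative sketch with made-up helper names, not V8 internals):

#include <atomic>
#include <cstdint>

using Tagged = uint64_t;                        // one tagged word on this port
inline bool IsSmi(Tagged v) { return (v & 1) == 0; }  // low tag bit clear: Smi
void RecordWrite(Tagged* slot, Tagged value);   // stands in for the OOL stub

// Conceptual shape of an atomic tagged store plus write barrier.
void AtomicWriteTaggedField(Tagged* slot, Tagged value) {
  std::atomic_ref<Tagged> ref(*slot);
  ref.store(value, std::memory_order_seq_cst);  // sync; sd; sync on MIPS64
  if (IsSmi(value)) return;    // no pointer payload, no barrier needed
  RecordWrite(slot, value);    // out-of-line slow path
}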
@@ -1900,6 +1908,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
......
@@ -11,392 +11,393 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(Mips64Add) \
V(Mips64Dadd) \
V(Mips64DaddOvf) \
V(Mips64Sub) \
V(Mips64Dsub) \
V(Mips64DsubOvf) \
V(Mips64Mul) \
V(Mips64MulOvf) \
V(Mips64MulHigh) \
V(Mips64DMulHigh) \
V(Mips64MulHighU) \
V(Mips64Dmul) \
V(Mips64Div) \
V(Mips64Ddiv) \
V(Mips64DivU) \
V(Mips64DdivU) \
V(Mips64Mod) \
V(Mips64Dmod) \
V(Mips64ModU) \
V(Mips64DmodU) \
V(Mips64And) \
V(Mips64And32) \
V(Mips64Or) \
V(Mips64Or32) \
V(Mips64Nor) \
V(Mips64Nor32) \
V(Mips64Xor) \
V(Mips64Xor32) \
V(Mips64Clz) \
V(Mips64Lsa) \
V(Mips64Dlsa) \
V(Mips64Shl) \
V(Mips64Shr) \
V(Mips64Sar) \
V(Mips64Ext) \
V(Mips64Ins) \
V(Mips64Dext) \
V(Mips64Dins) \
V(Mips64Dclz) \
V(Mips64Ctz) \
V(Mips64Dctz) \
V(Mips64Popcnt) \
V(Mips64Dpopcnt) \
V(Mips64Dshl) \
V(Mips64Dshr) \
V(Mips64Dsar) \
V(Mips64Ror) \
V(Mips64Dror) \
V(Mips64Mov) \
V(Mips64Tst) \
V(Mips64Cmp) \
V(Mips64CmpS) \
V(Mips64AddS) \
V(Mips64SubS) \
V(Mips64MulS) \
V(Mips64DivS) \
V(Mips64AbsS) \
V(Mips64NegS) \
V(Mips64SqrtS) \
V(Mips64MaxS) \
V(Mips64MinS) \
V(Mips64CmpD) \
V(Mips64AddD) \
V(Mips64SubD) \
V(Mips64MulD) \
V(Mips64DivD) \
V(Mips64ModD) \
V(Mips64AbsD) \
V(Mips64NegD) \
V(Mips64SqrtD) \
V(Mips64MaxD) \
V(Mips64MinD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
V(Mips64Float64RoundTiesEven) \
V(Mips64Float32RoundDown) \
V(Mips64Float32RoundTruncate) \
V(Mips64Float32RoundUp) \
V(Mips64Float32RoundTiesEven) \
V(Mips64CvtSD) \
V(Mips64CvtDS) \
V(Mips64TruncWD) \
V(Mips64RoundWD) \
V(Mips64FloorWD) \
V(Mips64CeilWD) \
V(Mips64TruncWS) \
V(Mips64RoundWS) \
V(Mips64FloorWS) \
V(Mips64CeilWS) \
V(Mips64TruncLS) \
V(Mips64TruncLD) \
V(Mips64TruncUwD) \
V(Mips64TruncUwS) \
V(Mips64TruncUlS) \
V(Mips64TruncUlD) \
V(Mips64CvtDW) \
V(Mips64CvtSL) \
V(Mips64CvtSW) \
V(Mips64CvtSUw) \
V(Mips64CvtSUl) \
V(Mips64CvtDL) \
V(Mips64CvtDUw) \
V(Mips64CvtDUl) \
V(Mips64Lb) \
V(Mips64Lbu) \
V(Mips64Sb) \
V(Mips64Lh) \
V(Mips64Ulh) \
V(Mips64Lhu) \
V(Mips64Ulhu) \
V(Mips64Sh) \
V(Mips64Ush) \
V(Mips64Ld) \
V(Mips64Uld) \
V(Mips64Lw) \
V(Mips64Ulw) \
V(Mips64Lwu) \
V(Mips64Ulwu) \
V(Mips64Sw) \
V(Mips64Usw) \
V(Mips64Sd) \
V(Mips64Usd) \
V(Mips64Lwc1) \
V(Mips64Ulwc1) \
V(Mips64Swc1) \
V(Mips64Uswc1) \
V(Mips64Ldc1) \
V(Mips64Uldc1) \
V(Mips64Sdc1) \
V(Mips64Usdc1) \
V(Mips64BitcastDL) \
V(Mips64BitcastLD) \
V(Mips64Float64ExtractLowWord32) \
V(Mips64Float64ExtractHighWord32) \
V(Mips64Float64InsertLowWord32) \
V(Mips64Float64InsertHighWord32) \
V(Mips64Float32Max) \
V(Mips64Float64Max) \
V(Mips64Float32Min) \
V(Mips64Float64Min) \
V(Mips64Float64SilenceNaN) \
V(Mips64Push) \
V(Mips64Peek) \
V(Mips64StoreToStackSlot) \
V(Mips64ByteSwap64) \
V(Mips64ByteSwap32) \
V(Mips64StackClaim) \
V(Mips64Seb) \
V(Mips64Seh) \
V(Mips64Sync) \
V(Mips64AssertEqual) \
V(Mips64S128Const) \
V(Mips64S128Zero) \
V(Mips64S128AllOnes) \
V(Mips64I32x4Splat) \
V(Mips64I32x4ExtractLane) \
V(Mips64I32x4ReplaceLane) \
V(Mips64I32x4Add) \
V(Mips64I32x4Sub) \
V(Mips64F64x2Abs) \
V(Mips64F64x2Neg) \
V(Mips64F32x4Splat) \
V(Mips64F32x4ExtractLane) \
V(Mips64F32x4ReplaceLane) \
V(Mips64F32x4SConvertI32x4) \
V(Mips64F32x4UConvertI32x4) \
V(Mips64I32x4Mul) \
V(Mips64I32x4MaxS) \
V(Mips64I32x4MinS) \
V(Mips64I32x4Eq) \
V(Mips64I32x4Ne) \
V(Mips64I32x4Shl) \
V(Mips64I32x4ShrS) \
V(Mips64I32x4ShrU) \
V(Mips64I32x4MaxU) \
V(Mips64I32x4MinU) \
V(Mips64F64x2Sqrt) \
V(Mips64F64x2Add) \
V(Mips64F64x2Sub) \
V(Mips64F64x2Mul) \
V(Mips64F64x2Div) \
V(Mips64F64x2Min) \
V(Mips64F64x2Max) \
V(Mips64F64x2Eq) \
V(Mips64F64x2Ne) \
V(Mips64F64x2Lt) \
V(Mips64F64x2Le) \
V(Mips64F64x2Splat) \
V(Mips64F64x2ExtractLane) \
V(Mips64F64x2ReplaceLane) \
V(Mips64F64x2Pmin) \
V(Mips64F64x2Pmax) \
V(Mips64F64x2Ceil) \
V(Mips64F64x2Floor) \
V(Mips64F64x2Trunc) \
V(Mips64F64x2NearestInt) \
V(Mips64F64x2ConvertLowI32x4S) \
V(Mips64F64x2ConvertLowI32x4U) \
V(Mips64F64x2PromoteLowF32x4) \
V(Mips64I64x2Splat) \
V(Mips64I64x2ExtractLane) \
V(Mips64I64x2ReplaceLane) \
V(Mips64I64x2Add) \
V(Mips64I64x2Sub) \
V(Mips64I64x2Mul) \
V(Mips64I64x2Neg) \
V(Mips64I64x2Shl) \
V(Mips64I64x2ShrS) \
V(Mips64I64x2ShrU) \
V(Mips64I64x2BitMask) \
V(Mips64I64x2Eq) \
V(Mips64I64x2Ne) \
V(Mips64I64x2GtS) \
V(Mips64I64x2GeS) \
V(Mips64I64x2Abs) \
V(Mips64I64x2SConvertI32x4Low) \
V(Mips64I64x2SConvertI32x4High) \
V(Mips64I64x2UConvertI32x4Low) \
V(Mips64I64x2UConvertI32x4High) \
V(Mips64ExtMulLow) \
V(Mips64ExtMulHigh) \
V(Mips64ExtAddPairwise) \
V(Mips64F32x4Abs) \
V(Mips64F32x4Neg) \
V(Mips64F32x4Sqrt) \
V(Mips64F32x4RecipApprox) \
V(Mips64F32x4RecipSqrtApprox) \
V(Mips64F32x4Add) \
V(Mips64F32x4Sub) \
V(Mips64F32x4Mul) \
V(Mips64F32x4Div) \
V(Mips64F32x4Max) \
V(Mips64F32x4Min) \
V(Mips64F32x4Eq) \
V(Mips64F32x4Ne) \
V(Mips64F32x4Lt) \
V(Mips64F32x4Le) \
V(Mips64F32x4Pmin) \
V(Mips64F32x4Pmax) \
V(Mips64F32x4Ceil) \
V(Mips64F32x4Floor) \
V(Mips64F32x4Trunc) \
V(Mips64F32x4NearestInt) \
V(Mips64F32x4DemoteF64x2Zero) \
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \
V(Mips64I32x4GtS) \
V(Mips64I32x4GeS) \
V(Mips64I32x4GtU) \
V(Mips64I32x4GeU) \
V(Mips64I32x4Abs) \
V(Mips64I32x4BitMask) \
V(Mips64I32x4DotI16x8S) \
V(Mips64I32x4TruncSatF64x2SZero) \
V(Mips64I32x4TruncSatF64x2UZero) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
V(Mips64I16x8ReplaceLane) \
V(Mips64I16x8Neg) \
V(Mips64I16x8Shl) \
V(Mips64I16x8ShrS) \
V(Mips64I16x8ShrU) \
V(Mips64I16x8Add) \
V(Mips64I16x8AddSatS) \
V(Mips64I16x8Sub) \
V(Mips64I16x8SubSatS) \
V(Mips64I16x8Mul) \
V(Mips64I16x8MaxS) \
V(Mips64I16x8MinS) \
V(Mips64I16x8Eq) \
V(Mips64I16x8Ne) \
V(Mips64I16x8GtS) \
V(Mips64I16x8GeS) \
V(Mips64I16x8AddSatU) \
V(Mips64I16x8SubSatU) \
V(Mips64I16x8MaxU) \
V(Mips64I16x8MinU) \
V(Mips64I16x8GtU) \
V(Mips64I16x8GeU) \
V(Mips64I16x8RoundingAverageU) \
V(Mips64I16x8Abs) \
V(Mips64I16x8BitMask) \
V(Mips64I16x8Q15MulRSatS) \
V(Mips64I8x16Splat) \
V(Mips64I8x16ExtractLaneU) \
V(Mips64I8x16ExtractLaneS) \
V(Mips64I8x16ReplaceLane) \
V(Mips64I8x16Neg) \
V(Mips64I8x16Shl) \
V(Mips64I8x16ShrS) \
V(Mips64I8x16Add) \
V(Mips64I8x16AddSatS) \
V(Mips64I8x16Sub) \
V(Mips64I8x16SubSatS) \
V(Mips64I8x16MaxS) \
V(Mips64I8x16MinS) \
V(Mips64I8x16Eq) \
V(Mips64I8x16Ne) \
V(Mips64I8x16GtS) \
V(Mips64I8x16GeS) \
V(Mips64I8x16ShrU) \
V(Mips64I8x16AddSatU) \
V(Mips64I8x16SubSatU) \
V(Mips64I8x16MaxU) \
V(Mips64I8x16MinU) \
V(Mips64I8x16GtU) \
V(Mips64I8x16GeU) \
V(Mips64I8x16RoundingAverageU) \
V(Mips64I8x16Abs) \
V(Mips64I8x16Popcnt) \
V(Mips64I8x16BitMask) \
V(Mips64S128And) \
V(Mips64S128Or) \
V(Mips64S128Xor) \
V(Mips64S128Not) \
V(Mips64S128Select) \
V(Mips64S128AndNot) \
V(Mips64I64x2AllTrue) \
V(Mips64I32x4AllTrue) \
V(Mips64I16x8AllTrue) \
V(Mips64I8x16AllTrue) \
V(Mips64V128AnyTrue) \
V(Mips64S32x4InterleaveRight) \
V(Mips64S32x4InterleaveLeft) \
V(Mips64S32x4PackEven) \
V(Mips64S32x4PackOdd) \
V(Mips64S32x4InterleaveEven) \
V(Mips64S32x4InterleaveOdd) \
V(Mips64S32x4Shuffle) \
V(Mips64S16x8InterleaveRight) \
V(Mips64S16x8InterleaveLeft) \
V(Mips64S16x8PackEven) \
V(Mips64S16x8PackOdd) \
V(Mips64S16x8InterleaveEven) \
V(Mips64S16x8InterleaveOdd) \
V(Mips64S16x4Reverse) \
V(Mips64S16x2Reverse) \
V(Mips64S8x16InterleaveRight) \
V(Mips64S8x16InterleaveLeft) \
V(Mips64S8x16PackEven) \
V(Mips64S8x16PackOdd) \
V(Mips64S8x16InterleaveEven) \
V(Mips64S8x16InterleaveOdd) \
V(Mips64I8x16Shuffle) \
V(Mips64I8x16Swizzle) \
V(Mips64S8x16Concat) \
V(Mips64S8x8Reverse) \
V(Mips64S8x4Reverse) \
V(Mips64S8x2Reverse) \
V(Mips64S128LoadSplat) \
V(Mips64S128Load8x8S) \
V(Mips64S128Load8x8U) \
V(Mips64S128Load16x4S) \
V(Mips64S128Load16x4U) \
V(Mips64S128Load32x2S) \
V(Mips64S128Load32x2U) \
V(Mips64S128Load32Zero) \
V(Mips64S128Load64Zero) \
V(Mips64S128LoadLane) \
V(Mips64S128StoreLane) \
V(Mips64MsaLd) \
V(Mips64MsaSt) \
V(Mips64I32x4SConvertI16x8Low) \
V(Mips64I32x4SConvertI16x8High) \
V(Mips64I32x4UConvertI16x8Low) \
V(Mips64I32x4UConvertI16x8High) \
V(Mips64I16x8SConvertI8x16Low) \
V(Mips64I16x8SConvertI8x16High) \
V(Mips64I16x8SConvertI32x4) \
V(Mips64I16x8UConvertI32x4) \
V(Mips64I16x8UConvertI8x16Low) \
V(Mips64I16x8UConvertI8x16High) \
V(Mips64I8x16SConvertI16x8) \
V(Mips64I8x16UConvertI16x8) \
V(Mips64StoreCompressTagged) \
V(Mips64Word64AtomicLoadUint64) \
V(Mips64Word64AtomicStoreWord64) \
V(Mips64Word64AtomicAddUint64) \
V(Mips64Word64AtomicSubUint64) \
V(Mips64Word64AtomicAndUint64) \
V(Mips64Word64AtomicOrUint64) \
V(Mips64Word64AtomicXorUint64) \
V(Mips64Word64AtomicExchangeUint64) \
V(Mips64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
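TARGET_ARCH_OPCODE_LIST is an X-macro: the backend lists its opcodes once, and shared headers expand the list into the ArchOpcode enum, mnemonic tables, and so on, so the one-line addition of V(Mips64StoreCompressTagged) is the entire declaration the new opcode needs. A minimal sketch of the expansion (simplified; the real plumbing also folds in the common opcodes such as kArchAtomicStoreWithWriteBarrier):

// Minimal X-macro sketch; see src/compiler/backend/instruction-codes.h
// for the real expansion.
#define DECLARE_OPCODE(Name) k##Name,
enum ArchOpcodeSketch { TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE) };
#undef DECLARE_OPCODE
// kMips64StoreCompressTagged is now a valid enumerator.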
......
@@ -397,6 +397,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Uswc1:
case kMips64Sync:
case kMips64S128StoreLane:
case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
case kMips64Word64AtomicAddUint64:
case kMips64Word64AtomicSubUint64:
......
@@ -2035,10 +2035,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad() ||
m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
} else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
LoadRepresentation load_rep = atomic_load_params.representation();
return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -2138,12 +2141,42 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
ArchOpcode opcode, AtomicWidth width) {
AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
// The memory order is ignored.
AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
LoadRepresentation load_rep = atomic_load_params.representation();
InstructionCode code;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
code = kAtomicLoadWord32;
break;
case MachineRepresentation::kWord64:
code = kMips64Word64AtomicLoadUint64;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
DCHECK_EQ(kTaggedSize, 8);
code = kMips64Word64AtomicLoadUint64;
break;
default:
UNREACHABLE();
}
if (g.CanBeImmediate(index, code)) {
selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
@@ -2152,32 +2185,88 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
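The DCHECK_IMPLIES lines encode a subtle rule: sign-extending sub-word atomic loads exist only in the 32-bit protocol, so a 64-bit atomic load of a narrower field must zero-extend. In C++ terms (illustrative):

#include <atomic>
#include <cstdint>

// Widening an atomic uint8_t load zero-extends: 0x80 stays 0x80 rather
// than becoming 0xffffffffffffff80, matching kAtomicLoadUint8 semantics.
uint64_t LoadByteWide(std::atomic<uint8_t>& cell) {
  return cell.load(std::memory_order_seq_cst);
}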
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
ArchOpcode opcode, AtomicWidth width) {
AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
g.UseRegisterOrImmediateZero(value));
// The memory order is ignored.
AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
MachineRepresentation rep = store_params.representation();
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
InstructionCode code;
if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
DCHECK(CanBeTaggedPointer(rep));
DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
code = kArchAtomicStoreWithWriteBarrier;
code |= MiscField::encode(static_cast<int>(record_write_mode));
selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
InstructionOperand addr_reg = g.TempRegister();
selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.NoOutput(), addr_reg, g.TempImmediate(0),
g.UseRegisterOrImmediateZero(value));
switch (rep) {
case MachineRepresentation::kWord8:
code = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
code = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
code = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
DCHECK_EQ(width, AtomicWidth::kWord64);
code = kMips64Word64AtomicStoreWord64;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
code = kMips64StoreCompressTagged;
break;
default:
UNREACHABLE();
}
code |= AtomicWidthField::encode(width);
if (g.CanBeImmediate(index, code)) {
selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
g.UseRegisterOrImmediateZero(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
AtomicWidthField::encode(width),
g.NoOutput(), addr_reg, g.TempImmediate(0),
g.UseRegisterOrImmediateZero(value));
}
}
}
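Unlike the plain-store path, the barrier path never folds the index into an immediate: base, index, and value are all passed with UseUniqueRegister so they stay live, in distinct registers, across the out-of-line RecordWrite call and cannot alias the two scratch temps that become scratch0/scratch1 in the code-generator hunk above. The operand contract in one place (names as in the diff):

// No outputs; three inputs that must each get a private register; two
// scratch temps the code generator may clobber freely.
InstructionOperand inputs[] = {g.UseUniqueRegister(base),
                               g.UseUniqueRegister(index),
                               g.UseUniqueRegister(value)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
               arraysize(temps), temps);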
@@ -2615,87 +2704,19 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
}
VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
}
VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicLoadWord32;
break;
case MachineRepresentation::kWord64:
opcode = kMips64Word64AtomicLoadUint64;
break;
default:
UNREACHABLE();
}
VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
opcode = kMips64Word64AtomicStoreWord64;
break;
default:
UNREACHABLE();
}
VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64);
VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
......