Commit e3aee111 authored by Milad Fa, committed by V8 LUCI CQ

PPC: Use non prefixed instructions when possible

This CL adds a check to all integer and fp load/store operations:
if the offset fits in an `is_int16` and if alignment requirements
are met (specific to lwa, ld and std) then a non prefixed load/store
instruction will be used.

Note that operation mode (MRI vs MRR) gets set during instruction selection.

Change-Id: I68e2aa1d559c7ff058d715e6e577a14b590b632b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3669186
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#80765}
parent 1b13172d
......@@ -461,10 +461,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
MemOperand operand = i.MemoryOperand(&mode); \
bool is_atomic = i.InputInt32(2); \
if (mode == kMode_MRI) { \
if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
__ asm_instrp(result, operand); \
} else { \
intptr_t offset = operand.offset(); \
if (is_int16(offset)) { \
__ asm_instr(result, operand); \
} else { \
CHECK(CpuFeatures::IsSupported(PPC_10_PLUS)); \
__ asm_instrp(result, operand); \
} \
} else { \
__ asm_instrx(result, operand); \
......@@ -473,23 +475,27 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrp, asm_instrx) \
do { \
Register result = i.OutputRegister(); \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
bool is_atomic = i.InputInt32(2); \
if (mode == kMode_MRI) { \
if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
__ asm_instrp(result, operand); \
} else { \
__ asm_instr(result, operand); \
} \
} else { \
__ asm_instrx(result, operand); \
} \
if (is_atomic) __ lwsync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrp, asm_instrx, \
must_be_aligned) \
do { \
Register result = i.OutputRegister(); \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
bool is_atomic = i.InputInt32(2); \
if (mode == kMode_MRI) { \
intptr_t offset = operand.offset(); \
bool misaligned = offset & 3; \
if (is_int16(offset) && (!must_be_aligned || !misaligned)) { \
__ asm_instr(result, operand); \
} else { \
CHECK(CpuFeatures::IsSupported(PPC_10_PLUS)); \
__ asm_instrp(result, operand); \
} \
} else { \
__ asm_instrx(result, operand); \
} \
if (is_atomic) __ lwsync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_LOAD_INTEGER_RR(asm_instr) \
......@@ -515,10 +521,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
/* removed frsp as instruction-selector checked */ \
/* value to be kFloat32 */ \
if (mode == kMode_MRI) { \
if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
__ asm_instrp(value, operand); \
} else { \
intptr_t offset = operand.offset(); \
if (is_int16(offset)) { \
__ asm_instr(value, operand); \
} else { \
CHECK(CpuFeatures::IsSupported(PPC_10_PLUS)); \
__ asm_instrp(value, operand); \
} \
} else { \
__ asm_instrx(value, operand); \
......@@ -527,25 +535,29 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrp, asm_instrx) \
do { \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
Register value = i.InputRegister(index); \
bool is_atomic = i.InputInt32(3); \
if (is_atomic) __ lwsync(); \
if (mode == kMode_MRI) { \
if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
__ asm_instrp(value, operand); \
} else { \
__ asm_instr(value, operand); \
} \
} else { \
__ asm_instrx(value, operand); \
} \
if (is_atomic) __ sync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrp, asm_instrx, \
must_be_aligned) \
do { \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
Register value = i.InputRegister(index); \
bool is_atomic = i.InputInt32(3); \
if (is_atomic) __ lwsync(); \
if (mode == kMode_MRI) { \
intptr_t offset = operand.offset(); \
bool misaligned = offset & 3; \
if (is_int16(offset) && (!must_be_aligned || !misaligned)) { \
__ asm_instr(value, operand); \
} else { \
CHECK(CpuFeatures::IsSupported(PPC_10_PLUS)); \
__ asm_instrp(value, operand); \
} \
} else { \
__ asm_instrx(value, operand); \
} \
if (is_atomic) __ sync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_STORE_INTEGER_RR(asm_instr) \
......@@ -1967,27 +1979,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, plbz, lbzx);
ASSEMBLE_LOAD_INTEGER(lbz, plbz, lbzx, false);
break;
case kPPC_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(lbz, plbz, lbzx);
ASSEMBLE_LOAD_INTEGER(lbz, plbz, lbzx, false);
__ extsb(i.OutputRegister(), i.OutputRegister());
break;
case kPPC_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(lhz, plhz, lhzx);
ASSEMBLE_LOAD_INTEGER(lhz, plhz, lhzx, false);
break;
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, plha, lhax);
ASSEMBLE_LOAD_INTEGER(lha, plha, lhax, false);
break;
case kPPC_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx);
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, plwa, lwax);
ASSEMBLE_LOAD_INTEGER(lwa, plwa, lwax, true);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
ASSEMBLE_LOAD_INTEGER(ld, pld, ldx);
ASSEMBLE_LOAD_INTEGER(ld, pld, ldx, true);
break;
#endif
case kPPC_LoadFloat32:
......@@ -2012,17 +2024,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_StoreWord8:
ASSEMBLE_STORE_INTEGER(stb, pstb, stbx);
ASSEMBLE_STORE_INTEGER(stb, pstb, stbx, false);
break;
case kPPC_StoreWord16:
ASSEMBLE_STORE_INTEGER(sth, psth, sthx);
ASSEMBLE_STORE_INTEGER(sth, psth, sthx, false);
break;
case kPPC_StoreWord32:
ASSEMBLE_STORE_INTEGER(stw, pstw, stwx);
ASSEMBLE_STORE_INTEGER(stw, pstw, stwx, false);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_StoreWord64:
ASSEMBLE_STORE_INTEGER(std, pstd, stdx);
ASSEMBLE_STORE_INTEGER(std, pstd, stdx, true);
break;
#endif
case kPPC_StoreFloat32:
......@@ -3778,23 +3790,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedField,
StoreTaggedField);
StoreTaggedField, true);
break;
}
case kPPC_LoadDecompressTaggedSigned: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx);
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
break;
}
case kPPC_LoadDecompressTaggedPointer: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx);
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
__ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
break;
}
case kPPC_LoadDecompressAnyTagged: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx);
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
__ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
break;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment