Commit 45fad45d authored by Junliang Yan, committed by V8 LUCI CQ

ppc: cleanup and refactor MinF64/MaxF64

Change-Id: I2b1adb84fb62b60e62229252dadbd4c9e4c8042e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3010322
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#75638}
parent 060c2cb1
......@@ -1811,6 +1811,85 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
}
// Emits code computing dst = min(lhs, rhs) for doubles with IEEE 754
// semantics: NaN in either input propagates (via fadd), and the
// signed-zero case min(+0, -0) == -0 is handled explicitly.
// scratch is clobbered on the equal-zeros path.
void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, DoubleRegister scratch) {
Label check_zero, return_left, return_right, return_nan, done;
// Compare lhs and rhs; unordered means at least one NaN.
fcmpu(lhs, rhs);
bunordered(&return_nan);
// Equal operands may still be +0 vs -0; disambiguate below.
beq(&check_zero);
ble(&return_left);
b(&return_right);
bind(&check_zero);
fcmpu(lhs, kDoubleRegZero);
/* left == right != 0. */
bne(&return_left);
/* At this point, both left and right are either 0 or -0. */
/* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */
/* being different registers is most efficiently expressed */
/* as -((-L) - R). */
fneg(scratch, lhs);
// NOTE(review): if scratch aliases rhs, the fneg above has already
// clobbered R, so the fadd below computes (-L) + (-L) rather than
// (-L) + (-R). For L == +0, R == -0 that would yield +0 instead of
// -0 — TODO confirm callers never pass scratch == rhs in that case.
if (scratch == rhs) {
fadd(dst, scratch, rhs);
} else {
fsub(dst, scratch, rhs);
}
fneg(dst, dst);
b(&done);
bind(&return_nan);
/* If left or right are NaN, fadd propagates the appropriate one.*/
fadd(dst, lhs, rhs);
b(&done);
bind(&return_right);
// Avoid a redundant register move when rhs already is dst.
if (rhs != dst) {
fmr(dst, rhs);
}
b(&done);
bind(&return_left);
if (lhs != dst) {
fmr(dst, lhs);
}
bind(&done);
}
// Emits code computing dst = max(lhs, rhs) for doubles with IEEE 754
// semantics: NaN in either input propagates (via fadd), and for equal
// zeros fadd(+0, -0) yields +0, so max(+0, -0) == +0.
// scratch is unused here; presumably kept so Min/Max share a
// signature (see MinF64, which does use it).
void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, DoubleRegister scratch) {
Label check_zero, return_left, return_right, return_nan, done;
// Compare lhs and rhs; unordered means at least one NaN.
fcmpu(lhs, rhs);
bunordered(&return_nan);
// Equal operands may still be +0 vs -0; disambiguate below.
beq(&check_zero);
bge(&return_left);
b(&return_right);
bind(&check_zero);
fcmpu(lhs, kDoubleRegZero);
/* left == right != 0. */
bne(&return_left);
/* At this point, both left and right are either 0 or -0. */
// (+0) + (-0) == +0, which is the correct max of mixed-sign zeros.
fadd(dst, lhs, rhs);
b(&done);
bind(&return_nan);
/* If left or right are NaN, fadd propagates the appropriate one.*/
fadd(dst, lhs, rhs);
b(&done);
bind(&return_right);
// Avoid a redundant register move when rhs already is dst.
if (rhs != dst) {
fmr(dst, rhs);
}
b(&done);
bind(&return_left);
if (lhs != dst) {
fmr(dst, lhs);
}
bind(&done);
}
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
......
......@@ -172,6 +172,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
}
// IEEE 754 double-precision min/max: NaN inputs propagate to dst,
// and signed zeros are ordered (-0 < +0, so MinF64 returns -0 and
// MaxF64 returns +0 for mixed-sign zero inputs). scratch may be
// clobbered and defaults to kScratchDoubleReg.
void MinF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
DoubleRegister scratch = kScratchDoubleReg);
void MaxF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
DoubleRegister scratch = kScratchDoubleReg);
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
......
......@@ -463,91 +463,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
// Emits IEEE 754 f64 max over the instruction's two double inputs:
// NaN in either input propagates via fadd, and for equal zeros
// fadd(left, right) yields +0, i.e. max(+0, -0) == +0.
// (Same sequence as TurboAssembler::MaxF64; candidate for replacement
// by a call to it.)
#define ASSEMBLE_FLOAT_MAX() \
do { \
DoubleRegister left_reg = i.InputDoubleRegister(0); \
DoubleRegister right_reg = i.InputDoubleRegister(1); \
DoubleRegister result_reg = i.OutputDoubleRegister(); \
Label check_zero, return_left, return_right, return_nan, done; \
__ fcmpu(left_reg, right_reg); \
__ bunordered(&return_nan); \
__ beq(&check_zero); \
__ bge(&return_left); \
__ b(&return_right); \
\
__ bind(&check_zero); \
__ fcmpu(left_reg, kDoubleRegZero); \
/* left == right != 0. */ \
__ bne(&return_left); \
/* At this point, both left and right are either 0 or -0. */ \
__ fadd(result_reg, left_reg, right_reg); \
__ b(&done); \
\
__ bind(&return_nan); \
/* If left or right are NaN, fadd propagates the appropriate one.*/ \
__ fadd(result_reg, left_reg, right_reg); \
__ b(&done); \
\
__ bind(&return_right); \
if (right_reg != result_reg) { \
__ fmr(result_reg, right_reg); \
} \
__ b(&done); \
\
__ bind(&return_left); \
if (left_reg != result_reg) { \
__ fmr(result_reg, left_reg); \
} \
__ bind(&done); \
} while (0)
// Emits IEEE 754 f64 min over the instruction's two double inputs:
// NaN in either input propagates via fadd; mixed-sign zeros resolve
// to -0 via the -((-L) - R) trick. kScratchDoubleReg is clobbered.
// NOTE(review): on the path where kScratchDoubleReg == right_reg the
// fneg clobbers R before the fadd, computing (-L) + (-L) — TODO
// confirm right_reg can never be kScratchDoubleReg here.
// (Same sequence as TurboAssembler::MinF64; candidate for replacement
// by a call to it.)
#define ASSEMBLE_FLOAT_MIN() \
do { \
DoubleRegister left_reg = i.InputDoubleRegister(0); \
DoubleRegister right_reg = i.InputDoubleRegister(1); \
DoubleRegister result_reg = i.OutputDoubleRegister(); \
Label check_zero, return_left, return_right, return_nan, done; \
__ fcmpu(left_reg, right_reg); \
__ bunordered(&return_nan); \
__ beq(&check_zero); \
__ ble(&return_left); \
__ b(&return_right); \
\
__ bind(&check_zero); \
__ fcmpu(left_reg, kDoubleRegZero); \
/* left == right != 0. */ \
__ bne(&return_left); \
/* At this point, both left and right are either 0 or -0. */ \
/* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */ \
/* being different registers is most efficiently expressed */ \
/* as -((-L) - R). */ \
__ fneg(kScratchDoubleReg, left_reg); \
if (kScratchDoubleReg == right_reg) { \
__ fadd(result_reg, kScratchDoubleReg, right_reg); \
} else { \
__ fsub(result_reg, kScratchDoubleReg, right_reg); \
} \
__ fneg(result_reg, result_reg); \
__ b(&done); \
\
__ bind(&return_nan); \
/* If left or right are NaN, fadd propagates the appropriate one.*/ \
__ fadd(result_reg, left_reg, right_reg); \
__ b(&done); \
\
__ bind(&return_right); \
if (right_reg != result_reg) { \
__ fmr(result_reg, right_reg); \
} \
__ b(&done); \
\
__ bind(&return_left); \
if (left_reg != result_reg) { \
__ fmr(result_reg, left_reg); \
} \
__ bind(&done); \
} while (0)
#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
......@@ -1693,10 +1608,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
break;
case kPPC_MaxDouble:
ASSEMBLE_FLOAT_MAX();
__ MaxF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), kScratchDoubleReg);
break;
case kPPC_MinDouble:
ASSEMBLE_FLOAT_MIN();
__ MinF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), kScratchDoubleReg);
break;
case kPPC_AbsDouble:
ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
......
......@@ -808,8 +808,6 @@ UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
UNIMPLEMENTED_FP_BINOP(f32_div)
UNIMPLEMENTED_FP_BINOP(f32_min)
UNIMPLEMENTED_FP_BINOP(f32_max)
UNIMPLEMENTED_FP_BINOP(f32_copysign)
UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
......@@ -822,8 +820,6 @@ UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
UNIMPLEMENTED_FP_BINOP(f64_mul)
UNIMPLEMENTED_FP_BINOP(f64_div)
UNIMPLEMENTED_FP_BINOP(f64_min)
UNIMPLEMENTED_FP_BINOP(f64_max)
UNIMPLEMENTED_FP_BINOP(f64_copysign)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
......@@ -876,6 +872,14 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
// return_val, return_type)
#define BINOP_LIST(V) \
V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment