Commit 0aef4bc7 authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64] Port Improve gap resolver algorithm

Port a77dd89e

Bug: v8:5210, chromium:1269989, chromium:1313647
Change-Id: I11d223d93d98b22b95324efd2c5a8ebee7fb1e21
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3677327
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
Commit-Queue: ji qiu <qiuji@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#80903}
parent f1c4fd39
@@ -1358,6 +1358,12 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
  }
}

struct MoveCycleState {
  // Whether a move in the cycle needs the scratch or double scratch register.
  bool pending_scratch_register_use = false;
  bool pending_double_scratch_register_use = false;
};

#define ACCESS_MASM(masm) masm->

}  // namespace internal
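For context: when a parallel move contains a cycle (e.g. a swap), the gap resolver must park one value in a temporary location while the remaining moves are assembled. The flags above tell the backend whether the cycle's own moves will need kScratchReg or kScratchDoubleReg, in which case the temporary cannot live there. A minimal comment sketch of the protocol, with an illustrative cycle that is not code from this commit:

// Illustrative swap cycle {a <- b, b <- a}; assembling it naively as
// "a = b; b = a" would lose a's value, so one side is parked first:
//   MoveToTempLocation(&a);      // temp = a
//   AssembleMove(&b, &a);        // a = b
//   MoveTempLocationTo(&b, rep); // b = temp
// The temp is kScratchReg (or kScratchDoubleReg) unless a move in the
// cycle itself needs that register, as recorded by the pending_*_use
// flags; in that case the value is spilled below the stack pointer.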
...
@@ -4068,6 +4068,94 @@ void CodeGenerator::PrepareForDeoptimizationExits(
  __ CheckTrampolinePoolQuick(total_size);
}
void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
  // Must be kept in sync with {MoveTempLocationTo}.
  DCHECK(!source->IsImmediate());
  auto rep = LocationOperand::cast(source)->representation();
  if ((IsFloatingPoint(rep) &&
       !move_cycle_.pending_double_scratch_register_use) ||
      (!IsFloatingPoint(rep) && !move_cycle_.pending_scratch_register_use)) {
    // The scratch register for this rep is available.
    int scratch_reg_code =
        !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code();
    AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code);
    AssembleMove(source, &scratch);
  } else {
    // The scratch register is blocked by pending moves. Use the stack instead.
    int new_slots = ElementSizeInPointers(rep);
    RiscvOperandConverter g(this, nullptr);
    if (source->IsRegister()) {
      __ Push(g.ToRegister(source));
    } else if (source->IsStackSlot() || source->IsFloatStackSlot() ||
               source->IsDoubleStackSlot()) {
      __ Ld(kScratchReg, g.ToMemOperand(source));
      __ Push(kScratchReg);
    } else {
      // Bump the stack pointer and assemble the move.
      int last_frame_slot_id =
          frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
      int sp_delta = frame_access_state_->sp_delta();
      int temp_slot = last_frame_slot_id + sp_delta + new_slots;
      __ Sub64(sp, sp, Operand(new_slots * kSystemPointerSize));
      AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
      AssembleMove(source, &temp);
    }
    frame_access_state()->IncreaseSPDelta(new_slots);
  }
}
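To make the temp_slot computation concrete, a worked example under assumed values (illustrative, not from the commit): suppose the frame has 8 total slots, so last_frame_slot_id = 7; suppose two words were already pushed during this cycle, so sp_delta = 2; and the value is a float64, so new_slots = 1. Then temp_slot = 7 + 2 + 1 = 10, which is exactly the slot addressed by the word that the following Sub64 reserves below the current stack pointer.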
void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
                                       MachineRepresentation rep) {
  // Must be kept in sync with {MoveToTempLocation}.
  if ((IsFloatingPoint(rep) &&
       !move_cycle_.pending_double_scratch_register_use) ||
      (!IsFloatingPoint(rep) && !move_cycle_.pending_scratch_register_use)) {
    // The value is parked in the scratch register for this rep.
    int scratch_reg_code =
        !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code();
    AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code);
    AssembleMove(&scratch, dest);
  } else {
    // The value was spilled below the stack pointer; move it back out.
    RiscvOperandConverter g(this, nullptr);
    int new_slots = ElementSizeInPointers(rep);
    frame_access_state()->IncreaseSPDelta(-new_slots);
    if (dest->IsRegister()) {
      __ Pop(g.ToRegister(dest));
    } else if (dest->IsStackSlot() || dest->IsFloatStackSlot() ||
               dest->IsDoubleStackSlot()) {
      __ Pop(kScratchReg);
      __ Sd(kScratchReg, g.ToMemOperand(dest));
    } else {
      int last_frame_slot_id =
          frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
      int sp_delta = frame_access_state_->sp_delta();
      int temp_slot = last_frame_slot_id + sp_delta + new_slots;
      AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
      AssembleMove(&temp, dest);
      __ Add64(sp, sp, Operand(new_slots * kSystemPointerSize));
    }
  }
  move_cycle_ = MoveCycleState();
}
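Taken together, {MoveToTempLocation} and {MoveTempLocationTo} implement the temporary-location protocol that the improved gap resolver of a77dd89e drives. A minimal C++ sketch of a driver, assuming a PerformCycle-style entry point; the function name and its shape are illustrative assumptions, not this commit's code:

// Illustrative driver (assumed shape, not part of this commit).
void PerformCycle(CodeGenerator* gen, std::vector<MoveOperands*>& cycle) {
  // 1. Tell the backend which scratch registers the cycle's own moves
  //    will clobber, so the temp location can avoid them.
  for (MoveOperands* move : cycle) gen->SetPendingMove(move);
  // 2. Park the first move's source in the temp location.
  MachineRepresentation rep =
      LocationOperand::cast(&cycle[0]->source())->representation();
  gen->MoveToTempLocation(&cycle[0]->source());
  // 3. Assemble every other move in the cycle normally.
  for (size_t i = 1; i < cycle.size(); ++i) {
    gen->AssembleMove(&cycle[i]->source(), &cycle[i]->destination());
  }
  // 4. Release the parked value into the remaining destination; this
  //    also resets move_cycle_ for the next cycle.
  gen->MoveTempLocationTo(&cycle[0]->destination(), rep);
}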
void CodeGenerator::SetPendingMove(MoveOperands* move) {
  MoveType::Type move_type =
      MoveType::InferMove(&move->source(), &move->destination());
  if (move_type == MoveType::kConstantToStack) {
    RiscvOperandConverter g(this, nullptr);
    Constant src = g.ToConstant(&move->source());
    // Storing a constant to a stack slot goes through kScratchReg unless
    // it is a plain int32/int64 immediate without relocation.
    if (move->destination().IsStackSlot() &&
        (RelocInfo::IsWasmReference(src.rmode()) ||
         (src.type() != Constant::kInt32 && src.type() != Constant::kInt64))) {
      move_cycle_.pending_scratch_register_use = true;
    }
  } else if (move_type == MoveType::kStackToStack) {
    // Stack-to-stack moves are staged through a scratch register.
    if (move->source().IsFPLocationOperand()) {
      move_cycle_.pending_double_scratch_register_use = true;
    } else {
      move_cycle_.pending_scratch_register_use = true;
    }
  }
}
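Read concretely, the classification above amounts to the following (an illustrative summary of the code as written, not an exhaustive table):

// Which moves block which temp register (illustrative):
//   kConstantToStack, Constant::kHeapObject -> pending_scratch_register_use
//   kStackToStack, tagged/integer operands  -> pending_scratch_register_use
//   kStackToStack, FP operands              -> pending_double_scratch_register_use
//   kRegisterToRegister, kRegisterToStack   -> no flag (no scratch needed)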
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  RiscvOperandConverter g(this, nullptr);
...
@@ -2404,6 +2404,23 @@ void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
  vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_s(LiftoffRegister dst,
                                                        LiftoffRegister src) {
  bailout(kSimd, "emit_i32x4_relaxed_trunc_f32x4_s");
}

void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst,
                                                        LiftoffRegister src) {
  bailout(kSimd, "emit_i32x4_relaxed_trunc_f32x4_u");
}

void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_s_zero(
    LiftoffRegister dst, LiftoffRegister src) {
  bailout(kSimd, "emit_i32x4_relaxed_trunc_f64x2_s_zero");
}

void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_u_zero(
    LiftoffRegister dst, LiftoffRegister src) {
  bailout(kSimd, "emit_i32x4_relaxed_trunc_f64x2_u_zero");
}
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  VU.set(kScratchReg, E64, m1);
...