Commit 1a5adf89 authored by Clemens Backes, committed by V8 LUCI CQ

[x64] Remove "fake conditions" which are rarely used

Those conditions are rarely used and not properly supported everywhere.
If needed, we should duplicate methods instead, or pass a
{base::Optional<Condition>}.

This is a follow-up to https://crrev.com/c/3629129.
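For illustration, a minimal standalone sketch (not V8 code) of the {base::Optional<Condition>} alternative mentioned above: instead of reserving fake always/never enumerators, a caller that may or may not have a condition passes an optional, and the emitter branches on its presence. The Condition values and the EmitJump helper below are hypothetical, and base::Optional is modelled with std::optional.

#include <iostream>
#include <optional>

// Hypothetical stand-in for the x64 Condition enum (real codes are 0..15).
enum Condition { equal = 4, not_equal = 5, less = 12, greater = 15 };

// Sketch: one emitter takes an optional condition; absence means "always".
void EmitJump(std::optional<Condition> cc, const char* label) {
  if (!cc.has_value()) {
    std::cout << "jmp " << label << "\n";  // unconditional jump
    return;
  }
  std::cout << "jcc(" << *cc << ") " << label << "\n";  // conditional jump
}

int main() {
  EmitJump(std::nullopt, "loop_check");  // was: j(always, &loop_check)
  EmitJump(equal, "skip");               // real condition, unchanged
}

For TurboAssembler::Jump, this CL takes the other route named above: the defaulted {Condition cc = always} parameter is split into an unconditional overload and one that requires a real condition (see the header diff below).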

R=tebbi@chromium.org
CC=sroettger@chromium.org

Bug: v8:12425
Change-Id: Ia67c3d4f575b0f7dd0ae125971959cf68f2fefc7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3629553
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80395}
parent 31d78380
@@ -1254,7 +1254,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     Label loop_header;
     Label loop_check;
     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-    __ j(always, &loop_check, Label::kNear);
+    __ jmp(&loop_check, Label::kNear);
     __ bind(&loop_header);
     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
     __ Push(kInterpreterAccumulatorRegister);
@@ -1068,15 +1068,8 @@ void Assembler::cdq() {
 }
 void Assembler::cmovq(Condition cc, Register dst, Register src) {
-  if (cc == always) {
-    movq(dst, src);
-    return;
-  } else if (cc == never) {
-    return;
-  }
   // No need to check CpuInfo for CMOV support, it's a required part of the
   // 64-bit architecture.
-  DCHECK_GE(cc, 0);  // Use mov for unconditional moves.
+  DCHECK_LE(0, cc);  // Check for standard and degenerate 'no_condition'.
   EnsureSpace ensure_space(this);
   // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
@@ -1086,13 +1079,8 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
 }
 void Assembler::cmovq(Condition cc, Register dst, Operand src) {
-  if (cc == always) {
-    movq(dst, src);
-    return;
-  } else if (cc == never) {
-    return;
-  }
-  DCHECK_GE(cc, 0);
+  DCHECK_LE(0, cc);  // Check for standard and degenerate 'no_condition'.
   EnsureSpace ensure_space(this);
   // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
@@ -1102,13 +1090,8 @@ void Assembler::cmovq(Condition cc, Register dst, Operand src) {
 }
 void Assembler::cmovl(Condition cc, Register dst, Register src) {
-  if (cc == always) {
-    movl(dst, src);
-    return;
-  } else if (cc == never) {
-    return;
-  }
-  DCHECK_GE(cc, 0);
+  DCHECK_LE(0, cc);  // Check for standard and degenerate 'no_condition'.
   EnsureSpace ensure_space(this);
   // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
@@ -1118,13 +1101,8 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
 }
 void Assembler::cmovl(Condition cc, Register dst, Operand src) {
-  if (cc == always) {
-    movl(dst, src);
-    return;
-  } else if (cc == never) {
-    return;
-  }
-  DCHECK_GE(cc, 0);
+  DCHECK_LE(0, cc);  // Check for standard and degenerate 'no_condition'.
   EnsureSpace ensure_space(this);
   // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
@@ -1362,12 +1340,6 @@ void Assembler::int3() {
 }
 void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
-  if (cc == always) {
-    jmp(L, distance);
-    return;
-  } else if (cc == never) {
-    return;
-  }
   EnsureSpace ensure_space(this);
   DCHECK(is_uint4(cc));
   if (L->is_bound()) {
@@ -1447,12 +1419,6 @@ void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
 }
 void Assembler::j(Condition cc, Handle<CodeT> target, RelocInfo::Mode rmode) {
-  if (cc == always) {
-    jmp(target, rmode);
-    return;
-  } else if (cc == never) {
-    return;
-  }
   EnsureSpace ensure_space(this);
   DCHECK(is_uint4(cc));
   // 0000 1111 1000 tttn #32-bit disp.
@@ -2150,10 +2116,6 @@ void Assembler::ud2() {
 }
 void Assembler::setcc(Condition cc, Register reg) {
-  if (cc > last_condition) {
-    movb(reg, Immediate(cc == always ? 1 : 0));
-    return;
-  }
   EnsureSpace ensure_space(this);
   DCHECK(is_uint4(cc));
   if (!reg.is_byte_register()) {
@@ -83,10 +83,6 @@ enum Condition {
   less_equal = 14,
   greater = 15,
-  // Fake conditions that are handled by the
-  // opcodes using them.
-  always = 16,
-  never = 17,
   // aliases
   carry = below,
   not_carry = above_equal,
@@ -94,7 +90,6 @@ enum Condition {
   not_zero = not_equal,
   sign = negative,
   not_sign = positive,
-  last_condition = greater
 };
 // Returns the equivalent of !cc.
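As an aside on the enum above: the x86/x64 condition codes are laid out so that each code and its negation differ only in the lowest bit, which is the usual way "the equivalent of !cc" can be computed. A minimal standalone sketch, using a hypothetical subset of the codes (the values match the hardware encoding; the removed always/never values 16/17 are not encodable in jcc/setcc/cmovcc, hence the DCHECK(is_uint4(cc)) checks above):

#include <cassert>

// Hypothetical subset of the x64 condition codes (hardware encoding).
enum Condition {
  overflow = 0,
  no_overflow = 1,
  below = 2,
  above_equal = 3,
  equal = 4,
  not_equal = 5,
  less = 12,
  greater_equal = 13,
  less_equal = 14,
  greater = 15
};

// Negation flips the lowest bit of the condition code.
inline Condition NegateCondition(Condition cc) {
  return static_cast<Condition>(cc ^ 1);
}

int main() {
  assert(NegateCondition(equal) == not_equal);
  assert(NegateCondition(below) == above_equal);
  assert(NegateCondition(less) == greater_equal);
  return 0;
}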
@@ -1817,6 +1817,20 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
   jmp(kScratchRegister);
 }
+void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
+  DCHECK_IMPLIES(
+      options().isolate_independent_code,
+      Builtins::IsIsolateIndependentBuiltin(FromCodeT(*code_object)));
+  if (options().inline_offheap_trampolines) {
+    Builtin builtin = Builtin::kNoBuiltinId;
+    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
+      TailCallBuiltin(builtin);
+      return;
+    }
+  }
+  jmp(code_object, rmode);
+}
 void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
                           Condition cc) {
   DCHECK_IMPLIES(
@@ -1826,10 +1840,7 @@ void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
     Builtin builtin = Builtin::kNoBuiltinId;
     if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
       Label skip;
-      if (cc != always) {
-        if (cc == never) return;
-        j(NegateCondition(cc), &skip, Label::kNear);
-      }
+      j(NegateCondition(cc), &skip, Label::kNear);
       TailCallBuiltin(builtin);
       bind(&skip);
       return;
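The replacement above uses the standard negate-and-skip idiom: to take a tail call only when cc holds, branch over it when the negated condition holds, so no fake always/never handling is needed. A toy standalone sketch (not V8 code; the emitted text and the builtin name are made up):

#include <cstdio>

enum Condition { equal = 4, not_equal = 5 };  // hypothetical subset

Condition NegateCondition(Condition cc) {  // x86: flip the lowest bit
  return static_cast<Condition>(cc ^ 1);
}

// Emit a tail call to `target` that is taken only when `cc` holds.
void EmitConditionalTailCall(Condition cc, const char* target) {
  std::printf("  j%-2d skip\n", NegateCondition(cc));  // jump over on !cc
  std::printf("  jmp %s\n", target);                   // the tail call
  std::printf("skip:\n");
}

int main() { EmitConditionalTailCall(equal, "SomeBuiltin"); }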
@@ -416,8 +416,8 @@ class V8_EXPORT_PRIVATE TurboAssembler
   void Jump(Address destination, RelocInfo::Mode rmode);
   void Jump(const ExternalReference& reference);
   void Jump(Operand op);
-  void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
-            Condition cc = always);
+  void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode);
+  void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode, Condition cc);
   void BailoutIfDeoptimized(Register scratch);
   void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
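A toy model (not V8 code) of the overload split declared above: the unconditional Jump covers call sites that previously relied on the defaulted cc = always, while the conditional overload requires a real condition code, mirroring the DCHECK(is_uint4(cc)) contract in the assembler:

#include <cassert>
#include <cstdio>

enum Condition { equal = 4, not_equal = 5 };  // hypothetical subset

struct ToyAssembler {
  // Unconditional jump: what callers of the old `cc = always` default want.
  void Jump(const char* target) { std::printf("jmp %s\n", target); }
  // Conditional jump: only real x86 condition codes (0..15) are accepted.
  void Jump(const char* target, Condition cc) {
    assert(static_cast<unsigned>(cc) <= 15);  // mirrors DCHECK(is_uint4(cc))
    std::printf("j%d %s\n", static_cast<int>(cc), target);
  }
};

int main() {
  ToyAssembler masm;
  masm.Jump("done");             // was: Jump(done, always)
  masm.Jump("done", not_equal);  // unchanged for real conditions
}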
@@ -261,6 +261,19 @@ void RegisterEagerDeopt(MaglevCodeGenState* code_gen_state,
   }
 }
+void EmitEagerDeopt(MaglevCodeGenState* code_gen_state,
+                    EagerDeoptInfo* deopt_info) {
+  RegisterEagerDeopt(code_gen_state, deopt_info);
+  __ RecordComment("-- Jump to eager deopt");
+  __ jmp(&deopt_info->deopt_entry_label);
+}
+template <typename NodeT>
+void EmitEagerDeopt(MaglevCodeGenState* code_gen_state, NodeT* node) {
+  STATIC_ASSERT(NodeT::kProperties.can_eager_deopt());
+  EmitEagerDeopt(code_gen_state, node->eager_deopt_info());
+}
 void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
                       EagerDeoptInfo* deopt_info) {
   RegisterEagerDeopt(code_gen_state, deopt_info);
@@ -1148,7 +1161,7 @@ void Deopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
                          const ProcessingState& state) {}
 void Deopt::GenerateCode(MaglevCodeGenState* code_gen_state,
                          const ProcessingState& state) {
-  EmitEagerDeoptIf(always, code_gen_state, this);
+  EmitEagerDeopt(code_gen_state, this);
 }
 void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state,