Commit 54932703 authored by Junliang Yan, committed by V8 LUCI CQ

PPC/s390: [masm][cleanup] Refactor call related assembler options

Port 00746406

Original Commit Message:

    ... which affect how builtin calls are generated.

    This CL replaces the following boolean options
     - builtin_calls_as_table_load,
     - inline_offheap_trampolines,
     - short_builtin_calls,
     - use_pc_relative_calls_and_jumps,

    with an enum BuiltinCallJumpMode and a boolean option
    use_pc_relative_calls_and_jumps_for_mksnapshot.
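
For orientation, the enum presumably looks like the following sketch. It is reconstructed from the switch cases in the hunks below, not quoted from the main CL; the authoritative definition ships with the assembler options in CL 00746406.

enum class BuiltinCallJumpMode {
  // Materialize the absolute off-heap entry address in a register, then
  // call/jump through it.
  kAbsolute,
  // Emit pc-relative call/jump instructions; unreachable in this port, since
  // PPC/s390 never generate short builtin calls this way.
  kPCRelative,
  // Load the entry indirectly through the builtin entry table.
  kIndirect,
  // Snapshot generation: pc-relative when
  // use_pc_relative_calls_and_jumps_for_mksnapshot is set, indirect otherwise.
  kForMksnapshot,
};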

BUG=
LOG=N

Change-Id: I9ad83d27fc5b295ca4827e9608d4be10f7b10551
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3831638
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#82534}
parent e24efa1d
@@ -216,34 +216,6 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
   __ JumpIfNotSmi(value, target);
 }
-void BaselineAssembler::CallBuiltin(Builtin builtin) {
-  ASM_CODE_COMMENT_STRING(masm_,
-                          __ CommentForOffHeapTrampoline("call", builtin));
-  if (masm()->options().short_builtin_calls) {
-    // Generate pc-relative call.
-    __ CallBuiltin(builtin, al);
-  } else {
-    ScratchRegisterScope temps(this);
-    Register temp = temps.AcquireScratch();
-    __ LoadEntryFromBuiltin(builtin, temp);
-    __ Call(temp);
-  }
-}
-void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
-  ASM_CODE_COMMENT_STRING(masm_,
-                          __ CommentForOffHeapTrampoline("tail call", builtin));
-  if (masm()->options().short_builtin_calls) {
-    // Generate pc-relative call.
-    __ TailCallBuiltin(builtin);
-  } else {
-    ScratchRegisterScope temps(this);
-    Register temp = temps.AcquireScratch();
-    __ LoadEntryFromBuiltin(builtin, temp);
-    __ Jump(temp);
-  }
-}
 void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                       Label* target, Label::Distance) {
   ASM_CODE_COMMENT(masm_);
...
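
With the mode check gone from this file, the baseline assembler presumably just delegates to the macro assembler, which now owns the dispatch. A minimal sketch of the replacement under that assumption (the shared implementation is not part of this port's diff):

void BaselineAssembler::CallBuiltin(Builtin builtin) {
  // MacroAssembler::CallBuiltin switches on options().builtin_call_jump_mode
  // (see the macro-assembler hunks below), so no per-call-site check remains.
  __ CallBuiltin(builtin);
}

void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
  __ TailCallBuiltin(builtin);
}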
@@ -14,6 +14,11 @@ namespace baseline {
 #define __ basm_.
+// A builtin call/jump mode that is used when the short builtin calls feature
+// is not enabled.
+constexpr BuiltinCallJumpMode kFallbackBuiltinCallJumpModeForBaseline =
+    BuiltinCallJumpMode::kIndirect;
 void BaselineCompiler::Prologue() {
   ASM_CODE_COMMENT(&masm_);
   __ masm()->EnterFrame(StackFrame::BASELINE);
...
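
The fallback constant above names the mode baseline code uses when short (pc-relative) builtin calls are unavailable. A hedged sketch of the selection this implies, assuming the isolate exposes is_short_builtin_calls_enabled() as in upstream V8:

BuiltinCallJumpMode builtin_call_jump_mode =
    isolate->is_short_builtin_calls_enabled()
        ? BuiltinCallJumpMode::kPCRelative
        : kFallbackBuiltinCallJumpModeForBaseline;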
@@ -216,34 +216,6 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
   __ JumpIfNotSmi(value, target);
 }
-void BaselineAssembler::CallBuiltin(Builtin builtin) {
-  ASM_CODE_COMMENT_STRING(masm_,
-                          __ CommentForOffHeapTrampoline("call", builtin));
-  if (masm()->options().short_builtin_calls) {
-    // Generate pc-relative call.
-    __ CallBuiltin(builtin);
-  } else {
-    ScratchRegisterScope temps(this);
-    Register temp = temps.AcquireScratch();
-    __ LoadEntryFromBuiltin(builtin, temp);
-    __ Call(temp);
-  }
-}
-void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
-  ASM_CODE_COMMENT_STRING(masm_,
-                          __ CommentForOffHeapTrampoline("tail call", builtin));
-  if (masm()->options().short_builtin_calls) {
-    // Generate pc-relative call.
-    __ TailCallBuiltin(builtin);
-  } else {
-    ScratchRegisterScope temps(this);
-    Register temp = temps.AcquireScratch();
-    __ LoadEntryFromBuiltin(builtin, temp);
-    __ Jump(temp);
-  }
-}
 void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                       Label* target, Label::Distance) {
   ASM_CODE_COMMENT(masm_);
...
@@ -14,6 +14,11 @@ namespace baseline {
 #define __ basm_.
+// A builtin call/jump mode that is used when the short builtin calls feature
+// is not enabled.
+constexpr BuiltinCallJumpMode kFallbackBuiltinCallJumpModeForBaseline =
+    BuiltinCallJumpMode::kIndirect;
 void BaselineCompiler::Prologue() {
   // Enter the frame here, since CallBuiltin will override lr.
   __ masm()->EnterFrame(StackFrame::BASELINE);
...
@@ -166,26 +166,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                  Builtins::IsIsolateIndependentBuiltin(*code));
   Builtin builtin = Builtin::kNoBuiltinId;
-  bool target_is_builtin =
-      isolate()->builtins()->IsBuiltinHandle(code, &builtin);
-  if (root_array_available_ && options().isolate_independent_code) {
-    Label skip;
-    LoadU64(ip, EntryFromBuiltinAsOperand(code->builtin_id()), r0);
-    if (cond != al) b(NegateCondition(cond), &skip, cr);
-    Jump(ip);
-    bind(&skip);
-    return;
-  } else if (options().inline_offheap_trampolines && target_is_builtin) {
-    // Inline the trampoline.
-    Label skip;
-    RecordCommentForOffHeapTrampoline(builtin);
-    // Use ip directly instead of using UseScratchRegisterScope, as we do
-    // not preserve scratch registers across calls.
-    mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
-    if (cond != al) b(NegateCondition(cond), &skip, cr);
-    Jump(ip);
-    bind(&skip);
+  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
+    TailCallBuiltin(builtin, cond, cr);
     return;
   }
   int32_t target_index = AddCodeTarget(code);
@@ -246,23 +228,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   DCHECK_IMPLIES(options().isolate_independent_code,
                  Builtins::IsIsolateIndependentBuiltin(*code));
-  DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
-                 Builtins::IsIsolateIndependentBuiltin(*code));
   Builtin builtin = Builtin::kNoBuiltinId;
-  bool target_is_builtin =
-      isolate()->builtins()->IsBuiltinHandle(code, &builtin);
-  if ((target_is_builtin && options().builtin_calls_as_table_load) ||
-      (root_array_available_ && options().isolate_independent_code)) {
-    Label skip;
-    LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
-    if (cond != al) b(NegateCondition(cond), &skip);
-    Call(ip);
-    bind(&skip);
-    return;
-  } else if (options().inline_offheap_trampolines && target_is_builtin) {
-    // Inline the trampoline.
+  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
     CallBuiltin(builtin, cond);
     return;
   }
@@ -275,26 +243,84 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
   ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
   // Use ip directly instead of using UseScratchRegisterScope, as we do not
   // preserve scratch registers across calls.
-  if (options().builtin_calls_as_table_load) {
-    LoadEntryFromBuiltin(builtin, ip);
-  } else {
-    mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
-  }
-  Label skip;
-  if (cond != al) b(NegateCondition(cond), &skip);
-  Call(ip);
-  bind(&skip);
+  switch (options().builtin_call_jump_mode) {
+    case BuiltinCallJumpMode::kAbsolute: {
+      Label skip;
+      mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+      if (cond != al) b(NegateCondition(cond), &skip);
+      Call(ip);
+      bind(&skip);
+      break;
+    }
+    case BuiltinCallJumpMode::kPCRelative:
+      UNREACHABLE();
+    case BuiltinCallJumpMode::kIndirect: {
+      Label skip;
+      LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
+      if (cond != al) b(NegateCondition(cond), &skip);
+      Call(ip);
+      bind(&skip);
+      break;
+    }
+    case BuiltinCallJumpMode::kForMksnapshot: {
+      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
+        Handle<Code> code = isolate()->builtins()->code_handle(builtin);
+        int32_t code_target_index = AddCodeTarget(code);
+        Call(static_cast<Address>(code_target_index), RelocInfo::CODE_TARGET,
+             cond);
+      } else {
+        Label skip;
+        LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
+        if (cond != al) b(NegateCondition(cond), &skip);
+        Call(ip);
+        bind(&skip);
+      }
+      break;
+    }
+  }
 }
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
+                                     CRegister cr) {
   ASM_CODE_COMMENT_STRING(this,
                           CommentForOffHeapTrampoline("tail call", builtin));
-  if (options().builtin_calls_as_table_load) {
-    LoadEntryFromBuiltin(builtin, ip);
-  } else {
-    mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
-  }
-  Jump(ip);
+  // Use ip directly instead of using UseScratchRegisterScope, as we do not
+  // preserve scratch registers across calls.
+  switch (options().builtin_call_jump_mode) {
+    case BuiltinCallJumpMode::kAbsolute: {
+      Label skip;
+      mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+      if (cond != al) b(NegateCondition(cond), &skip, cr);
+      Jump(ip);
+      bind(&skip);
+      break;
+    }
+    case BuiltinCallJumpMode::kPCRelative:
+      UNREACHABLE();
+    case BuiltinCallJumpMode::kIndirect: {
+      Label skip;
+      LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
+      if (cond != al) b(NegateCondition(cond), &skip, cr);
+      Jump(ip);
+      bind(&skip);
+      break;
+    }
+    case BuiltinCallJumpMode::kForMksnapshot: {
+      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
+        Handle<Code> code = isolate()->builtins()->code_handle(builtin);
+        int32_t code_target_index = AddCodeTarget(code);
+        Jump(static_cast<intptr_t>(code_target_index), RelocInfo::CODE_TARGET,
+             cond, cr);
+      } else {
+        Label skip;
+        LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
+        if (cond != al) b(NegateCondition(cond), &skip, cr);
+        Jump(ip);
+        bind(&skip);
+      }
+      break;
+    }
+  }
 }
 
 void TurboAssembler::Drop(int count) {
@@ -779,17 +805,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
 #endif
   } else {
     auto builtin_index = Builtins::GetRecordWriteStub(fp_mode);
-    if (options().inline_offheap_trampolines ||
-        options().builtin_calls_as_table_load) {
-      RecordCommentForOffHeapTrampoline(builtin_index);
-      // Use ip directly instead of using UseScratchRegisterScope, as we do
-      // not preserve scratch registers across calls.
-      CallBuiltin(builtin_index, al);
-    } else {
-      Handle<Code> code_target =
-          isolate()->builtins()->code_handle(builtin_index);
-      Call(code_target, RelocInfo::CODE_TARGET);
-    }
+    CallBuiltin(builtin_index, al);
   }
 }
...
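
With the new default arguments (declared in the header hunk below), call sites can hand the condition to CallBuiltin/TailCallBuiltin instead of open-coding the skip-label pattern themselves. A usage sketch with hypothetical conditions, not taken from the CL:

// Conditional call: the branch-around-skip is emitted inside CallBuiltin.
__ CallBuiltin(Builtin::kRecordWriteSaveFP, eq);
// A conditional tail call on PPC can also name the condition register.
__ TailCallBuiltin(Builtin::kDoubleToI, lt, cr7);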
@@ -50,8 +50,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  public:
   using TurboAssemblerBase::TurboAssemblerBase;
-  void CallBuiltin(Builtin builtin, Condition cond);
-  void TailCallBuiltin(Builtin builtin);
+  void CallBuiltin(Builtin builtin, Condition cond = al);
+  void TailCallBuiltin(Builtin builtin, Condition cond = al,
+                       CRegister cr = cr7);
   void Popcnt32(Register dst, Register src);
   void Popcnt64(Register dst, Register src);
   // Converts the integer (untagged smi) in |src| to a double, storing
...
@@ -393,20 +393,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                  Builtins::IsIsolateIndependentBuiltin(*code));
   Builtin builtin = Builtin::kNoBuiltinId;
-  bool target_is_builtin =
-      isolate()->builtins()->IsBuiltinHandle(code, &builtin);
-  if ((options().inline_offheap_trampolines ||
-       options().builtin_calls_as_table_load) &&
-      target_is_builtin) {
-    // Inline the trampoline.
-    RecordCommentForOffHeapTrampoline(builtin);
-    mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
-    b(cond, ip);
+  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
+    TailCallBuiltin(builtin, cond);
     return;
   }
   DCHECK(RelocInfo::IsCodeTarget(rmode));
-  DCHECK(!options().builtin_calls_as_table_load);
   jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
 }
@@ -454,43 +445,69 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   DCHECK_IMPLIES(options().isolate_independent_code,
                  Builtins::IsIsolateIndependentBuiltin(*code));
   Builtin builtin = Builtin::kNoBuiltinId;
-  bool target_is_builtin =
-      isolate()->builtins()->IsBuiltinHandle(code, &builtin);
-  if (target_is_builtin && (options().inline_offheap_trampolines ||
-                            options().builtin_calls_as_table_load)) {
-    // Inline the trampoline.
+  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
     CallBuiltin(builtin);
     return;
   }
   DCHECK(code->IsExecutable());
   DCHECK(RelocInfo::IsCodeTarget(rmode));
-  DCHECK(!options().builtin_calls_as_table_load);
   call(code, rmode);
 }
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
   ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
   // Use ip directly instead of using UseScratchRegisterScope, as we do not
   // preserve scratch registers across calls.
-  if (options().builtin_calls_as_table_load) {
-    LoadEntryFromBuiltin(builtin, ip);
-  } else {
-    mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
-  }
-  Call(ip);
+  switch (options().builtin_call_jump_mode) {
+    case BuiltinCallJumpMode::kAbsolute: {
+      mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+      Call(ip);
+      break;
+    }
+    case BuiltinCallJumpMode::kPCRelative:
+      UNREACHABLE();
+    case BuiltinCallJumpMode::kIndirect:
+      LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
+      Call(ip);
+      break;
+    case BuiltinCallJumpMode::kForMksnapshot: {
+      Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+      call(code, RelocInfo::CODE_TARGET);
+      break;
+    }
+  }
 }
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
   ASM_CODE_COMMENT_STRING(this,
                           CommentForOffHeapTrampoline("tail call", builtin));
-  if (options().builtin_calls_as_table_load) {
-    LoadEntryFromBuiltin(builtin, ip);
-  } else {
-    mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
-  }
-  b(ip);
+  // Use ip directly instead of using UseScratchRegisterScope, as we do not
+  // preserve scratch registers across calls.
+  switch (options().builtin_call_jump_mode) {
+    case BuiltinCallJumpMode::kAbsolute: {
+      mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+      Jump(ip, cond);
+      break;
+    }
+    case BuiltinCallJumpMode::kPCRelative:
+      UNREACHABLE();
+    case BuiltinCallJumpMode::kIndirect:
+      LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
+      Jump(ip, cond);
+      break;
+    case BuiltinCallJumpMode::kForMksnapshot: {
+      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
+        Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+        jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
+      } else {
+        LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
+        Jump(ip, cond);
+      }
+      break;
+    }
+  }
 }
 
 void TurboAssembler::Drop(int count) {
@@ -1024,15 +1041,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
 #endif
   } else {
     auto builtin_index = Builtins::GetRecordWriteStub(fp_mode);
-    if (options().inline_offheap_trampolines ||
-        options().builtin_calls_as_table_load) {
-      RecordCommentForOffHeapTrampoline(builtin_index);
-      CallBuiltin(builtin_index);
-    } else {
-      Handle<Code> code_target =
-          isolate()->builtins()->code_handle(builtin_index);
-      Call(code_target, RelocInfo::CODE_TARGET);
-    }
+    CallBuiltin(builtin_index);
   }
 }
...
@@ -45,8 +45,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  public:
   using TurboAssemblerBase::TurboAssemblerBase;
-  void CallBuiltin(Builtin builtin);
-  void TailCallBuiltin(Builtin builtin);
+  void CallBuiltin(Builtin builtin, Condition cond = al);
+  void TailCallBuiltin(Builtin builtin, Condition cond = al);
   void AtomicCmpExchangeHelper(Register addr, Register output,
                                Register old_value, Register new_value,
                                int start, int end, int shift_amount, int offset,
...
@@ -381,8 +381,9 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
     std::memcpy(dst, reinterpret_cast<uint8_t*>(code.raw_metadata_start()),
                 code.raw_metadata_size());
   }
-  CHECK_IMPLIES(kMaxPCRelativeCodeRangeInMB,
-                raw_code_size <= kMaxPCRelativeCodeRangeInMB * MB);
+  CHECK_IMPLIES(
+      kMaxPCRelativeCodeRangeInMB,
+      static_cast<size_t>(raw_code_size) <= kMaxPCRelativeCodeRangeInMB * MB);
   // .. and the variable-size code section.
   uint8_t* const raw_code_start = blob_code + RawCodeOffset();
...
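
Besides the reflow, the CHECK_IMPLIES above now casts raw_code_size to size_t so both sides of the comparison use one unsigned type. A self-contained illustration of the invariant in plain C++, with assert standing in for V8's CHECK_IMPLIES and an assumed 128 MB pc-relative range:

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t MB = 1024 * 1024;
// Assumption for illustration only; a value of 0 would mean "no pc-relative
// calls", which satisfies the implication vacuously.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;

void CheckCodeFits(uint32_t raw_code_size) {
  // CHECK_IMPLIES(a, b) asserts !(a) || (b).
  assert(!kMaxPCRelativeCodeRangeInMB ||
         static_cast<size_t>(raw_code_size) <=
             kMaxPCRelativeCodeRangeInMB * MB);
}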