Commit 613c2402 authored by Lu Yahan, committed by V8 LUCI CQ

[riscv][masm] Move tiering logic to macro-assembler

Port commit ca33c73e
Port commit 8b4272c2

Also [compiler] Remove delayed string constants
Port commit de04959f

Bug: v8:7700

Change-Id: I28e876231e580710354876a6841e15ca2b35e372
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3826517
Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
Commit-Queue: ji qiu <qiuji@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#82415}
parent 699642a6
@@ -163,15 +163,8 @@ Operand Operand::EmbeddedNumber(double value) {
   int32_t smi;
   if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
   Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
-  result.is_heap_object_request_ = true;
-  result.value_.heap_object_request = HeapObjectRequest(value);
-  return result;
-}
-
-Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
-  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
-  result.is_heap_object_request_ = true;
-  result.value_.heap_object_request = HeapObjectRequest(str);
+  result.is_heap_number_request_ = true;
+  result.value_.heap_number_request = HeapNumberRequest(value);
   return result;
 }
@@ -185,21 +178,12 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
   offset_ = unit * multiplier + offset_addend;
 }
 
-void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
-  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
-  for (auto& request : heap_object_requests_) {
-    Handle<HeapObject> object;
-    switch (request.kind()) {
-      case HeapObjectRequest::kHeapNumber:
-        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+void Assembler::AllocateAndInstallRequestedHeapNumbers(Isolate* isolate) {
+  DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
+  for (auto& request : heap_number_requests_) {
+    Handle<HeapObject> object =
+        isolate->factory()->NewHeapNumber<AllocationType::kOld>(
             request.heap_number());
-        break;
-      case HeapObjectRequest::kStringConstant:
-        const StringConstantBase* str = request.string();
-        CHECK_NOT_NULL(str);
-        object = str->AllocateStringConstant(isolate);
-        break;
-    }
     Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
     set_target_value_at(pc, reinterpret_cast<uintptr_t>(object.location()));
   }
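The hunk above collapses the assembler's two-kind HeapObjectRequest machinery down to heap numbers only, which is all that remains once delayed string constants are gone. A minimal, self-contained analogue of the record-then-patch flow it implements; every name below is a simplified stand-in for illustration, not V8's real API:

#include <cstdint>
#include <cstring>
#include <deque>
#include <vector>

struct HeapNumberRequest {
  double value;   // number to materialize once a heap is available
  size_t offset;  // patch site inside the code buffer
};

class MiniAssembler {
 public:
  // Emit a pointer-sized placeholder; the real address is unknown until
  // code installation (mirrors li() with a pending heap-number request).
  void LoadEmbeddedNumber(double value) {
    requests_.push_back({value, buffer_.size()});
    uintptr_t placeholder = 0;
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&placeholder);
    buffer_.insert(buffer_.end(), p, p + sizeof(placeholder));
  }

  // Analogue of AllocateAndInstallRequestedHeapNumbers(): allocate each
  // number, then patch its address over the placeholder. The real code
  // patches the instruction stream via set_target_value_at() instead.
  void InstallRequestedHeapNumbers(std::deque<double>* heap) {
    for (const HeapNumberRequest& request : requests_) {
      heap->push_back(request.value);  // deque: stable element addresses
      uintptr_t address = reinterpret_cast<uintptr_t>(&heap->back());
      std::memcpy(buffer_.data() + request.offset, &address, sizeof(address));
    }
  }

 private:
  std::vector<uint8_t> buffer_;
  std::vector<HeapNumberRequest> requests_;
};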
@@ -253,7 +237,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
   DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
 
-  AllocateAndInstallRequestedHeapObjects(isolate);
+  AllocateAndInstallRequestedHeapNumbers(isolate);
 
   // Set up code descriptor.
   // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
@@ -95,7 +95,6 @@ class Operand {
   }
 
   static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
-  static Operand EmbeddedStringConstant(const StringConstantBase* str);
 
   // Register.
   V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
@@ -104,23 +103,23 @@ class Operand {
   V8_INLINE bool is_reg() const { return rm_.is_valid(); }
 
   inline intptr_t immediate() const {
     DCHECK(!is_reg());
-    DCHECK(!IsHeapObjectRequest());
+    DCHECK(!IsHeapNumberRequest());
     return value_.immediate;
   }
 
   bool IsImmediate() const { return !rm_.is_valid(); }
 
-  HeapObjectRequest heap_object_request() const {
-    DCHECK(IsHeapObjectRequest());
-    return value_.heap_object_request;
+  HeapNumberRequest heap_number_request() const {
+    DCHECK(IsHeapNumberRequest());
+    return value_.heap_number_request;
   }
 
-  bool IsHeapObjectRequest() const {
-    DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
-    DCHECK_IMPLIES(is_heap_object_request_,
+  bool IsHeapNumberRequest() const {
+    DCHECK_IMPLIES(is_heap_number_request_, IsImmediate());
+    DCHECK_IMPLIES(is_heap_number_request_,
                    rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
                        rmode_ == RelocInfo::CODE_TARGET);
-    return is_heap_object_request_;
+    return is_heap_number_request_;
   }
 
   Register rm() const { return rm_; }
@@ -131,10 +130,10 @@ class Operand {
   Register rm_;
   union Value {
     Value() {}
-    HeapObjectRequest heap_object_request;  // if is_heap_object_request_
+    HeapNumberRequest heap_number_request;  // if is_heap_number_request_
     intptr_t immediate;                     // otherwise
   } value_;                                 // valid if rm_ == no_reg
-  bool is_heap_object_request_ = false;
+  bool is_heap_number_request_ = false;
   RelocInfo::Mode rmode_;
 
   friend class Assembler;
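The DCHECKs above enforce that value_ is a discriminated union with is_heap_number_request_ as its tag: the request member may only be read while the tag is set, and immediate() only while it is clear. A tiny standalone analogue of that invariant (illustrative stand-ins, not V8 code):

#include <cassert>
#include <cstdint>

struct HeapNumberRequest {
  double value;
};

class Operand {
 public:
  explicit Operand(intptr_t imm) { value_.immediate = imm; }
  explicit Operand(HeapNumberRequest req) : is_heap_number_request_(true) {
    value_.heap_number_request = req;
  }
  intptr_t immediate() const {
    assert(!is_heap_number_request_);  // mirrors DCHECK(!IsHeapNumberRequest())
    return value_.immediate;
  }
  HeapNumberRequest heap_number_request() const {
    assert(is_heap_number_request_);   // mirrors DCHECK(IsHeapNumberRequest())
    return value_.heap_number_request;
  }

 private:
  union Value {
    Value() {}
    HeapNumberRequest heap_number_request;  // active iff the tag below is set
    intptr_t immediate;                     // active otherwise
  } value_;
  bool is_heap_number_request_ = false;     // the discriminant
};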
@@ -801,7 +800,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
  private:
   ConstantPool constpool_;
 
-  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+  void AllocateAndInstallRequestedHeapNumbers(Isolate* isolate);
 
   int WriteCodeComments();
@@ -91,6 +91,145 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
   return bytes;
 }
 
+#define __ ACCESS_MASM(masm)
+
+namespace {
+
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+                                      Register optimized_code_entry,
+                                      Register scratch1, Register scratch2) {
+  // ----------- S t a t e -------------
+  //  -- a0 : actual argument count
+  //  -- a3 : new target (preserved for callee if needed, and caller)
+  //  -- a1 : target function (preserved for callee if needed, and caller)
+  // -----------------------------------
+  ASM_CODE_COMMENT(masm);
+  DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
+
+  Register closure = a1;
+  Label heal_optimized_code_slot;
+
+  // If the optimized code is cleared, go to runtime to update the optimization
+  // marker field.
+  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+                   &heal_optimized_code_slot);
+
+  // Check if the optimized code is marked for deopt. If it is, call the
+  // runtime to clear it.
+  __ JumpIfCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch1,
+                                          &heal_optimized_code_slot);
+
+  // Optimized code is good, get it into the closure and link the closure into
+  // the optimized functions list, then tail call the optimized code.
+  // The feedback vector is no longer used, so re-use it as a scratch
+  // register.
+  __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
+
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ LoadCodeObjectEntry(a2, optimized_code_entry);
+  __ Jump(a2);
+
+  // Optimized code slot contains deoptimized code or code is cleared and
+  // optimized code marker isn't updated. Evict the code, update the marker
+  // and re-enter the closure's code.
+  __ bind(&heal_optimized_code_slot);
+  __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
+}
+
+}  // namespace
+
+#ifdef V8_ENABLE_DEBUG_CODE
+void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
+  if (FLAG_debug_code) {
+    GetObjectType(object, scratch, scratch);
+    Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
+           Operand(FEEDBACK_VECTOR_TYPE));
+  }
+}
+#endif  // V8_ENABLE_DEBUG_CODE
+
+void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
+    Register optimized_code, Register closure) {
+  ASM_CODE_COMMENT(this);
+  DCHECK(!AreAliased(optimized_code, closure));
+  // Store code entry in the closure.
+  StoreTaggedField(optimized_code,
+                   FieldMemOperand(closure, JSFunction::kCodeOffset));
+  RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
+                   kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+                   SmiCheck::kOmit);
+}
+
+void MacroAssembler::GenerateTailCallToReturnedCode(
+    Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- a0 : actual argument count
+  //  -- a1 : target function (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(this, StackFrame::INTERNAL);
+    // Push a copy of the target function, the new target and the actual
+    // argument count.
+    // Push function as parameter to the runtime call.
+    SmiTag(kJavaScriptCallArgCountRegister);
+    Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+         kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
+
+    CallRuntime(function_id, 1);
+    // Use the return value before restoring a0
+    AddWord(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Restore target function, new target and actual argument count.
+    Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+        kJavaScriptCallArgCountRegister);
+    SmiUntag(kJavaScriptCallArgCountRegister);
+  }
+
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  Jump(a2);
+}
+
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or a tiering state that needs to be processed.
+void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
+    Register optimization_state, Register feedback_vector,
+    Label* has_optimized_code_or_state) {
+  ASM_CODE_COMMENT(this);
+  DCHECK(!AreAliased(optimization_state, feedback_vector));
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Lhu(optimization_state,
+      FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  And(scratch, optimization_state,
+      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+  Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+    Register optimization_state, Register feedback_vector) {
+  ASM_CODE_COMMENT(this);
+  DCHECK(!AreAliased(optimization_state, feedback_vector));
+  UseScratchRegisterScope temps(this);
+  temps.Include(t0, t1);
+  Label maybe_has_optimized_code;
+  // Check if optimized code is available.
+  {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    And(scratch, optimization_state,
+        Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
+    Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg),
+           Label::Distance::kNear);
+  }
+  GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
+
+  bind(&maybe_has_optimized_code);
+  Register optimized_code_entry = optimization_state;
+  LoadAnyTaggedField(
+      optimized_code_entry,
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kMaybeOptimizedCodeOffset));
+  TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(),
+                            temps.Acquire());
+}
+
 void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
   LoadWord(destination,
            MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
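The split between the two new helpers keeps the interpreter entry fast: LoadTieringStateAndJumpIfNeedsProcessing is one Lhu of the feedback-vector flags, one And against the combined mask, and one branch; only the slow path then distinguishes a pending tiering request from available optimized code. In scalar form (the mask values below are assumptions made for the sketch; the real constants live on FeedbackVector):

#include <cstdint>

// Assumed bit layout for illustration only.
constexpr uint16_t kTieringStateIsAnyRequestMask = 0x0007;
constexpr uint16_t kHasOptimizedCodeMask = 0x0008;
constexpr uint16_t kHasOptimizedCodeOrTieringStateIsAnyRequestMask =
    kHasOptimizedCodeMask | kTieringStateIsAnyRequestMask;

// LoadTieringStateAndJumpIfNeedsProcessing in scalar form: one load (Lhu),
// one And, one branch-if-nonzero to has_optimized_code_or_state.
bool NeedsProcessing(uint16_t flags) {
  return (flags & kHasOptimizedCodeOrTieringStateIsAnyRequestMask) != 0;
}

// MaybeOptimizeCodeOrTailCallOptimizedCodeSlot's follow-up split: a pending
// tiering request tail-calls Runtime::kCompileOptimized; otherwise the slot
// must hold optimized code, which TailCallOptimizedCodeSlot enters.
enum class SlowPath { kCompileOptimized, kTailCallOptimizedCode };

SlowPath Dispatch(uint16_t flags) {
  return (flags & kTieringStateIsAnyRequestMask) != 0
             ? SlowPath::kCompileOptimized
             : SlowPath::kTailCallOptimizedCode;
}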
@@ -1946,11 +2085,6 @@ void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
   li(dst, Operand(value), mode);
 }
 
-void TurboAssembler::li(Register dst, const StringConstantBase* string,
-                        LiFlags mode) {
-  li(dst, Operand::EmbeddedStringConstant(string), mode);
-}
-
 static inline int InstrCountForLiLower32Bit(int64_t value) {
   int64_t Hi20 = ((value + 0x800) >> 12);
   int64_t Lo12 = value << 52 >> 52;
@@ -2000,8 +2134,8 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
     }
   } else if (MustUseReg(j.rmode())) {
     int64_t immediate;
-    if (j.IsHeapObjectRequest()) {
-      RequestHeapObject(j.heap_object_request());
+    if (j.IsHeapNumberRequest()) {
+      RequestHeapNumber(j.heap_number_request());
       immediate = 0;
     } else {
       immediate = j.immediate();
@@ -6183,6 +6317,6 @@ void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
     push(receiver);
   }
 }
-
+#undef __
 }  // namespace internal
 }  // namespace v8
@@ -206,8 +206,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void li(Register dst, Handle<HeapObject> value,
           RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
   void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
-  void li(Register dst, const StringConstantBase* string,
-          LiFlags mode = OPTIMIZE_SIZE);
 
   void LoadFromConstantsTable(Register destination, int constant_index) final;
   void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
@@ -1337,6 +1335,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // Must preserve the result register.
   void PopStackHandler();
 
+  // Tiering support.
+  void AssertFeedbackVector(Register object,
+                            Register scratch) NOOP_UNLESS_DEBUG_CODE;
+  void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
+                                           Register closure);
+  void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
+  void LoadTieringStateAndJumpIfNeedsProcessing(
+      Register optimization_state, Register feedback_vector,
+      Label* has_optimized_code_or_state);
+  void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
+                                                    Register feedback_vector);
+
   // -------------------------------------------------------------------------
   // Support functions.
@@ -101,9 +101,6 @@ class RiscvOperandConverter final : public InstructionOperandConverter {
       // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
      //    maybe not done on arm due to const pool ??
      break;
-    case Constant::kDelayedStringConstant:
-      return Operand::EmbeddedStringConstant(
-          constant.ToDelayedStringConstant());
    case Constant::kRpoNumber:
      UNREACHABLE();  // TODO(titzer): RPO immediates
  }
@@ -4501,9 +4498,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
     case Constant::kExternalReference:
       __ li(dst, src.ToExternalReference());
       break;
-    case Constant::kDelayedStringConstant:
-      __ li(dst, src.ToDelayedStringConstant());
-      break;
     case Constant::kHeapObject: {
       Handle<HeapObject> src_object = src.ToHeapObject();
       RootIndex index;
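With delayed string constants removed, string constants are allocated during compilation and presumably reach the backend as ordinary Constant::kHeapObject values, so both switches above simply lose a case. Sketched shape of the remaining dispatch (the enum and payload are stand-ins, not the real RiscvOperandConverter):

#include <cstdint>
#include <cstdlib>

enum class ConstantKind { kInt32, kExternalReference, kHeapObject, kRpoNumber };

uint64_t ToImmediate(ConstantKind kind, uint64_t payload) {
  switch (kind) {
    case ConstantKind::kInt32:
    case ConstantKind::kExternalReference:
      return payload;
    case ConstantKind::kHeapObject:
      // String constants land here now, as ordinary pre-allocated objects;
      // there is no kDelayedStringConstant arm left to special-case.
      return payload;
    case ConstantKind::kRpoNumber:
      std::abort();  // unreachable in this context, as in the real converter
  }
  std::abort();
}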