Commit 7095683a authored by Liu Yu, committed by V8 LUCI CQ

[osr] Load FeedbackVector::kFlagsOffset by load_halfword

The flags field is now 16 bits wide, so load it with halfword (16-bit) loads instead of full-word loads.

Bug: v8:12161
Change-Id: I5db5e05171281f27cce739c7b76e1d4b9ebf20b9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3602236
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Yu Liu <liuyu@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#80165}
parent 6c6fc948
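Why the width of the load matters: the flags field is only 16 bits wide, so a 32-bit read also picks up whatever happens to sit in the next two bytes. A minimal standalone C++ sketch of that hazard (illustrative only; the struct layout and field names are assumptions, not V8's actual object layout):

#include <cstdint>
#include <cstring>
#include <iostream>

// Hypothetical layout: a 16-bit flags field followed by unrelated data.
struct VectorHeader {
  uint16_t flags;      // the field this CL narrows the loads to
  uint16_t neighbour;  // adjacent bytes that a 32-bit load would also read
};

int main() {
  VectorHeader h{0x0000, 0xFFFF};

  uint16_t half;  // halfword load: reads the flags field only
  std::memcpy(&half, &h.flags, sizeof(half));

  uint32_t word;  // word load: reads flags plus the neighbouring field
  std::memcpy(&word, &h.flags, sizeof(word));

  // On a little-endian target this prints "0 ffff0000": a mask test against
  // the word value could see bits that are not part of the flags at all.
  std::cout << std::hex << half << " " << word << "\n";
  return 0;
}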
@@ -1034,8 +1034,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ ldr(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ ldrh(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ tst(
optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
......
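The hunk above (ARM, judging by the ldr/ldrh mnemonics) shows the common pattern: load the now-16-bit flags, test them against kHasOptimizedCodeOrTieringStateIsAnyRequestMask, and branch if any bit is set; the other ports below make the equivalent change with their own instructions. A rough C++ equivalent of the check (the mask value is a placeholder, not the real constant):

#include <cstdint>

// Placeholder; the real kHasOptimizedCodeOrTieringStateIsAnyRequestMask is
// defined by FeedbackVector and is not reproduced here.
constexpr uint16_t kAnyRequestMask = 0x00FF;

// Mirrors the generated sequence: halfword load, AND with the mask,
// branch if the result is non-zero.
bool NeedsProcessing(const uint16_t* flags_field) {
  uint16_t flags = *flags_field;          // __ ldrh(optimization_state, ...)
  return (flags & kAnyRequestMask) != 0;  // __ tst(...) + conditional branch
}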
@@ -1216,8 +1216,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ Ldr(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ Ldrh(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ TestAndBranchIfAnySet(
optimization_state,
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask,
......
@@ -44,8 +44,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
TNode<JSFunction> function, TNode<FeedbackVector> feedback_vector) {
Label fallthrough(this), may_have_optimized_code(this);
TNode<Uint32T> optimization_state =
LoadObjectField<Uint32T>(feedback_vector, FeedbackVector::kFlagsOffset);
TNode<Uint16T> optimization_state =
LoadObjectField<Uint16T>(feedback_vector, FeedbackVector::kFlagsOffset);
// Fall through if no optimization trigger or optimized code.
GotoIfNot(
......
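In the CSA builtin the load width is carried by the type parameter: LoadObjectField<Uint16T> emits a 16-bit load where LoadObjectField<Uint32T> emitted a 32-bit one. A loose plain-C++ analogy of picking the width by type (not CSA code; LoadField below is a made-up helper):

#include <cstdint>
#include <cstring>

// The template parameter selects the load width, the way the Uint16T
// argument to LoadObjectField does in CSA.
template <typename FieldT>
FieldT LoadField(const void* object, int offset) {
  FieldT value;
  std::memcpy(&value, static_cast<const char*>(object) + offset,
              sizeof(FieldT));
  return value;
}

// Usage sketch: auto flags = LoadField<uint16_t>(vector, kFlagsOffset);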
@@ -1001,14 +1001,15 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
// Store feedback_vector. We may need it if we need to load the optimize code
// slot entry.
__ movd(saved_feedback_vector, feedback_vector);
__ mov(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ mov_w(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
// Check if there is optimized code or a tiering state that needes to be
// processed.
__ test(optimization_state,
Immediate(
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
__ test_w(
optimization_state,
Immediate(
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
__ j(not_zero, has_optimized_code_or_state);
}
......
@@ -1010,8 +1010,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
// TODO(liuyu): Remove CHECK
CHECK_NE(t2, optimization_state);
CHECK_NE(t2, feedback_vector);
__ Ld_w(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ Ld_hu(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ And(
scratch, optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
@@ -1224,6 +1224,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ld_d(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
Label is_baseline;
GetSharedFunctionInfoBytecodeOrBaseline(
masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
@@ -1247,18 +1248,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
// Read off the optimization state in the feedback vector, and if there
// is optimized code or an tiering state, call that instead.
Register optimization_state = a4;
__ Ld_w(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
// Check if the optimized code slot is not empty or has a tiering state.
// Check the tiering state.
Label has_optimized_code_or_state;
__ andi(t0, optimization_state,
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
__ Branch(&has_optimized_code_or_state, ne, t0, Operand(zero_reg));
Register optimization_state = a4;
LoadTieringStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
......
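Besides narrowing the load, this trampoline hunk (and the matching trampoline hunks for the other ports below) drops the hand-written load/mask/branch sequence in favour of the shared LoadTieringStateAndJumpIfNeedsProcessing helper updated above. The shape of that refactor as a plain C++ sketch (names and mask value simplified; not the actual builtin code):

#include <cstdint>

constexpr uint16_t kAnyRequestMask = 0x00FF;  // placeholder value

// Shared helper, analogous to LoadTieringStateAndJumpIfNeedsProcessing:
// loads the 16-bit flags and reports whether the caller must bail out.
bool LoadTieringStateAndCheck(const uint16_t* flags_field,
                              uint16_t* optimization_state) {
  *optimization_state = *flags_field;  // halfword load (Ld_hu and friends)
  return (*optimization_state & kAnyRequestMask) != 0;
}

// Before: each trampoline inlined the load, the mask and the branch.
// After: it calls the helper and branches on the outcome.
void InterpreterEntrySketch(const uint16_t* flags_field) {
  uint16_t optimization_state;
  if (LoadTieringStateAndCheck(flags_field, &optimization_state)) {
    return;  // has_optimized_code_or_state: go process it instead
  }
  // ... fall through to the regular interpreter entry path ...
}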
@@ -1008,8 +1008,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
Register scratch = t6;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ lhu(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ And(
scratch, optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
@@ -1241,18 +1241,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
// Read off the optimization state in the feedback vector, and if there
// is optimized code or an tiering state, call that instead.
Register optimization_state = t0;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
// Check if the optimized code slot is not empty or has a tiering state.
// Check the tiering state.
Label has_optimized_code_or_state;
__ andi(t1, optimization_state,
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
__ Branch(&has_optimized_code_or_state, ne, t1, Operand(zero_reg));
Register optimization_state = t0;
LoadTieringStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
......
@@ -1017,8 +1017,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
Register scratch = t2;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ Lhu(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ And(
scratch, optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
@@ -1249,18 +1249,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
// Read off the optimization state in the feedback vector, and if there
// is optimized code or an tiering state, call that instead.
Register optimization_state = a4;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
// Check if the optimized code slot is not empty or has a tiering state.
// Check the tiering state.
Label has_optimized_code_or_state;
__ andi(t0, optimization_state,
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
__ Branch(&has_optimized_code_or_state, ne, t0, Operand(zero_reg));
Register optimization_state = a4;
LoadTieringStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
......
@@ -1074,8 +1074,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
DCHECK(!AreAliased(optimization_state, feedback_vector));
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ Lhu(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ And(
scratch, optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
@@ -1313,18 +1313,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
Label::Distance::kNear);
// Read off the optimization state in the feedback vector, and if there
// is optimized code or an tiering state, call that instead.
Register optimization_state = a4;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
// Check if the optimized code slot is not empty or has a tiering state.
// Check the tiering state.
Label has_optimized_code_or_state;
__ And(scratch, optimization_state,
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
__ Branch(&has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
Register optimization_state = a4;
LoadTieringStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
......
@@ -1111,9 +1111,9 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
__ movl(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ testl(
__ movzxwl(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ testw(
optimization_state,
Immediate(
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
......
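On x64 the load becomes movzxwl rather than a plain 16-bit move, so the halfword is zero-extended into the full 32-bit register and the upper bits are guaranteed to be clear before the testw. The same idea in C++ (illustrative only):

#include <cstdint>

// Zero-extending read: the upper 16 bits of the result are defined (all
// zero), which is what movzxwl guarantees for its destination register.
uint32_t LoadFlagsZeroExtended(const uint16_t* flags_field) {
  return static_cast<uint32_t>(*flags_field);
}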
@@ -1257,7 +1257,7 @@ FieldAccess AccessBuilder::ForFeedbackVectorInvocationCount() {
FieldAccess AccessBuilder::ForFeedbackVectorFlags() {
FieldAccess access = {
kTaggedBase, FeedbackVector::kFlagsOffset, Handle<Name>(),
MaybeHandle<Map>(), TypeCache::Get()->kUint32, MachineType::Uint32(),
MaybeHandle<Map>(), TypeCache::Get()->kUint16, MachineType::Uint16(),
kNoWriteBarrier};
return access;
}
......
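The compiler-side descriptor changes to match: recording MachineType::Uint16() here is what makes TurboFan emit 16-bit loads for the field. A simplified, hypothetical sketch of a field descriptor whose declared machine representation drives the load width (not the real FieldAccess/AccessBuilder machinery):

#include <cstdint>
#include <cstring>

// Hypothetical, stripped-down analogue of a field access descriptor.
enum class MachineRep { kUint16, kUint32 };

struct FieldDesc {
  int offset;
  MachineRep rep;  // declared width decides which load gets emitted
};

uint32_t LoadWithDeclaredWidth(const void* object, FieldDesc desc) {
  const char* p = static_cast<const char*>(object) + desc.offset;
  if (desc.rep == MachineRep::kUint16) {
    uint16_t v;
    std::memcpy(&v, p, sizeof(v));  // 16-bit load, zero-extended
    return v;
  }
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));  // 32-bit load
  return v;
}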