Commit 7095683a authored by Liu Yu, committed by V8 LUCI CQ

[osr] Load FeedbackVector::kFlagsOffset by load_halfword

The size of the flags field is now 16 bits.

Bug: v8:12161
Change-Id: I5db5e05171281f27cce739c7b76e1d4b9ebf20b9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3602236
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Yu Liu <liuyu@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#80165}
parent 6c6fc948
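The change is mechanical across architectures: each 32-bit load of FeedbackVector::kFlagsOffset is narrowed to an unsigned halfword (16-bit) load, and the test against kHasOptimizedCodeOrTieringStateIsAnyRequestMask is adjusted to the narrower width where the instruction encodes it (test_w, testw). As a standalone sketch of why the load width has to track the field width, the snippet below uses an invented two-field layout (not V8's real FeedbackVector layout) to show that a full-word read at the flags offset would also pick up the two bytes that now follow the 16-bit field:

// Standalone sketch, not V8 code. The layout below is an assumption made
// only for this illustration of 16-bit vs. 32-bit loads of a flags field.
#include <cstdint>
#include <cstdio>
#include <cstring>

struct FakeVectorFields {
  uint16_t flags;     // stands in for the now 16-bit flags field
  uint16_t neighbor;  // whatever occupies the next two bytes
};

int main() {
  FakeVectorFields fields{/*flags=*/0x0005, /*neighbor=*/0xABCD};

  // Halfword load (what the builtins now do): exactly the 16 flag bits.
  uint16_t halfword_load;
  std::memcpy(&halfword_load, &fields.flags, sizeof(halfword_load));

  // Full-word load at the same offset (the old code): reads the neighboring
  // two bytes as well, since the field is only 16 bits wide.
  uint32_t word_load;
  std::memcpy(&word_load, &fields, sizeof(word_load));

  std::printf("halfword load: 0x%04x\n", halfword_load);  // 0x0005
  std::printf("word load:     0x%08x\n", word_load);      // 0xabcd0005 (little-endian)
  return 0;
}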
@@ -1034,8 +1034,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
     Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(masm);
   DCHECK(!AreAliased(optimization_state, feedback_vector));
-  __ ldr(optimization_state,
-         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ ldrh(optimization_state,
+          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   __ tst(
       optimization_state,
       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
......
@@ -1216,8 +1216,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
     Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(masm);
   DCHECK(!AreAliased(optimization_state, feedback_vector));
-  __ Ldr(optimization_state,
-         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ Ldrh(optimization_state,
+          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   __ TestAndBranchIfAnySet(
       optimization_state,
       FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask,
......
@@ -44,8 +44,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
     TNode<JSFunction> function, TNode<FeedbackVector> feedback_vector) {
   Label fallthrough(this), may_have_optimized_code(this);
-  TNode<Uint32T> optimization_state =
-      LoadObjectField<Uint32T>(feedback_vector, FeedbackVector::kFlagsOffset);
+  TNode<Uint16T> optimization_state =
+      LoadObjectField<Uint16T>(feedback_vector, FeedbackVector::kFlagsOffset);
   // Fall through if no optimization trigger or optimized code.
   GotoIfNot(
......
@@ -1001,14 +1001,15 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
   // Store feedback_vector. We may need it if we need to load the optimize code
   // slot entry.
   __ movd(saved_feedback_vector, feedback_vector);
-  __ mov(optimization_state,
-         FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ mov_w(optimization_state,
+           FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   // Check if there is optimized code or a tiering state that needes to be
   // processed.
-  __ test(optimization_state,
-          Immediate(
-              FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+  __ test_w(
+      optimization_state,
+      Immediate(
+          FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
   __ j(not_zero, has_optimized_code_or_state);
 }
......
@@ -1010,8 +1010,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
   // TODO(liuyu): Remove CHECK
   CHECK_NE(t2, optimization_state);
   CHECK_NE(t2, feedback_vector);
-  __ Ld_w(optimization_state,
-          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ Ld_hu(optimization_state,
+           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   __ And(
       scratch, optimization_state,
       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
@@ -1224,6 +1224,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Ld_d(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
+  Label is_baseline;
   GetSharedFunctionInfoBytecodeOrBaseline(
       masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
@@ -1247,18 +1248,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
   __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
-  // Read off the optimization state in the feedback vector, and if there
-  // is optimized code or an tiering state, call that instead.
-  Register optimization_state = a4;
-  __ Ld_w(optimization_state,
-          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-  // Check if the optimized code slot is not empty or has a tiering state.
+  // Check the tiering state.
   Label has_optimized_code_or_state;
-  __ andi(t0, optimization_state,
-          FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
-  __ Branch(&has_optimized_code_or_state, ne, t0, Operand(zero_reg));
+  Register optimization_state = a4;
+  LoadTieringStateAndJumpIfNeedsProcessing(
+      masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
   Label not_optimized;
   __ bind(&not_optimized);
......
@@ -1008,8 +1008,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
     Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(masm);
   Register scratch = t6;
-  __ Lw(optimization_state,
-        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ lhu(optimization_state,
+         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   __ And(
       scratch, optimization_state,
       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
@@ -1241,18 +1241,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
   __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
-  // Read off the optimization state in the feedback vector, and if there
-  // is optimized code or an tiering state, call that instead.
-  Register optimization_state = t0;
-  __ Lw(optimization_state,
-        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-  // Check if the optimized code slot is not empty or has a tiering state.
+  // Check the tiering state.
   Label has_optimized_code_or_state;
-  __ andi(t1, optimization_state,
-          FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
-  __ Branch(&has_optimized_code_or_state, ne, t1, Operand(zero_reg));
+  Register optimization_state = t0;
+  LoadTieringStateAndJumpIfNeedsProcessing(
+      masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
   Label not_optimized;
   __ bind(&not_optimized);
......
@@ -1017,8 +1017,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
     Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(masm);
   Register scratch = t2;
-  __ Lw(optimization_state,
-        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ Lhu(optimization_state,
+         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   __ And(
       scratch, optimization_state,
       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
@@ -1249,18 +1249,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
   __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
-  // Read off the optimization state in the feedback vector, and if there
-  // is optimized code or an tiering state, call that instead.
-  Register optimization_state = a4;
-  __ Lw(optimization_state,
-        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-  // Check if the optimized code slot is not empty or has a tiering state.
+  // Check the tiering state.
   Label has_optimized_code_or_state;
-  __ andi(t0, optimization_state,
-          FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
-  __ Branch(&has_optimized_code_or_state, ne, t0, Operand(zero_reg));
+  Register optimization_state = a4;
+  LoadTieringStateAndJumpIfNeedsProcessing(
+      masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
   Label not_optimized;
   __ bind(&not_optimized);
......
@@ -1074,8 +1074,8 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
   DCHECK(!AreAliased(optimization_state, feedback_vector));
   UseScratchRegisterScope temps(masm);
   Register scratch = temps.Acquire();
-  __ Lw(optimization_state,
-        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ Lhu(optimization_state,
+         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   __ And(
       scratch, optimization_state,
       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
@@ -1313,18 +1313,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
             Label::Distance::kNear);
-  // Read off the optimization state in the feedback vector, and if there
-  // is optimized code or an tiering state, call that instead.
-  Register optimization_state = a4;
-  __ Lw(optimization_state,
-        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-  // Check if the optimized code slot is not empty or has a tiering state.
+  // Check the tiering state.
   Label has_optimized_code_or_state;
-  __ And(scratch, optimization_state,
-         FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
-  __ Branch(&has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
+  Register optimization_state = a4;
+  LoadTieringStateAndJumpIfNeedsProcessing(
+      masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
   Label not_optimized;
   __ bind(&not_optimized);
......
@@ -1111,9 +1111,9 @@ static void LoadTieringStateAndJumpIfNeedsProcessing(
     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
     Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(masm);
-  __ movl(optimization_state,
-          FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-  __ testl(
+  __ movzxwl(optimization_state,
+             FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ testw(
       optimization_state,
       Immediate(
           FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
......
@@ -1257,7 +1257,7 @@ FieldAccess AccessBuilder::ForFeedbackVectorInvocationCount() {
 FieldAccess AccessBuilder::ForFeedbackVectorFlags() {
   FieldAccess access = {
       kTaggedBase,        FeedbackVector::kFlagsOffset, Handle<Name>(),
-      MaybeHandle<Map>(), TypeCache::Get()->kUint32,    MachineType::Uint32(),
+      MaybeHandle<Map>(), TypeCache::Get()->kUint16,    MachineType::Uint16(),
       kNoWriteBarrier};
   return access;
 }
......