Commit fa0cb020 authored by Liu Yu, committed by V8 LUCI CQ

[mips][loong64][sparkplug] Remove BaselineData, use Code directly

Port: 787bec09

Change-Id: I40055181a3b14929559672845215ec560906ca8a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3144790
Auto-Submit: Liu yu <liuyu@loongson.cn>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#76688}
parent 991fc40e
...@@ -300,6 +300,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { ...@@ -300,6 +300,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm); Generate_JSBuiltinsConstructStubHelper(masm);
} }
// Debug-only check that |code_object| contains baseline code: reads the
// flags word of the Code object, extracts its CodeKind field, and aborts
// with kExpectedBaselineData unless the kind is CodeKind::BASELINE.
static void AssertCodeIsBaseline(MacroAssembler* masm, Register code_object,
                                 Register temp) {
  DCHECK(!AreAliased(code_object, temp));
  // Load the flags word, then isolate the KindField bits for comparison.
  __ Ld_d(temp, FieldMemOperand(code_object, Code::kFlagsOffset));
  __ DecodeField<Code::KindField>(temp);
  __ Assert(eq, AbortReason::kExpectedBaselineData, temp,
            Operand(static_cast<int>(CodeKind::BASELINE)));
}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch. // the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
...@@ -309,7 +319,15 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, ...@@ -309,7 +319,15 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done; Label done;
__ GetObjectType(sfi_data, scratch1, scratch1); __ GetObjectType(sfi_data, scratch1, scratch1);
__ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE)); if (FLAG_debug_code) {
Label not_baseline;
__ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ Branch(is_baseline);
__ bind(&not_baseline);
} else {
__ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
}
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld_d(sfi_data, __ Ld_d(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
...@@ -1401,8 +1419,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ...@@ -1401,8 +1419,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker); &has_optimized_code_or_marker);
// Load the baseline code into the closure. // Load the baseline code into the closure.
__ Ld_d(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister, __ Move(a2, kInterpreterBytecodeArrayRegister);
BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure); ReplaceClosureCodeWithOptimizedCode(masm, a2, closure);
__ JumpCodeObject(a2); __ JumpCodeObject(a2);
...@@ -1791,7 +1808,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) { ...@@ -1791,7 +1808,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object. // Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset] // <deopt_data> = <code>[#deoptimization_data_offset]
__ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data. // Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
...@@ -3513,7 +3531,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, ...@@ -3513,7 +3531,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) { if (!is_osr) {
Label start_with_baseline; Label start_with_baseline;
__ GetObjectType(code_obj, t2, t2); __ GetObjectType(code_obj, t2, t2);
__ Branch(&start_with_baseline, eq, t2, Operand(BASELINE_DATA_TYPE)); __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code. // Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode Builtin builtin_id = next_bytecode
...@@ -3526,13 +3544,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, ...@@ -3526,13 +3544,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline); __ bind(&start_with_baseline);
} else if (FLAG_debug_code) { } else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t2, t2); __ GetObjectType(code_obj, t2, t2);
__ Assert(eq, AbortReason::kExpectedBaselineData, t2, __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
Operand(BASELINE_DATA_TYPE));
} }
// Load baseline code from baseline data. if (FLAG_debug_code) {
__ Ld_d(code_obj, AssertCodeIsBaseline(masm, code_obj, t2);
FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); }
// Replace BytecodeOffset with the feedback vector. // Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2; Register feedback_vector = a2;
......
...@@ -612,6 +612,16 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { ...@@ -612,6 +612,16 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
} }
// Debug-only check that |code_object| contains baseline code: reads the
// flags word of the Code object, extracts its CodeKind field, and aborts
// with kExpectedBaselineData unless the kind is CodeKind::BASELINE.
static void AssertCodeIsBaseline(MacroAssembler* masm, Register code_object,
                                 Register temp) {
  DCHECK(!AreAliased(code_object, temp));
  // Load the flags word, then isolate the KindField bits for comparison.
  __ lw(temp, FieldMemOperand(code_object, Code::kFlagsOffset));
  __ DecodeField<Code::KindField>(temp);
  __ Assert(eq, AbortReason::kExpectedBaselineData, temp,
            Operand(static_cast<int>(CodeKind::BASELINE)));
}
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data, Register sfi_data,
Register scratch1, Register scratch1,
...@@ -620,7 +630,15 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, ...@@ -620,7 +630,15 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done; Label done;
__ GetObjectType(sfi_data, scratch1, scratch1); __ GetObjectType(sfi_data, scratch1, scratch1);
__ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE)); if (FLAG_debug_code) {
Label not_baseline;
__ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ Branch(is_baseline);
__ bind(&not_baseline);
} else {
__ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
}
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ lw(sfi_data, __ lw(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
...@@ -1389,8 +1407,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ...@@ -1389,8 +1407,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker); &has_optimized_code_or_marker);
// Load the baseline code into the closure. // Load the baseline code into the closure.
__ Lw(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister, __ Move(a2, kInterpreterBytecodeArrayRegister);
BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t4, t5); ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t4, t5);
__ JumpCodeObject(a2); __ JumpCodeObject(a2);
...@@ -1779,7 +1796,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) { ...@@ -1779,7 +1796,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
} }
// Load deoptimization data from the code object. // Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset] // <deopt_data> = <code>[#deoptimization_data_offset]
__ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data. // Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
...@@ -3958,7 +3976,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, ...@@ -3958,7 +3976,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) { if (!is_osr) {
Label start_with_baseline; Label start_with_baseline;
__ GetObjectType(code_obj, t6, t6); __ GetObjectType(code_obj, t6, t6);
__ Branch(&start_with_baseline, eq, t6, Operand(BASELINE_DATA_TYPE)); __ Branch(&start_with_baseline, eq, t6, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code. // Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode Builtin builtin_id = next_bytecode
...@@ -3971,12 +3989,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, ...@@ -3971,12 +3989,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline); __ bind(&start_with_baseline);
} else if (FLAG_debug_code) { } else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t6, t6); __ GetObjectType(code_obj, t6, t6);
__ Assert(eq, AbortReason::kExpectedBaselineData, t6, __ Assert(eq, AbortReason::kExpectedBaselineData, t6, Operand(CODET_TYPE));
Operand(BASELINE_DATA_TYPE));
} }
// Load baseline code from baseline data. if (FLAG_debug_code) {
__ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); AssertCodeIsBaseline(masm, code_obj, t2);
}
// Replace BytecodeOffset with the feedback vector. // Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2; Register feedback_vector = a2;
......
...@@ -300,6 +300,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { ...@@ -300,6 +300,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm); Generate_JSBuiltinsConstructStubHelper(masm);
} }
// Debug-only check that |code_object| contains baseline code: reads the
// flags word of the Code object, extracts its CodeKind field, and aborts
// with kExpectedBaselineData unless the kind is CodeKind::BASELINE.
static void AssertCodeIsBaseline(MacroAssembler* masm, Register code_object,
                                 Register temp) {
  DCHECK(!AreAliased(code_object, temp));
  // Load the flags word, then isolate the KindField bits for comparison.
  __ Ld(temp, FieldMemOperand(code_object, Code::kFlagsOffset));
  __ DecodeField<Code::KindField>(temp);
  __ Assert(eq, AbortReason::kExpectedBaselineData, temp,
            Operand(static_cast<int>(CodeKind::BASELINE)));
}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch. // the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
...@@ -309,11 +319,18 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, ...@@ -309,11 +319,18 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done; Label done;
__ GetObjectType(sfi_data, scratch1, scratch1); __ GetObjectType(sfi_data, scratch1, scratch1);
__ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE)); if (FLAG_debug_code) {
Label not_baseline;
__ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ Branch(is_baseline);
__ bind(&not_baseline);
} else {
__ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
}
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld(sfi_data, __ Ld(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done); __ bind(&done);
} }
...@@ -1402,8 +1419,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ...@@ -1402,8 +1419,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker); &has_optimized_code_or_marker);
// Load the baseline code into the closure. // Load the baseline code into the closure.
__ Ld(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister, __ Move(a2, kInterpreterBytecodeArrayRegister);
BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1); ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1);
__ JumpCodeObject(a2); __ JumpCodeObject(a2);
...@@ -1788,7 +1804,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) { ...@@ -1788,7 +1804,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
} }
// Load deoptimization data from the code object. // Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset] // <deopt_data> = <code>[#deoptimization_data_offset]
__ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data. // Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
...@@ -3543,7 +3560,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, ...@@ -3543,7 +3560,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) { if (!is_osr) {
Label start_with_baseline; Label start_with_baseline;
__ GetObjectType(code_obj, t2, t2); __ GetObjectType(code_obj, t2, t2);
__ Branch(&start_with_baseline, eq, t2, Operand(BASELINE_DATA_TYPE)); __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code. // Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode Builtin builtin_id = next_bytecode
...@@ -3556,12 +3573,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, ...@@ -3556,12 +3573,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline); __ bind(&start_with_baseline);
} else if (FLAG_debug_code) { } else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t2, t2); __ GetObjectType(code_obj, t2, t2);
__ Assert(eq, AbortReason::kExpectedBaselineData, t2, __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
Operand(BASELINE_DATA_TYPE));
} }
// Load baseline code from baseline data. if (FLAG_debug_code) {
__ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); AssertCodeIsBaseline(masm, code_obj, t2);
}
// Replace BytecodeOffset with the feedback vector. // Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2; Register feedback_vector = a2;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment