Commit a29de090 authored by Junliang Yan, committed by Commit Bot

PPC/s390: Reland^2: Remove SFI code field

Port 51ded9d3

Original Commit Message:

    This is a reland of d8f564ea

    Original change's description:
    > Reland: Remove SFI code field
    >
    > Remove the SharedFunctionInfo code field, inferring the code object
    > from the function_data field instead. In some cases, the function_data
    > field can now hold a Code object (e.g. some WASM cases).
    >
    > (Reland of https://chromium-review.googlesource.com/952452)
    >
    > TBR=mstarzinger@chromium.org
    >
    > Bug: chromium:783853
    > Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
    > Change-Id: I10ea5be7ceed1b51362a2fad9be7397624d69343
    > Reviewed-on: https://chromium-review.googlesource.com/970649
    > Commit-Queue: Leszek Swirski <leszeks@chromium.org>
    > Reviewed-by: Yang Guo <yangguo@chromium.org>
    > Reviewed-by: Jakob Gruber <jgruber@chromium.org>
    > Reviewed-by: Leszek Swirski <leszeks@chromium.org>
    > Cr-Commit-Position: refs/heads/master@{#52136}

R=leszeks@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: I86c5a46a866830a2150ff9366be12a09f111240f
Reviewed-on: https://chromium-review.googlesource.com/976624
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#52165}
parent e46772ea
......@@ -1282,10 +1282,8 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
// Set the code slot inside the JSFunction to the trampoline to the
// interpreter entry.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
// Set the code slot inside the JSFunction to CompileLazy.
__ Move(r5, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ StoreP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
__ RecordWriteField(r4, JSFunction::kCodeOffset, r5, r7, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
......@@ -1293,6 +1291,68 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
// Resolves the Code object implied by a SharedFunctionInfo's function_data
// value (passed in |sfi_data|), replacing |sfi_data| in place with the Code
// object to execute. Checks are performed in this order:
//   Smi                  -> index into the isolate's builtins table
//   BytecodeArray        -> InterpreterEntryTrampoline builtin
//   Code                 -> that Code object itself (left unchanged)
//   FixedArray           -> InstantiateAsmJs builtin
//   Tuple2               -> CompileLazy builtin (pre-parsed scope data)
//   anything else        -> HandleApiCall builtin (asserted to be
//                           FunctionTemplateInfo in debug builds)
// |scratch1| is clobbered (holds the builtins table base, then the instance
// type); r0 is also used as a scratch by LoadHalfWord.
static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
Register scratch1) {
// Figure out the SFI's code object.
Label done;
Label check_is_bytecode_array;
Label check_is_code;
Label check_is_fixed_array;
Label check_is_pre_parsed_scope_data;
Label check_is_function_template_info;
Register data_type = scratch1;
// IsSmi: Is builtin. The Smi is the builtin's index; untag it straight into
// a pointer-scaled offset and load the Code object from the builtins table.
__ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
__ mov(scratch1,
Operand(ExternalReference::builtins_address(masm->isolate())));
// Untag and scale by kPointerSizeLog2 in one shift (scale > kSmiShift case).
__ SmiUntag(sfi_data, LeaveRC, kPointerSizeLog2);
__ LoadPX(sfi_data, MemOperand(scratch1, sfi_data));
__ b(&done);
// Not a Smi: load the map's instance type once for all subsequent checks.
__ bind(&check_is_bytecode_array);
__ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
__ LoadHalfWord(data_type,
FieldMemOperand(data_type, Map::kInstanceTypeOffset), r0);
// IsBytecodeArray: Interpret bytecode
__ cmpi(data_type, Operand(BYTECODE_ARRAY_TYPE));
__ bne(&check_is_code);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
__ b(&done);
// IsCode: Run code — |sfi_data| already holds the Code object.
__ bind(&check_is_code);
__ cmpi(data_type, Operand(CODE_TYPE));
__ beq(&done);
// IsFixedArray: Instantiate using AsmWasmData,
__ bind(&check_is_fixed_array);
__ cmpi(data_type, Operand(FIXED_ARRAY_TYPE));
__ bne(&check_is_pre_parsed_scope_data);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
__ b(&done);
// IsPreParsedScopeData: Compile lazy (pre-parsed data is stored as a Tuple2).
__ bind(&check_is_pre_parsed_scope_data);
__ cmpi(data_type, Operand(TUPLE2_TYPE));
__ bne(&check_is_function_template_info);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ b(&done);
// IsFunctionTemplateInfo: API call. This is the fall-through case; debug
// builds assert that no other instance type reaches here.
__ bind(&check_is_function_template_info);
if (FLAG_debug_code) {
__ cmpi(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
__ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
}
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
__ bind(&done);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
......@@ -1316,13 +1376,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
// We found no optimized code.
// We found no optimized code. Infer the code object needed for the SFI.
Register entry = r7;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(entry,
FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoCode(masm, entry, r8);
// If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
// If code entry points to anything other than CompileLazy, install that.
__ mov(r8, Operand(masm->CodeObject()));
__ cmp(entry, r8);
__ beq(&gotta_call_runtime);
......@@ -1392,25 +1454,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
}
{
// If we've reached this spot, the target builtin has been deserialized and
// we simply need to copy it over. First to the shared function info.
// we simply need to copy it over to the target function.
Register target_builtin = scratch1;
Register shared = scratch0;
__ LoadP(shared,
FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
CHECK(r8 != target && r8 != scratch0 && r8 != scratch1);
CHECK(r9 != target && r9 != scratch0 && r9 != scratch1);
__ StoreP(target_builtin,
FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset), r0);
__ mr(r9, target_builtin); // Write barrier clobbers r9 below.
__ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r9, r8,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// And second to the target function.
__ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset),
r0);
......
......@@ -1289,10 +1289,8 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
// Set the code slot inside the JSFunction to the trampoline to the
// interpreter entry.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
// Set the code slot inside the JSFunction to CompileLazy.
__ Move(r4, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ StoreP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ RecordWriteField(r3, JSFunction::kCodeOffset, r4, r6, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
......@@ -1300,6 +1298,68 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
// s390 version: resolves the Code object implied by a SharedFunctionInfo's
// function_data value (passed in |sfi_data|), replacing |sfi_data| in place
// with the Code object to execute. Checks are performed in this order:
//   Smi                  -> index into the isolate's builtins table
//   BytecodeArray        -> InterpreterEntryTrampoline builtin
//   Code                 -> that Code object itself (left unchanged)
//   FixedArray           -> InstantiateAsmJs builtin
//   Tuple2               -> CompileLazy builtin (pre-parsed scope data)
//   anything else        -> HandleApiCall builtin (asserted to be
//                           FunctionTemplateInfo in debug builds)
// |scratch1| is clobbered (holds the builtins table base, then the instance
// type).
static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
Register scratch1) {
// Figure out the SFI's code object.
Label done;
Label check_is_bytecode_array;
Label check_is_code;
Label check_is_fixed_array;
Label check_is_pre_parsed_scope_data;
Label check_is_function_template_info;
Register data_type = scratch1;
// IsSmi: Is builtin. The Smi is the builtin's index; untag it straight into
// a pointer-scaled offset and load the Code object from the builtins table.
__ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
__ mov(scratch1,
Operand(ExternalReference::builtins_address(masm->isolate())));
// Untag and scale by kPointerSizeLog2 in one shift (scale > kSmiShift case).
__ SmiUntag(sfi_data, kPointerSizeLog2);
__ LoadP(sfi_data, MemOperand(scratch1, sfi_data));
__ b(&done);
// Not a Smi: load the map's instance type once for all subsequent checks.
__ bind(&check_is_bytecode_array);
__ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
__ LoadHalfWordP(data_type,
FieldMemOperand(data_type, Map::kInstanceTypeOffset));
// IsBytecodeArray: Interpret bytecode
__ CmpP(data_type, Operand(BYTECODE_ARRAY_TYPE));
__ bne(&check_is_code);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
__ b(&done);
// IsCode: Run code — |sfi_data| already holds the Code object.
__ bind(&check_is_code);
__ CmpP(data_type, Operand(CODE_TYPE));
__ beq(&done);
// IsFixedArray: Instantiate using AsmWasmData,
__ bind(&check_is_fixed_array);
__ CmpP(data_type, Operand(FIXED_ARRAY_TYPE));
__ bne(&check_is_pre_parsed_scope_data);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
__ b(&done);
// IsPreParsedScopeData: Compile lazy (pre-parsed data is stored as a Tuple2).
__ bind(&check_is_pre_parsed_scope_data);
__ CmpP(data_type, Operand(TUPLE2_TYPE));
__ bne(&check_is_function_template_info);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ b(&done);
// IsFunctionTemplateInfo: API call. This is the fall-through case; debug
// builds assert that no other instance type reaches here.
__ bind(&check_is_function_template_info);
if (FLAG_debug_code) {
__ CmpP(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
__ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
}
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
__ bind(&done);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
......@@ -1323,13 +1383,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// We found no optimized code.
// We found no optimized code. Infer the code object needed for the SFI.
Register entry = r6;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(entry,
FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoCode(masm, entry, r7);
// If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
// If code entry points to anything other than CompileLazy, install that.
__ mov(r7, Operand(masm->CodeObject()));
__ CmpP(entry, r7);
__ beq(&gotta_call_runtime);
......@@ -1399,25 +1461,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
}
{
// If we've reached this spot, the target builtin has been deserialized and
// we simply need to copy it over. First to the shared function info.
// we simply need to copy it over to the target function.
Register target_builtin = scratch1;
Register shared = scratch0;
__ LoadP(shared,
FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
CHECK(r7 != target && r7 != scratch0 && r7 != scratch1);
CHECK(r8 != target && r8 != scratch0 && r8 != scratch1);
__ StoreP(target_builtin,
FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
__ LoadRR(r8, target_builtin); // Write barrier clobbers r9 below.
__ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r8, r7,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// And second to the target function.
__ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ LoadRR(r8, target_builtin); // Write barrier clobbers r9 below.
......
......@@ -450,7 +450,6 @@ class MemOperand BASE_EMBEDDED {
explicit MemOperand(Register ra, Register rb);
int32_t offset() const {
DCHECK(rb_ == no_reg);
return offset_;
}
......
......@@ -2391,11 +2391,12 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
// Load a "pointer" sized value from the memory location
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
DCHECK_EQ(mem.rb(), no_reg);
int offset = mem.offset();
if (!is_int16(offset)) {
/* cannot use d-form */
DCHECK(scratch != no_reg);
DCHECK_EQ(scratch, no_reg);
mov(scratch, Operand(offset));
LoadPX(dst, MemOperand(mem.ra(), scratch));
} else {
......
......@@ -505,10 +505,18 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
void SmiUntag(Register reg, RCBit rc = LeaveRC, int scale = 0) {
SmiUntag(reg, reg, rc, scale);
}
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
ShiftRightArithImm(dst, src, kSmiShift, rc);
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC, int scale = 0) {
if (scale > kSmiShift) {
ShiftLeftImm(dst, src, Operand(scale - kSmiShift), rc);
} else if (scale < kSmiShift) {
ShiftRightArithImm(dst, src, kSmiShift - scale, rc);
} else {
// do nothing
}
}
// ---------------------------------------------------------------------------
// Bit testing/extraction
......
......@@ -989,10 +989,16 @@ class TurboAssembler : public Assembler {
// High bits must be identical to fit into an 32-bit integer
cgfr(value, value);
}
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
void SmiUntag(Register reg, int scale = 0) { SmiUntag(reg, reg, scale); }
void SmiUntag(Register dst, Register src) {
ShiftRightArithP(dst, src, Operand(kSmiShift));
void SmiUntag(Register dst, Register src, int scale = 0) {
if (scale > kSmiShift) {
ShiftLeftP(dst, src, Operand(scale - kSmiShift));
} else if (scale < kSmiShift) {
ShiftRightArithP(dst, src, Operand(kSmiShift - scale));
} else {
// do nothing
}
}
// Activation support.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.