Commit 9836cdb1 authored by Jaideep Bajwa, committed by Commit Bot

PPC/s390: Switch JSFunction::code to be a tagged value.

Port 4e207a42

Original Commit Message:

    This switches the "code entry" field on JSFunction to no longer be an
    inner pointer into a Code object (i.e. to the start of the instruction
    stream), but a properly tagged pointer instead.

    Motivation behind this is the ability to treat this field regularly as
    part of escape analysis in the optimizing compiler. Also simplifies the
    object visitation for JSFunction objects.

R=mstarzinger@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: Ifa5998551e041c8de647df7306dd549455936699
Reviewed-on: https://chromium-review.googlesource.com/588468
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Jaideep Bajwa <bjaideep@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#46934}
parent 950e4f46
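
Every call site touched below follows the same pattern: load the tagged Code object from JSFunction::kCodeOffset, then add Code::kHeaderSize - kHeapObjectTag to reach the instruction stream. A minimal standalone C++ sketch of that address arithmetic, using illustrative stand-in constants rather than V8's real header definitions:

```cpp
#include <cstdint>

// Illustrative stand-ins; the real constants live in V8's headers.
// kHeapObjectTag really is 1 in V8; the Code header size is
// architecture-dependent, so the value here is a placeholder.
constexpr std::intptr_t kHeapObjectTag = 1;
constexpr std::intptr_t kCodeHeaderSize = 96;

// Before: JSFunction::kCodeEntryOffset held a raw inner pointer to the
// first instruction, directly usable as a jump target.
// After: JSFunction::kCodeOffset holds a tagged pointer to the Code heap
// object, and each call site computes the entry point itself.
std::intptr_t EntryFromTaggedCode(std::intptr_t tagged_code) {
  // entry = code + Code::kHeaderSize - kHeapObjectTag: a single add both
  // strips the tag bit and skips the Code object's header, which is why
  // every ported call site gains exactly one addi (PPC) / AddP (s390).
  return tagged_code + kCodeHeaderSize - kHeapObjectTag;
}
```
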
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -799,7 +799,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // undefined because generator functions are non-constructable.
   __ mr(r6, r4);
   __ mr(r4, r7);
-  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
+  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ JumpToJSEntry(ip);
 }
@@ -942,16 +943,17 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
-static void ReplaceClosureEntryWithOptimizedCode(
-    MacroAssembler* masm, Register optimized_code_entry, Register closure,
+static void ReplaceClosureCodeWithOptimizedCode(
+    MacroAssembler* masm, Register optimized_code, Register closure,
     Register scratch1, Register scratch2, Register scratch3) {
   Register native_context = scratch1;
   // Store code entry in the closure.
-  __ addi(optimized_code_entry, optimized_code_entry,
-          Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(optimized_code_entry,
-            FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+  __ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
+            r0);
+  __ mr(scratch1, optimized_code);  // Write barrier clobbers scratch1 below.
+  __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
 
   // Link the closure into the optimized function list.
   // r7 : code entry
@@ -1090,8 +1092,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     // the optimized functions list, then tail call the optimized code.
     // The feedback vector is no longer used, so re-use it as a scratch
     // register.
-    ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+    ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
+    __ addi(optimized_code_entry, optimized_code_entry,
+            Operand(Code::kHeaderSize - kHeapObjectTag));
     __ mr(ip, optimized_code_entry);
     __ Jump(optimized_code_entry);
@@ -1269,9 +1273,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
   __ LoadP(r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
+  __ StoreP(r7, FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
+  __ mr(r9, r7);  // Write barrier clobbers r9 below.
+  __ RecordWriteField(closure, JSFunction::kCodeOffset, r9, r8,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
   __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(r7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(closure, r7, r8);
   __ JumpToJSEntry(r7);
 }
@@ -1578,9 +1587,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ beq(&gotta_call_runtime);
 
   // Install the SFI's code entry.
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
+  __ mr(r9, entry);  // Write barrier clobbers ip below.
+  __ RecordWriteField(closure, JSFunction::kCodeOffset, r9, r8,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
   __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(closure, entry, r8);
   __ JumpToJSEntry(entry);
 
   __ bind(&gotta_call_runtime);
@@ -2683,7 +2695,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   Label invoke, dont_adapt_arguments, stack_overflow;
 
   Label enough, too_few;
-  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
+  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ cmp(r3, r5);
   __ blt(&too_few);
   __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -791,7 +791,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // undefined because generator functions are non-constructable.
   __ LoadRR(r5, r3);
   __ LoadRR(r3, r6);
-  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
+  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
+  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ JumpToJSEntry(ip);
 }
@@ -941,16 +942,18 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
-static void ReplaceClosureEntryWithOptimizedCode(
-    MacroAssembler* masm, Register optimized_code_entry, Register closure,
+static void ReplaceClosureCodeWithOptimizedCode(
+    MacroAssembler* masm, Register optimized_code, Register closure,
     Register scratch1, Register scratch2, Register scratch3) {
   Register native_context = scratch1;
   // Store code entry in the closure.
-  __ AddP(optimized_code_entry, optimized_code_entry,
-          Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(optimized_code_entry,
-            FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+  __ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
+            r0);
+  __ LoadRR(scratch1,
+            optimized_code);  // Write barrier clobbers scratch1 below.
+  __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
 
   // Link the closure into the optimized function list.
   // r6 : code entry
@@ -1088,8 +1091,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     // the optimized functions list, then tail call the optimized code.
     // The feedback vector is no longer used, so re-use it as a scratch
     // register.
-    ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+    ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
+    __ AddP(optimized_code_entry, optimized_code_entry,
+            Operand(Code::kHeaderSize - kHeapObjectTag));
     __ Jump(optimized_code_entry);
 
     // Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -1264,9 +1269,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
   __ LoadP(r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
+  __ StoreP(r6, FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
+  __ LoadRR(r8, r6);  // Write barrier clobbers r8 below.
+  __ RecordWriteField(closure, JSFunction::kCodeOffset, r8, r7,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
   __ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(r6, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(closure, r6, r7);
   __ JumpToJSEntry(r6);
 }
@@ -1572,9 +1580,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ beq(&gotta_call_runtime);
 
   // Install the SFI's code entry.
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
+  __ LoadRR(r8, entry);  // Write barrier clobbers r8 below.
+  __ RecordWriteField(closure, JSFunction::kCodeOffset, r8, r7,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(closure, entry, r7);
   __ JumpToJSEntry(entry);
 
   __ bind(&gotta_call_runtime);
@@ -2685,7 +2696,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   Label invoke, dont_adapt_arguments, stack_overflow;
 
   Label enough, too_few;
-  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
+  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
+  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ CmpP(r2, r4);
   __ blt(&too_few);
   __ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -1014,7 +1014,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ cmp(cp, kScratchReg);
         __ Assert(eq, kWrongFunctionContext);
       }
-      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
       __ Call(ip);
       RecordCallPosition(instr);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -2448,9 +2449,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
         value = Double(src.ToFloat64());
       }
 #else
-      value = Double((src.type() == Constant::kFloat32)
-                         ? static_cast<double>(src.ToFloat32())
-                         : src.ToFloat64());
+      value = src.type() == Constant::kFloat32
+                  ? Double(static_cast<double>(src.ToFloat32()))
+                  : Double(src.ToFloat64());
 #endif
       __ LoadDoubleLiteral(dst, value, kScratchReg);
       if (destination->IsFPStackSlot()) {
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -1219,7 +1219,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        __ CmpP(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
-     __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+     __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
+     __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(ip);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -430,69 +430,6 @@ void MacroAssembler::RecordWrite(
   }
 }
 
-void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
-                                               Register code_entry,
-                                               Register scratch) {
-  const int offset = JSFunction::kCodeEntryOffset;
-
-  // Since a code entry (value) is always in old space, we don't need to update
-  // remembered set. If incremental marking is off, there is nothing for us to
-  // do.
-  if (!FLAG_incremental_marking) return;
-
-  DCHECK(js_function.is(r4));
-  DCHECK(code_entry.is(r7));
-  DCHECK(scratch.is(r8));
-  AssertNotSmi(js_function);
-
-  if (emit_debug_code()) {
-    addi(scratch, js_function, Operand(offset - kHeapObjectTag));
-    LoadP(ip, MemOperand(scratch));
-    cmp(ip, code_entry);
-    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
-  }
-
-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
-  Label done;
-
-  CheckPageFlag(code_entry, scratch,
-                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
-  CheckPageFlag(js_function, scratch,
-                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
-
-  const Register dst = scratch;
-  addi(dst, js_function, Operand(offset - kHeapObjectTag));
-
-  // Save caller-saved registers. js_function and code_entry are in the
-  // caller-saved register list.
-  DCHECK(kJSCallerSaved & js_function.bit());
-  DCHECK(kJSCallerSaved & code_entry.bit());
-  mflr(r0);
-  MultiPush(kJSCallerSaved | r0.bit());
-
-  int argument_count = 3;
-  PrepareCallCFunction(argument_count, code_entry);
-
-  mr(r3, js_function);
-  mr(r4, dst);
-  mov(r5, Operand(ExternalReference::isolate_address(isolate())));
-
-  {
-    AllowExternalCallThatCantCauseGC scope(this);
-    CallCFunction(
-        ExternalReference::incremental_marking_record_write_code_entry_function(
-            isolate()),
-        argument_count);
-  }
-
-  // Restore caller-saved registers (including js_function and code_entry).
-  MultiPop(kJSCallerSaved | r0.bit());
-  mtlr(r0);
-
-  bind(&done);
-}
-
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                          Register address, Register scratch,
                                          SaveFPRegsMode fp_mode,
@@ -1418,7 +1355,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
     // allow recompilation to take effect without changing any of the
     // call sites.
     Register code = ip;
-    LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+    LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+    addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
    if (flag == CALL_FUNCTION) {
       CallJSEntry(code);
     } else {
--- a/src/ppc/macro-assembler-ppc.h
+++ b/src/ppc/macro-assembler-ppc.h
@@ -705,11 +705,6 @@ class MacroAssembler : public TurboAssembler {
                 pointers_to_here_check_for_value);
   }
 
-  // Notify the garbage collector that we wrote a code entry into a
-  // JSFunction. Only scratch is clobbered by the operation.
-  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
-                                 Register scratch);
-
   void RecordWriteForMap(Register object, Register map, Register dst,
                          LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
--- a/src/s390/macro-assembler-s390.cc
+++ b/src/s390/macro-assembler-s390.cc
@@ -421,67 +421,6 @@ void MacroAssembler::RecordWrite(
   }
 }
 
-void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
-                                               Register code_entry,
-                                               Register scratch) {
-  const int offset = JSFunction::kCodeEntryOffset;
-
-  // Since a code entry (value) is always in old space, we don't need to update
-  // remembered set. If incremental marking is off, there is nothing for us to
-  // do.
-  if (!FLAG_incremental_marking) return;
-
-  DCHECK(js_function.is(r3));
-  DCHECK(code_entry.is(r6));
-  DCHECK(scratch.is(r7));
-  AssertNotSmi(js_function);
-
-  if (emit_debug_code()) {
-    AddP(scratch, js_function, Operand(offset - kHeapObjectTag));
-    LoadP(ip, MemOperand(scratch));
-    CmpP(ip, code_entry);
-    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
-  }
-
-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
-  Label done;
-
-  CheckPageFlag(code_entry, scratch,
-                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
-  CheckPageFlag(js_function, scratch,
-                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
-
-  const Register dst = scratch;
-  AddP(dst, js_function, Operand(offset - kHeapObjectTag));
-
-  // Save caller-saved registers. js_function and code_entry are in the
-  // caller-saved register list.
-  DCHECK(kJSCallerSaved & js_function.bit());
-  // DCHECK(kJSCallerSaved & code_entry.bit());
-  MultiPush(kJSCallerSaved | code_entry.bit() | r14.bit());
-
-  int argument_count = 3;
-  PrepareCallCFunction(argument_count, code_entry);
-
-  LoadRR(r2, js_function);
-  LoadRR(r3, dst);
-  mov(r4, Operand(ExternalReference::isolate_address(isolate())));
-
-  {
-    AllowExternalCallThatCantCauseGC scope(this);
-    CallCFunction(
-        ExternalReference::incremental_marking_record_write_code_entry_function(
-            isolate()),
-        argument_count);
-  }
-
-  // Restore caller-saved registers (including js_function and code_entry).
-  MultiPop(kJSCallerSaved | code_entry.bit() | r14.bit());
-
-  bind(&done);
-}
-
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                          Register address, Register scratch,
                                          SaveFPRegsMode fp_mode,
@@ -1380,7 +1319,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
     // allow recompilation to take effect without changing any of the
     // call sites.
     Register code = ip;
-    LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+    LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+    AddP(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
    if (flag == CALL_FUNCTION) {
       CallJSEntry(code);
     } else {
--- a/src/s390/macro-assembler-s390.h
+++ b/src/s390/macro-assembler-s390.h
@@ -1164,8 +1164,7 @@ class MacroAssembler : public TurboAssembler {
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeFunctionCode(Register function, Register new_target,
                           const ParameterCount& expected,
-                          const ParameterCount& actual, InvokeFlag flag,
-                          const CallWrapper& call_wrapper);
+                          const ParameterCount& actual, InvokeFlag flag);
 
   // On function call, call into the debugger if necessary.
   void CheckDebugHook(Register fun, Register new_target,
@@ -1175,17 +1174,14 @@ class MacroAssembler : public TurboAssembler {
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function, Register new_target,
-                      const ParameterCount& actual, InvokeFlag flag,
-                      const CallWrapper& call_wrapper);
+                      const ParameterCount& actual, InvokeFlag flag);
 
   void InvokeFunction(Register function, const ParameterCount& expected,
-                      const ParameterCount& actual, InvokeFlag flag,
-                      const CallWrapper& call_wrapper);
+                      const ParameterCount& actual, InvokeFlag flag);
 
   void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& expected,
-                      const ParameterCount& actual, InvokeFlag flag,
-                      const CallWrapper& call_wrapper);
+                      const ParameterCount& actual, InvokeFlag flag);
 
   // Frame restart support
   void MaybeDropFrames();
@@ -1674,11 +1670,6 @@ class MacroAssembler : public TurboAssembler {
                 pointers_to_here_check_for_value);
   }
 
-  // Notify the garbage collector that we wrote a code entry into a
-  // JSFunction. Only scratch is clobbered by the operation.
-  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
-                                 Register scratch);
-
   void RecordWriteForMap(Register object, Register map, Register dst,
                          LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
@@ -1714,8 +1705,7 @@ class MacroAssembler : public TurboAssembler {
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
                       const ParameterCount& actual, Label* done,
-                      bool* definitely_mismatches, InvokeFlag flag,
-                      const CallWrapper& call_wrapper);
+                      bool* definitely_mismatches, InvokeFlag flag);
 
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
   void InNewSpace(Register object, Register scratch,
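
The deleted RecordWriteCodeEntryField existed only because the old field held an untagged inner pointer that the generic write barrier could not interpret. With a properly tagged pointer, the stock RecordWriteField barrier used throughout this diff suffices. A toy model (assumed names, not V8's implementation) of what that generic barrier records:

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// Toy model, not V8 code: type and member names here are assumptions for
// illustration. The generic barrier only needs (object, field offset) to
// find the slot again, which is all RecordWriteField receives in the diff.
// OMIT_REMEMBERED_SET is passed because Code objects live in old space, so
// no old-to-new remembered-set entry is required; only the incremental
// marker has to be told about the store.
struct IncrementalMarker {
  std::vector<std::pair<std::intptr_t, int>> dirty_slots;  // (object, offset)

  void RecordWriteField(std::intptr_t object, int offset) {
    // Revisit this slot during marking so the newly stored Code object is
    // treated as reachable.
    dirty_slots.push_back({object, offset});
  }
};
```
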