Commit c5d41ae6 authored by Camillo Bruni, committed by V8 LUCI CQ

[sparkplug][arm][arm64][ia32] Callee-saved registers for RecordWrite

Migrate the remaining architectures to the new callee save RecordWrite
approach.

Bug: v8:11420
Change-Id: I9da56cbb5bf8c6ca4bcc7c0e2a1233e2f5ef587c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2944844
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75075}
parent 86952023
...@@ -349,6 +349,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset, ...@@ -349,6 +349,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset, int offset,
Register value) { Register value) {
DCHECK(!AreAliased(target, value));
__ str(value, FieldMemOperand(target, offset)); __ str(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved, __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
SaveFPRegsMode::kIgnore); SaveFPRegsMode::kIgnore);
......
...@@ -713,8 +713,6 @@ void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() { ...@@ -713,8 +713,6 @@ void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
} }
void BaselineCompiler::VisitStaContextSlot() { void BaselineCompiler::VisitStaContextSlot() {
// TODO(cbruni): enable on all platforms
#if V8_TARGET_ARCH_X64
Register value = WriteBarrierDescriptor::ValueRegister(); Register value = WriteBarrierDescriptor::ValueRegister();
Register context = WriteBarrierDescriptor::ObjectRegister(); Register context = WriteBarrierDescriptor::ObjectRegister();
DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister)); DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
...@@ -724,37 +722,17 @@ void BaselineCompiler::VisitStaContextSlot() { ...@@ -724,37 +722,17 @@ void BaselineCompiler::VisitStaContextSlot() {
for (; depth > 0; --depth) { for (; depth > 0; --depth) {
__ LoadTaggedPointerField(context, context, Context::kPreviousOffset); __ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
} }
#else
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register context = scratch_scope.AcquireScratch();
LoadRegister(context, 0);
int depth = Uint(2);
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
Register value = scratch_scope.AcquireScratch();
__ Move(value, kInterpreterAccumulatorRegister);
#endif // V8_TARGET_ARCH_X64
__ StoreTaggedFieldWithWriteBarrier( __ StoreTaggedFieldWithWriteBarrier(
context, Context::OffsetOfElementAt(iterator().GetIndexOperand(1)), context, Context::OffsetOfElementAt(iterator().GetIndexOperand(1)),
value); value);
} }
void BaselineCompiler::VisitStaCurrentContextSlot() { void BaselineCompiler::VisitStaCurrentContextSlot() {
// TODO(cbruni): enable on all platforms
#if V8_TARGET_ARCH_X64
Register value = WriteBarrierDescriptor::ValueRegister(); Register value = WriteBarrierDescriptor::ValueRegister();
Register context = WriteBarrierDescriptor::ObjectRegister(); Register context = WriteBarrierDescriptor::ObjectRegister();
DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister)); DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
__ Move(value, kInterpreterAccumulatorRegister); __ Move(value, kInterpreterAccumulatorRegister);
__ LoadContext(context); __ LoadContext(context);
#else
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register context = scratch_scope.AcquireScratch();
__ LoadContext(context);
Register value = scratch_scope.AcquireScratch();
__ Move(value, kInterpreterAccumulatorRegister);
#endif // V8_TARGET_ARCH_X64
__ StoreTaggedFieldWithWriteBarrier( __ StoreTaggedFieldWithWriteBarrier(
context, Context::OffsetOfElementAt(Index(0)), value); context, Context::OffsetOfElementAt(Index(0)), value);
} }
...@@ -879,8 +857,6 @@ void BaselineCompiler::VisitLdaModuleVariable() { ...@@ -879,8 +857,6 @@ void BaselineCompiler::VisitLdaModuleVariable() {
} }
void BaselineCompiler::VisitStaModuleVariable() { void BaselineCompiler::VisitStaModuleVariable() {
// TODO(cbruni): enable on all platforms
#if V8_TARGET_ARCH_X64
int cell_index = Int(0); int cell_index = Int(0);
if (V8_UNLIKELY(cell_index < 0)) { if (V8_UNLIKELY(cell_index < 0)) {
// Not supported (probably never). // Not supported (probably never).
...@@ -906,33 +882,6 @@ void BaselineCompiler::VisitStaModuleVariable() { ...@@ -906,33 +882,6 @@ void BaselineCompiler::VisitStaModuleVariable() {
cell_index -= 1; cell_index -= 1;
__ LoadFixedArrayElement(scratch, scratch, cell_index); __ LoadFixedArrayElement(scratch, scratch, cell_index);
__ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset, value); __ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset, value);
#else // V8_TARGET_ARCH_X64
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register scratch = scratch_scope.AcquireScratch();
__ LoadContext(scratch);
int depth = Uint(1);
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(scratch, scratch, Context::kPreviousOffset);
}
__ LoadTaggedPointerField(scratch, scratch, Context::kExtensionOffset);
int cell_index = Int(0);
if (cell_index > 0) {
__ LoadTaggedPointerField(scratch, scratch,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
__ LoadFixedArrayElement(scratch, scratch, cell_index);
SaveAccumulatorScope save_accumulator(&basm_);
__ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset,
kInterpreterAccumulatorRegister);
} else {
// Not supported (probably never).
CallRuntime(Runtime::kAbort,
Smi::FromInt(static_cast<int>(
AbortReason::kUnsupportedModuleOperation)));
__ Trap();
}
#endif // V8_TARGET_ARCH_X64
} }
void BaselineCompiler::VisitStaNamedProperty() { void BaselineCompiler::VisitStaNamedProperty() {
......
...@@ -333,12 +333,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ...@@ -333,12 +333,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r1 : the JSGeneratorObject to resume // -- r1 : the JSGeneratorObject to resume
// -- lr : return address // -- lr : return address
// ----------------------------------- // -----------------------------------
__ AssertGeneratorObject(r1);
// Store input value into generator object. // Store input value into generator object.
__ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset)); __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0,
kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore); kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Check that r1 is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(r1);
// Load suspended function and context. // Load suspended function and context.
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset)); __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
...@@ -793,6 +793,7 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { ...@@ -793,6 +793,7 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code, Register optimized_code,
Register closure) { Register closure) {
DCHECK(!AreAliased(optimized_code, closure));
// Store code entry in the closure. // Store code entry in the closure.
__ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); __ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code, __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
...@@ -999,6 +1000,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -999,6 +1000,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
static void LoadOptimizationStateAndJumpIfNeedsProcessing( static void LoadOptimizationStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector, MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_marker) { Label* has_optimized_code_or_marker) {
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ RecordComment("[ Check optimization state"); __ RecordComment("[ Check optimization state");
__ ldr(optimization_state, __ ldr(optimization_state,
...@@ -1014,6 +1016,7 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing( ...@@ -1014,6 +1016,7 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing(
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state, MacroAssembler* masm, Register optimization_state,
Register feedback_vector) { Register feedback_vector) {
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
// Check if optimized code is available // Check if optimized code is available
__ tst( __ tst(
......
...@@ -437,13 +437,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ...@@ -437,13 +437,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x1 : the JSGeneratorObject to resume // -- x1 : the JSGeneratorObject to resume
// -- lr : return address // -- lr : return address
// ----------------------------------- // -----------------------------------
__ AssertGeneratorObject(x1);
// Store input value into generator object. // Store input value into generator object.
__ StoreTaggedField( __ StoreTaggedField(
x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset)); x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0,
kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore); kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Check that x1 is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(x1);
// Load suspended function and context. // Load suspended function and context.
__ LoadTaggedPointerField( __ LoadTaggedPointerField(
...@@ -963,6 +964,7 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { ...@@ -963,6 +964,7 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code, Register optimized_code,
Register closure) { Register closure) {
DCHECK(!AreAliased(optimized_code, closure));
// Store code entry in the closure. // Store code entry in the closure.
__ StoreTaggedField(optimized_code, __ StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset)); FieldMemOperand(closure, JSFunction::kCodeOffset));
...@@ -1179,6 +1181,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -1179,6 +1181,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
static void LoadOptimizationStateAndJumpIfNeedsProcessing( static void LoadOptimizationStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector, MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_marker) { Label* has_optimized_code_or_marker) {
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ RecordComment("[ Check optimization state"); __ RecordComment("[ Check optimization state");
__ Ldr(optimization_state, __ Ldr(optimization_state,
...@@ -1194,6 +1197,7 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing( ...@@ -1194,6 +1197,7 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing(
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state, MacroAssembler* masm, Register optimization_state,
Register feedback_vector) { Register feedback_vector) {
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
// Check if optimized code is available // Check if optimized code is available
__ TestAndBranchIfAllClear( __ TestAndBranchIfAllClear(
......
...@@ -592,12 +592,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ...@@ -592,12 +592,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- edx : the JSGeneratorObject to resume // -- edx : the JSGeneratorObject to resume
// -- esp[0] : return address // -- esp[0] : return address
// ----------------------------------- // -----------------------------------
__ AssertGeneratorObject(edx);
// Store input value into generator object. // Store input value into generator object.
__ mov(FieldOperand(edx, JSGeneratorObject::kInputOrDebugPosOffset), eax); __ mov(FieldOperand(edx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
__ RecordWriteField(edx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx, Register object = WriteBarrierDescriptor::ObjectRegister();
__ mov(object, edx);
__ RecordWriteField(object, JSGeneratorObject::kInputOrDebugPosOffset, eax,
WriteBarrierDescriptor::SlotAddressRegister(),
SaveFPRegsMode::kIgnore); SaveFPRegsMode::kIgnore);
// Check that edx is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(edx);
// Load suspended function and context. // Load suspended function and context.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset)); __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
...@@ -731,12 +734,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ...@@ -731,12 +734,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code, Register optimized_code,
Register closure, Register closure,
Register scratch1, Register value,
Register scratch2) { Register slot_address) {
// Store the optimized code in the closure. // Store the optimized code in the closure.
__ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code); __ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. __ mov(value, optimized_code); // Write barrier clobbers slot_address below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, __ RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit, SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
SmiCheck::kOmit); SmiCheck::kOmit);
} }
...@@ -816,9 +819,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm, ...@@ -816,9 +819,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Optimized code is good, get it into the closure and link the closure // Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code. // into the optimized functions list, then tail call the optimized code.
__ Push(optimized_code_entry);
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx, ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx,
eax); ecx);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch"); static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ Pop(optimized_code_entry);
__ LoadCodeObjectEntry(ecx, optimized_code_entry); __ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ Pop(edx); __ Pop(edx);
__ Pop(eax); __ Pop(eax);
...@@ -1255,9 +1260,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ...@@ -1255,9 +1260,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ecx, BaselineData::kBaselineCodeOffset)); __ mov(ecx, FieldOperand(ecx, BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch"); static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ push(edx); // Spill. __ push(edx); // Spill.
__ push(ecx);
__ Push(xmm0, eax); // Save the argument count (currently in xmm0). __ Push(xmm0, eax); // Save the argument count (currently in xmm0).
ReplaceClosureCodeWithOptimizedCode(masm, ecx, closure, eax, edx); ReplaceClosureCodeWithOptimizedCode(masm, ecx, closure, eax, ecx);
__ pop(eax); // Restore the argument count. __ pop(eax); // Restore the argument count.
__ pop(ecx);
__ pop(edx); __ pop(edx);
__ JumpCodeObject(ecx); __ JumpCodeObject(ecx);
......
...@@ -681,7 +681,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ...@@ -681,7 +681,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- rdx : the JSGeneratorObject to resume // -- rdx : the JSGeneratorObject to resume
// -- rsp[0] : return address // -- rsp[0] : return address
// ----------------------------------- // -----------------------------------
__ AssertGeneratorObject(rdx);
// Store input value into generator object. // Store input value into generator object.
__ StoreTaggedField( __ StoreTaggedField(
...@@ -691,6 +690,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ...@@ -691,6 +690,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ RecordWriteField(object, JSGeneratorObject::kInputOrDebugPosOffset, rax, __ RecordWriteField(object, JSGeneratorObject::kInputOrDebugPosOffset, rax,
WriteBarrierDescriptor::SlotAddressRegister(), WriteBarrierDescriptor::SlotAddressRegister(),
SaveFPRegsMode::kIgnore); SaveFPRegsMode::kIgnore);
// Check that rdx is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(rdx);
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg; Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;
......
...@@ -38,8 +38,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>:: ...@@ -38,8 +38,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static // static
constexpr auto WriteBarrierDescriptor::registers() { constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == r0); return RegisterArray(r1, r5, r4, r2, r0);
return RegisterArray(r0, r1, r2, r3, r4);
} }
// static // static
......
...@@ -672,6 +672,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, ...@@ -672,6 +672,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok; Label ok;
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
DCHECK(!AreAliased(object, value, scratch));
add(scratch, object, Operand(offset - kHeapObjectTag)); add(scratch, object, Operand(offset - kHeapObjectTag));
tst(scratch, Operand(kPointerSize - 1)); tst(scratch, Operand(kPointerSize - 1));
b(eq, &ok); b(eq, &ok);
...@@ -810,13 +811,12 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, ...@@ -810,13 +811,12 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
SmiCheck smi_check) { SmiCheck smi_check) {
DCHECK(!AreAliased(object, value)); DCHECK(!AreAliased(object, value));
if (FLAG_debug_code) { if (FLAG_debug_code) {
{
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
DCHECK(!AreAliased(object, value, scratch));
add(scratch, object, offset); add(scratch, object, offset);
ldr(scratch, MemOperand(scratch)); ldr(scratch, MemOperand(scratch));
cmp(scratch, value); cmp(scratch, value);
}
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite); Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
} }
...@@ -843,12 +843,18 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, ...@@ -843,12 +843,18 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (lr_status == kLRHasNotBeenSaved) { if (lr_status == kLRHasNotBeenSaved) {
push(lr); push(lr);
} }
CallRecordWriteStubSaveRegisters(object, offset, remembered_set_action,
fp_mode); Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
DCHECK(!AreAliased(object, value, slot_address));
DCHECK(!offset.IsRegister());
add(slot_address, object, offset);
CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) { if (lr_status == kLRHasNotBeenSaved) {
pop(lr); pop(lr);
} }
if (FLAG_debug_code) Move(slot_address, Operand(kZapValue));
bind(&done); bind(&done);
} }
...@@ -2573,6 +2579,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc, ...@@ -2573,6 +2579,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Label* condition_met) { Label* condition_met) {
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
DCHECK(!AreAliased(object, scratch));
DCHECK(cc == eq || cc == ne); DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits); Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
......
...@@ -38,8 +38,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>:: ...@@ -38,8 +38,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static // static
constexpr auto WriteBarrierDescriptor::registers() { constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == x0); return RegisterArray(x1, x5, x4, x2, x0, x3);
return RegisterArray(x0, x1, x2, x3, x4);
} }
// static // static
......
...@@ -2921,6 +2921,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, ...@@ -2921,6 +2921,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok; Label ok;
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX(); Register scratch = temps.AcquireX();
DCHECK(!AreAliased(object, value, scratch));
Add(scratch, object, offset - kHeapObjectTag); Add(scratch, object, offset - kHeapObjectTag);
Tst(scratch, kTaggedSize - 1); Tst(scratch, kTaggedSize - 1);
B(eq, &ok); B(eq, &ok);
...@@ -3059,7 +3060,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, ...@@ -3059,7 +3060,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (FLAG_debug_code) { if (FLAG_debug_code) {
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX(); Register temp = temps.AcquireX();
DCHECK(!AreAliased(object, value, temp));
Add(temp, object, offset); Add(temp, object, offset);
LoadTaggedPointerField(temp, MemOperand(temp)); LoadTaggedPointerField(temp, MemOperand(temp));
Cmp(temp, value); Cmp(temp, value);
...@@ -3090,11 +3091,16 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, ...@@ -3090,11 +3091,16 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (lr_status == kLRHasNotBeenSaved) { if (lr_status == kLRHasNotBeenSaved) {
Push<TurboAssembler::kSignLR>(padreg, lr); Push<TurboAssembler::kSignLR>(padreg, lr);
} }
CallRecordWriteStubSaveRegisters(object, offset, remembered_set_action, Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
fp_mode); DCHECK(!AreAliased(object, slot_address, value));
// TODO(cbruni): Turn offset into int.
DCHECK(offset.IsImmediate());
Add(slot_address, object, offset);
CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) { if (lr_status == kLRHasNotBeenSaved) {
Pop<TurboAssembler::kAuthLR>(lr, padreg); Pop<TurboAssembler::kAuthLR>(lr, padreg);
} }
if (FLAG_debug_code) Mov(slot_address, Operand(kZapValue));
Bind(&done); Bind(&done);
} }
......
...@@ -32,8 +32,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>:: ...@@ -32,8 +32,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static // static
constexpr auto WriteBarrierDescriptor::registers() { constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(esi == kContextRegister); return RegisterArray(edi, ecx, edx, esi, kReturnRegister0);
return RegisterArray(ecx, edx, esi, edi, kReturnRegister0);
} }
// static // static
......
...@@ -363,7 +363,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, ...@@ -363,7 +363,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
} }
void MacroAssembler::RecordWriteField(Register object, int offset, void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst, Register value, Register slot_address,
SaveFPRegsMode save_fp, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action, RememberedSetAction remembered_set_action,
SmiCheck smi_check) { SmiCheck smi_check) {
...@@ -380,16 +380,16 @@ void MacroAssembler::RecordWriteField(Register object, int offset, ...@@ -380,16 +380,16 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so so offset must be a multiple of kTaggedSize. // of the object, so so offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize)); DCHECK(IsAligned(offset, kTaggedSize));
lea(dst, FieldOperand(object, offset)); lea(slot_address, FieldOperand(object, offset));
if (FLAG_debug_code) { if (FLAG_debug_code) {
Label ok; Label ok;
test_b(dst, Immediate(kTaggedSize - 1)); test_b(slot_address, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear); j(zero, &ok, Label::kNear);
int3(); int3();
bind(&ok); bind(&ok);
} }
RecordWrite(object, dst, value, save_fp, remembered_set_action, RecordWrite(object, slot_address, value, save_fp, remembered_set_action,
SmiCheck::kOmit); SmiCheck::kOmit);
bind(&done); bind(&done);
...@@ -398,7 +398,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, ...@@ -398,7 +398,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// turned on to provoke errors. // turned on to provoke errors.
if (FLAG_debug_code) { if (FLAG_debug_code) {
mov(value, Immediate(bit_cast<int32_t>(kZapValue))); mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
mov(dst, Immediate(bit_cast<int32_t>(kZapValue))); mov(slot_address, Immediate(bit_cast<int32_t>(kZapValue)));
} }
} }
...@@ -498,11 +498,11 @@ void TurboAssembler::CallRecordWriteStub( ...@@ -498,11 +498,11 @@ void TurboAssembler::CallRecordWriteStub(
} }
} }
void MacroAssembler::RecordWrite(Register object, Register address, void MacroAssembler::RecordWrite(Register object, Register slot_address,
Register value, SaveFPRegsMode fp_mode, Register value, SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action, RememberedSetAction remembered_set_action,
SmiCheck smi_check) { SmiCheck smi_check) {
DCHECK(!AreAliased(object, value, address)); DCHECK(!AreAliased(object, value, slot_address));
AssertNotSmi(object); AssertNotSmi(object);
if ((remembered_set_action == RememberedSetAction::kOmit && if ((remembered_set_action == RememberedSetAction::kOmit &&
...@@ -513,7 +513,7 @@ void MacroAssembler::RecordWrite(Register object, Register address, ...@@ -513,7 +513,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (FLAG_debug_code) { if (FLAG_debug_code) {
Label ok; Label ok;
cmp(value, Operand(address, 0)); cmp(value, Operand(slot_address, 0));
j(equal, &ok, Label::kNear); j(equal, &ok, Label::kNear);
int3(); int3();
bind(&ok); bind(&ok);
...@@ -536,16 +536,16 @@ void MacroAssembler::RecordWrite(Register object, Register address, ...@@ -536,16 +536,16 @@ void MacroAssembler::RecordWrite(Register object, Register address,
value, // Used as scratch. value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done, MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear); Label::kNear);
RecordComment("CheckPageFlag]");
CallRecordWriteStubSaveRegisters(object, address, remembered_set_action, CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
fp_mode);
bind(&done); bind(&done);
// Clobber clobbered registers when running with the debug-code flag // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (FLAG_debug_code) { if (FLAG_debug_code) {
mov(address, Immediate(bit_cast<int32_t>(kZapValue))); mov(slot_address, Immediate(bit_cast<int32_t>(kZapValue)));
mov(value, Immediate(bit_cast<int32_t>(kZapValue))); mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
} }
} }
......
...@@ -132,8 +132,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::Verify( ...@@ -132,8 +132,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::Verify(
// static // static
void WriteBarrierDescriptor::Verify(CallInterfaceDescriptorData* data) { void WriteBarrierDescriptor::Verify(CallInterfaceDescriptorData* data) {
DCHECK(!AreAliased(ObjectRegister(), SlotAddressRegister(), ValueRegister())); DCHECK(!AreAliased(ObjectRegister(), SlotAddressRegister(), ValueRegister()));
// TODO(cbruni): enable on all platforms.
#if V8_TARGET_ARCH_X64
// The default parameters should not clobber vital registers in order to // The default parameters should not clobber vital registers in order to
// reduce code size: // reduce code size:
DCHECK(!AreAliased(ObjectRegister(), kContextRegister, DCHECK(!AreAliased(ObjectRegister(), kContextRegister,
...@@ -142,9 +140,9 @@ void WriteBarrierDescriptor::Verify(CallInterfaceDescriptorData* data) { ...@@ -142,9 +140,9 @@ void WriteBarrierDescriptor::Verify(CallInterfaceDescriptorData* data) {
kInterpreterAccumulatorRegister)); kInterpreterAccumulatorRegister));
DCHECK(!AreAliased(ValueRegister(), kContextRegister, DCHECK(!AreAliased(ValueRegister(), kContextRegister,
kInterpreterAccumulatorRegister)); kInterpreterAccumulatorRegister));
DCHECK(!AreAliased(SlotAddressRegister(), kJavaScriptCallNewTargetRegister));
// Coincidental: to make calling from various builtins easier. // Coincidental: to make calling from various builtins easier.
DCHECK_EQ(ObjectRegister(), kJSFunctionRegister); DCHECK_EQ(ObjectRegister(), kJSFunctionRegister);
#endif
// We need a certain set of registers by default: // We need a certain set of registers by default:
RegList allocatable_regs = data->allocatable_registers(); RegList allocatable_regs = data->allocatable_registers();
DCHECK(allocatable_regs | kContextRegister.bit()); DCHECK(allocatable_regs | kContextRegister.bit());
......
...@@ -1023,10 +1023,7 @@ class WriteBarrierDescriptor final ...@@ -1023,10 +1023,7 @@ class WriteBarrierDescriptor final
DECLARE_DESCRIPTOR(WriteBarrierDescriptor) DECLARE_DESCRIPTOR(WriteBarrierDescriptor)
static constexpr auto registers(); static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true; static constexpr bool kRestrictAllocatableRegisters = true;
#if V8_TARGET_ARCH_X64
// TODO(cbruni): Extend to all platforms.
static constexpr bool kCalleeSaveRegisters = true; static constexpr bool kCalleeSaveRegisters = true;
#endif
static constexpr inline Register ObjectRegister(); static constexpr inline Register ObjectRegister();
static constexpr inline Register SlotAddressRegister(); static constexpr inline Register SlotAddressRegister();
// A temporary register used in helpers. // A temporary register used in helpers.
......
...@@ -21,7 +21,6 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() { ...@@ -21,7 +21,6 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
// static // static
constexpr auto WriteBarrierDescriptor::registers() { constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == v0);
return RegisterArray(a0, a1, a2, a3, kReturnRegister0); return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
} }
......
...@@ -21,7 +21,6 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() { ...@@ -21,7 +21,6 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
// static // static
constexpr auto WriteBarrierDescriptor::registers() { constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == v0);
return RegisterArray(a0, a1, a2, a3, kReturnRegister0); return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
} }
......
...@@ -38,7 +38,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>:: ...@@ -38,7 +38,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static // static
constexpr auto WriteBarrierDescriptor::registers() { constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == r3);
return RegisterArray(r3, r4, r5, r6, r7); return RegisterArray(r3, r4, r5, r6, r7);
} }
......
...@@ -39,7 +39,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>:: ...@@ -39,7 +39,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static // static
constexpr auto WriteBarrierDescriptor::registers() { constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == a0);
return RegisterArray(a0, a1, a2, a3); return RegisterArray(a0, a1, a2, a3);
} }
......
...@@ -38,7 +38,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>:: ...@@ -38,7 +38,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static // static
constexpr auto WriteBarrierDescriptor::registers() { constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == r2);
return RegisterArray(r2, r3, r4, r5, r6); return RegisterArray(r2, r3, r4, r5, r6);
} }
......
...@@ -3251,8 +3251,6 @@ void CodeGenerator::AssembleConstructFrame() { ...@@ -3251,8 +3251,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ PushCPURegList(saves_fp); __ PushCPURegList(saves_fp);
// Save registers. // Save registers.
DCHECK_IMPLIES(!saves.IsEmpty(),
saves.list() == CPURegList::GetCalleeSaved().list());
__ PushCPURegList<TurboAssembler::kSignLR>(saves); __ PushCPURegList<TurboAssembler::kSignLR>(saves);
if (returns != 0) { if (returns != 0) {
......
...@@ -4673,12 +4673,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { ...@@ -4673,12 +4673,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ PushReturnAddressFrom(scratch_reg); __ PushReturnAddressFrom(scratch_reg);
__ Ret(); __ Ret();
} else if (additional_pop_count->IsImmediate()) { } else if (additional_pop_count->IsImmediate()) {
Register scratch_reg = ecx;
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
int additional_count = g.ToConstant(additional_pop_count).ToInt32(); int additional_count = g.ToConstant(additional_pop_count).ToInt32();
size_t pop_size = (parameter_slots + additional_count) * kSystemPointerSize; size_t pop_size = (parameter_slots + additional_count) * kSystemPointerSize;
if (is_uint16(pop_size)) {
// Avoid the additional scratch register, it might clobber the
// CalleeSavedRegisters.
__ ret(static_cast<int>(pop_size));
} else {
Register scratch_reg = ecx;
DCHECK_EQ(0u,
call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max())); CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), scratch_reg); __ Ret(static_cast<int>(pop_size), scratch_reg);
}
} else { } else {
Register pop_reg = g.ToRegister(additional_pop_count); Register pop_reg = g.ToRegister(additional_pop_count);
Register scratch_reg = pop_reg == ecx ? edx : ecx; Register scratch_reg = pop_reg == ecx ? edx : ecx;
......
...@@ -493,13 +493,8 @@ CallDescriptor* Linkage::GetStubCallDescriptor( ...@@ -493,13 +493,8 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
RegList allocatable_registers = descriptor.allocatable_registers(); RegList allocatable_registers = descriptor.allocatable_registers();
RegList callee_saved_registers = kNoCalleeSaved; RegList callee_saved_registers = kNoCalleeSaved;
if (descriptor.CalleeSaveRegisters()) { if (descriptor.CalleeSaveRegisters()) {
#if V8_TARGET_ARCH_X64
// TODO(cbruni): Extend to all architectures.
callee_saved_registers = allocatable_registers; callee_saved_registers = allocatable_registers;
DCHECK(callee_saved_registers); DCHECK(callee_saved_registers);
#else
UNREACHABLE();
#endif
} }
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type); LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
return zone->New<CallDescriptor>( // -- return zone->New<CallDescriptor>( // --
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment