Commit afe19ad9 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Re-structure write barrier slow path

This CL re-structures the write barrier slow path in order to prepare
for adding the shared write barrier. Behavior remains the same in this
CL; only the code structure changes a bit (e.g. the branch for when
marking is off was moved up to the IsMarking() check).

Bug: v8:13018
Change-Id: I991f896abb88e0c85de3123fa67d8f47282f632d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3771840
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81836}
parent 489af94a
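
For orientation, here is a minimal plain-C++ sketch of the dispatch structure this CL introduces. It is illustrative only: the real code in the diff below builds a CodeStubAssembler graph, and the helpers here (the `marking` flag, `IsYoung`, and the two barrier stand-ins) only mimic the shape of that code; none of them is V8's actual API.

// Sketch: plain-C++ model of the restructured WriteBarrier() dispatch.
#include <cstdio>

static bool marking = true;  // models IsMarking()
static bool IsYoung(int generation) { return generation == 0; }

static void InsertIntoRememberedSet() { std::puts("old-to-new slot recorded"); }
static void IncrementalMarkingBarrier() { std::puts("value pushed to marker"); }

static void WriteBarrier(int object_gen, int value_gen) {
  if (!marking) {
    // marking_is_off: the code-generator fast path already established that
    // this store is interesting, so go straight to the remembered set.
    InsertIntoRememberedSet();
    return;
  }
  // marking_is_on -> WriteBarrierDuringMarking(): every store reaches the
  // slow path during marking, so test for an old-to-new reference first...
  if (IsYoung(value_gen) && !IsYoung(object_gen)) {
    InsertIntoRememberedSet();
  }
  // ...then always run the incremental marking barrier.
  IncrementalMarkingBarrier();
}

int main() {
  WriteBarrier(/*object_gen=*/1, /*value_gen=*/0);  // old object, young value
  return 0;
}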
@@ -239,70 +239,78 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
   }
 
   void WriteBarrier(SaveFPRegsMode fp_mode) {
-    Label incremental_wb(this), test_old_to_young_flags(this),
-        remembered_set_only(this), remembered_set_and_incremental_wb(this),
-        next(this);
+    Label marking_is_on(this), marking_is_off(this), next(this);
+
+    auto slot =
+        UncheckedParameter<IntPtrT>(WriteBarrierDescriptor::kSlotAddress);
+    Branch(IsMarking(), &marking_is_on, &marking_is_off);
 
+    BIND(&marking_is_off);
     // When incremental marking is not on, we skip cross generation pointer
     // checking here, because there are checks for
     // `kPointersFromHereAreInterestingMask` and
     // `kPointersToHereAreInterestingMask` in
     // `src/compiler/<arch>/code-generator-<arch>.cc` before calling this
     // stub, which serves as the cross generation checking.
-    auto slot =
-        UncheckedParameter<IntPtrT>(WriteBarrierDescriptor::kSlotAddress);
-    Branch(IsMarking(), &test_old_to_young_flags, &remembered_set_only);
+    GenerationalBarrierSlow(slot, &next, fp_mode);
 
-    BIND(&test_old_to_young_flags);
-    {
-      // TODO(ishell): do a new-space range check instead.
-      TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
-
-      // TODO(albertnetymk): Try to cache the page flag for value and
-      // object, instead of calling IsPageFlagSet each time.
-      TNode<BoolT> value_is_young =
-          IsPageFlagSet(value, MemoryChunk::kIsInYoungGenerationMask);
-      GotoIfNot(value_is_young, &incremental_wb);
-
-      TNode<IntPtrT> object = BitcastTaggedToWord(
-          UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
-      TNode<BoolT> object_is_young =
-          IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
-      Branch(object_is_young, &incremental_wb,
-             &remembered_set_and_incremental_wb);
-    }
+    BIND(&marking_is_on);
+    WriteBarrierDuringMarking(slot, &next, fp_mode);
 
-    BIND(&remembered_set_only);
-    {
-      TNode<IntPtrT> object = BitcastTaggedToWord(
-          UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
-      InsertIntoRememberedSet(object, slot, fp_mode);
-      Goto(&next);
-    }
+    BIND(&next);
+  }
 
-    BIND(&remembered_set_and_incremental_wb);
-    {
-      TNode<IntPtrT> object = BitcastTaggedToWord(
-          UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
-      InsertIntoRememberedSet(object, slot, fp_mode);
-      Goto(&incremental_wb);
-    }
+  void GenerationalBarrierSlow(TNode<IntPtrT> slot, Label* next,
+                               SaveFPRegsMode fp_mode) {
+    TNode<IntPtrT> object = BitcastTaggedToWord(
+        UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+    InsertIntoRememberedSet(object, slot, fp_mode);
+    Goto(next);
+  }
 
-    BIND(&incremental_wb);
-    {
-      TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
-      IncrementalWriteBarrier(slot, value, fp_mode);
-      Goto(&next);
-    }
+  void WriteBarrierDuringMarking(TNode<IntPtrT> slot, Label* next,
+                                 SaveFPRegsMode fp_mode) {
+    // When incremental marking is on, we need to perform generational and
+    // incremental marking write barrier.
+    Label incremental_barrier(this);
 
-    BIND(&next);
-  }
+    // During incremental marking we always reach this slow path, so we need to
+    // check whether this is a old-to-new reference before calling into the
+    // generational barrier slow path.
+    GenerationalBarrier(slot, &incremental_barrier, fp_mode);
 
-  void IncrementalWriteBarrier(SaveFPRegsMode fp_mode) {
-    auto slot =
-        UncheckedParameter<IntPtrT>(WriteBarrierDescriptor::kSlotAddress);
-    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
-    IncrementalWriteBarrier(slot, value, fp_mode);
+    BIND(&incremental_barrier);
+    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
+    IncrementalWriteBarrier(slot, value, fp_mode);
+    Goto(next);
+  }
+
+  void GenerationalBarrier(TNode<IntPtrT> slot, Label* next,
+                           SaveFPRegsMode fp_mode) {
+    Label generational_barrier_slow(this);
+
+    IsGenerationalBarrierNeeded(slot, &generational_barrier_slow, next);
+
+    BIND(&generational_barrier_slow);
+    GenerationalBarrierSlow(slot, next, fp_mode);
+  }
+
+  void IsGenerationalBarrierNeeded(TNode<IntPtrT> slot, Label* true_label,
+                                   Label* false_label) {
+    // TODO(ishell): do a new-space range check instead.
+    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
+
+    // TODO(albertnetymk): Try to cache the page flag for value and
+    // object, instead of calling IsPageFlagSet each time.
+    TNode<BoolT> value_is_young =
+        IsPageFlagSet(value, MemoryChunk::kIsInYoungGenerationMask);
+    GotoIfNot(value_is_young, false_label);
+
+    TNode<IntPtrT> object = BitcastTaggedToWord(
+        UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+    TNode<BoolT> object_is_young =
+        IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
+    Branch(object_is_young, false_label, true_label);
   }
 
   void IncrementalWriteBarrier(TNode<IntPtrT> slot, TNode<IntPtrT> value,
...
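
The new IsGenerationalBarrierNeeded() helper decides "old-to-new" by testing a per-page flag on both addresses via IsPageFlagSet(). A minimal sketch of that idea follows, assuming an illustrative page size and flag layout; the constants and the PageHeader struct are stand-ins, not V8's actual MemoryChunk layout.

#include <cstdint>

// Assumed constants for illustration only.
constexpr uintptr_t kPageSize = 256 * 1024;
constexpr uintptr_t kPageMask = ~(kPageSize - 1);
constexpr uintptr_t kIsInYoungGenerationMask = uintptr_t{1} << 1;

struct PageHeader {
  uintptr_t flags;  // flag word at the start of every page in this model
};

static bool IsPageFlagSet(uintptr_t addr, uintptr_t mask) {
  // Mask the address down to its page start and test the header flag word.
  auto* page = reinterpret_cast<const PageHeader*>(addr & kPageMask);
  return (page->flags & mask) != 0;
}

// The generational barrier is needed only for an old-to-new reference:
// the stored value sits on a young page while the holding object does not.
static bool IsGenerationalBarrierNeeded(uintptr_t object, uintptr_t value) {
  return IsPageFlagSet(value, kIsInYoungGenerationMask) &&
         !IsPageFlagSet(object, kIsInYoungGenerationMask);
}

This also shows why the TODO about caching the page flag exists: each check is a masked load from the page header, and the stub currently repeats it for both value and object.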