Commit c96db961 authored by Jake Hughes, committed by Commit Bot

Add flag for enabling unconditional write barrier

Whether a store requires a write barrier depends on several invariants
within V8, and some build flags can break these invariants. In
particular, enable_single_generation cannot be used with incremental
marking, because marking barriers are omitted in places where the
object is assumed to be allocated in the young generation.
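
To make the broken invariant concrete, here is a minimal,
self-contained sketch (hypothetical types and helpers, not V8's code)
contrasting a barriered store with one whose barrier was elided under
the young-generation assumption:

  // Hypothetical types/helpers, not V8's actual code.
  #include <cstdint>

  struct HeapObject { std::uintptr_t address = 0; };

  bool IncrementalMarkingActive() { return true; }
  void MarkingBarrier(HeapObject /*host*/, HeapObject /*value*/) {
    // Incremental marking: mark `value` so the concurrent marker sees it.
  }

  void StoreWithBarrier(HeapObject host, HeapObject* slot, HeapObject value) {
    *slot = value;
    if (IncrementalMarkingActive()) MarkingBarrier(host, value);
  }

  void StoreAssumingYoungHost(HeapObject /*host*/, HeapObject* slot,
                              HeapObject value) {
    // Barrier elided on the assumption "host was just allocated, so it
    // lives in the young generation". With v8_enable_single_generation
    // the allocation is redirected, the assumption is false, and the
    // marking barrier is silently skipped during incremental marking.
    *slot = value;
  }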

This CL introduces a new flag, enable_unconditional_write_barriers,
which specifies that full write barriers should always be used. Its
main purpose is to support single-generation GC with incremental
marking, but it can also serve as a debugging tool for catching missed
write barriers.
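
For example, to build single-generation GC with incremental marking,
one would set both GN args in args.gn (v8_enable_single_generation
already exists; v8_enable_unconditional_write_barriers is added by this
CL):

  v8_enable_single_generation = true
  v8_enable_unconditional_write_barriers = true

The BUILD.gn assert added below enforces exactly this: single
generation requires either unconditional write barriers or disabling
write barriers entirely, and the latter also disables incremental
marking.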

Bug: v8:10614
Change-Id: I3ab640436bcefc118c9c5c34765421cb9ea4896f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2270546
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Jake Hughes <jakehughes@google.com>
Cr-Commit-Position: refs/heads/master@{#68623}
parent b9c3d26b
@@ -214,6 +214,10 @@ declare_args() {
# heap has single generation.
v8_disable_write_barriers = false
# Ensure that write barriers are always used.
# Useful for debugging purposes.
v8_enable_unconditional_write_barriers = false
# Redirect allocation in young generation so that there will be
# only one single generation.
v8_enable_single_generation = ""
@@ -319,6 +323,9 @@ assert(
assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
"V8 Heap Sandbox requires pointer compression")
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
assert(!cppgc_enable_caged_heap || v8_current_cpu == "x64" ||
v8_current_cpu == "arm64",
"CppGC caged heap requires 64bit platforms")
@@ -326,6 +333,12 @@ assert(!cppgc_enable_caged_heap || v8_current_cpu == "x64" ||
assert(!cppgc_enable_young_generation || cppgc_enable_caged_heap,
"Young generation in CppGC requires caged heap")
if (v8_enable_single_generation == true) {
assert(
v8_enable_unconditional_write_barriers || v8_disable_write_barriers,
"Requires unconditional write barriers or none (which disables incremental marking)")
}
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
......
@@ -608,6 +608,11 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
DCHECK(CanBeTaggedPointer(rep));
......
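
The same override recurs in every backend's instruction selector below
(backends without pointer compression check CanBeTaggedPointer instead
of CanBeTaggedOrCompressedPointer). The combined effect of the two
flags on a store reduces to the following decision; a simplified
sketch, not the actual selector code (note the BUILD.gn asserts above
already forbid enabling both flags at once):

  enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier /* ... */ };

  WriteBarrierKind EffectiveBarrier(WriteBarrierKind requested,
                                    bool taggable_rep,
                                    bool unconditional_flag,
                                    bool disable_flag) {
    // enable_unconditional_write_barriers upgrades every store of a
    // (possibly compressed) tagged value to a full barrier...
    if (unconditional_flag && taggable_rep) requested = kFullWriteBarrier;
    // ...while disable_write_barriers suppresses whatever was requested.
    return disable_flag ? kNoWriteBarrier : requested;
  }
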
@@ -760,6 +760,11 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
// TODO(arm64): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
......
@@ -482,6 +482,10 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
DCHECK(CanBeTaggedPointer(rep));
......
@@ -410,6 +410,10 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
// TODO(mips): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
......
@@ -483,6 +483,10 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
// TODO(mips): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
......
@@ -278,6 +278,11 @@ void InstructionSelector::VisitStore(Node* node) {
rep = store_rep.representation();
}
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
DCHECK(CanBeTaggedOrCompressedPointer(rep));
......
@@ -819,6 +819,11 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
VisitGeneralStore(this, node, rep, write_barrier_kind);
}
......
@@ -408,6 +408,11 @@ void InstructionSelector::VisitStore(Node* node) {
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(store_rep.representation())) {
write_barrier_kind = kFullWriteBarrier;
}
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
DCHECK(CanBeTaggedOrCompressedPointer(store_rep.representation()));
......
@@ -350,6 +350,16 @@ DEFINE_BOOL_READONLY(disable_write_barriers, V8_DISABLE_WRITE_BARRIERS_BOOL,
// Disable incremental marking barriers
DEFINE_NEG_IMPLICATION(disable_write_barriers, incremental_marking)
#ifdef V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS
#define V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS_BOOL true
#else
#define V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS_BOOL false
#endif
DEFINE_BOOL_READONLY(enable_unconditional_write_barriers,
V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS_BOOL,
"always use full write barriers")
#ifdef V8_ENABLE_SINGLE_GENERATION
#define V8_GENERATION_BOOL true
#else
......
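
Since the flag is declared with DEFINE_BOOL_READONLY, its value is
fixed at build time. Conceptually (a simplification of V8's flag
machinery, not the macro's literal expansion) the definition behaves
like:

  // Read-only flags act as compile-time constants, so the new checks
  // in the instruction selectors fold away entirely in default builds.
  constexpr bool FLAG_enable_unconditional_write_barriers =
      V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS_BOOL;
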
@@ -1543,11 +1543,16 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation);
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
Heap::CopyBlock(raw_clone.address(), source->address(), object_size);
Handle<JSObject> clone(JSObject::cast(raw_clone), isolate());
if (FLAG_enable_unconditional_write_barriers) {
// By default, we shouldn't need to update the write barrier here, as the
// clone will be allocated in new space.
const ObjectSlot start(raw_clone.address());
const ObjectSlot end(raw_clone.address() + object_size);
isolate()->heap()->WriteBarrierForRange(raw_clone, start, end);
}
if (!site.is_null()) {
AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
Object(raw_clone.ptr() + object_size));
......
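
Here WriteBarrierForRange re-applies the barrier to every slot of the
freshly copied object, since under unconditional barriers the "clone is
in new space" shortcut no longer justifies skipping it. A conceptual
sketch of what a range barrier does (hypothetical types, not V8's
implementation):

  struct Slot { void* value = nullptr; };

  void SlotWriteBarrier(void* /*host*/, Slot* /*slot*/) {
    // Record/mark the pointer as if it had just been stored.
  }

  void WriteBarrierForRange(void* host, Slot* start, Slot* end) {
    // Treat each pointer field in [start, end) as a fresh store.
    for (Slot* s = start; s != end; ++s) SlotWriteBarrier(host, s);
  }
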
@@ -311,6 +311,9 @@
#ifdef V8_DISABLE_WRITE_BARRIERS
#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value)
#elif V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS
#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value) \
WRITE_BARRIER(object, offset, value)
#else
#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value) \
do { \
@@ -323,6 +326,9 @@
#ifdef V8_DISABLE_WRITE_BARRIERS
#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode)
#elif V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS
#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
WRITE_BARRIER(object, offset, value)
#else
#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
do { \
@@ -339,6 +345,9 @@
#ifdef V8_DISABLE_WRITE_BARRIERS
#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode)
#elif V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS
#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
WRITE_BARRIER(object, offset, value)
#else
#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
do { \
......
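
Each macro family above follows the same three-way selection: barriers
compiled out, barriers forced to the full unconditional form, or the
original guarded form. The pattern in miniature (hypothetical names
mirroring the structure above):

  #if defined(MY_DISABLE_BARRIERS)
  #define MY_BARRIER(object, offset, value)  // compiled out entirely
  #elif defined(MY_UNCONDITIONAL_BARRIERS)
  #define MY_BARRIER(object, offset, value) FullBarrier(object, offset, value)
  #else
  #define MY_BARRIER(object, offset, value)                                \
    do {                                                                   \
      if (NeedsBarrier(object, value)) FullBarrier(object, offset, value); \
    } while (false)
  #endif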