Commit 8d9eb6d3 authored by Igor Sheludko; committed by V8 LUCI CQ

[heap][cleanup] Add Code overload for GenerationalWriteBarrier

... to ensure that it'll not be triggered for Code objects which
are known to never be in new space.

This removes the need for having custom implementation of setters with
Code values - existing [CONDITIONAL_]WRITE_BARRIER macros will work
just fine.

Bug: v8:11879, v8:11880
Change-Id: I7ed70e51f9459040086dd4c67e61b11617dbdc24
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2964812
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75166}
parent 4e957771
......@@ -23,6 +23,8 @@ namespace internal {
// Defined in heap.cc.
V8_EXPORT_PRIVATE bool Heap_PageFlagsAreConsistent(HeapObject object);
// Returns true unless |value| is statically known to never be allocated in
// new space (per heap.cc, currently only Code objects qualify). Used only
// inside DCHECKs by the Code overload of GenerationalBarrier below.
V8_EXPORT_PRIVATE bool Heap_ValueMightRequireGenerationalWriteBarrier(
HeapObject value);
// Out-of-line slow path of the generational write barrier (forwards to
// Heap::GenerationalBarrierSlow in heap.cc).
V8_EXPORT_PRIVATE void Heap_GenerationalBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
......@@ -134,12 +136,17 @@ inline void GenerationalBarrier(HeapObject object, ObjectSlot slot,
GenerationalBarrier(object, slot, HeapObject::cast(value));
}
// Generational write barrier, Code overload. Code objects are never
// allocated in new space, so no remembered-set entry is ever required and
// this overload deliberately emits no barrier; the DCHECK validates that
// assumption in debug builds.
inline void GenerationalBarrier(HeapObject object, ObjectSlot slot,
                                Code value) {
  if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    // Debug-only sanity check; the entire body compiles away in release.
    DCHECK(!Heap_ValueMightRequireGenerationalWriteBarrier(value));
  }
}
// Generational write barrier for a strong HeapObject value stored into
// |slot| of |object|.
// NOTE(review): the two GenerationalBarrierInternal calls below are diff
// rendering residue — the first (with the redundant HeapObject::cast) is
// the removed old line and the second is its added replacement; only one
// of them exists in the actual tree.
inline void GenerationalBarrier(HeapObject object, ObjectSlot slot,
HeapObject value) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
DCHECK(!HasWeakHeapObjectTag(*slot));
heap_internals::GenerationalBarrierInternal(object, slot.address(),
HeapObject::cast(value));
heap_internals::GenerationalBarrierInternal(object, slot.address(), value);
}
inline void GenerationalEphemeronKeyBarrier(EphemeronHashTable table,
......
......@@ -34,6 +34,7 @@ void WriteBarrierForCode(Code host);
// Generational write barrier.
// Overload set: the Object overload dispatches to the HeapObject overload
// for heap values; the Code overload is a debug-checked no-op because Code
// objects are never allocated in new space.
void GenerationalBarrier(HeapObject object, ObjectSlot slot, Object value);
void GenerationalBarrier(HeapObject object, ObjectSlot slot, Code value);
void GenerationalBarrier(HeapObject object, ObjectSlot slot, HeapObject value);
void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
MaybeObject value);
......
......@@ -130,6 +130,14 @@ bool Heap_PageFlagsAreConsistent(HeapObject object) {
return Heap::PageFlagsAreConsistent(object);
}
// Returns true when |value| might need a generational write barrier, i.e.
// when it could conceivably live in the young generation. Code objects are
// currently the only values statically known never to be in new space.
bool Heap_ValueMightRequireGenerationalWriteBarrier(HeapObject value) {
  if (value.IsCode()) {
    // Code objects are never allocated in new space, so storing them never
    // requires a generational barrier.
    DCHECK(!ObjectInYoungGeneration(value));
    return false;
  }
  // Conservatively assume any other value might be young.
  return true;
}
void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
HeapObject value) {
Heap::GenerationalBarrierSlow(object, slot, value);
......
......@@ -141,18 +141,7 @@ AbstractCode JSFunction::abstract_code(IsolateT* isolate) {
int JSFunction::length() { return shared().length(); }
// Reads the function's code field; RELAXED_READ_FIELD performs the load
// with relaxed atomic semantics.
Code JSFunction::code() const {
  auto raw_value = RELAXED_READ_FIELD(*this, kCodeOffset);
  return Code::cast(raw_value);
}
// Stores |value| into the code field with a relaxed write, followed by a
// marking write barrier. No generational barrier is emitted because Code
// objects are never in new space — exactly the invariant the DCHECK below
// asserts (and what the surrounding commit formalizes via the Code
// overload of GenerationalBarrier).
void JSFunction::set_code(Code value) {
DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrier::Marking(*this, RawField(kCodeOffset), value);
#endif
}
// Macro-generated replacements for the handwritten code()/set_code()
// accessors: a relaxed-load/store pair plus, per the macro name, a
// release-store/acquire-load pair.
ACCESSORS_RELAXED(JSFunction, code, Code, kCodeOffset)
RELEASE_ACQUIRE_ACCESSORS(JSFunction, code, Code, kCodeOffset)
// TODO(ishell): Why relaxed read but release store?
......
......@@ -84,8 +84,7 @@ class JSFunction : public JSFunctionOrBoundFunction {
// optimized code object, or when reading from the background thread.
// Storing a builtin doesn't require release semantics because these objects
// are fully initialized.
// NOTE(review): diff residue — the two inline declarations below are the
// removed handwritten accessors; the DECL_* macro lines are their added
// macro-generated replacements. Only the DECL_* pair exists after this
// commit.
inline Code code() const;
inline void set_code(Code code);
DECL_ACCESSORS(code, Code)
DECL_RELEASE_ACQUIRE_ACCESSORS(code, Code)
// Get the abstract code associated with the function, which will either be
......
......@@ -205,6 +205,30 @@
TorqueGeneratedClass::set_##torque_name(value); \
}
// Defines out-of-class accessors |name()| / |set_name(value, mode)| for a
// tagged field at |offset| using relaxed atomic loads and stores. The
// getter DCHECKs |get_condition| after the load; the setter DCHECKs
// |set_condition| and then emits a CONDITIONAL_WRITE_BARRIER honoring
// |mode|. A cage-base-less getter overload is provided that derives the
// pointer-compression cage base from the object itself.
// (Comment placed above the #define: // inside the macro body would
// comment out the line-continuation backslashes.)
#define ACCESSORS_RELAXED_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
type holder::name() const { \
PtrComprCageBase cage_base = GetPtrComprCageBase(*this); \
return holder::name(cage_base); \
} \
type holder::name(PtrComprCageBase cage_base) const { \
type value = TaggedField<type, offset>::Relaxed_Load(cage_base, *this); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(type value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
TaggedField<type, offset>::Relaxed_Store(*this, value); \
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
// Same as ACCESSORS_RELAXED_CHECKED2 but with a single condition shared by
// both the getter and the setter.
#define ACCESSORS_RELAXED_CHECKED(holder, name, type, offset, condition) \
ACCESSORS_RELAXED_CHECKED2(holder, name, type, offset, condition, condition)
// Unconditional variant: relaxed accessors with no DCHECKed precondition.
#define ACCESSORS_RELAXED(holder, name, type, offset) \
ACCESSORS_RELAXED_CHECKED(holder, name, type, offset, true)
// Similar to ACCESSORS_RELAXED above but with respective relaxed tags.
#define RELAXED_ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
type holder::name(RelaxedLoadTag tag) const { \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.