Commit dd6c9536 authored by Ross McIlroy, committed by Commit Bot

Revert "[csa] verify skipped write-barriers in MemoryOptimizer"

This reverts commit da7322c0.

Reason for revert: Breaking the pointer compression bots, e.g.:
https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20-%20pointer%20compression/3047

Original change's description:
> [csa] verify skipped write-barriers in MemoryOptimizer
> 
> With very few exceptions, this verifies all skipped write-barriers in
> CSA and Torque, showing that the MemoryOptimizer together with some
> type information on the stored value are enough to avoid unsafe skipped
> write-barriers.
> 
> Changes to CSA:
> SKIP_WRITE_BARRIER and Store*NoWriteBarrier are verified by the
> MemoryOptimizer by default.
> Type information about the stored values (TNode<Smi>) is exploited to
> safely skip write barriers for stored Smi values.
> In some cases, the code is re-structured to make it easier to consume
> for the MemoryOptimizer (manual branch and load elimination).
> 
> Changes to the MemoryOptimizer:
> Improve the MemoryOptimizer to remove write barriers:
> - When the store happens to a CSA-generated InnerAllocate, by ignoring
>   Bitcasts and additions.
> - When the stored value is the HeapConstant of an immortal immovable root.
> - When the stored value is a SmiConstant (recognized by BitcastToTaggedSigned).
> - Fast C-calls are treated as non-allocating.
> - Runtime calls can be white-listed as non-allocating.
> 
> Remaining missing cases:
> - C++-style iterator loops with inner pointers.
> - Inner allocates that are reloaded from a field where they were just stored
>   (for example an elements backing store). Load elimination would fix that.
> - Safe stored value types that cannot be expressed in CSA (e.g., Smi|Hole).
>   We could handle that in Torque.
> - Double-aligned allocations, which are not lowered in the MemoryOptimizer
>   but in CSA.
> 
> Drive-by change: Avoid Smi suffix for StoreFixedArrayElement since this
> can be handled by overload resolution (in Torque and C++).
> 
> R=jarin@chromium.org
> TBR=mvstanton@chromium.org
> 
> Change-Id: I0af9b710673f350e0fe81c2e59f37da93c024b7c
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1571414
> Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
> Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#61016}

TBR=mvstanton@chromium.org,jarin@chromium.org,tebbi@chromium.org

Change-Id: I36877cd6d08761726ef8dce8a3e3f2ce3eebe6cf
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1585732
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61038}
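
Editorial note: the following is a minimal, self-contained C++ sketch of the value-based barrier-elision rule described in the quoted change above, not the actual V8 implementation. Node, Opcode and WriteBarrierKind here are hypothetical stand-ins; the real logic is ValueNeedsWriteBarrier and ComputeWriteBarrierKind in src/compiler/memory-optimizer.cc, visible in the hunks removed further down.

// Editorial sketch only -- simplified stand-ins, not V8's real compiler classes.
#include <iostream>

enum class Opcode { kBitcastWordToTaggedSigned, kHeapConstant, kOther };
enum class WriteBarrierKind { kNoWriteBarrier, kAssertNoWriteBarrier, kFullWriteBarrier };

struct Node {
  Opcode opcode;
  bool is_immortal_immovable_root;  // stand-in for the roots-table lookup
};

// A stored value cannot require a barrier when it is provably a Smi
// (recognized via BitcastWordToTaggedSigned) or a HeapConstant of an
// immortal immovable root.
bool ValueNeedsWriteBarrier(const Node& value) {
  switch (value.opcode) {
    case Opcode::kBitcastWordToTaggedSigned:
      return false;  // Smis are not heap pointers.
    case Opcode::kHeapConstant:
      return !value.is_immortal_immovable_root;
    default:
      return true;
  }
}

// The barrier is also dropped when the store targets an object the optimizer
// knows was just allocated in the young generation. If neither rule applies
// and CSA asserted "no barrier needed", verification reports a failure.
WriteBarrierKind ComputeWriteBarrierKind(bool object_in_young_allocation_group,
                                         const Node& value,
                                         WriteBarrierKind requested) {
  if (object_in_young_allocation_group || !ValueNeedsWriteBarrier(value)) {
    return WriteBarrierKind::kNoWriteBarrier;
  }
  // In the reverted change, requested == kAssertNoWriteBarrier at this point
  // meant an unverifiable skipped barrier and triggered a fatal diagnostic.
  return requested;
}

int main() {
  Node smi_value{Opcode::kBitcastWordToTaggedSigned, false};
  std::cout << std::boolalpha << ValueNeedsWriteBarrier(smi_value) << "\n";  // prints "false"
}

As the quoted description notes, the real implementation additionally treats fast C calls and allow-listed runtime calls as non-allocating when tracking allocation state; see the memory-optimizer.cc and runtime.cc hunks below.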
parent 631c4d4f
......@@ -35,7 +35,7 @@ namespace array_reverse {
StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
elements: FixedArrayBase, index: Smi, value: Smi) {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
StoreFixedArrayElement(elems, index, value, SKIP_WRITE_BARRIER);
StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
}
StoreElement<array::FastPackedObjectElements, Object>(
......
......@@ -66,12 +66,8 @@ namespace array_slice {
const newElement: Object = e != Hole ?
argumentsContext[UnsafeCast<Smi>(e)] :
unmappedElements.objects[current];
// It is safe to skip the write barrier here because resultElements was
// allocated together with result in a folded allocation.
// TODO(tebbi): The verification of this fails at the moment due to
// missing load elimination.
StoreFixedArrayElement(
resultElements, indexOut++, newElement, UNSAFE_SKIP_WRITE_BARRIER);
StoreFixedArrayElementSmi(
resultElements, indexOut++, newElement, SKIP_WRITE_BARRIER);
}
// Fill in the rest of the result that contains the unmapped parameters
......
......@@ -920,8 +920,6 @@ const INTPTR_PARAMETERS: constexpr ParameterMode
const SKIP_WRITE_BARRIER:
constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER';
const UNSAFE_SKIP_WRITE_BARRIER:
constexpr WriteBarrierMode generates 'UNSAFE_SKIP_WRITE_BARRIER';
extern class AsyncGeneratorRequest extends Struct {
next: AsyncGeneratorRequest | Undefined;
......@@ -2044,20 +2042,12 @@ extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, Smi): void;
extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, HeapObject): void;
extern operator '.objects[]=' macro StoreFixedArrayElement(
extern operator '.objects[]=' macro StoreFixedArrayElementSmi(
FixedArray, Smi, Object): void;
extern macro StoreFixedArrayElement(
extern operator '.objects[]=' macro StoreFixedArrayElementSmi(
FixedArray, Smi, Object, constexpr WriteBarrierMode): void;
extern macro StoreFixedArrayElement(
FixedArray, Smi, Smi, constexpr WriteBarrierMode): void;
extern macro StoreFixedArrayElement(
FixedArray, constexpr int31, Object, constexpr WriteBarrierMode): void;
extern macro StoreFixedArrayElement(
FixedArray, constexpr int31, Smi, constexpr WriteBarrierMode): void;
extern macro StoreFixedArrayElement(
FixedArray, intptr, Object, constexpr WriteBarrierMode): void;
extern macro StoreFixedArrayElement(
FixedArray, intptr, Smi, constexpr WriteBarrierMode): void;
extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
FixedDoubleArray, intptr, float64): void;
extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
......
......@@ -1576,8 +1576,8 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
Node* const hash, Node* const number_of_buckets, Node* const occupancy) {
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement(
table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize));
Node* const bucket_entry = UnsafeLoadFixedArrayElement(
table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize);
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
......@@ -1750,8 +1750,8 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
Node* const number_of_buckets, Node* const occupancy) {
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement(
table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize));
Node* const bucket_entry = UnsafeLoadFixedArrayElement(
table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize);
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
......
......@@ -35,7 +35,7 @@ TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
TNode<String> input, TNode<FixedArray>* elements_out) {
TNode<String> input) {
#ifdef DEBUG
TNode<Smi> max_length = SmiConstant(JSArray::kInitialMaxFastElementArray);
CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length));
......@@ -89,7 +89,6 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
FillFixedArrayWithValue(elements_kind, elements, IntPtrZero(), length_intptr,
RootIndex::kUndefinedValue);
if (elements_out) *elements_out = CAST(elements);
return CAST(result);
}
......@@ -178,9 +177,9 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<String> first =
CAST(CallBuiltin(Builtins::kSubString, context, string, start, end));
TNode<FixedArray> result_elements;
TNode<JSRegExpResult> result = AllocateRegExpResult(
context, num_results, start, string, &result_elements);
TNode<JSRegExpResult> result =
AllocateRegExpResult(context, num_results, start, string);
TNode<FixedArray> result_elements = CAST(LoadElements(result));
UnsafeStoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
......
......@@ -38,9 +38,10 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
// Allocate a RegExpResult with the given length (the number of captures,
// including the match itself), index (the index where the match starts),
// and input string.
TNode<JSRegExpResult> AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
TNode<String> input, TNode<FixedArray>* elements_out = nullptr);
TNode<JSRegExpResult> AllocateRegExpResult(TNode<Context> context,
TNode<Smi> length,
TNode<Smi> index,
TNode<String> input);
TNode<Object> FastLoadLastIndex(TNode<JSRegExp> regexp);
TNode<Object> SlowLoadLastIndex(TNode<Context> context, TNode<Object> regexp);
......
......@@ -127,7 +127,7 @@ TNode<FixedTypedArrayBase> TypedArrayBuiltinsAssembler::AllocateOnHeapElements(
// pointer.
CSA_ASSERT(this, IsRegularHeapObjectSize(total_size));
TNode<HeapObject> elements;
TNode<Object> elements;
if (UnalignedLoadSupported(MachineRepresentation::kFloat64) &&
UnalignedStoreSupported(MachineRepresentation::kFloat64)) {
......@@ -136,13 +136,9 @@ TNode<FixedTypedArrayBase> TypedArrayBuiltinsAssembler::AllocateOnHeapElements(
elements = AllocateInNewSpace(total_size, kDoubleAlignment);
}
// These skipped write barriers are marked unsafe because the MemoryOptimizer
// currently doesn't handle double alignment, so it fails at verifying them.
UnsafeStoreObjectFieldNoWriteBarrier(elements,
FixedTypedArrayBase::kMapOffset, map);
UnsafeStoreObjectFieldNoWriteBarrier(
elements, FixedTypedArrayBase::kLengthOffset, length);
UnsafeStoreObjectFieldNoWriteBarrier(
StoreMapNoWriteBarrier(elements, map);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
StoreObjectFieldNoWriteBarrier(
elements, FixedTypedArrayBase::kBasePointerOffset, elements);
StoreObjectFieldNoWriteBarrier(
elements, FixedTypedArrayBase::kExternalPointerOffset,
......
......@@ -2738,20 +2738,11 @@ void CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
void CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value, MachineRepresentation rep) {
OptimizedStoreField(rep, UncheckedCast<HeapObject>(object), offset, value,
CanBeTaggedPointer(rep)
? WriteBarrierKind::kAssertNoWriteBarrier
: WriteBarrierKind::kNoWriteBarrier);
}
void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
TNode<HeapObject> object, int offset, TNode<Object> value) {
OptimizedStoreField(MachineRepresentation::kTagged, object, offset, value,
WriteBarrierKind::kNoWriteBarrier);
}
void CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, SloppyTNode<IntPtrT> offset, Node* value,
MachineRepresentation rep) {
Node* object, Node* offset, Node* value, MachineRepresentation rep) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
......@@ -2773,7 +2764,7 @@ void CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
OptimizedStoreField(MachineRepresentation::kTaggedPointer,
UncheckedCast<HeapObject>(object), HeapObject::kMapOffset,
map, WriteBarrierKind::kAssertNoWriteBarrier);
map, WriteBarrierKind::kNoWriteBarrier);
}
void CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
......@@ -2802,7 +2793,6 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
this, Word32Or(IsFixedArraySubclass(object), IsPropertyArray(object)));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UNSAFE_SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER ||
barrier_mode == UPDATE_EPHEMERON_KEY_WRITE_BARRIER);
DCHECK(IsAligned(additional_offset, kTaggedSize));
......@@ -2837,9 +2827,6 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
FixedArray::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value);
} else if (barrier_mode == UNSAFE_SKIP_WRITE_BARRIER) {
UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
value);
} else if (barrier_mode == UPDATE_EPHEMERON_KEY_WRITE_BARRIER) {
StoreEphemeronKey(object, offset, value);
} else {
......@@ -2874,7 +2861,6 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
DCHECK(IsAligned(additional_offset, kTaggedSize));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UNSAFE_SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
int header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
......@@ -2886,9 +2872,6 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
FeedbackVector::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value);
} else if (barrier_mode == UNSAFE_SKIP_WRITE_BARRIER) {
UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
value);
} else {
Store(object, offset, value);
}
......@@ -3824,8 +3807,7 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
BuildFastLoop(
start_address, end_address,
[this, value](Node* current) {
UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current,
value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
......@@ -3867,28 +3849,23 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
TVARIABLE(JSArray, array);
TVARIABLE(FixedArrayBase, elements);
if (IsIntPtrOrSmiConstantZero(capacity, capacity_mode)) {
TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
array = AllocateJSArray(array_map, empty_array, length, allocation_site);
return {array.value(), empty_array};
}
Label out(this), empty(this), nonempty(this);
int capacity_int;
if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_int, capacity_mode)) {
if (capacity_int == 0) {
TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
array = AllocateJSArray(array_map, empty_array, length, allocation_site);
return {array.value(), empty_array};
} else {
Goto(&nonempty);
}
} else {
Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
&empty, &nonempty);
Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
&empty, &nonempty);
BIND(&empty);
{
TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
array = AllocateJSArray(array_map, empty_array, length, allocation_site);
elements = empty_array;
Goto(&out);
}
BIND(&empty);
{
TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
array = AllocateJSArray(array_map, empty_array, length, allocation_site);
elements = empty_array;
Goto(&out);
}
BIND(&nonempty);
......@@ -4560,13 +4537,13 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
CSA_SLOW_ASSERT(this, IsPropertyArray(array));
ElementsKind kind = PACKED_ELEMENTS;
Node* value = UndefinedConstant();
BuildFastFixedArrayForEach(
array, kind, from_node, to_node,
[this, value](Node* array, Node* offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
value);
},
mode);
BuildFastFixedArrayForEach(array, kind, from_node, to_node,
[this, value](Node* array, Node* offset) {
StoreNoWriteBarrier(
MachineRepresentation::kTagged, array,
offset, value);
},
mode);
}
void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, Node* array,
......@@ -4983,8 +4960,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array_adjusted,
to_offset, value);
} else {
UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged,
to_array_adjusted, to_offset, value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array_adjusted,
to_offset, value);
}
Goto(&next_iter);
......@@ -10333,9 +10310,8 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
TNode<Float64T> value_float64 = UncheckedCast<Float64T>(value);
StoreFixedDoubleArrayElement(CAST(elements), index, value_float64, mode);
} else {
WriteBarrierMode barrier_mode = IsSmiElementsKind(kind)
? UNSAFE_SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
WriteBarrierMode barrier_mode =
IsSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
StoreFixedArrayElement(CAST(elements), index, value, barrier_mode, 0, mode);
}
}
......
......@@ -1298,20 +1298,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
void UnsafeStoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
int offset, TNode<Object> value);
void StoreObjectFieldNoWriteBarrier(
Node* object, SloppyTNode<IntPtrT> offset, Node* value,
Node* object, Node* offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
template <class T = Object>
void StoreObjectFieldNoWriteBarrier(Node* object, SloppyTNode<IntPtrT> offset,
TNode<T> value) {
void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
TNode<IntPtrT> offset, TNode<T> value) {
StoreObjectFieldNoWriteBarrier(object, offset, value,
MachineRepresentationOf<T>::value);
}
template <class T = Object>
void StoreObjectFieldNoWriteBarrier(Node* object, int offset,
void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object, int offset,
TNode<T> value) {
StoreObjectFieldNoWriteBarrier(object, offset, value,
MachineRepresentationOf<T>::value);
......@@ -1339,20 +1337,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return StoreFixedArrayElement(object, index, value, barrier_mode,
CheckBounds::kDebugOnly);
}
void UnsafeStoreFixedArrayElement(
TNode<FixedArray> object, int index, TNode<Smi> value,
WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER) {
DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
return StoreFixedArrayElement(object, index, value,
UNSAFE_SKIP_WRITE_BARRIER,
CheckBounds::kDebugOnly);
}
void StoreFixedArrayElement(TNode<FixedArray> object, int index,
TNode<Smi> value,
CheckBounds check_bounds = CheckBounds::kAlways) {
return StoreFixedArrayElement(object, IntPtrConstant(index), value,
UNSAFE_SKIP_WRITE_BARRIER, 0,
INTPTR_PARAMETERS, check_bounds);
SKIP_WRITE_BARRIER, 0, INTPTR_PARAMETERS,
check_bounds);
}
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
......@@ -1395,16 +1385,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
additional_offset, parameter_mode,
CheckBounds::kDebugOnly);
}
void UnsafeStoreFixedArrayElement(
TNode<FixedArray> array, Node* index, TNode<Smi> value,
WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS) {
DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
return StoreFixedArrayElement(array, index, value,
UNSAFE_SKIP_WRITE_BARRIER, additional_offset,
parameter_mode, CheckBounds::kDebugOnly);
}
void StorePropertyArrayElement(
TNode<PropertyArray> array, Node* index, SloppyTNode<Object> value,
......@@ -1415,27 +1395,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
additional_offset, parameter_mode);
}
void StoreFixedArrayElement(
void StoreFixedArrayElementSmi(
TNode<FixedArray> array, TNode<Smi> index, TNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
StoreFixedArrayElement(array, index, value, barrier_mode, 0,
SMI_PARAMETERS);
}
void StoreFixedArrayElement(
TNode<FixedArray> array, TNode<IntPtrT> index, TNode<Smi> value,
WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER,
int additional_offset = 0) {
DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
StoreFixedArrayElement(array, index, TNode<Object>{value},
UNSAFE_SKIP_WRITE_BARRIER, additional_offset);
void StoreFixedArrayElement(TNode<FixedArray> array, TNode<IntPtrT> index,
TNode<Smi> value) {
StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0);
}
void StoreFixedArrayElement(
TNode<FixedArray> array, TNode<Smi> index, TNode<Smi> value,
WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER,
int additional_offset = 0) {
DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
StoreFixedArrayElement(array, index, TNode<Object>{value},
UNSAFE_SKIP_WRITE_BARRIER, additional_offset,
void StoreFixedArrayElement(TNode<FixedArray> array, TNode<Smi> index,
TNode<Smi> value) {
StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0,
SMI_PARAMETERS);
}
......
......@@ -220,7 +220,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
CallDescriptor::Flags flags = CallDescriptor::kNoAllocate;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
if (set_initialize_root_flag) {
flags |= CallDescriptor::kInitializeRootRegister;
}
......
......@@ -1016,26 +1016,11 @@ Node* CodeAssembler::StoreEphemeronKey(Node* base, Node* offset, Node* value) {
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* value) {
return raw_assembler()->Store(
rep, base, value,
CanBeTaggedPointer(rep) ? kAssertNoWriteBarrier : kNoWriteBarrier);
return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
}
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* offset, Node* value) {
return raw_assembler()->Store(
rep, base, offset, value,
CanBeTaggedPointer(rep) ? kAssertNoWriteBarrier : kNoWriteBarrier);
}
Node* CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
Node* base, Node* value) {
return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
}
Node* CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
Node* base, Node* offset,
Node* value) {
return raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}
......@@ -1202,8 +1187,7 @@ TNode<Object> CodeAssembler::CallRuntimeWithCEntryImpl(
int argc = static_cast<int>(args.size());
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
Runtime::MayAllocate(function) ? CallDescriptor::kNoFlags
: CallDescriptor::kNoAllocate);
CallDescriptor::kNoFlags);
Node* ref = ExternalConstant(ExternalReference::Create(function));
Node* arity = Int32Constant(argc);
......
......@@ -962,11 +962,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
Node* UnsafeStoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* value);
Node* UnsafeStoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* offset, Node* value);
// Stores uncompressed tagged value to (most likely off JS heap) memory
// location without write barrier.
Node* StoreFullTaggedNoWriteBarrier(Node* base, Node* tagged_value);
......
......@@ -550,11 +550,6 @@ struct MachineOperatorGlobalCache {
Store##Type##NoWriteBarrier##Operator() \
: Store##Type##Operator(kNoWriteBarrier) {} \
}; \
struct Store##Type##AssertNoWriteBarrier##Operator final \
: public Store##Type##Operator { \
Store##Type##AssertNoWriteBarrier##Operator() \
: Store##Type##Operator(kAssertNoWriteBarrier) {} \
}; \
struct Store##Type##MapWriteBarrier##Operator final \
: public Store##Type##Operator { \
Store##Type##MapWriteBarrier##Operator() \
......@@ -595,8 +590,6 @@ struct MachineOperatorGlobalCache {
kNoWriteBarrier)) {} \
}; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
Store##Type##AssertNoWriteBarrier##Operator \
kStore##Type##AssertNoWriteBarrier; \
Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
Store##Type##PointerWriteBarrier##Operator \
kStore##Type##PointerWriteBarrier; \
......@@ -952,8 +945,6 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
switch (store_rep.write_barrier_kind()) { \
case kNoWriteBarrier: \
return &cache_.k##Store##kRep##NoWriteBarrier; \
case kAssertNoWriteBarrier: \
return &cache_.k##Store##kRep##AssertNoWriteBarrier; \
case kMapWriteBarrier: \
return &cache_.k##Store##kRep##MapWriteBarrier; \
case kPointerWriteBarrier: \
......
......@@ -11,7 +11,6 @@
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/interface-descriptors.h"
#include "src/roots-inl.h"
namespace v8 {
namespace internal {
......@@ -19,8 +18,7 @@ namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
const char* function_debug_name)
AllocationFolding allocation_folding)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
......@@ -28,8 +26,7 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
zone_(zone),
graph_assembler_(jsgraph, nullptr, nullptr, zone),
poisoning_level_(poisoning_level),
allocation_folding_(allocation_folding),
function_debug_name_(function_debug_name) {}
allocation_folding_(allocation_folding) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
......@@ -61,21 +58,7 @@ void MemoryOptimizer::AllocationGroup::Add(Node* node) {
}
bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
// Additions should stay within the same allocated object, so it's safe to
// ignore them.
while (node_ids_.find(node->id()) == node_ids_.end()) {
switch (node->opcode()) {
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
node = NodeProperties::GetValueInput(node, 0);
break;
default:
return false;
}
}
return true;
return node_ids_.find(node->id()) != node_ids_.end();
}
MemoryOptimizer::AllocationState::AllocationState()
......@@ -103,7 +86,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kEffectPhi:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
case IrOpcode::kLoadElement:
......@@ -112,10 +94,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
// TODO(tebbi): Store nodes might do a bump-pointer allocation.
// We should introduce a special bump-pointer store node to
// differentiate that.
case IrOpcode::kStore:
case IrOpcode::kStoreElement:
case IrOpcode::kStoreField:
case IrOpcode::kTaggedPoisonOnSpeculation:
......@@ -158,17 +136,29 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kCallWithCallerSavedRegisters:
return !(CallDescriptorOf(node->op())->flags() &
CallDescriptor::kNoAllocate);
case IrOpcode::kStore:
// Store is not safe because it could be part of CSA's bump pointer
// allocation(?).
return true;
default:
break;
}
return true;
}
Node* SearchAllocatingNode(Node* start, Node* limit, Zone* temp_zone) {
bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
Node* const control = NodeProperties::GetControlInput(loop_effect_phi);
ZoneQueue<Node*> queue(temp_zone);
ZoneSet<Node*> visited(temp_zone);
visited.insert(limit);
queue.push(start);
visited.insert(loop_effect_phi);
// Start the effect chain walk from the loop back edges.
for (int i = 1; i < control->InputCount(); ++i) {
queue.push(loop_effect_phi->InputAt(i));
}
while (!queue.empty()) {
Node* const current = queue.front();
......@@ -176,40 +166,16 @@ Node* SearchAllocatingNode(Node* start, Node* limit, Zone* temp_zone) {
if (visited.find(current) == visited.end()) {
visited.insert(current);
if (CanAllocate(current)) {
return current;
}
if (CanAllocate(current)) return true;
for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
queue.push(NodeProperties::GetEffectInput(current, i));
}
}
}
return nullptr;
}
bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
Node* const control = NodeProperties::GetControlInput(loop_effect_phi);
// Start the effect chain walk from the loop back edges.
for (int i = 1; i < control->InputCount(); ++i) {
if (SearchAllocatingNode(loop_effect_phi->InputAt(i), loop_effect_phi,
temp_zone) != nullptr) {
return true;
}
}
return false;
}
Node* EffectPhiForPhi(Node* phi) {
Node* control = NodeProperties::GetControlInput(phi);
for (Node* use : control->uses()) {
if (use->opcode() == IrOpcode::kEffectPhi) {
return use;
}
}
return nullptr;
}
} // namespace
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
......@@ -534,9 +500,8 @@ void MemoryOptimizer::VisitStoreElement(Node* node,
ElementAccess const& access = ElementAccessOf(node->op());
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
WriteBarrierKind write_barrier_kind =
ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
node->ReplaceInput(1, ComputeIndex(access, index));
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
......@@ -549,9 +514,8 @@ void MemoryOptimizer::VisitStoreField(Node* node,
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
WriteBarrierKind write_barrier_kind =
ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
......@@ -564,9 +528,8 @@ void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
StoreRepresentation representation = StoreRepresentationOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, representation.write_barrier_kind());
object, state, representation.write_barrier_kind());
if (write_barrier_kind != representation.write_barrier_kind()) {
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
......@@ -595,77 +558,13 @@ Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
return index;
}
namespace {
bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
switch (value->opcode()) {
case IrOpcode::kBitcastWordToTaggedSigned:
return false;
case IrOpcode::kHeapConstant: {
RootIndex root_index;
if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
&root_index) &&
RootsTable::IsImmortalImmovable(root_index)) {
return false;
}
break;
}
default:
break;
}
return true;
}
void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
Zone* temp_zone) {
std::stringstream str;
str << "MemoryOptimizer could not remove write barrier for node #"
<< node->id() << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< node->id() << " to break in CSA code.\n";
Node* object_position = object;
if (object_position->opcode() == IrOpcode::kPhi) {
object_position = EffectPhiForPhi(object_position);
}
Node* allocating_node = nullptr;
if (object_position && object_position->op()->EffectOutputCount() > 0) {
allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
}
if (allocating_node) {
str << "\n There is a potentially allocating node in between:\n";
str << " " << *allocating_node << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< allocating_node->id() << " to break there.\n";
if (allocating_node->opcode() == IrOpcode::kCall) {
str << " If this is a never-allocating runtime call, you can add an "
"exception to Runtime::MayAllocate.\n";
}
} else {
str << "\n It seems the store happened to something different than a "
"direct "
"allocation:\n";
str << " " << *object << "\n";
str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
<< object->id() << " to break there.\n";
}
FATAL("%s", str.str().c_str());
}
} // namespace
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
Node* node, Node* object, Node* value, AllocationState const* state,
Node* object, AllocationState const* state,
WriteBarrierKind write_barrier_kind) {
if (state->IsYoungGenerationAllocation() &&
state->group()->Contains(object)) {
write_barrier_kind = kNoWriteBarrier;
}
if (!ValueNeedsWriteBarrier(value, isolate())) {
write_barrier_kind = kNoWriteBarrier;
}
if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
WriteBarrierAssertFailed(node, object, function_debug_name_, zone());
}
return write_barrier_kind;
}
......
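
Editorial aside on the diagnostics removed in the hunk above: when verification failed, WriteBarrierAssertFailed walked the effect chain from the store back toward the allocation to report the first node that might allocate. A minimal sketch of that walk, using a hypothetical EffectNode stand-in rather than the real Node/Zone classes, is:

// Editorial sketch only -- not V8 code; EffectNode is a hypothetical stand-in.
#include <queue>
#include <set>
#include <vector>

struct EffectNode {
  int id;
  bool can_allocate;                       // stand-in for CanAllocate(node)
  std::vector<EffectNode*> effect_inputs;  // predecessors on the effect chain
};

// Breadth-first walk from the store (`start`) back toward the allocation
// (`limit`); returns the first node that may allocate, or nullptr if the
// path between them is allocation-free (so skipping the barrier is safe).
EffectNode* SearchAllocatingNode(EffectNode* start, EffectNode* limit) {
  std::queue<EffectNode*> queue;
  std::set<int> visited{limit->id};
  queue.push(start);
  while (!queue.empty()) {
    EffectNode* current = queue.front();
    queue.pop();
    if (visited.insert(current->id).second) {
      if (current->can_allocate) return current;
      for (EffectNode* input : current->effect_inputs) queue.push(input);
    }
  }
  return nullptr;
}

int main() {
  EffectNode alloc{0, true, {}};
  EffectNode store{1, false, {&alloc}};
  // No allocating node strictly between the store and its allocation:
  return SearchAllocatingNode(&store, &alloc) == nullptr ? 0 : 1;
}

In the reverted code, this result fed the FATAL message, which suggested re-running mksnapshot with --csa-trap-on-node to break at the offending node.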
......@@ -35,8 +35,7 @@ class MemoryOptimizer final {
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
const char* function_debug_name);
AllocationFolding allocation_folding);
~MemoryOptimizer() = default;
void Optimize();
......@@ -124,8 +123,7 @@ class MemoryOptimizer final {
void VisitOtherEffect(Node*, AllocationState const*);
Node* ComputeIndex(ElementAccess const&, Node*);
WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
Node* value,
WriteBarrierKind ComputeWriteBarrierKind(Node* object,
AllocationState const* state,
WriteBarrierKind);
......@@ -155,7 +153,6 @@ class MemoryOptimizer final {
GraphAssembler graph_assembler_;
PoisoningMitigationLevel poisoning_level_;
AllocationFolding allocation_folding_;
const char* function_debug_name_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
......
......@@ -1510,8 +1510,7 @@ struct MemoryOptimizationPhase {
data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
data->info()->is_allocation_folding_enabled()
? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
: MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
data->debug_name());
: MemoryOptimizer::AllocationFolding::kDontAllocationFolding);
optimizer.Optimize();
}
};
......
......@@ -735,7 +735,6 @@ enum class AccessMode { ATOMIC, NON_ATOMIC };
// Supported write barrier modes.
enum WriteBarrierKind : uint8_t {
kNoWriteBarrier,
kAssertNoWriteBarrier,
kMapWriteBarrier,
kPointerWriteBarrier,
kEphemeronKeyWriteBarrier,
......@@ -750,8 +749,6 @@ inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
switch (kind) {
case kNoWriteBarrier:
return os << "NoWriteBarrier";
case kAssertNoWriteBarrier:
return os << "AssertNoWriteBarrier";
case kMapWriteBarrier:
return os << "MapWriteBarrier";
case kPointerWriteBarrier:
......
......@@ -1852,8 +1852,7 @@ void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
MachineRepresentation::kFloat64);
} else if (representation.IsSmi()) {
TNode<Smi> value_smi = CAST(value);
StoreObjectFieldNoWriteBarrier(property_storage, offset, value_smi);
StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
} else {
StoreObjectField(property_storage, offset, value);
}
......
......@@ -352,8 +352,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
PACKED_SMI_ELEMENTS, PACKED_ELEMENTS, slow);
}
StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, elements,
offset, value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
BIND(&non_smi_value);
......
......@@ -186,15 +186,12 @@ namespace internal {
struct InliningPosition;
class PropertyDescriptorObject;
// UNSAFE_SKIP_WRITE_BARRIER skips the write barrier.
// SKIP_WRITE_BARRIER skips the write barrier and asserts that this is safe in
// the MemoryOptimizer
// SKIP_WRITE_BARRIER skips the write barrier.
// UPDATE_WEAK_WRITE_BARRIER skips the marking part of the write barrier and
// only performs the generational part.
// UPDATE_WRITE_BARRIER is doing the full barrier, marking and generational.
enum WriteBarrierMode {
SKIP_WRITE_BARRIER,
UNSAFE_SKIP_WRITE_BARRIER,
UPDATE_WEAK_WRITE_BARRIER,
UPDATE_EPHEMERON_KEY_WRITE_BARRIER,
UPDATE_WRITE_BARRIER
......
......@@ -177,16 +177,6 @@ bool Runtime::IsNonReturning(FunctionId id) {
}
}
bool Runtime::MayAllocate(FunctionId id) {
switch (id) {
case Runtime::kCompleteInobjectSlackTracking:
case Runtime::kCompleteInobjectSlackTrackingForMap:
return false;
default:
return true;
}
}
const Runtime::Function* Runtime::FunctionForName(const unsigned char* name,
int length) {
base::CallOnce(&initialize_function_name_map_once,
......
......@@ -687,10 +687,6 @@ class Runtime : public AllStatic {
// sentinel, always.
static bool IsNonReturning(FunctionId id);
// Check if a runtime function with the given {id} may trigger a heap
// allocation.
static bool MayAllocate(FunctionId id);
// Get the intrinsic function with the given name.
static const Function* FunctionForName(const unsigned char* name, int length);
......
......@@ -192,9 +192,9 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
//
// Finally, it is important that this function does not call `RecordWrite` which
// is why "setup" is in charge of all allocations and we are using
// UNSAFE_SKIP_WRITE_BARRIER. The reason for this is that `RecordWrite` may
// clobber the top 64 bits of Simd128 registers. This is the case on x64, ia32
// and Arm64 for example.
// SKIP_WRITE_BARRIER. The reason for this is that `RecordWrite` may clobber the
// top 64 bits of Simd128 registers. This is the case on x64, ia32 and Arm64 for
// example.
Handle<Code> BuildTeardownFunction(Isolate* isolate,
CallDescriptor* call_descriptor,
std::vector<AllocatedOperand> parameters) {
......@@ -206,8 +206,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
Node* param = __ Parameter(i + 2);
switch (parameters[i].representation()) {
case MachineRepresentation::kTagged:
__ StoreFixedArrayElement(result_array, i, param,
UNSAFE_SKIP_WRITE_BARRIER);
__ StoreFixedArrayElement(result_array, i, param, SKIP_WRITE_BARRIER);
break;
// Box FP values into HeapNumbers.
case MachineRepresentation::kFloat32:
......@@ -230,7 +229,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
->I32x4ExtractLane(lane),
param));
__ StoreFixedArrayElement(vector, lane, lane_value,
UNSAFE_SKIP_WRITE_BARRIER);
SKIP_WRITE_BARRIER);
}
break;
}
......
......@@ -247,8 +247,7 @@ namespace array {
context: Context, sortState: SortState, index: Smi, value: Object): Smi {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
const value = UnsafeCast<Smi>(value);
StoreFixedArrayElement(elements, index, value, SKIP_WRITE_BARRIER);
StoreFixedArrayElementSmi(elements, index, value, SKIP_WRITE_BARRIER);
return kSuccess;
}
......