Commit 09faa661 authored by Wenyu Zhao, committed by V8 LUCI CQ

Reland "[csa] Remove InnerAllocate and replace with non-folded allocations"

The initial CL was suspected of breaking the --predictable CI. However,
the CI is still crashing and flaky even after the revert, so this
relands the change.

This is a reland of 59d58d72

Original change's description:
> [csa] Remove InnerAllocate and replace with non-folded allocations
>
> This CL removes all uses of InnerAllocate (except memento allocations)
> and replaces them with non-folded allocations. The change is based on
> two observations: 1. those InnerAllocates are not guarded by the
> --allocation-folding flag; 2. the MemoryOptimizer should be able to
> handle the folding, so no performance regression is expected.
>
> Two special versions of InnerAllocate are still kept:
> * One for memento allocations (renamed to InnerAllocateMemento).
> * One for AllocateUninitializedJSArrayWithElements (renamed to InnerAllocateElements).
>
> Change-Id: Ie77da6b2fba31b048241f7b7d927065305a01c27
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2873767
> Commit-Queue: Wenyu Zhao <wenyu.zhao@anu.edu.au>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#74899}

Change-Id: I540c3a6b6e3f7c70c048f8ad1e5f702287fb086b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2946667
Commit-Queue: Wenyu Zhao <wenyu.zhao@anu.edu.au>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75015}
parent fd9d6fef
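
For context, here is a minimal standalone C++ sketch of the two patterns
involved (illustrative only: the toy allocator, sizes, and names below are
assumptions, not V8's real API). The removed InnerAllocate style carves
several objects out of one bump-pointer allocation via plain pointer
arithmetic; the replacement issues one allocation per object and relies on
the MemoryOptimizer to fold adjacent allocations back together where it can.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct BumpAllocator {
  // A toy "new space": a bump pointer over a fixed buffer.
  alignas(8) uint8_t buffer[1 << 16];
  size_t top = 0;
  void* Allocate(size_t size) {           // one allocation == one bump
    void* result = buffer + top;
    top += (size + 7) & ~size_t{7};       // keep 8-byte alignment
    return result;
  }
};

// Stand-ins for JSAsyncFunctionObject::kHeaderSize etc.
constexpr size_t kFunctionObjectSize = 32;
constexpr size_t kPromiseSize = 64;

int main() {
  BumpAllocator space;

  // Old pattern: fold both objects into a single allocation and derive the
  // second object's address by pointer arithmetic (what InnerAllocate did).
  void* base = space.Allocate(kFunctionObjectSize + kPromiseSize);
  void* async_function_object = base;
  void* promise_folded = static_cast<uint8_t*>(base) + kFunctionObjectSize;

  // New pattern: allocate each object separately; an optimizer that sees two
  // back-to-back allocations with no GC safepoint in between may merge them
  // into one bump, recovering the same layout.
  void* async_function_object2 = space.Allocate(kFunctionObjectSize);
  void* promise_separate = space.Allocate(kPromiseSize);

  std::printf("%p %p / %p %p\n", async_function_object, promise_folded,
              async_function_object2, promise_separate);
}

In the diff below, each removed AllocateInNewSpace(total size) plus
InnerAllocate(base, offset) pair follows the first pattern; the replacement
code follows the second.
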
@@ -103,19 +103,14 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
IntPtrConstant(0), parameters_and_register_length,
RootIndex::kUndefinedValue);
// Allocate space for the promise, the async function object.
TNode<IntPtrT> size = IntPtrConstant(JSPromise::kSizeWithEmbedderFields +
JSAsyncFunctionObject::kHeaderSize);
TNode<HeapObject> base = AllocateInNewSpace(size);
// Initialize the promise.
// Allocate and initialize the promise.
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> promise_function =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
TNode<Map> promise_map = LoadObjectField<Map>(
promise_function, JSFunction::kPrototypeOrInitialMapOffset);
TNode<JSPromise> promise = UncheckedCast<JSPromise>(
InnerAllocate(base, JSAsyncFunctionObject::kHeaderSize));
AllocateInNewSpace(JSPromise::kSizeWithEmbedderFields));
StoreMapNoWriteBarrier(promise, promise_map);
StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -123,11 +118,12 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
RootIndex::kEmptyFixedArray);
PromiseInit(promise);
// Initialize the async function object.
// Allocate and initialize the async function object.
TNode<Map> async_function_object_map = CAST(LoadContextElement(
native_context, Context::ASYNC_FUNCTION_OBJECT_MAP_INDEX));
TNode<JSAsyncFunctionObject> async_function_object =
UncheckedCast<JSAsyncFunctionObject>(base);
UncheckedCast<JSAsyncFunctionObject>(
AllocateInNewSpace(JSAsyncFunctionObject::kHeaderSize));
StoreMapNoWriteBarrier(async_function_object, async_function_object_map);
StoreObjectFieldRoot(async_function_object,
JSAsyncFunctionObject::kPropertiesOrHashOffset,
@@ -30,17 +30,10 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
TNode<Oddball> is_predicted_as_caught) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
static const int kWrappedPromiseOffset =
static const int kClosureContextSize =
FixedArray::SizeFor(Context::MIN_CONTEXT_EXTENDED_SLOTS);
static const int kResolveClosureOffset =
kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
static const int kRejectClosureOffset =
kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
static const int kTotalSize =
kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
TNode<Context> closure_context = UncheckedCast<Context>(base);
TNode<Context> closure_context =
UncheckedCast<Context>(AllocateInNewSpace(kClosureContextSize));
{
// Initialize the await context, storing the {generator} as extension.
TNode<Map> map = CAST(
@@ -73,9 +66,9 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
kTaggedSize)));
TNode<JSPromise> promise;
{
// Initialize Promise
// Allocate and initialize Promise
TNode<HeapObject> wrapped_value =
InnerAllocate(base, kWrappedPromiseOffset);
AllocateInNewSpace(JSPromise::kSizeWithEmbedderFields);
StoreMapNoWriteBarrier(wrapped_value, promise_map);
StoreObjectFieldRoot(wrapped_value, JSPromise::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -85,13 +78,15 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
PromiseInit(promise);
}
// Initialize resolve handler
TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
// Allocate and initialize resolve handler
TNode<HeapObject> on_resolve =
AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
InitializeNativeClosure(closure_context, native_context, on_resolve,
on_resolve_sfi);
// Initialize reject handler
TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
// Allocate and initialize reject handler
TNode<HeapObject> on_reject =
AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_sfi);
@@ -118,19 +113,14 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
TNode<Oddball> is_predicted_as_caught) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
static const int kResolveClosureOffset =
FixedArray::SizeFor(Context::MIN_CONTEXT_EXTENDED_SLOTS);
static const int kRejectClosureOffset =
kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
static const int kTotalSize =
kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
// 2. Let promise be ? PromiseResolve(« promise »).
// We skip this step, because promise is already guaranteed to be a
// JSPromise at this point.
TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
TNode<Context> closure_context = UncheckedCast<Context>(base);
static const int kClosureContextSize =
FixedArray::SizeFor(Context::MIN_CONTEXT_EXTENDED_SLOTS);
TNode<Context> closure_context =
UncheckedCast<Context>(AllocateInNewSpace(kClosureContextSize));
{
// Initialize the await context, storing the {generator} as extension.
TNode<Map> map = CAST(
@@ -149,13 +139,15 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
generator);
}
// Initialize resolve handler
TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
// Allocate and initialize resolve handler
TNode<HeapObject> on_resolve =
AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
InitializeNativeClosure(closure_context, native_context, on_resolve,
on_resolve_sfi);
// Initialize reject handler
TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
// Allocate and initialize reject handler
TNode<HeapObject> on_reject =
AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_sfi);
@@ -1444,17 +1444,6 @@ TNode<HeapObject> CodeStubAssembler::Allocate(int size_in_bytes,
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
TNode<IntPtrT> offset) {
return UncheckedCast<HeapObject>(
BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset)));
}
TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) {
return UintPtrLessThanOrEqual(size,
IntPtrConstant(kMaxRegularHeapObjectSize));
@@ -3928,6 +3917,20 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
return result;
}
namespace {
// To prevent GC between the array and elements allocation, the elements
// object allocation is folded together with the js-array allocation.
TNode<FixedArrayBase> InnerAllocateElements(CodeStubAssembler* csa,
TNode<JSArray> js_array,
int offset) {
return csa->UncheckedCast<FixedArrayBase>(
csa->BitcastWordToTagged(csa->IntPtrAdd(
csa->BitcastTaggedToWord(js_array), csa->IntPtrConstant(offset))));
}
} // namespace
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
@@ -3985,9 +3988,14 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
// folding trick. Instead, we first allocate the elements in large object
// space, and then allocate the JSArray (and possibly the allocation
// memento) in new space.
if (allocation_flags & kAllowLargeObjectAllocation) {
const bool inline_allocation =
!V8_DISABLE_WRITE_BARRIERS_BOOL || V8_ALLOCATION_FOLDING_BOOL;
if ((allocation_flags & kAllowLargeObjectAllocation) ||
!inline_allocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size), &next);
if (inline_allocation) {
GotoIf(IsRegularHeapObjectSize(size), &next);
}
CSA_CHECK(this, IsValidFastJSArrayCapacity(capacity));
@@ -4009,14 +4017,17 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
Goto(&out);
BIND(&next);
if (inline_allocation) {
BIND(&next);
}
}
if (!inline_allocation) Unreachable();
// Fold all objects into a single new space allocation.
array =
AllocateUninitializedJSArray(array_map, length, allocation_site, size);
elements = UncheckedCast<FixedArrayBase>(
InnerAllocate(array.value(), elements_offset));
elements = InnerAllocateElements(this, array.value(), elements_offset);
StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
elements.value());
@@ -5347,12 +5358,28 @@ template TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity<IntPtrT>(
TNode<HeapObject>, TNode<FixedArrayBase>, ElementsKind, ElementsKind,
TNode<IntPtrT>, TNode<IntPtrT>, compiler::CodeAssemblerLabel*);
namespace {
// Helper function for folded memento allocation.
// Memento objects are designed to be put right after the objects they are
// tracking on. So memento allocations have to be folded together with previous
// object allocations.
TNode<HeapObject> InnerAllocateMemento(CodeStubAssembler* csa,
TNode<HeapObject> previous,
TNode<IntPtrT> offset) {
return csa->UncheckedCast<HeapObject>(csa->BitcastWordToTagged(
csa->IntPtrAdd(csa->BitcastTaggedToWord(previous), offset)));
}
} // namespace
void CodeStubAssembler::InitializeAllocationMemento(
TNode<HeapObject> base, TNode<IntPtrT> base_allocation_size,
TNode<AllocationSite> allocation_site) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
Comment("[Initialize AllocationMemento");
TNode<HeapObject> memento = InnerAllocate(base, base_allocation_size);
TNode<HeapObject> memento =
InnerAllocateMemento(this, base, base_allocation_size);
StoreMapNoWriteBarrier(memento, RootIndex::kAllocationMementoMap);
StoreObjectFieldNoWriteBarrier(
memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
@@ -13645,8 +13672,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Smi> length = SmiConstant(2);
int const elements_size = FixedArray::SizeFor(2);
TNode<FixedArray> elements = UncheckedCast<FixedArray>(
Allocate(elements_size + JSArray::kHeaderSize + JSIteratorResult::kSize));
TNode<FixedArray> elements =
UncheckedCast<FixedArray>(Allocate(elements_size));
StoreObjectFieldRoot(elements, FixedArray::kMapOffset,
RootIndex::kFixedArrayMap);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
@@ -13654,7 +13681,7 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
StoreFixedArrayElement(elements, 1, value);
TNode<Map> array_map = CAST(LoadContextElement(
native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX));
TNode<HeapObject> array = InnerAllocate(elements, elements_size);
TNode<HeapObject> array = Allocate(JSArray::kHeaderSize);
StoreMapNoWriteBarrier(array, array_map);
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -13662,7 +13689,7 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
TNode<Map> iterator_map = CAST(
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
TNode<HeapObject> result = InnerAllocate(array, JSArray::kHeaderSize);
TNode<HeapObject> result = Allocate(JSIteratorResult::kSize);
StoreMapNoWriteBarrier(result, iterator_map);
StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -748,9 +748,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocationFlags flags = kNone);
TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous,
TNode<IntPtrT> offset);
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
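
One detail worth calling out: the two helpers that survive,
InnerAllocateMemento and InnerAllocateElements, do not allocate anything. As
the BitcastTaggedToWord / IntPtrAdd / BitcastWordToTagged sequence above
shows, they only compute the tagged address of an object at a fixed offset
inside memory that was already reserved. A rough standalone sketch of why
that works, assuming V8's usual low-bit heap-object tag of 1 and
pointer-aligned offsets:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // assumed low-bit tag, as in V8

uintptr_t Tag(uintptr_t address) { return address | kHeapObjectTag; }
uintptr_t Untag(uintptr_t tagged) { return tagged & ~kHeapObjectTag; }

// The moral equivalent of InnerAllocate: no memory is reserved here; we only
// derive the tagged pointer of an object `offset` bytes after `previous`,
// inside an allocation that already covers both objects.
uintptr_t InnerPointer(uintptr_t tagged_previous, uintptr_t offset) {
  // The tag lives in the low bit and offsets are pointer-aligned, so adding
  // the byte offset to the tagged word preserves the tag.
  return tagged_previous + offset;
}

int main() {
  alignas(8) static uint8_t block[128];      // pretend this came from new space
  uintptr_t base = Tag(reinterpret_cast<uintptr_t>(block));
  uintptr_t inner = InnerPointer(base, 64);  // e.g. a memento after an array
  assert(Untag(inner) == reinterpret_cast<uintptr_t>(block) + 64);
  assert((inner & kHeapObjectTag) == kHeapObjectTag);
}

This is also why the remaining folded cases must stay folded: the pointer
arithmetic is only valid when the enclosing allocation is guaranteed to cover
both objects with no GC in between, which is exactly what the memento and
elements comments in the diff describe.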