Commit a15cfb8f authored by Wenyu Zhao, committed by V8 LUCI CQ

Revert "[csa] Remove InnerAllocate and replace with non-folded allocations"

This reverts commit 59d58d72.

Reason for revert: This CL breaks --predictable

Original change's description:
> [csa] Remove InnerAllocate and replace with non-folded allocations
>
> This CL removes all uses of InnerAllocate (except memento allocations)
> and replaces them with non-folded allocations. The change is based on the
> facts that 1. those InnerAllocates are not guarded by the
> --allocation-folding flag, and 2. the MemoryOptimizer can hopefully handle
> the folding, so no performance regression will happen.
>
> Two special versions of InnerAllocate are still kept:
> * One for memento allocations (renamed to InnerAllocateMemento).
> * One for AllocateUninitializedJSArrayWithElements (renamed to InnerAllocateElements).
>
> Change-Id: Ie77da6b2fba31b048241f7b7d927065305a01c27
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2873767
> Commit-Queue: Wenyu Zhao <wenyu.zhao@anu.edu.au>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#74899}

Change-Id: If6a1836634670eff3342f6df1d2a5b76afbdc0ac
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2932796
Auto-Submit: Wenyu Zhao <wenyu.zhao@anu.edu.au>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#74903}
parent 99bfa28b
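
For context, the hunks below restore the folded-allocation pattern described above. As a rough sketch (condensed from the AwaitOld hunk further down; the kTotalSize and offset constants are the ones computed there, and the snippet is only meaningful inside a CodeStubAssembler method):

  // One new-space allocation large enough for all of the objects.
  TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
  // The closure context sits at the start of the block ...
  TNode<Context> closure_context = UncheckedCast<Context>(base);
  // ... and the remaining objects are carved out of the same block with
  // plain pointer arithmetic, so no allocation (and hence no GC) can
  // happen between creating them.
  TNode<HeapObject> wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
  TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
  TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);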
@@ -103,14 +103,19 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
IntPtrConstant(0), parameters_and_register_length,
RootIndex::kUndefinedValue);
// Allocate and initialize the promise.
// Allocate space for the promise and the async function object.
TNode<IntPtrT> size = IntPtrConstant(JSPromise::kSizeWithEmbedderFields +
JSAsyncFunctionObject::kHeaderSize);
TNode<HeapObject> base = AllocateInNewSpace(size);
// Initialize the promise.
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> promise_function =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
TNode<Map> promise_map = LoadObjectField<Map>(
promise_function, JSFunction::kPrototypeOrInitialMapOffset);
TNode<JSPromise> promise = UncheckedCast<JSPromise>(
AllocateInNewSpace(JSPromise::kSizeWithEmbedderFields));
InnerAllocate(base, JSAsyncFunctionObject::kHeaderSize));
StoreMapNoWriteBarrier(promise, promise_map);
StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -118,12 +123,11 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
RootIndex::kEmptyFixedArray);
PromiseInit(promise);
// Allocate and initialize the async function object.
// Initialize the async function object.
TNode<Map> async_function_object_map = CAST(LoadContextElement(
native_context, Context::ASYNC_FUNCTION_OBJECT_MAP_INDEX));
TNode<JSAsyncFunctionObject> async_function_object =
UncheckedCast<JSAsyncFunctionObject>(
AllocateInNewSpace(JSAsyncFunctionObject::kHeaderSize));
UncheckedCast<JSAsyncFunctionObject>(base);
StoreMapNoWriteBarrier(async_function_object, async_function_object_map);
StoreObjectFieldRoot(async_function_object,
JSAsyncFunctionObject::kPropertiesOrHashOffset,
......
@@ -30,10 +30,17 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
TNode<Oddball> is_predicted_as_caught) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
static const int kClosureContextSize =
static const int kWrappedPromiseOffset =
FixedArray::SizeFor(Context::MIN_CONTEXT_EXTENDED_SLOTS);
TNode<Context> closure_context =
UncheckedCast<Context>(AllocateInNewSpace(kClosureContextSize));
static const int kResolveClosureOffset =
kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
static const int kRejectClosureOffset =
kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
static const int kTotalSize =
kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
TNode<Context> closure_context = UncheckedCast<Context>(base);
{
// Initialize the await context, storing the {generator} as extension.
TNode<Map> map = CAST(
@@ -66,9 +73,9 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
kTaggedSize)));
TNode<JSPromise> promise;
{
// Allocate and initialize Promise
// Initialize Promise
TNode<HeapObject> wrapped_value =
AllocateInNewSpace(JSPromise::kSizeWithEmbedderFields);
InnerAllocate(base, kWrappedPromiseOffset);
StoreMapNoWriteBarrier(wrapped_value, promise_map);
StoreObjectFieldRoot(wrapped_value, JSPromise::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -78,15 +85,13 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
PromiseInit(promise);
}
// Allocate and initialize resolve handler
TNode<HeapObject> on_resolve =
AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
// Initialize resolve handler
TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_resolve,
on_resolve_sfi);
// Allocate and initialize reject handler
TNode<HeapObject> on_reject =
AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
// Initialize reject handler
TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_sfi);
@@ -113,14 +118,19 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
TNode<Oddball> is_predicted_as_caught) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
static const int kResolveClosureOffset =
FixedArray::SizeFor(Context::MIN_CONTEXT_EXTENDED_SLOTS);
static const int kRejectClosureOffset =
kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
static const int kTotalSize =
kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
// 2. Let promise be ? PromiseResolve(« promise »).
// We skip this step, because promise is already guaranteed to be a
// JSPromise at this point.
static const int kClosureContextSize =
FixedArray::SizeFor(Context::MIN_CONTEXT_EXTENDED_SLOTS);
TNode<Context> closure_context =
UncheckedCast<Context>(AllocateInNewSpace(kClosureContextSize));
TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
TNode<Context> closure_context = UncheckedCast<Context>(base);
{
// Initialize the await context, storing the {generator} as extension.
TNode<Map> map = CAST(
@@ -139,15 +149,13 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
generator);
}
// Allocate and initialize resolve handler
TNode<HeapObject> on_resolve =
AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
// Initialize resolve handler
TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_resolve,
on_resolve_sfi);
// Allocate and initialize reject handler
TNode<HeapObject> on_reject =
AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
// Initialize reject handler
TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_sfi);
......
@@ -1444,6 +1444,17 @@ TNode<HeapObject> CodeStubAssembler::Allocate(int size_in_bytes,
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
TNode<IntPtrT> offset) {
return UncheckedCast<HeapObject>(
BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset)));
}
TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) {
return UintPtrLessThanOrEqual(size,
IntPtrConstant(kMaxRegularHeapObjectSize));
@@ -3917,20 +3928,6 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
return result;
}
namespace {
// To prevent GC between the array and elements allocation, the elements
// object allocation is folded together with the js-array allocation.
TNode<FixedArrayBase> InnerAllocateElements(CodeStubAssembler* csa,
TNode<JSArray> js_array,
int offset) {
return csa->UncheckedCast<FixedArrayBase>(
csa->BitcastWordToTagged(csa->IntPtrAdd(
csa->BitcastTaggedToWord(js_array), csa->IntPtrConstant(offset))));
}
} // namespace
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
@@ -3988,14 +3985,9 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
// folding trick. Instead, we first allocate the elements in large object
// space, and then allocate the JSArray (and possibly the allocation
// memento) in new space.
const bool inline_allocation =
!V8_DISABLE_WRITE_BARRIERS_BOOL || V8_ALLOCATION_FOLDING_BOOL;
if ((allocation_flags & kAllowLargeObjectAllocation) ||
!inline_allocation) {
if (allocation_flags & kAllowLargeObjectAllocation) {
Label next(this);
if (inline_allocation) {
GotoIf(IsRegularHeapObjectSize(size), &next);
}
GotoIf(IsRegularHeapObjectSize(size), &next);
CSA_CHECK(this, IsValidFastJSArrayCapacity(capacity));
@@ -4017,17 +4009,14 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
Goto(&out);
if (inline_allocation) {
BIND(&next);
}
BIND(&next);
}
if (!inline_allocation) Unreachable();
// Fold all objects into a single new space allocation.
array =
AllocateUninitializedJSArray(array_map, length, allocation_site, size);
elements = InnerAllocateElements(this, array.value(), elements_offset);
elements = UncheckedCast<FixedArrayBase>(
InnerAllocate(array.value(), elements_offset));
StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
elements.value());
@@ -5358,28 +5347,12 @@ template TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity<IntPtrT>(
TNode<HeapObject>, TNode<FixedArrayBase>, ElementsKind, ElementsKind,
TNode<IntPtrT>, TNode<IntPtrT>, compiler::CodeAssemblerLabel*);
namespace {
// Helper function for folded memento allocation.
// Memento objects are designed to be put right after the objects they are
// tracking on. So memento allocations have to be folded together with previous
// object allocations.
TNode<HeapObject> InnerAllocateMemento(CodeStubAssembler* csa,
TNode<HeapObject> previous,
TNode<IntPtrT> offset) {
return csa->UncheckedCast<HeapObject>(csa->BitcastWordToTagged(
csa->IntPtrAdd(csa->BitcastTaggedToWord(previous), offset)));
}
} // namespace
void CodeStubAssembler::InitializeAllocationMemento(
TNode<HeapObject> base, TNode<IntPtrT> base_allocation_size,
TNode<AllocationSite> allocation_site) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
Comment("[Initialize AllocationMemento");
TNode<HeapObject> memento =
InnerAllocateMemento(this, base, base_allocation_size);
TNode<HeapObject> memento = InnerAllocate(base, base_allocation_size);
StoreMapNoWriteBarrier(memento, RootIndex::kAllocationMementoMap);
StoreObjectFieldNoWriteBarrier(
memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
@@ -13676,8 +13649,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Smi> length = SmiConstant(2);
int const elements_size = FixedArray::SizeFor(2);
TNode<FixedArray> elements =
UncheckedCast<FixedArray>(Allocate(elements_size));
TNode<FixedArray> elements = UncheckedCast<FixedArray>(
Allocate(elements_size + JSArray::kHeaderSize + JSIteratorResult::kSize));
StoreObjectFieldRoot(elements, FixedArray::kMapOffset,
RootIndex::kFixedArrayMap);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
@@ -13685,7 +13658,7 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
StoreFixedArrayElement(elements, 1, value);
TNode<Map> array_map = CAST(LoadContextElement(
native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX));
TNode<HeapObject> array = Allocate(JSArray::kHeaderSize);
TNode<HeapObject> array = InnerAllocate(elements, elements_size);
StoreMapNoWriteBarrier(array, array_map);
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -13693,7 +13666,7 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
TNode<Map> iterator_map = CAST(
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
TNode<HeapObject> result = Allocate(JSIteratorResult::kSize);
TNode<HeapObject> result = InnerAllocate(array, JSArray::kHeaderSize);
StoreMapNoWriteBarrier(result, iterator_map);
StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
......
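
The AllocateJSIteratorResultForEntry hunks above follow the same scheme. Condensed, and using only identifiers that appear in the diff, the restored layout is:

  // A single allocation sized for the backing FixedArray, the JSArray, and
  // the JSIteratorResult, laid out back-to-back.
  TNode<FixedArray> elements = UncheckedCast<FixedArray>(
      Allocate(elements_size + JSArray::kHeaderSize + JSIteratorResult::kSize));
  // The JSArray starts immediately after the FixedArray ...
  TNode<HeapObject> array = InnerAllocate(elements, elements_size);
  // ... and the iterator result immediately after the JSArray.
  TNode<HeapObject> result = InnerAllocate(array, JSArray::kHeaderSize);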
@@ -748,6 +748,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocationFlags flags = kNone);
TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous,
TNode<IntPtrT> offset);
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
......