Commit 517331c5 authored by Jakob Gruber, committed by Commit Bot

[csa] Return HeapObject from CSA allocation helpers

This addresses review comments remaining from https://crrev.com/c/1301512.

Bug: v8:8238
Change-Id: Ia7687d65e90f061bb3bb87c37b84ec5559083816
Reviewed-on: https://chromium-review.googlesource.com/c/1309819
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57171}
parent 3f967aed
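In short, this CL changes the CSA allocation helpers (Allocate, AllocateInNewSpace, InnerAllocate, and the internal AllocateRaw* variants) to return TNode<HeapObject> instead of TNode<Object>, and InnerAllocate now takes a TNode<HeapObject> base. A minimal before/after sketch of a call site (not part of the CL, using only names that appear in the diff below):

// Before this CL: allocation results were typed only as Object.
TNode<Object> base = AllocateInNewSpace(kTotalSize);
TNode<Object> wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);

// After this CL: results carry the more precise HeapObject type, so callers
// no longer need to cast before treating the result as a HeapObject, while
// untyped Node* bases now require an explicit CAST (see the
// ArgumentsBuiltinsAssembler hunk below).
TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
TNode<HeapObject> wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);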
@@ -96,7 +96,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
smi_arguments_count);
Node* arguments = nullptr;
if (!empty) {
- arguments = InnerAllocate(result, elements_offset);
+ arguments = InnerAllocate(CAST(result), elements_offset);
StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset,
smi_arguments_count);
Node* fixed_array_map = LoadRoot(RootIndex::kFixedArrayMap);
@@ -87,7 +87,7 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
JSAsyncFunctionObject::kSize + FixedArray::kHeaderSize),
Signed(WordShl(parameters_and_register_length,
IntPtrConstant(kPointerSizeLog2))));
- TNode<Object> base = AllocateInNewSpace(size);
+ TNode<HeapObject> base = AllocateInNewSpace(size);
// Initialize the register file.
TNode<FixedArray> parameters_and_registers = UncheckedCast<FixedArray>(
@@ -39,7 +39,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
static const int kTotalSize =
kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
- TNode<Object> base = AllocateInNewSpace(kTotalSize);
+ TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
TNode<Context> closure_context = UncheckedCast<Context>(base);
{
// Initialize the await context, storing the {generator} as extension.
@@ -69,7 +69,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
kPointerSize)));
- TNode<Object> wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
+ TNode<HeapObject> wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
{
// Initialize Promise
StoreMapNoWriteBarrier(wrapped_value, promise_map);
@@ -81,12 +81,12 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
}
// Initialize resolve handler
- TNode<Object> on_resolve = InnerAllocate(base, kResolveClosureOffset);
+ TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_resolve,
on_resolve_context_index);
// Initialize reject handler
- TNode<Object> on_reject = InnerAllocate(base, kRejectClosureOffset);
+ TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_context_index);
@@ -132,7 +132,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
// Node* const promise =
// CallBuiltin(Builtins::kPromiseResolve, context, promise_fun, value);
- TNode<Object> base = AllocateInNewSpace(kTotalSize);
+ TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
TNode<Context> closure_context = UncheckedCast<Context>(base);
{
// Initialize the await context, storing the {generator} as extension.
@@ -152,12 +152,12 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
}
// Initialize resolve handler
- TNode<Object> on_resolve = InnerAllocate(base, kResolveClosureOffset);
+ TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_resolve,
on_resolve_context_index);
// Initialize reject handler
- TNode<Object> on_reject = InnerAllocate(base, kRejectClosureOffset);
+ TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_context_index);
@@ -58,8 +58,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
// The folded allocation.
- TNode<Object> result = Allocate(total_size);
- TNode<Object> elements = InnerAllocate(result, kElementsOffset);
+ TNode<HeapObject> result = Allocate(total_size);
+ TNode<HeapObject> elements = InnerAllocate(result, kElementsOffset);
// Initialize the JSRegExpResult.
@@ -1133,10 +1133,10 @@ void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects(
if_true);
}
- TNode<Object> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
-     AllocationFlags flags,
-     TNode<RawPtrT> top_address,
-     TNode<RawPtrT> limit_address) {
+ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
+     AllocationFlags flags,
+     TNode<RawPtrT> top_address,
+     TNode<RawPtrT> limit_address) {
// TODO(jgruber, chromium:848672): Call FatalProcessOutOfMemory if this fails.
{
intptr_t constant_value;
@@ -1233,17 +1233,17 @@ TNode<Object> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
}
BIND(&out);
- return result.value();
+ return UncheckedCast<HeapObject>(result.value());
}
- TNode<Object> CodeStubAssembler::AllocateRawUnaligned(
+ TNode<HeapObject> CodeStubAssembler::AllocateRawUnaligned(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
DCHECK_EQ(flags & kDoubleAlignment, 0);
return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
}
- TNode<Object> CodeStubAssembler::AllocateRawDoubleAligned(
+ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
#if defined(V8_HOST_ARCH_32_BIT)
@@ -1258,15 +1258,15 @@ TNode<Object> CodeStubAssembler::AllocateRawDoubleAligned(
#endif
}
- TNode<Object> CodeStubAssembler::AllocateInNewSpace(
+ TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags) {
DCHECK(flags == kNone || flags == kDoubleAlignment);
CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
return Allocate(size_in_bytes, flags);
}
- TNode<Object> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
-     AllocationFlags flags) {
+ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
+     AllocationFlags flags) {
Comment("Allocate");
bool const new_space = !(flags & kPretenured);
TNode<ExternalReference> top_address = ExternalConstant(
@@ -1297,25 +1297,26 @@ TNode<Object> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
}
}
- TNode<Object> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
-     AllocationFlags flags) {
+ TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
+     AllocationFlags flags) {
CHECK(flags == kNone || flags == kDoubleAlignment);
DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize);
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
- TNode<Object> CodeStubAssembler::Allocate(int size_in_bytes,
-     AllocationFlags flags) {
+ TNode<HeapObject> CodeStubAssembler::Allocate(int size_in_bytes,
+     AllocationFlags flags) {
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
- TNode<Object> CodeStubAssembler::InnerAllocate(TNode<Object> previous,
-     TNode<IntPtrT> offset) {
-   return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset));
+ TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
+     TNode<IntPtrT> offset) {
+   return UncheckedCast<HeapObject>(
+       BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset)));
}
- TNode<Object> CodeStubAssembler::InnerAllocate(TNode<Object> previous,
-     int offset) {
+ TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
+     int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
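For context, InnerAllocate above simply offsets a tagged pointer within an already allocated block; combined with Allocate, this is how CSA folds several adjacent objects into a single bump allocation. A short usage sketch, not part of the CL, assuming total_size and kElementsOffset are computed as in the AllocateRegExpResult hunk earlier in this diff:

// One allocation that is large enough for both objects...
TNode<HeapObject> result = Allocate(total_size);
// ...then carve the second object out of the same block at a fixed offset.
TNode<HeapObject> elements = InnerAllocate(result, kElementsOffset);
// Each object's map and fields still have to be initialized explicitly,
// e.g. via StoreMapNoWriteBarrier and StoreObjectFieldNoWriteBarrier.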
@@ -3845,6 +3846,7 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
TNode<Map> array_map, TNode<Smi> length, Node* allocation_site) {
Comment("begin allocation of JSArray without elements");
+ CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
int base_size = JSArray::kSize;
@@ -12808,7 +12810,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
StoreFixedArrayElement(elements, 1, value);
Node* array_map = LoadContextElement(
native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
- TNode<Object> array = InnerAllocate(elements, elements_size);
+ TNode<HeapObject> array = InnerAllocate(elements, elements_size);
StoreMapNoWriteBarrier(array, array_map);
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -12816,7 +12818,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
Node* iterator_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
- TNode<Object> result = InnerAllocate(array, JSArray::kSize);
+ TNode<HeapObject> result = InnerAllocate(array, JSArray::kSize);
StoreMapNoWriteBarrier(result, iterator_map);
StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -647,13 +647,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> BitwiseOp(Node* left32, Node* right32, Operation bitwise_op);
// Allocate an object of the given size.
- TNode<Object> AllocateInNewSpace(TNode<IntPtrT> size,
-     AllocationFlags flags = kNone);
- TNode<Object> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
- TNode<Object> Allocate(TNode<IntPtrT> size, AllocationFlags flags = kNone);
- TNode<Object> Allocate(int size, AllocationFlags flags = kNone);
- TNode<Object> InnerAllocate(TNode<Object> previous, int offset);
- TNode<Object> InnerAllocate(TNode<Object> previous, TNode<IntPtrT> offset);
+ TNode<HeapObject> AllocateInNewSpace(TNode<IntPtrT> size,
+     AllocationFlags flags = kNone);
+ TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> Allocate(TNode<IntPtrT> size,
+     AllocationFlags flags = kNone);
+ TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
+ TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous,
+     TNode<IntPtrT> offset);
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
@@ -3197,17 +3199,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void HandleBreakOnNode();
- TNode<Object> AllocateRawDoubleAligned(TNode<IntPtrT> size_in_bytes,
-     AllocationFlags flags,
-     TNode<RawPtrT> top_address,
-     TNode<RawPtrT> limit_address);
- TNode<Object> AllocateRawUnaligned(TNode<IntPtrT> size_in_bytes,
-     AllocationFlags flags,
-     TNode<RawPtrT> top_address,
-     TNode<RawPtrT> limit_address);
- TNode<Object> AllocateRaw(TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
-     TNode<RawPtrT> top_address,
-     TNode<RawPtrT> limit_address);
+ TNode<HeapObject> AllocateRawDoubleAligned(TNode<IntPtrT> size_in_bytes,
+     AllocationFlags flags,
+     TNode<RawPtrT> top_address,
+     TNode<RawPtrT> limit_address);
+ TNode<HeapObject> AllocateRawUnaligned(TNode<IntPtrT> size_in_bytes,
+     AllocationFlags flags,
+     TNode<RawPtrT> top_address,
+     TNode<RawPtrT> limit_address);
+ TNode<HeapObject> AllocateRaw(TNode<IntPtrT> size_in_bytes,
+     AllocationFlags flags,
+     TNode<RawPtrT> top_address,
+     TNode<RawPtrT> limit_address);
// Allocate and return a JSArray of given total size in bytes with header
// fields initialized.