Commit eeb0df63 authored by Leszek Swirski, committed by Commit Bot

[cleanup] More semi-automatic TNodification

Using the tool again, the previous iteration accidentally ignored
Node/TNode behind a typedef. Automatic replacement of types with
manual cleanup/addition of CASTs where necessary.

Bug: v8:9396
Change-Id: I33b6d229669cb80586d5d8e82c04542df671f0b9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1768367
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63409}
parent 6d34271b
......@@ -45,7 +45,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
StoreMapNoWriteBarrier(result, map);
TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array);
Node* smi_arguments_count = ParameterToTagged(arguments_count, mode);
TNode<Smi> smi_arguments_count = ParameterToTagged(arguments_count, mode);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
smi_arguments_count);
Node* arguments = nullptr;
......@@ -63,7 +63,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
parameter_map = InnerAllocate(CAST(arguments), parameter_map_offset);
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
parameter_map);
Node* sloppy_elements_map = SloppyArgumentsElementsMapConstant();
TNode<Map> sloppy_elements_map = SloppyArgumentsElementsMapConstant();
StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map);
parameter_map_count = ParameterToTagged(parameter_map_count, mode);
StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset,
......@@ -121,8 +121,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
Node* rest_count =
IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count, mode);
Node* const native_context = LoadNativeContext(context);
Node* const array_map =
TNode<Context> const native_context = LoadNativeContext(context);
TNode<Map> const array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode),
&no_rest_parameters);
......@@ -173,8 +173,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
info.argument_count, &runtime,
JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
Node* const native_context = LoadNativeContext(context);
Node* const map =
TNode<Context> const native_context = LoadNativeContext(context);
TNode<Object> const map =
LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
GotoIf(BIntEqual(info.argument_count, zero), &empty);
......@@ -237,8 +237,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
elements_allocated, &runtime,
JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode);
Node* const native_context = LoadNativeContext(context);
Node* const map = LoadContextElement(
TNode<Context> const native_context = LoadNativeContext(context);
TNode<Object> const map = LoadContextElement(
native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
Node* argument_object;
Node* elements;
......@@ -252,26 +252,26 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
StoreFixedArrayElement(CAST(map_array), 1, elements, SKIP_WRITE_BARRIER);
Comment("Fill in non-mapped parameters");
Node* argument_offset =
TNode<IntPtrT> argument_offset =
ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
Node* mapped_offset =
TNode<IntPtrT> mapped_offset =
ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
CodeStubArguments arguments(this, info.argument_count, info.frame, mode);
VARIABLE(current_argument, MachineType::PointerRepresentation());
current_argument.Bind(arguments.AtIndexPtr(info.argument_count, mode));
VariableList var_list1({&current_argument}, zone());
mapped_offset = BuildFastLoop(
mapped_offset = UncheckedCast<IntPtrT>(BuildFastLoop(
var_list1, argument_offset, mapped_offset,
[this, elements, &current_argument](Node* offset) {
Increment(&current_argument, kSystemPointerSize);
Node* arg = LoadBufferObject(
TNode<Object> arg = LoadBufferObject(
UncheckedCast<RawPtrT>(current_argument.value()), 0);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
arg);
},
-kTaggedSize, INTPTR_PARAMETERS);
-kTaggedSize, INTPTR_PARAMETERS));
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_count slots. They index the context,
......@@ -287,13 +287,13 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode),
info.formal_parameter_count, mode),
mapped_count, mode));
Node* the_hole = TheHoleConstant();
TNode<Oddball> the_hole = TheHoleConstant();
VariableList var_list2({&context_index}, zone());
const int kParameterMapHeaderSize = FixedArray::OffsetOfElementAt(2);
Node* adjusted_map_array = IntPtrAdd(
TNode<IntPtrT> adjusted_map_array = IntPtrAdd(
BitcastTaggedToWord(map_array),
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
Node* zero_offset = ElementOffsetFromIndex(
TNode<IntPtrT> zero_offset = ElementOffsetFromIndex(
zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
BuildFastLoop(
var_list2, mapped_offset, zero_offset,
......@@ -317,8 +317,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
GotoIfFixedArraySizeDoesntFitInNewSpace(
info.argument_count, &runtime,
JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
Node* const native_context = LoadNativeContext(context);
Node* const map =
TNode<Context> const native_context = LoadNativeContext(context);
TNode<Object> const map =
LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
result.Bind(ConstructParametersObjectFromArgs(
map, info.frame, info.argument_count, zero, info.argument_count, mode,
......@@ -331,8 +331,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
BIND(&empty);
{
Comment("Empty JSSloppyArgumentsObject");
Node* const native_context = LoadNativeContext(context);
Node* const map =
TNode<Context> const native_context = LoadNativeContext(context);
TNode<Object> const map =
LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
Node* arguments;
Node* elements;
......
This diff is collapsed.
......@@ -263,8 +263,8 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* outer_promise = LoadObjectField(async_function_object,
JSAsyncFunctionObject::kPromiseOffset);
TNode<Object> outer_promise = LoadObjectField(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
GotoIf(HasAsyncEventDelegate(), &call_debug_hook);
......
......@@ -28,7 +28,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
Node* on_resolve_context_index,
Node* on_reject_context_index,
Node* is_predicted_as_caught) {
Node* const native_context = LoadNativeContext(context);
TNode<Context> const native_context = LoadNativeContext(context);
static const int kWrappedPromiseOffset =
FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS);
......@@ -46,7 +46,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
StoreMapNoWriteBarrier(closure_context, RootIndex::kAwaitContextMap);
StoreObjectFieldNoWriteBarrier(closure_context, Context::kLengthOffset,
SmiConstant(Context::MIN_CONTEXT_SLOTS));
Node* const empty_scope_info =
TNode<Object> const empty_scope_info =
LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
StoreContextElementNoWriteBarrier(
closure_context, Context::SCOPE_INFO_INDEX, empty_scope_info);
......@@ -59,11 +59,11 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
}
// Let promiseCapability be ! NewPromiseCapability(%Promise%).
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
TNode<JSFunction> const promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
Node* const promise_map =
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
TNode<Map> const promise_map = CAST(
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset));
// Assert that the JSPromise map has an instance size is
// JSPromise::kSizeWithEmbedderFields.
CSA_ASSERT(this,
......@@ -119,7 +119,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
Node* on_resolve_context_index,
Node* on_reject_context_index,
Node* is_predicted_as_caught) {
Node* const native_context = LoadNativeContext(context);
TNode<Context> const native_context = LoadNativeContext(context);
CSA_ASSERT(this, IsJSPromise(promise));
static const int kResolveClosureOffset =
......@@ -140,7 +140,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
StoreMapNoWriteBarrier(closure_context, RootIndex::kAwaitContextMap);
StoreObjectFieldNoWriteBarrier(closure_context, Context::kLengthOffset,
SmiConstant(Context::MIN_CONTEXT_SLOTS));
Node* const empty_scope_info =
TNode<Object> const empty_scope_info =
LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
StoreContextElementNoWriteBarrier(
closure_context, Context::SCOPE_INFO_INDEX, empty_scope_info);
......@@ -278,12 +278,10 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
Node* done) {
Node* const map = LoadContextElement(
TNode<Object> const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
Node* const on_fulfilled_shared = LoadContextElement(
native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN);
CSA_ASSERT(this,
HasInstanceType(on_fulfilled_shared, SHARED_FUNCTION_INFO_TYPE));
TNode<SharedFunctionInfo> const on_fulfilled_shared = CAST(LoadContextElement(
native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN));
Node* const closure_context =
AllocateAsyncIteratorValueUnwrapContext(native_context, done);
return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared,
......@@ -306,10 +304,11 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
Node* const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
CSA_ASSERT(this, IsBoolean(done));
TNode<Object> const done =
LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
CSA_ASSERT(this, IsBoolean(CAST(done)));
Node* const unwrapped_value =
TNode<Object> const unwrapped_value =
CallBuiltin(Builtins::kCreateIterResultObject, context, value, done);
Return(unwrapped_value);
......
......@@ -25,12 +25,12 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
inline Node* TaggedIsAsyncGenerator(Node* tagged_object) {
TNode<BoolT> if_notsmi = TaggedIsNotSmi(tagged_object);
return Select<BoolT>(if_notsmi,
[=] {
return HasInstanceType(
tagged_object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
},
[=] { return if_notsmi; });
return Select<BoolT>(
if_notsmi,
[=] {
return HasInstanceType(tagged_object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
},
[=] { return if_notsmi; });
}
inline Node* LoadGeneratorState(Node* const generator) {
return LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
......@@ -93,7 +93,7 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
inline Node* IsFastJSIterResult(Node* const value, Node* const context) {
CSA_ASSERT(this, TaggedIsNotSmi(value));
Node* const native_context = LoadNativeContext(context);
TNode<Context> const native_context = LoadNativeContext(context);
return TaggedEqual(
LoadMap(value),
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
......@@ -200,7 +200,7 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
JSAsyncGeneratorObject::ResumeMode resume_mode, Node* resume_value,
Node* promise) {
CSA_SLOW_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
Node* request = Allocate(AsyncGeneratorRequest::kSize);
TNode<HeapObject> request = Allocate(AsyncGeneratorRequest::kSize);
StoreMapNoWriteBarrier(request, RootIndex::kAsyncGeneratorRequestMap);
StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kNextOffset,
UndefinedConstant());
......@@ -219,7 +219,8 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
Node* context, Node* value,
JSAsyncGeneratorObject::ResumeMode resume_mode) {
Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
TNode<Object> const generator =
LoadContextElement(context, Context::EXTENSION_INDEX);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
SetGeneratorNotAwaiting(generator);
......@@ -276,7 +277,8 @@ void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue(
{
Label loop_next(this), next_empty(this);
Node* current = var_current.value();
Node* next = LoadObjectField(current, AsyncGeneratorRequest::kNextOffset);
TNode<Object> next =
LoadObjectField(current, AsyncGeneratorRequest::kNextOffset);
Branch(IsUndefined(next), &next_empty, &loop_next);
BIND(&next_empty);
......@@ -299,11 +301,11 @@ Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
// Removes and returns the first AsyncGeneratorRequest from a
// JSAsyncGeneratorObject's queue. Asserts that the queue is not empty.
CSA_ASSERT(this, TaggedIsAsyncGenerator(generator));
Node* request =
LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset);
CSA_ASSERT(this, IsNotUndefined(request));
TNode<AsyncGeneratorRequest> request =
CAST(LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset));
Node* next = LoadObjectField(request, AsyncGeneratorRequest::kNextOffset);
TNode<Object> next =
LoadObjectField(request, AsyncGeneratorRequest::kNextOffset);
StoreObjectField(generator, JSAsyncGeneratorObject::kQueueOffset, next);
return request;
......@@ -315,12 +317,12 @@ Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* generator = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
Node* context = Parameter(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
......@@ -333,12 +335,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* generator = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
Node* context = Parameter(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
......@@ -351,12 +353,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* generator = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
Node* context = Parameter(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
......@@ -446,8 +448,8 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
// generator is not closed, resume the generator with a "throw" completion.
// If the generator was closed, perform AsyncGeneratorReject(thrownValue).
// In all cases, the last step is to call AsyncGeneratorResumeNext.
Node* is_caught = CallRuntime(Runtime::kAsyncGeneratorHasCatchHandlerForPC,
context, generator);
TNode<Object> is_caught = CallRuntime(
Runtime::kAsyncGeneratorHasCatchHandlerForPC, context, generator);
TailCallBuiltin(Builtins::kAsyncGeneratorReturn, context, generator,
next_value, is_caught);
......@@ -501,10 +503,10 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
// Let iteratorResult be CreateIterResultObject(value, done).
Node* const iter_result = Allocate(JSIteratorResult::kSize);
TNode<HeapObject> const iter_result = Allocate(JSIteratorResult::kSize);
{
Node* map = LoadContextElement(LoadNativeContext(context),
Context::ITERATOR_RESULT_MAP_INDEX);
TNode<Object> map = LoadContextElement(LoadNativeContext(context),
Context::ITERATOR_RESULT_MAP_INDEX);
StoreMapNoWriteBarrier(iter_result, map);
StoreObjectFieldRoot(iter_result, JSIteratorResult::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
......@@ -585,7 +587,8 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
TNode<Object> const generator =
LoadContextElement(context, Context::EXTENSION_INDEX);
SetGeneratorNotAwaiting(generator);
......@@ -665,7 +668,8 @@ TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
TNode<Object> const generator =
LoadContextElement(context, Context::EXTENSION_INDEX);
SetGeneratorNotAwaiting(generator);
......@@ -682,7 +686,8 @@ TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
TNode<Object> const generator =
LoadContextElement(context, Context::EXTENSION_INDEX);
SetGeneratorNotAwaiting(generator);
......
......@@ -98,7 +98,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
const UndefinedMethodHandler& if_method_undefined,
const char* operation_name, Label::Type reject_label_type,
Node* const initial_exception_value) {
Node* const native_context = LoadNativeContext(context);
TNode<Context> const native_context = LoadNativeContext(context);
Node* const promise = AllocateAndInitJSPromise(context);
VARIABLE(var_exception, MachineRepresentation::kTagged,
......@@ -109,7 +109,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise,
&var_exception, operation_name);
Node* const sync_iterator =
TNode<Object> const sync_iterator =
LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
Node* const method = get_method(sync_iterator);
......@@ -132,13 +132,13 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
std::tie(value, done) = LoadIteratorResult(
context, native_context, iter_result, &reject_promise, &var_exception);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
TNode<JSFunction> const promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
CSA_ASSERT(this, IsConstructor(promise_fun));
// Let valueWrapper be PromiseResolve(%Promise%, « value »).
Node* const value_wrapper = CallBuiltin(Builtins::kPromiseResolve,
native_context, promise_fun, value);
TNode<Object> const value_wrapper = CallBuiltin(
Builtins::kPromiseResolve, native_context, promise_fun, value);
// IfAbruptRejectPromise(valueWrapper, promiseCapability).
GotoIfException(value_wrapper, &reject_promise, &var_exception);
......@@ -190,13 +190,13 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
{
// Let nextDone be IteratorComplete(nextResult).
// IfAbruptRejectPromise(nextDone, promiseCapability).
Node* const done =
TNode<Object> const done =
GetProperty(context, iter_result, factory()->done_string());
GotoIfException(done, if_exception, var_exception);
// Let nextValue be IteratorValue(nextResult).
// IfAbruptRejectPromise(nextValue, promiseCapability).
Node* const value =
TNode<Object> const value =
GetProperty(context, iter_result, factory()->value_string());
GotoIfException(value, if_exception, var_exception);
......@@ -222,7 +222,7 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
BIND(&to_boolean);
{
Node* const result =
TNode<Object> const result =
CallBuiltin(Builtins::kToBoolean, context, var_done.value());
var_done.Bind(result);
Goto(&done);
......@@ -261,8 +261,8 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
Node* const promise, Label* if_exception) {
// If return is undefined, then
// Let iterResult be ! CreateIterResultObject(value, true)
Node* const iter_result = CallBuiltin(Builtins::kCreateIterResultObject,
context, value, TrueConstant());
TNode<Object> const iter_result = CallBuiltin(
Builtins::kCreateIterResultObject, context, value, TrueConstant());
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
// IfAbruptRejectPromise(nextDone, promiseCapability).
......
This diff is collapsed.
This diff is collapsed.
......@@ -29,7 +29,7 @@ class ConversionBuiltinsAssembler : public CodeStubAssembler {
void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
Node* context, Node* input, ToPrimitiveHint hint) {
// Lookup the @@toPrimitive property on the {input}.
Node* exotic_to_prim =
TNode<Object> exotic_to_prim =
GetProperty(context, input, factory()->to_primitive_symbol());
// Check if {exotic_to_prim} is neither null nor undefined.
......@@ -40,7 +40,8 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
// representation of the {hint}.
Callable callable =
CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined);
Node* hint_string = HeapConstant(factory()->ToPrimitiveHintString(hint));
TNode<String> hint_string =
HeapConstant(factory()->ToPrimitiveHintString(hint));
Node* result =
CallJS(callable, context, exotic_to_prim, input, hint_string);
......@@ -48,7 +49,7 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
Label if_resultisprimitive(this),
if_resultisnotprimitive(this, Label::kDeferred);
GotoIf(TaggedIsSmi(result), &if_resultisprimitive);
Node* result_instance_type = LoadInstanceType(result);
TNode<Uint16T> result_instance_type = LoadInstanceType(result);
Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive,
&if_resultisnotprimitive);
......@@ -119,7 +120,7 @@ TF_BUILTIN(ToName, CodeStubAssembler) {
Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this),
if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred);
GotoIf(TaggedIsSmi(input), &if_inputisnumber);
Node* input_instance_type = LoadInstanceType(input);
TNode<Uint16T> input_instance_type = LoadInstanceType(input);
STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname);
GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver);
......@@ -230,13 +231,13 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
}
for (Handle<String> name : method_names) {
// Lookup the {name} on the {input}.
Node* method = GetProperty(context, input, name);
TNode<Object> method = GetProperty(context, input, name);
// Check if the {method} is callable.
Label if_methodiscallable(this),
if_methodisnotcallable(this, Label::kDeferred);
GotoIf(TaggedIsSmi(method), &if_methodisnotcallable);
Node* method_map = LoadMap(method);
TNode<Map> method_map = LoadMap(CAST(method));
Branch(IsCallableMap(method_map), &if_methodiscallable,
&if_methodisnotcallable);
......@@ -250,7 +251,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
// Return the {result} if it is a primitive.
GotoIf(TaggedIsSmi(result), &return_result);
Node* result_instance_type = LoadInstanceType(result);
TNode<Uint16T> result_instance_type = LoadInstanceType(result);
GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result);
}
......@@ -340,7 +341,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
BIND(&if_lenisheapnumber);
{
// Load the floating-point value of {len}.
Node* len_value = LoadHeapNumberValue(len);
TNode<Float64T> len_value = LoadHeapNumberValue(len);
// Check if {len} is not greater than zero.
GotoIfNot(Float64GreaterThan(len_value, Float64Constant(0.0)),
......@@ -352,8 +353,8 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
&return_two53minus1);
// Round the {len} towards -Infinity.
Node* value = Float64Floor(len_value);
Node* result = ChangeFloat64ToTagged(value);
TNode<Float64T> value = Float64Floor(len_value);
TNode<Number> result = ChangeFloat64ToTagged(value);
Return(result);
}
......@@ -403,8 +404,8 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
GotoIf(TaggedIsSmi(object), &if_smi);
Node* map = LoadMap(object);
Node* instance_type = LoadMapInstanceType(map);
TNode<Map> map = LoadMap(object);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
GotoIf(IsJSReceiverInstanceType(instance_type), &if_jsreceiver);
TNode<IntPtrT> constructor_function_index =
......@@ -422,11 +423,11 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
BIND(&if_wrapjs_primitive_wrapper);
TNode<Context> native_context = LoadNativeContext(context);
Node* constructor = LoadContextElement(
native_context, constructor_function_index_var.value());
Node* initial_map =
TNode<JSFunction> constructor = CAST(LoadContextElement(
native_context, constructor_function_index_var.value()));
TNode<Object> initial_map =
LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* js_primitive_wrapper = Allocate(JSPrimitiveWrapper::kSize);
TNode<HeapObject> js_primitive_wrapper = Allocate(JSPrimitiveWrapper::kSize);
StoreMapNoWriteBarrier(js_primitive_wrapper, initial_map);
StoreObjectFieldRoot(js_primitive_wrapper,
JSPrimitiveWrapper::kPropertiesOrHashOffset,
......
......@@ -28,7 +28,7 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
Label receiver_not_date(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &receiver_not_date);
Node* receiver_instance_type = LoadInstanceType(receiver);
TNode<Uint16T> receiver_instance_type = LoadInstanceType(receiver);
GotoIfNot(InstanceTypeEqual(receiver_instance_type, JS_DATE_TYPE),
&receiver_not_date);
......@@ -50,8 +50,8 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
BIND(&stamp_mismatch);
}
Node* field_index_smi = SmiConstant(field_index);
Node* function =
TNode<Smi> field_index_smi = SmiConstant(field_index);
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::get_date_field_function());
Node* result = CallCFunction(
function, MachineType::AnyTagged(),
......@@ -223,7 +223,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
{
Callable callable = CodeFactory::OrdinaryToPrimitive(
isolate(), OrdinaryToPrimitiveHint::kNumber);
Node* result = CallStub(callable, context, receiver);
TNode<Object> result = CallStub(callable, context, receiver);
Return(result);
}
......@@ -232,7 +232,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
{
Callable callable = CodeFactory::OrdinaryToPrimitive(
isolate(), OrdinaryToPrimitiveHint::kString);
Node* result = CallStub(callable, context, receiver);
TNode<Object> result = CallStub(callable, context, receiver);
Return(result);
}
......
......@@ -25,12 +25,12 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
// Check that receiver has instance type of JS_FUNCTION_TYPE
Node* receiver = args.GetReceiver();
TNode<Object> receiver = args.GetReceiver();
GotoIf(TaggedIsSmi(receiver), &slow);
TNode<Map> receiver_map = LoadMap(receiver);
TNode<Map> receiver_map = LoadMap(CAST(receiver));
{
Node* instance_type = LoadMapInstanceType(receiver_map);
TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
GotoIfNot(
Word32Or(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE),
InstanceTypeEqual(instance_type, JS_BOUND_FUNCTION_TYPE)),
......@@ -68,7 +68,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
TNode<Object> maybe_length_accessor =
LoadValueByDescriptorEntry(descriptors, length_index);
GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
Node* length_value_map = LoadMap(CAST(maybe_length_accessor));
TNode<Map> length_value_map = LoadMap(CAST(maybe_length_accessor));
GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
const int name_index = JSFunction::kNameDescriptorIndex;
......@@ -89,7 +89,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
{
Label with_constructor(this);
VariableList vars({&bound_function_map}, zone());
Node* native_context = LoadNativeContext(context);
TNode<Context> native_context = LoadNativeContext(context);
Label map_done(this, vars);
GotoIf(IsConstructorMap(receiver_map), &with_constructor);
......@@ -164,7 +164,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// Allocate the resulting bound function.
Comment("Allocate the resulting bound function");
{
Node* bound_function = Allocate(JSBoundFunction::kSize);
TNode<HeapObject> bound_function = Allocate(JSBoundFunction::kSize);
StoreMapNoWriteBarrier(bound_function, bound_function_map.value());
StoreObjectFieldNoWriteBarrier(
bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver);
......@@ -174,7 +174,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
StoreObjectFieldNoWriteBarrier(bound_function,
JSBoundFunction::kBoundArgumentsOffset,
argument_array.value());
Node* empty_fixed_array = EmptyFixedArrayConstant();
TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
StoreObjectFieldNoWriteBarrier(
bound_function, JSObject::kPropertiesOrHashOffset, empty_fixed_array);
StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kElementsOffset,
......
......@@ -50,8 +50,8 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
// Resume the {receiver} using our trampoline.
VARIABLE(var_exception, MachineRepresentation::kTagged, UndefinedConstant());
Label if_exception(this, Label::kDeferred), if_final_return(this);
Node* result = CallStub(CodeFactory::ResumeGenerator(isolate()), context,
value, receiver);
TNode<Object> result = CallStub(CodeFactory::ResumeGenerator(isolate()),
context, value, receiver);
// Make sure we close the generator if there was an exception.
GotoIfException(result, &if_exception, &var_exception);
......@@ -115,12 +115,12 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* receiver = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
Node* context = Parameter(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, value, context,
......@@ -132,12 +132,12 @@ TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* receiver = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
Node* context = Parameter(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, value, context,
......@@ -149,12 +149,12 @@ TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
const int kExceptionArg = 0;
Node* argc =
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* receiver = args.GetReceiver();
Node* exception = args.GetOptionalArgumentValue(kExceptionArg);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> exception = args.GetOptionalArgumentValue(kExceptionArg);
Node* context = Parameter(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, exception, context,
......
......@@ -35,7 +35,7 @@ TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
BIND(&if_numisheapnumber);
{
// Check if {num} contains a finite, non-NaN value.
Node* num_value = LoadHeapNumberValue(num);
TNode<Float64T> num_value = LoadHeapNumberValue(num);
BranchIfFloat64IsNaN(Float64Sub(num_value, num_value), &return_false,
&return_true);
}
......
......@@ -35,7 +35,7 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
Node* object = Parameter(Descriptor::kObject);
// Load the {object}s elements.
Node* source = LoadObjectField(object, JSObject::kElementsOffset);
TNode<Object> source = LoadObjectField(object, JSObject::kElementsOffset);
Node* target = CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays);
StoreObjectField(object, JSObject::kElementsOffset, target);
Return(target);
......@@ -104,7 +104,7 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
// the mapped elements (i.e. the first {mapped_count}) with the hole, but
// make sure not to overshoot the {length} if some arguments are missing.
TNode<IntPtrT> number_of_holes = IntPtrMin(mapped_count, length);
Node* the_hole = TheHoleConstant();
TNode<Oddball> the_hole = TheHoleConstant();
// Fill the first elements up to {number_of_holes} with the hole.
TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
......@@ -213,7 +213,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
Node* IsMarking() {
Node* is_marking_addr = ExternalConstant(
TNode<ExternalReference> is_marking_addr = ExternalConstant(
ExternalReference::heap_is_marking_flag_address(this->isolate()));
return Load(MachineType::Uint8(), is_marking_addr);
}
......@@ -323,13 +323,13 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode,
Label* next) {
Node* store_buffer_top_addr =
TNode<ExternalReference> store_buffer_top_addr =
ExternalConstant(ExternalReference::store_buffer_top(this->isolate()));
Node* store_buffer_top =
Load(MachineType::Pointer(), store_buffer_top_addr);
StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
slot);
Node* new_store_buffer_top =
TNode<WordT> new_store_buffer_top =
IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize));
StoreNoWriteBarrier(MachineType::PointerRepresentation(),
store_buffer_top_addr, new_store_buffer_top);
......@@ -343,7 +343,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&overflow);
{
Node* function =
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::store_buffer_overflow_function());
CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(),
MachineType::Pointer(),
......@@ -396,7 +396,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
BIND(&store_buffer_exit);
{
Node* isolate_constant =
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit);
......@@ -404,7 +404,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
BIND(&store_buffer_incremental_wb);
{
Node* isolate_constant =
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode,
......@@ -436,9 +436,9 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
BIND(&call_incremental_wb);
{
Node* function = ExternalConstant(
TNode<ExternalReference> function = ExternalConstant(
ExternalReference::incremental_marking_record_write_function());
Node* isolate_constant =
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
TNode<IntPtrT> object =
......@@ -458,12 +458,12 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
TF_BUILTIN(EphemeronKeyBarrier, RecordWriteCodeStubAssembler) {
Label exit(this);
Node* function = ExternalConstant(
TNode<ExternalReference> function = ExternalConstant(
ExternalReference::ephemeron_key_write_barrier_function());
Node* isolate_constant =
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* address = Parameter(Descriptor::kSlotAddress);
Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
TNode<IntPtrT> object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
Node* fp_mode = Parameter(Descriptor::kFPMode);
CallCFunction3WithCallerSavedRegistersMode(
MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
......@@ -991,7 +991,7 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
BIND(&if_proxy);
{
// Convert the {key} to a Name first.
Node* name = CallBuiltin(Builtins::kToName, context, key);
TNode<Object> name = CallBuiltin(Builtins::kToName, context, key);
// The {object} is a JSProxy instance, look up the {name} on it, passing
// {object} both as receiver and holder. If {name} is absent we can safely
......@@ -1056,7 +1056,7 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
BIND(&if_proxy);
{
// Convert the {key} to a Name first.
Node* name = CallBuiltin(Builtins::kToName, context, key);
TNode<Name> name = CAST(CallBuiltin(Builtins::kToName, context, key));
// Proxy cannot handle private symbol so bailout.
GotoIf(IsPrivateSymbol(name), &if_slow);
......
......@@ -54,7 +54,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
// For short strings, do the conversion in CSA through the lookup table.
Node* const dst = AllocateSeqOneByteString(length);
TNode<String> const dst = AllocateSeqOneByteString(length);
const int kMaxShortStringLength = 24; // Determined empirically.
GotoIf(Uint32GreaterThan(length, Uint32Constant(kMaxShortStringLength)),
......@@ -69,7 +69,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
TNode<IntPtrT> const end_address =
Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length)));
Node* const to_lower_table_addr =
TNode<ExternalReference> const to_lower_table_addr =
ExternalConstant(ExternalReference::intl_to_latin1_lower_table());
VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
......@@ -105,7 +105,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
{
TNode<String> const src = to_direct.string();
Node* const function_addr =
TNode<ExternalReference> const function_addr =
ExternalConstant(ExternalReference::intl_convert_one_byte_to_lower());
MachineType type_tagged = MachineType::AnyTagged();
......@@ -122,8 +122,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
BIND(&runtime);
{
Node* const result = CallRuntime(Runtime::kStringToLowerCaseIntl,
NoContextConstant(), string);
TNode<Object> const result = CallRuntime(Runtime::kStringToLowerCaseIntl,
NoContextConstant(), string);
Return(result);
}
}
......
......@@ -27,7 +27,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Node* object,
Label* if_exception,
Variable* exception) {
Node* method = GetIteratorMethod(context, object);
TNode<Object> method = GetIteratorMethod(context, object);
return GetIterator(context, object, method, if_exception, exception);
}
......@@ -44,7 +44,8 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
BIND(&if_not_callable);
{
Node* ret = CallRuntime(Runtime::kThrowIteratorError, context, object);
TNode<Object> ret =
CallRuntime(Runtime::kThrowIteratorError, context, object);
GotoIfException(ret, if_exception, exception);
Unreachable();
}
......@@ -61,13 +62,15 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
BIND(&if_notobject);
{
Node* ret = CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
TNode<Object> ret =
CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
GotoIfException(ret, if_exception, exception);
Unreachable();
}
BIND(&get_next);
Node* const next = GetProperty(context, iterator, factory()->next_string());
TNode<Object> const next =
GetProperty(context, iterator, factory()->next_string());
GotoIfException(next, if_exception, exception);
return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
......@@ -99,7 +102,7 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
// IteratorComplete
// 2. Return ToBoolean(? Get(iterResult, "done")).
Node* done = LoadObjectField(result, JSIteratorResult::kDoneOffset);
TNode<Object> done = LoadObjectField(result, JSIteratorResult::kDoneOffset);
BranchIfToBooleanIsTrue(done, if_done, &return_result);
BIND(&if_generic);
......@@ -112,14 +115,14 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
// IteratorComplete
// 2. Return ToBoolean(? Get(iterResult, "done")).
Node* done = GetProperty(context, result, factory()->done_string());
TNode<Object> done = GetProperty(context, result, factory()->done_string());
GotoIfException(done, if_exception, exception);
BranchIfToBooleanIsTrue(done, if_done, &return_result);
}
BIND(&if_notobject);
{
Node* ret =
TNode<Object> ret =
CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
GotoIfException(ret, if_exception, exception);
Unreachable();
......@@ -170,7 +173,7 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
CSA_ASSERT(this, IsJSReceiver(iterator.object));
// Let return be ? GetMethod(iterator, "return").
Node* method =
TNode<Object> method =
GetProperty(context, iterator.object, factory()->return_string());
GotoIfException(method, if_exception, exception);
......
......@@ -8,6 +8,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/objects/fixed-array.h"
namespace v8 {
namespace internal {
......@@ -39,7 +40,7 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
// check if support abs function
if (IsIntPtrAbsWithOverflowSupported()) {
Node* pair = IntPtrAbsWithOverflow(x);
TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAbsWithOverflow(x);
Node* overflow = Projection(1, pair);
GotoIf(overflow, &if_overflow);
......@@ -79,9 +80,9 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
BIND(&if_xisheapnumber);
{
Node* x_value = LoadHeapNumberValue(x);
Node* value = Float64Abs(x_value);
Node* result = AllocateHeapNumberWithValue(value);
TNode<Float64T> x_value = LoadHeapNumberValue(x);
TNode<Float64T> value = Float64Abs(x_value);
TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
Return(result);
}
......@@ -125,9 +126,9 @@ void MathBuiltinsAssembler::MathRoundingOperation(
BIND(&if_xisheapnumber);
{
Node* x_value = LoadHeapNumberValue(x);
Node* value = (this->*float64op)(x_value);
Node* result = ChangeFloat64ToTagged(value);
TNode<Float64T> x_value = LoadHeapNumberValue(x);
TNode<Float64T> value = (this->*float64op)(x_value);
TNode<Number> result = ChangeFloat64ToTagged(value);
Return(result);
}
......@@ -182,8 +183,8 @@ TF_BUILTIN(MathImul, CodeStubAssembler) {
Node* y = Parameter(Descriptor::kY);
Node* x_value = TruncateTaggedToWord32(context, x);
Node* y_value = TruncateTaggedToWord32(context, y);
Node* value = Int32Mul(x_value, y_value);
Node* result = ChangeInt32ToTagged(value);
TNode<Int32T> value = Signed(Int32Mul(x_value, y_value));
TNode<Number> result = ChangeInt32ToTagged(value);
Return(result);
}
......@@ -192,7 +193,7 @@ CodeStubAssembler::Node* MathBuiltinsAssembler::MathPow(Node* context,
Node* exponent) {
Node* base_value = TruncateTaggedToFloat64(context, base);
Node* exponent_value = TruncateTaggedToFloat64(context, exponent);
Node* value = Float64Pow(base_value, exponent_value);
TNode<Float64T> value = Float64Pow(base_value, exponent_value);
return ChangeFloat64ToTagged(value);
}
......@@ -205,7 +206,7 @@ TF_BUILTIN(MathPow, MathBuiltinsAssembler) {
// ES6 #sec-math.random
TF_BUILTIN(MathRandom, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* native_context = LoadNativeContext(context);
TNode<Context> native_context = LoadNativeContext(context);
// Load cache index.
TVARIABLE(Smi, smi_index);
......@@ -217,9 +218,9 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
GotoIf(SmiAbove(smi_index.value(), SmiConstant(0)), &if_cached);
// Cache exhausted, populate the cache. Return value is the new index.
Node* const refill_math_random =
TNode<ExternalReference> const refill_math_random =
ExternalConstant(ExternalReference::refill_math_random());
Node* const isolate_ptr =
TNode<ExternalReference> const isolate_ptr =
ExternalConstant(ExternalReference::isolate_address(isolate()));
MachineType type_tagged = MachineType::AnyTagged();
MachineType type_ptr = MachineType::Pointer();
......@@ -236,9 +237,9 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
new_smi_index);
// Load and return next cached random number.
Node* array =
LoadContextElement(native_context, Context::MATH_RANDOM_CACHE_INDEX);
Node* random = LoadFixedDoubleArrayElement(
TNode<FixedDoubleArray> array = CAST(
LoadContextElement(native_context, Context::MATH_RANDOM_CACHE_INDEX));
TNode<Float64T> random = LoadFixedDoubleArrayElement(
array, new_smi_index, MachineType::Float64(), 0, SMI_PARAMETERS);
Return(AllocateHeapNumberWithValue(random));
}
......
......@@ -165,9 +165,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
BIND(&is_callback);
{
Node* const microtask_callback =
TNode<Object> const microtask_callback =
LoadObjectField(microtask, CallbackTask::kCallbackOffset);
Node* const microtask_data =
TNode<Object> const microtask_data =
LoadObjectField(microtask, CallbackTask::kDataOffset);
// If this turns out to become a bottleneck because of the calls
......@@ -180,7 +180,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
// But from our current measurements it doesn't seem to be a
// serious performance problem, even if the microtask is full
// of CallHandlerTasks (which is not a realistic use case anyways).
Node* const result =
TNode<Object> const result =
CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
microtask_callback, microtask_data);
GotoIfException(result, &if_exception, &var_exception);
......@@ -195,14 +195,14 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> native_context = LoadNativeContext(microtask_context);
PrepareForContext(native_context, &done);
Node* const promise_to_resolve = LoadObjectField(
TNode<Object> const promise_to_resolve = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
Node* const then =
TNode<Object> const then =
LoadObjectField(microtask, PromiseResolveThenableJobTask::kThenOffset);
Node* const thenable = LoadObjectField(
TNode<Object> const thenable = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kThenableOffset);
Node* const result =
TNode<Object> const result =
CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
promise_to_resolve, thenable, then);
GotoIfException(result, &if_exception, &var_exception);
......@@ -219,18 +219,18 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> native_context = LoadNativeContext(microtask_context);
PrepareForContext(native_context, &done);
Node* const argument =
TNode<Object> const argument =
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
Node* const handler =
TNode<Object> const handler =
LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
Node* const promise_or_capability = LoadObjectField(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
TNode<HeapObject> const promise_or_capability = CAST(LoadObjectField(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset));
// Run the promise before/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
promise_or_capability);
Node* const result =
TNode<Object> const result =
CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
argument, handler, promise_or_capability);
GotoIfException(result, &if_exception, &var_exception);
......@@ -252,18 +252,18 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> native_context = LoadNativeContext(microtask_context);
PrepareForContext(native_context, &done);
Node* const argument =
TNode<Object> const argument =
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
Node* const handler =
TNode<Object> const handler =
LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
Node* const promise_or_capability = LoadObjectField(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
TNode<HeapObject> const promise_or_capability = CAST(LoadObjectField(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset));
// Run the promise before/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
promise_or_capability);
Node* const result =
TNode<Object> const result =
CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
argument, handler, promise_or_capability);
GotoIfException(result, &if_exception, &var_exception);
......@@ -381,7 +381,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
BIND(&if_grow);
{
Node* function =
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::call_enter_context_function());
CallCFunction(function, MachineType::Int32(),
std::make_pair(MachineType::Pointer(), hsi),
......@@ -480,9 +480,9 @@ TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
// implementation to grow the buffer.
BIND(&if_grow);
{
Node* isolate_constant =
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* function =
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::call_enqueue_microtask_function());
CallCFunction(function, MachineType::AnyTagged(),
std::make_pair(MachineType::Pointer(), isolate_constant),
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -48,7 +48,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
void FastStoreLastIndex(TNode<JSRegExp> regexp, TNode<Smi> value);
void SlowStoreLastIndex(SloppyTNode<Context> context,
SloppyTNode<Object> regexp,
SloppyTNode<Number> value);
SloppyTNode<Object> value);
void StoreLastIndex(TNode<Context> context, TNode<Object> regexp,
TNode<Number> value, bool is_fastpath);
......
......@@ -54,7 +54,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
GotoIf(TaggedIsSmi(tagged), &invalid);
// Fail if the array's instance type is not JSTypedArray.
Node* tagged_map = LoadMap(tagged);
TNode<Map> tagged_map = LoadMap(tagged);
GotoIfNot(IsJSTypedArrayMap(tagged_map), &invalid);
// Fail if the array's JSArrayBuffer is not shared.
......@@ -175,7 +175,7 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
Node* index_word32 =
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
Node* index_word = ChangeUint32ToWord(index_word32);
TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
i64(this), u64(this), other(this);
......@@ -247,14 +247,14 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
Node* index_word32 =
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
Node* index_word = ChangeUint32ToWord(index_word32);
TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
Label u8(this), u16(this), u32(this), u64(this), other(this);
STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &u64);
Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
#if DEBUG
......@@ -326,7 +326,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer,
value));
#else
Node* index_word = ChangeUint32ToWord(index_word32);
TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
i64(this), u64(this), big(this), other(this);
......@@ -334,7 +334,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
......@@ -429,7 +429,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
index_integer, old_value, new_value));
#else
Node* index_word = ChangeUint32ToWord(index_word32);
TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
i64(this), u64(this), big(this), other(this);
......@@ -437,8 +437,10 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
TNode<Number> old_value_integer =
ToInteger_Inline(CAST(context), CAST(old_value));
TNode<Number> new_value_integer =
ToInteger_Inline(CAST(context), CAST(new_value));
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
......@@ -556,7 +558,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
Return(CallRuntime(runtime_function, context, array, index_integer, value));
#else
Node* index_word = ChangeUint32ToWord(index_word32);
TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
i64(this), u64(this), big(this), other(this);
......@@ -565,7 +567,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
......
This diff is collapsed.
......@@ -51,8 +51,9 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* const search_ptr, Node* const search_length,
Node* const start_position);
Node* PointerToStringDataAtIndex(Node* const string_data, Node* const index,
String::Encoding encoding);
TNode<IntPtrT> PointerToStringDataAtIndex(Node* const string_data,
Node* const index,
String::Encoding encoding);
// substr and slice have a common way of handling the {start} argument.
void ConvertAndBoundsCheckStartArgument(Node* context, Variable* var_start,
......
......@@ -16,9 +16,10 @@ TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.description");
Node* result = LoadObjectField(value, Symbol::kNameOffset);
TNode<Symbol> value =
CAST(ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.description"));
TNode<Object> result = LoadObjectField(value, Symbol::kNameOffset);
Return(result);
}
......@@ -27,8 +28,8 @@ TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype [ @@toPrimitive ]");
TNode<Object> result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype [ @@toPrimitive ]");
Return(result);
}
......@@ -37,9 +38,10 @@ TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.toString");
Node* result = CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
TNode<Object> value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.toString");
TNode<Object> result =
CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
Return(result);
}
......@@ -48,8 +50,8 @@ TF_BUILTIN(SymbolPrototypeValueOf, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.valueOf");
TNode<Object> result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.valueOf");
Return(result);
}
......
......@@ -89,12 +89,12 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
Node* argc =
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* arg1 = args.GetOptionalArgumentValue(0);
Node* arg2 = args.GetOptionalArgumentValue(1);
Node* arg3 = args.GetOptionalArgumentValue(2);
TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
TNode<Object> arg3 = args.GetOptionalArgumentValue(2);
// If NewTarget is undefined, throw a TypeError exception.
// All the TypedArray constructors have this as the first step:
......@@ -102,8 +102,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
Label throwtypeerror(this, Label::kDeferred);
GotoIf(IsUndefined(new_target), &throwtypeerror);
Node* result = CallBuiltin(Builtins::kCreateTypedArray, context, target,
new_target, arg1, arg2, arg3);
TNode<Object> result = CallBuiltin(Builtins::kCreateTypedArray, context,
target, new_target, arg1, arg2, arg3);
args.PopAndReturn(result);
BIND(&throwtypeerror);
......@@ -649,7 +649,7 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
// that this can be turned into a non-sparse table switch for ideal
// performance.
BIND(&if_receiverisheapobject);
Node* elements_kind =
TNode<Int32T> elements_kind =
Int32Sub(LoadElementsKind(receiver),
Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
Switch(elements_kind, &return_undefined, elements_kinds, elements_kind_labels,
......@@ -893,7 +893,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
// This is not a spec'd limit, so it doesn't particularly matter when we
// throw the range error for typed array length > MaxSmi.
TNode<UnionT<Smi, HeapNumber>> raw_length = LoadJSArrayLength(values);
TNode<Number> raw_length = LoadJSArrayLength(values);
GotoIfNot(TaggedIsSmi(raw_length), &if_length_not_smi);
final_length = CAST(raw_length);
......
This diff is collapsed.
......@@ -3510,7 +3510,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <class... TArgs>
Node* MakeTypeError(MessageTemplate message, Node* context, TArgs... args) {
STATIC_ASSERT(sizeof...(TArgs) <= 3);
Node* const make_type_error = LoadContextElement(
TNode<Object> const make_type_error = LoadContextElement(
LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX);
return CallJS(CodeFactory::Call(isolate()), context, make_type_error,
UndefinedConstant(), SmiConstant(message), args...);
......
......@@ -34,9 +34,9 @@ namespace compiler {
static_assert(std::is_convertible<TNode<Number>, TNode<Object>>::value,
"test subtyping");
static_assert(std::is_convertible<TNode<UnionT<Smi, HeapNumber>>,
TNode<UnionT<Smi, HeapObject>>>::value,
"test subtyping");
static_assert(
std::is_convertible<TNode<Number>, TNode<UnionT<Smi, HeapObject>>>::value,
"test subtyping");
static_assert(
!std::is_convertible<TNode<UnionT<Smi, HeapObject>>, TNode<Number>>::value,
"test subtyping");
......
......@@ -644,7 +644,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(BitcastInt32ToFloat32, Float32T, Word32T) \
V(BitcastFloat32ToInt32, Uint32T, Float32T) \
V(RoundFloat64ToInt32, Int32T, Float64T) \
V(RoundInt32ToFloat32, Int32T, Float32T) \
V(RoundInt32ToFloat32, Float32T, Int32T) \
V(Float64SilenceNaN, Float64T, Float64T) \
V(Float64RoundDown, Float64T, Float64T) \
V(Float64RoundUp, Float64T, Float64T) \
......
This diff is collapsed.
......@@ -114,8 +114,9 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
BIND(&do_fadd);
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
Node* value = Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
Node* result = AllocateHeapNumberWithValue(value);
TNode<Float64T> value =
Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
Goto(&end);
}
......@@ -124,8 +125,9 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
{
// No checks on rhs are done yet. We just know lhs is not a number or Smi.
Label if_lhsisoddball(this), if_lhsisnotoddball(this);
Node* lhs_instance_type = LoadInstanceType(lhs);
Node* lhs_is_oddball = InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
TNode<BoolT> lhs_is_oddball =
InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);
BIND(&if_lhsisoddball);
......@@ -154,7 +156,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
// Check if the {rhs} is a smi, and exit the string check early if it is.
GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
Node* rhs_instance_type = LoadInstanceType(rhs);
TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
// Exit unless {rhs} is a string. Since {lhs} is a string we no longer
// need an Oddball check.
......@@ -173,8 +175,9 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
{
// Check if rhs is an oddball. At this point we know lhs is either a
// Smi or number or oddball and rhs is not a number or Smi.
Node* rhs_instance_type = LoadInstanceType(rhs);
Node* rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
TNode<BoolT> rhs_is_oddball =
InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
Goto(&call_with_any_feedback);
}
......@@ -322,9 +325,10 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
// No checks on rhs are done yet. We just know lhs is not a number or Smi.
Label if_left_bigint(this), if_left_oddball(this);
Node* lhs_instance_type = LoadInstanceType(lhs);
TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_left_bigint);
Node* lhs_is_oddball = InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
TNode<BoolT> lhs_is_oddball =
InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
Branch(lhs_is_oddball, &if_left_oddball, &call_with_any_feedback);
BIND(&if_left_oddball);
......@@ -361,9 +365,10 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
// Check if rhs is an oddball. At this point we know lhs is either a
// Smi or number or oddball and rhs is not a number or Smi.
Node* rhs_instance_type = LoadInstanceType(rhs);
TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
GotoIf(IsBigIntInstanceType(rhs_instance_type), &if_bigint);
Node* rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
TNode<BoolT> rhs_is_oddball =
InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
GotoIfNot(rhs_is_oddball, &call_with_any_feedback);
var_type_feedback.Bind(
......@@ -437,7 +442,7 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
BIND(&if_overflow);
{
var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
Node* value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
TNode<Float64T> value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
var_result = AllocateHeapNumberWithValue(value);
Goto(&end);
}
......@@ -490,7 +495,7 @@ Node* BinaryOpAssembler::Generate_DivideWithFeedback(
{
var_type_feedback->Bind(
SmiConstant(BinaryOperationFeedback::kSignedSmallInputs));
Node* value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
TNode<Float64T> value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
var_result.Bind(AllocateHeapNumberWithValue(value));
Goto(&end);
}
......@@ -528,7 +533,7 @@ Node* BinaryOpAssembler::Generate_ExponentiateWithFeedback(
Node* context, Node* base, Node* exponent, Node* slot_id,
Node* feedback_vector, bool rhs_is_smi) {
// We currently don't optimize exponentiation based on feedback.
Node* dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
TNode<Smi> dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
UpdateFeedback(dummy_feedback, feedback_vector, slot_id);
return CallBuiltin(Builtins::kExponentiate, context, base, exponent);
}
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment