Commit 09af9adf authored by Santiago Aboy Solanes, committed by Commit Bot

[CSA][cleanup] TNodify the binary op assembler

Bug: v8:6949, v8:9396
Change-Id: I4c9382079190379661a26fbe6e1f4f6040a56d08
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1792902
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63658}
parent 67180425
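
The patch applies one mechanical pattern throughout: untyped compiler::Node* parameters and VARIABLE/.Bind() locals become statically typed TNode<T> parameters and TVARIABLE assignments, with CAST() introduced wherever a tagged Object has to be narrowed (to Smi, HeapObject, etc.) before a typed helper is called; the feedback-vector argument is also renamed to maybe_feedback_vector, presumably because it may not hold an allocated vector. A minimal before/after sketch of that pattern; the assembler and function names below are illustrative, not taken from the patch:

// Before: untyped nodes, manually specified machine representations.
Node* ExampleAssembler::AddOne(Node* context, Node* value) {
  VARIABLE(var_result, MachineRepresentation::kTagged);
  var_result.Bind(SmiAdd(CAST(value), SmiConstant(1)));
  return var_result.value();
}

// After: typed parameters and TVARIABLE locals; CAST() performs a
// (debug-checked) downcast from Object to the type the fast path expects.
TNode<Object> ExampleAssembler::AddOne(TNode<Context> context,
                                       TNode<Object> value) {
  TVARIABLE(Object, var_result);
  TNode<Smi> value_smi = CAST(value);
  var_result = value_smi;  // implicit upcast Smi -> Object
  var_result = SmiAdd(value_smi, SmiConstant(1));
  return var_result.value();
}
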
@@ -9,21 +9,22 @@
 namespace v8 {
 namespace internal {
 
-using compiler::Node;
+template <typename T>
+using TNode = compiler::TNode<T>;
 
-Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
-                                                  Node* rhs, Node* slot_id,
-                                                  Node* feedback_vector,
-                                                  bool rhs_is_smi) {
+TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
+    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
+    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+    bool rhs_is_smi) {
   // Shared entry for floating point addition.
   Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred),
       check_rhsisoddball(this, Label::kDeferred),
       call_with_oddball_feedback(this), call_with_any_feedback(this),
       call_add_stub(this), end(this), bigint(this, Label::kDeferred);
-  VARIABLE(var_fadd_lhs, MachineRepresentation::kFloat64);
-  VARIABLE(var_fadd_rhs, MachineRepresentation::kFloat64);
-  VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
-  VARIABLE(var_result, MachineRepresentation::kTagged);
+  TVARIABLE(Float64T, var_fadd_lhs);
+  TVARIABLE(Float64T, var_fadd_rhs);
+  TVARIABLE(Smi, var_type_feedback);
+  TVARIABLE(Object, var_result);
 
   // Check if the {lhs} is a Smi or a HeapObject.
   Label if_lhsissmi(this);
@@ -38,6 +39,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
   BIND(&if_lhsissmi);
   {
     Comment("lhs is Smi");
+    TNode<Smi> lhs_smi = CAST(lhs);
     if (!rhs_is_smi) {
       // Check if the {rhs} is also a Smi.
       Label if_rhsissmi(this), if_rhsisnotsmi(this);
@@ -46,10 +48,11 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
       BIND(&if_rhsisnotsmi);
       {
         // Check if the {rhs} is a HeapNumber.
-        GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+        TNode<HeapObject> rhs_heap_object = CAST(rhs);
+        GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);
 
-        var_fadd_lhs.Bind(SmiToFloat64(lhs));
-        var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+        var_fadd_lhs = SmiToFloat64(lhs_smi);
+        var_fadd_rhs = LoadHeapNumberValue(rhs_heap_object);
         Goto(&do_fadd);
       }
@@ -62,21 +65,21 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
       // is for AddSmi operation. For the normal Add operation, we want to fast
       // path both Smi and Number operations, so this path should not be marked
       // as Deferred.
+      TNode<Smi> rhs_smi = CAST(rhs);
       Label if_overflow(this,
                         rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
-      TNode<Smi> smi_result = TrySmiAdd(CAST(lhs), CAST(rhs), &if_overflow);
+      TNode<Smi> smi_result = TrySmiAdd(lhs_smi, rhs_smi, &if_overflow);
       // Not overflowed.
       {
-        var_type_feedback.Bind(
-            SmiConstant(BinaryOperationFeedback::kSignedSmall));
-        var_result.Bind(smi_result);
+        var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
+        var_result = smi_result;
         Goto(&end);
       }
 
       BIND(&if_overflow);
       {
-        var_fadd_lhs.Bind(SmiToFloat64(lhs));
-        var_fadd_rhs.Bind(SmiToFloat64(rhs));
+        var_fadd_lhs = SmiToFloat64(lhs_smi);
+        var_fadd_rhs = SmiToFloat64(rhs_smi);
         Goto(&do_fadd);
       }
     }
@@ -85,7 +88,8 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
   BIND(&if_lhsisnotsmi);
   {
     // Check if {lhs} is a HeapNumber.
-    GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber);
+    TNode<HeapObject> lhs_heap_object = CAST(lhs);
+    GotoIfNot(IsHeapNumber(lhs_heap_object), &if_lhsisnotnumber);
 
     if (!rhs_is_smi) {
       // Check if the {rhs} is Smi.
@@ -95,29 +99,30 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
       BIND(&if_rhsisnotsmi);
       {
         // Check if the {rhs} is a HeapNumber.
-        GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+        TNode<HeapObject> rhs_heap_object = CAST(rhs);
+        GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);
 
-        var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
-        var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+        var_fadd_lhs = LoadHeapNumberValue(lhs_heap_object);
+        var_fadd_rhs = LoadHeapNumberValue(rhs_heap_object);
         Goto(&do_fadd);
       }
 
       BIND(&if_rhsissmi);
     }
 
     {
-      var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
-      var_fadd_rhs.Bind(SmiToFloat64(rhs));
+      var_fadd_lhs = LoadHeapNumberValue(lhs_heap_object);
+      var_fadd_rhs = SmiToFloat64(CAST(rhs));
       Goto(&do_fadd);
     }
   }
 
   BIND(&do_fadd);
   {
-    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
     TNode<Float64T> value =
         Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
     TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
-    var_result.Bind(result);
+    var_result = result;
     Goto(&end);
   }
@@ -125,7 +130,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
   {
     // No checks on rhs are done yet. We just know lhs is not a number or Smi.
     Label if_lhsisoddball(this), if_lhsisnotoddball(this);
-    TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
+    TNode<Uint16T> lhs_instance_type = LoadInstanceType(CAST(lhs));
     TNode<BoolT> lhs_is_oddball =
         InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
     Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);
@@ -135,39 +140,40 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
       GotoIf(TaggedIsSmi(rhs), &call_with_oddball_feedback);
 
       // Check if {rhs} is a HeapNumber.
-      Branch(IsHeapNumber(rhs), &call_with_oddball_feedback,
+      Branch(IsHeapNumber(CAST(rhs)), &call_with_oddball_feedback,
             &check_rhsisoddball);
     }
 
     BIND(&if_lhsisnotoddball);
     {
+      // Check if the {rhs} is a smi, and exit the string and bigint check early
+      // if it is.
+      GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
+      TNode<HeapObject> rhs_heap_object = CAST(rhs);
       Label lhs_is_string(this), lhs_is_bigint(this);
       GotoIf(IsStringInstanceType(lhs_instance_type), &lhs_is_string);
       GotoIf(IsBigIntInstanceType(lhs_instance_type), &lhs_is_bigint);
       Goto(&call_with_any_feedback);
 
       BIND(&lhs_is_bigint);
-      {
-        GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
-        Branch(IsBigInt(rhs), &bigint, &call_with_any_feedback);
-      }
+      Branch(IsBigInt(rhs_heap_object), &bigint, &call_with_any_feedback);
 
       BIND(&lhs_is_string);
-      // Check if the {rhs} is a smi, and exit the string check early if it is.
-      GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
-      TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
-      // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
-      // need an Oddball check.
-      GotoIfNot(IsStringInstanceType(rhs_instance_type),
-                &call_with_any_feedback);
-      var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kString));
-      var_result.Bind(
-          CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs));
-      Goto(&end);
+      {
+        TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs_heap_object);
+        // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
+        // need an Oddball check.
+        GotoIfNot(IsStringInstanceType(rhs_instance_type),
+                  &call_with_any_feedback);
+        var_type_feedback = SmiConstant(BinaryOperationFeedback::kString);
+        var_result =
+            CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs);
+        Goto(&end);
+      }
     }
   }
@@ -175,7 +181,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
   {
     // Check if rhs is an oddball. At this point we know lhs is either a
     // Smi or number or oddball and rhs is not a number or Smi.
-    TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
+    TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
     TNode<BoolT> rhs_is_oddball =
         InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
     GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
@@ -186,59 +192,58 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
   {
     // Both {lhs} and {rhs} are of BigInt type.
     Label bigint_too_big(this);
-    var_result.Bind(
-        CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs));
+    var_result = CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs);
     // Check for sentinel that signals BigIntTooBig exception.
     GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
 
-    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+    var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
     Goto(&end);
 
     BIND(&bigint_too_big);
     {
       // Update feedback to prevent deopt loop.
       UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
-                     feedback_vector, slot_id);
+                     maybe_feedback_vector, slot_id);
       ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
     }
   }
 
   BIND(&call_with_oddball_feedback);
   {
-    var_type_feedback.Bind(
-        SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
     Goto(&call_add_stub);
   }
 
   BIND(&call_with_any_feedback);
   {
-    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
    Goto(&call_add_stub);
   }
 
   BIND(&call_add_stub);
   {
-    var_result.Bind(CallBuiltin(Builtins::kAdd, context, lhs, rhs));
+    var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs);
     Goto(&end);
   }
 
   BIND(&end);
-  UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
+  UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
   return var_result.value();
 }
 
-Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
-    Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
+TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
+    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
+    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
     const SmiOperation& smiOperation, const FloatOperation& floatOperation,
     Operation op, bool rhs_is_smi) {
   Label do_float_operation(this), end(this), call_stub(this),
       check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
       if_lhsisnotnumber(this, Label::kDeferred),
       if_bigint(this, Label::kDeferred);
-  VARIABLE(var_float_lhs, MachineRepresentation::kFloat64);
-  VARIABLE(var_float_rhs, MachineRepresentation::kFloat64);
-  VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
-  VARIABLE(var_result, MachineRepresentation::kTagged);
+  TVARIABLE(Float64T, var_float_lhs);
+  TVARIABLE(Float64T, var_float_rhs);
+  TVARIABLE(Smi, var_type_feedback);
+  TVARIABLE(Object, var_result);
 
   Label if_lhsissmi(this);
   // If rhs is known to be an Smi (in the SubSmi, MulSmi, DivSmi, ModSmi
@@ -253,18 +258,21 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
   BIND(&if_lhsissmi);
   {
     Comment("lhs is Smi");
+    TNode<Smi> lhs_smi = CAST(lhs);
     if (!rhs_is_smi) {
       // Check if the {rhs} is also a Smi.
       Label if_rhsissmi(this), if_rhsisnotsmi(this);
       Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
 
       BIND(&if_rhsisnotsmi);
       {
         // Check if {rhs} is a HeapNumber.
-        GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+        TNode<HeapObject> rhs_heap_object = CAST(rhs);
+        GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);
 
         // Perform a floating point operation.
-        var_float_lhs.Bind(SmiToFloat64(lhs));
-        var_float_rhs.Bind(LoadHeapNumberValue(rhs));
+        var_float_lhs = SmiToFloat64(lhs_smi);
+        var_float_rhs = LoadHeapNumberValue(rhs_heap_object);
         Goto(&do_float_operation);
       }
@@ -273,7 +281,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
     {
       Comment("perform smi operation");
-      var_result.Bind(smiOperation(lhs, rhs, &var_type_feedback));
+      var_result = smiOperation(lhs_smi, CAST(rhs), &var_type_feedback);
       Goto(&end);
     }
   }
@@ -282,7 +290,8 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
   {
     Comment("lhs is not Smi");
     // Check if the {lhs} is a HeapNumber.
-    GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber);
+    TNode<HeapObject> lhs_heap_object = CAST(lhs);
+    GotoIfNot(IsHeapNumber(lhs_heap_object), &if_lhsisnotnumber);
 
     if (!rhs_is_smi) {
       // Check if the {rhs} is a Smi.
@@ -292,11 +301,12 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
       BIND(&if_rhsisnotsmi);
       {
         // Check if the {rhs} is a HeapNumber.
-        GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+        TNode<HeapObject> rhs_heap_object = CAST(rhs);
+        GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);
 
         // Perform a floating point operation.
-        var_float_lhs.Bind(LoadHeapNumberValue(lhs));
-        var_float_rhs.Bind(LoadHeapNumberValue(rhs));
+        var_float_lhs = LoadHeapNumberValue(lhs_heap_object);
+        var_float_rhs = LoadHeapNumberValue(rhs_heap_object);
         Goto(&do_float_operation);
       }
@@ -305,19 +315,19 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
     {
       // Perform floating point operation.
-      var_float_lhs.Bind(LoadHeapNumberValue(lhs));
-      var_float_rhs.Bind(SmiToFloat64(rhs));
+      var_float_lhs = LoadHeapNumberValue(lhs_heap_object);
+      var_float_rhs = SmiToFloat64(CAST(rhs));
       Goto(&do_float_operation);
     }
   }
 
   BIND(&do_float_operation);
   {
-    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
-    Node* lhs_value = var_float_lhs.value();
-    Node* rhs_value = var_float_rhs.value();
-    Node* value = floatOperation(lhs_value, rhs_value);
-    var_result.Bind(AllocateHeapNumberWithValue(value));
+    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
+    TNode<Float64T> lhs_value = var_float_lhs.value();
+    TNode<Float64T> rhs_value = var_float_rhs.value();
+    TNode<Float64T> value = floatOperation(lhs_value, rhs_value);
+    var_result = AllocateHeapNumberWithValue(value);
     Goto(&end);
   }
@@ -325,7 +335,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
   {
     // No checks on rhs are done yet. We just know lhs is not a number or Smi.
     Label if_left_bigint(this), if_left_oddball(this);
-    TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
+    TNode<Uint16T> lhs_instance_type = LoadInstanceType(CAST(lhs));
     GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_left_bigint);
     TNode<BoolT> lhs_is_oddball =
         InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
@@ -338,18 +348,18 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
       BIND(&if_rhsissmi);
       {
-        var_type_feedback.Bind(
-            SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+        var_type_feedback =
+            SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
         Goto(&call_stub);
       }
 
       BIND(&if_rhsisnotsmi);
       {
         // Check if {rhs} is a HeapNumber.
-        GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+        GotoIfNot(IsHeapNumber(CAST(rhs)), &check_rhsisoddball);
 
-        var_type_feedback.Bind(
-            SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+        var_type_feedback =
+            SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
         Goto(&call_stub);
       }
     }
@@ -357,7 +367,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
     BIND(&if_left_bigint);
     {
       GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
-      Branch(IsBigInt(rhs), &if_bigint, &call_with_any_feedback);
+      Branch(IsBigInt(CAST(rhs)), &if_bigint, &call_with_any_feedback);
     }
   }
@@ -365,39 +375,38 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
   {
     // Check if rhs is an oddball. At this point we know lhs is either a
     // Smi or number or oddball and rhs is not a number or Smi.
-    TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
+    TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
     GotoIf(IsBigIntInstanceType(rhs_instance_type), &if_bigint);
     TNode<BoolT> rhs_is_oddball =
         InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
     GotoIfNot(rhs_is_oddball, &call_with_any_feedback);
 
-    var_type_feedback.Bind(
-        SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
     Goto(&call_stub);
   }
 
   // This handles the case where at least one input is a BigInt.
   BIND(&if_bigint);
   {
-    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+    var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
     if (op == Operation::kAdd) {
-      var_result.Bind(CallBuiltin(Builtins::kBigIntAdd, context, lhs, rhs));
+      var_result = CallBuiltin(Builtins::kBigIntAdd, context, lhs, rhs);
     } else {
-      var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
-                                  SmiConstant(op)));
+      var_result = CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
+                               SmiConstant(op));
     }
     Goto(&end);
   }
 
   BIND(&call_with_any_feedback);
   {
-    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
     Goto(&call_stub);
   }
 
   BIND(&call_stub);
   {
-    Node* result;
+    TNode<Object> result;
     switch (op) {
       case Operation::kSubtract:
         result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs);
@@ -414,20 +423,21 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
       default:
         UNREACHABLE();
     }
-    var_result.Bind(result);
+    var_result = result;
     Goto(&end);
   }
 
   BIND(&end);
-  UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
+  UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
   return var_result.value();
 }
 
-Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
-                                                       Node* rhs, Node* slot_id,
-                                                       Node* feedback_vector,
-                                                       bool rhs_is_smi) {
-  auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+TNode<Object> BinaryOpAssembler::Generate_SubtractWithFeedback(
+    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
+    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+    bool rhs_is_smi) {
+  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
+                         TVariable<Smi>* var_type_feedback) {
     Label end(this);
     TVARIABLE(Number, var_result);
     // If rhs is known to be an Smi (for SubSmi) we want to fast path Smi
@@ -435,13 +445,13 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
     // Smi and Number operations, so this path should not be marked as Deferred.
     Label if_overflow(this,
                       rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
-    var_result = TrySmiSub(CAST(lhs), CAST(rhs), &if_overflow);
-    var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+    var_result = TrySmiSub(lhs, rhs, &if_overflow);
+    *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
     Goto(&end);
 
     BIND(&if_overflow);
     {
-      var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+      *var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
       TNode<Float64T> value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
       var_result = AllocateHeapNumberWithValue(value);
       Goto(&end);
@@ -450,91 +460,97 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
     BIND(&end);
     return var_result.value();
   };
-  auto floatFunction = [=](Node* lhs, Node* rhs) {
+  auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
     return Float64Sub(lhs, rhs);
   };
   return Generate_BinaryOperationWithFeedback(
-      context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction,
-      Operation::kSubtract, rhs_is_smi);
+      context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
+      floatFunction, Operation::kSubtract, rhs_is_smi);
 }
 
-Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
-                                                       Node* rhs, Node* slot_id,
-                                                       Node* feedback_vector,
-                                                       bool rhs_is_smi) {
-  auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
-    TNode<Number> result = SmiMul(CAST(lhs), CAST(rhs));
-    var_type_feedback->Bind(SelectSmiConstant(
+TNode<Object> BinaryOpAssembler::Generate_MultiplyWithFeedback(
+    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
+    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+    bool rhs_is_smi) {
+  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
+                         TVariable<Smi>* var_type_feedback) {
+    TNode<Number> result = SmiMul(lhs, rhs);
+    *var_type_feedback = SelectSmiConstant(
         TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
-        BinaryOperationFeedback::kNumber));
+        BinaryOperationFeedback::kNumber);
     return result;
   };
-  auto floatFunction = [=](Node* lhs, Node* rhs) {
+  auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
     return Float64Mul(lhs, rhs);
   };
   return Generate_BinaryOperationWithFeedback(
-      context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction,
-      Operation::kMultiply, rhs_is_smi);
+      context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
+      floatFunction, Operation::kMultiply, rhs_is_smi);
 }
 
-Node* BinaryOpAssembler::Generate_DivideWithFeedback(
-    Node* context, Node* dividend, Node* divisor, Node* slot_id,
-    Node* feedback_vector, bool rhs_is_smi) {
-  auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
-    VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Object> BinaryOpAssembler::Generate_DivideWithFeedback(
+    TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
+    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+    bool rhs_is_smi) {
+  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
+                         TVariable<Smi>* var_type_feedback) {
+    TVARIABLE(Object, var_result);
     // If rhs is known to be an Smi (for DivSmi) we want to fast path Smi
     // operation. For the normal Div operation, we want to fast path both
     // Smi and Number operations, so this path should not be marked as Deferred.
     Label bailout(this, rhs_is_smi ? Label::kDeferred : Label::kNonDeferred),
        end(this);
-    var_result.Bind(TrySmiDiv(CAST(lhs), CAST(rhs), &bailout));
-    var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+    var_result = TrySmiDiv(lhs, rhs, &bailout);
+    *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
    Goto(&end);
 
     BIND(&bailout);
     {
-      var_type_feedback->Bind(
-          SmiConstant(BinaryOperationFeedback::kSignedSmallInputs));
+      *var_type_feedback =
+          SmiConstant(BinaryOperationFeedback::kSignedSmallInputs);
       TNode<Float64T> value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
-      var_result.Bind(AllocateHeapNumberWithValue(value));
+      var_result = AllocateHeapNumberWithValue(value);
       Goto(&end);
     }
 
     BIND(&end);
     return var_result.value();
   };
-  auto floatFunction = [=](Node* lhs, Node* rhs) {
+  auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
     return Float64Div(lhs, rhs);
   };
   return Generate_BinaryOperationWithFeedback(
-      context, dividend, divisor, slot_id, feedback_vector, smiFunction,
+      context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kDivide, rhs_is_smi);
 }
 
-Node* BinaryOpAssembler::Generate_ModulusWithFeedback(
-    Node* context, Node* dividend, Node* divisor, Node* slot_id,
-    Node* feedback_vector, bool rhs_is_smi) {
-  auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
-    TNode<Number> result = SmiMod(CAST(lhs), CAST(rhs));
-    var_type_feedback->Bind(SelectSmiConstant(
+TNode<Object> BinaryOpAssembler::Generate_ModulusWithFeedback(
+    TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
+    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+    bool rhs_is_smi) {
+  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
+                         TVariable<Smi>* var_type_feedback) {
+    TNode<Number> result = SmiMod(lhs, rhs);
+    *var_type_feedback = SelectSmiConstant(
         TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
-        BinaryOperationFeedback::kNumber));
+        BinaryOperationFeedback::kNumber);
     return result;
   };
-  auto floatFunction = [=](Node* lhs, Node* rhs) {
+  auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
     return Float64Mod(lhs, rhs);
   };
   return Generate_BinaryOperationWithFeedback(
-      context, dividend, divisor, slot_id, feedback_vector, smiFunction,
+      context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kModulus, rhs_is_smi);
 }
 
-Node* BinaryOpAssembler::Generate_ExponentiateWithFeedback(
-    Node* context, Node* base, Node* exponent, Node* slot_id,
-    Node* feedback_vector, bool rhs_is_smi) {
+TNode<Object> BinaryOpAssembler::Generate_ExponentiateWithFeedback(
+    TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
+    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+    bool rhs_is_smi) {
   // We currently don't optimize exponentiation based on feedback.
   TNode<Smi> dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
-  UpdateFeedback(dummy_feedback, feedback_vector, slot_id);
+  UpdateFeedback(dummy_feedback, maybe_feedback_vector, slot_id);
   return CallBuiltin(Builtins::kExponentiate, context, base, exponent);
 }
......
@@ -17,42 +17,48 @@ class CodeAssemblerState;
 class BinaryOpAssembler : public CodeStubAssembler {
  public:
-  using Node = compiler::Node;
-
   explicit BinaryOpAssembler(compiler::CodeAssemblerState* state)
       : CodeStubAssembler(state) {}
 
-  Node* Generate_AddWithFeedback(Node* context, Node* lhs, Node* rhs,
-                                 Node* slot_id, Node* feedback_vector,
-                                 bool rhs_is_smi);
+  TNode<Object> Generate_AddWithFeedback(
+      TNode<Context> context, TNode<Object> left, TNode<Object> right,
+      TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+      bool rhs_is_smi);
 
-  Node* Generate_SubtractWithFeedback(Node* context, Node* lhs, Node* rhs,
-                                      Node* slot_id, Node* feedback_vector,
-                                      bool rhs_is_smi);
+  TNode<Object> Generate_SubtractWithFeedback(
+      TNode<Context> context, TNode<Object> left, TNode<Object> right,
+      TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+      bool rhs_is_smi);
 
-  Node* Generate_MultiplyWithFeedback(Node* context, Node* lhs, Node* rhs,
-                                      Node* slot_id, Node* feedback_vector,
-                                      bool rhs_is_smi);
+  TNode<Object> Generate_MultiplyWithFeedback(
+      TNode<Context> context, TNode<Object> left, TNode<Object> right,
+      TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+      bool rhs_is_smi);
 
-  Node* Generate_DivideWithFeedback(Node* context, Node* dividend,
-                                    Node* divisor, Node* slot_id,
-                                    Node* feedback_vector, bool rhs_is_smi);
+  TNode<Object> Generate_DivideWithFeedback(
+      TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
+      TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+      bool rhs_is_smi);
 
-  Node* Generate_ModulusWithFeedback(Node* context, Node* dividend,
-                                     Node* divisor, Node* slot_id,
-                                     Node* feedback_vector, bool rhs_is_smi);
+  TNode<Object> Generate_ModulusWithFeedback(
+      TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
+      TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+      bool rhs_is_smi);
 
-  Node* Generate_ExponentiateWithFeedback(Node* context, Node* dividend,
-                                          Node* divisor, Node* slot_id,
-                                          Node* feedback_vector,
-                                          bool rhs_is_smi);
+  TNode<Object> Generate_ExponentiateWithFeedback(
+      TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
+      TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+      bool rhs_is_smi);
 
  private:
-  using SmiOperation = std::function<Node*(Node*, Node*, Variable*)>;
-  using FloatOperation = std::function<Node*(Node*, Node*)>;
+  using SmiOperation =
+      std::function<TNode<Object>(TNode<Smi>, TNode<Smi>, TVariable<Smi>*)>;
+  using FloatOperation =
+      std::function<TNode<Float64T>(TNode<Float64T>, TNode<Float64T>)>;
 
-  Node* Generate_BinaryOperationWithFeedback(
-      Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
+  TNode<Object> Generate_BinaryOperationWithFeedback(
+      TNode<Context> context, TNode<Object> left, TNode<Object> right,
+      TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
      const SmiOperation& smiOperation, const FloatOperation& floatOperation,
      Operation op, bool rhs_is_smi);
 };
......
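With the SmiOperation/FloatOperation aliases now typed (see the header diff above), the per-operation callbacks passed into Generate_BinaryOperationWithFeedback take TNode<Smi>/TNode<Float64T> arguments and write feedback through a TVariable<Smi>*. A rough caller-side sketch under those aliases, modeled on the Subtract case in the .cc diff above (not a verbatim excerpt):

auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                       TVariable<Smi>* var_type_feedback) -> TNode<Object> {
  // Smi fast path: record SignedSmall feedback and subtract without boxing.
  *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
  return SmiSub(lhs, rhs);  // sketch only; the real code uses TrySmiSub with
                            // an overflow label and a HeapNumber slow path
};
auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
  return Float64Sub(lhs, rhs);
};
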
@@ -832,20 +832,21 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
                                OperandScale operand_scale)
       : InterpreterAssembler(state, bytecode, operand_scale) {}
 
-  using BinaryOpGenerator =
-      Node* (BinaryOpAssembler::*)(Node* context, Node* left, Node* right,
-                                   Node* slot, Node* vector, bool lhs_is_smi);
+  using BinaryOpGenerator = TNode<Object> (BinaryOpAssembler::*)(
+      TNode<Context> context, TNode<Object> left, TNode<Object> right,
+      TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+      bool rhs_is_smi);
 
   void BinaryOpWithFeedback(BinaryOpGenerator generator) {
     TNode<Object> lhs = LoadRegisterAtOperandIndex(0);
     TNode<Object> rhs = GetAccumulator();
     TNode<Context> context = GetContext();
-    TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
+    TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
     TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
 
     BinaryOpAssembler binop_asm(state());
-    Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
-                                          maybe_feedback_vector, false);
+    TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
                                                  maybe_feedback_vector, false);
     SetAccumulator(result);
     Dispatch();
   }
@@ -854,12 +855,12 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
     TNode<Object> lhs = GetAccumulator();
     TNode<Smi> rhs = BytecodeOperandImmSmi(0);
     TNode<Context> context = GetContext();
-    TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
+    TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
     TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
 
     BinaryOpAssembler binop_asm(state());
-    Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
-                                          maybe_feedback_vector, true);
+    TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
                                                  maybe_feedback_vector, true);
     SetAccumulator(result);
     Dispatch();
   }
......
@@ -18,7 +18,8 @@ namespace internal {
 using compiler::CodeAssemblerTester;
 using compiler::FunctionTester;
 using compiler::Node;
-using compiler::TNode;
+template <typename T>
+using TNode = compiler::TNode<T>;
 
 namespace {
......