Commit 02dcf19e authored by Santiago Aboy Solanes, committed by Commit Bot

[CSA][cleanup] Remove SloppyTNode from the codebase

Remove sloppy-ness from the CODE_ASSEMBLER_UNARY_OP macros and the
remaining methods.

Bug: v8:6949
Change-Id: I48e2800c6bac558ae4005fa09551a4551c1dbb25
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2725530
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73139}
parent 8890bb21
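
Illustration (not part of the diff): a minimal sketch of what this change means at call sites. SloppyTNode<T> accepted an untyped compiler::Node* implicitly; with plain TNode<T> parameters the caller has to produce a typed node, e.g. via UncheckedCast. The helpers AcceptSloppy/AcceptStrict and the Caller function are hypothetical names used only for this sketch.

// Hypothetical helpers, for illustration only.
void AcceptSloppy(SloppyTNode<Int32T> value);  // before this CL: a raw Node* converts implicitly
void AcceptStrict(TNode<Int32T> value);        // after this CL: a typed node is required

void Caller(CodeAssembler* ca, compiler::Node* raw_node) {
  AcceptSloppy(raw_node);                             // compiled via SloppyTNode's implicit ctor
  AcceptStrict(ca->UncheckedCast<Int32T>(raw_node));  // the cast is now explicit at the call site
}
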
@@ -336,8 +336,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
   BIND(&test_old_to_young_flags);
   {
     // TODO(ishell): do a new-space range check instead.
-    TNode<IntPtrT> value =
-        BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));
+    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));

     // TODO(albertnetymk): Try to cache the page flag for value and object,
     // instead of calling IsPageFlagSet each time.
@@ -374,8 +373,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
     Label call_incremental_wb(this);

     auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
-    TNode<IntPtrT> value =
-        BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));
+    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));

     // There are two cases we need to call incremental write barrier.
     // 1) value_is_white
......
@@ -610,7 +610,7 @@ TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) {
   return smi_index;
 }

-TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
+TNode<Smi> CodeStubAssembler::SmiFromInt32(TNode<Int32T> value) {
   if (COMPRESS_POINTERS_BOOL) {
     static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1),
                   "Use shifting instead of add");
@@ -3109,7 +3109,7 @@ TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber() {
 }

 TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
-    SloppyTNode<Float64T> value) {
+    TNode<Float64T> value) {
   TNode<HeapNumber> result = AllocateHeapNumber();
   StoreHeapNumberValue(result, value);
   return result;
@@ -5423,8 +5423,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(TNode<Float64T> value) {
   return var_result.value();
 }

-TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
-    SloppyTNode<Int32T> value) {
+TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(TNode<Int32T> value) {
   if (SmiValuesAre32Bits()) {
     return SmiTag(ChangeInt32ToIntPtr(value));
   }
@@ -5454,8 +5453,7 @@ TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
   return var_result.value();
 }

-TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
-    SloppyTNode<Uint32T> value) {
+TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(TNode<Uint32T> value) {
   Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
       if_join(this);
   TVARIABLE(Number, var_result);
@@ -9803,7 +9801,7 @@ void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
 }

 void CodeStubAssembler::ReportFeedbackUpdate(
-    TNode<FeedbackVector> feedback_vector, SloppyTNode<UintPtrT> slot_id,
+    TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id,
     const char* reason) {
   // Reset profiler ticks.
   StoreObjectFieldNoWriteBarrier(
@@ -13335,8 +13333,7 @@ TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
 TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const {
   CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength()));
-  return assembler_->UncheckedCast<Object>(
-      assembler_->LoadFullTagged(AtIndexPtr(index)));
+  return assembler_->LoadFullTagged(AtIndexPtr(index));
 }

 TNode<Object> CodeStubArguments::AtIndex(int index) const {
@@ -13519,10 +13516,9 @@ TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
   TNode<IntPtrT> offset =
       ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);

-  return CAST(BitcastWordToTagged(
-      Load(MachineType::Pointer(),
-           ExternalConstant(ExternalReference::builtins_address(isolate())),
-           offset)));
+  return CAST(BitcastWordToTagged(Load<RawPtrT>(
+      ExternalConstant(ExternalReference::builtins_address(isolate())),
+      offset)));
 }

 TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
......
@@ -569,7 +569,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Smi conversions.
   TNode<Float64T> SmiToFloat64(TNode<Smi> value);
   TNode<Smi> SmiFromIntPtr(TNode<IntPtrT> value) { return SmiTag(value); }
-  TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
+  TNode<Smi> SmiFromInt32(TNode<Int32T> value);
   TNode<Smi> SmiFromUint32(TNode<Uint32T> value);
   TNode<IntPtrT> SmiToIntPtr(TNode<Smi> value) { return SmiUntag(value); }
   TNode<Int32T> SmiToInt32(TNode<Smi> value);
@@ -1695,7 +1695,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Allocate a HeapNumber without initializing its value.
   TNode<HeapNumber> AllocateHeapNumber();
   // Allocate a HeapNumber with a specific value.
-  TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value);
+  TNode<HeapNumber> AllocateHeapNumberWithValue(TNode<Float64T> value);
   TNode<HeapNumber> AllocateHeapNumberWithValue(double value) {
     return AllocateHeapNumberWithValue(Float64Constant(value));
   }
@@ -2242,8 +2242,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                 Label* if_smi);
   TNode<Number> ChangeFloat32ToTagged(TNode<Float32T> value);
   TNode<Number> ChangeFloat64ToTagged(TNode<Float64T> value);
-  TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
-  TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
+  TNode<Number> ChangeInt32ToTagged(TNode<Int32T> value);
+  TNode<Number> ChangeUint32ToTagged(TNode<Uint32T> value);
   TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
   TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
   TNode<Float64T> ChangeNumberToFloat64(TNode<Number> value);
@@ -3179,7 +3179,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Report that there was a feedback update, performing any tasks that should
   // be done after a feedback update.
   void ReportFeedbackUpdate(TNode<FeedbackVector> feedback_vector,
-                            SloppyTNode<UintPtrT> slot_id, const char* reason);
+                            TNode<UintPtrT> slot_id, const char* reason);
   // Combine the new feedback with the existing_feedback. Do nothing if
   // existing_feedback is nullptr.
......
@@ -365,21 +365,6 @@ class TNode {
   compiler::Node* node_;
 };

-// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
-// Node*. It is intended for function arguments as long as some call sites
-// still use untyped Node* arguments.
-// TODO(turbofan): Delete this class once transition is finished.
-template <class T>
-class SloppyTNode : public TNode<T> {
- public:
-  SloppyTNode(compiler::Node* node)  // NOLINT(runtime/explicit)
-      : TNode<T>(node) {}
-  template <class U, typename std::enable_if<is_subtype<U, T>::value,
-                                             int>::type = 0>
-  SloppyTNode(const TNode<U>& other)  // NOLINT(runtime/explicit)
-      : TNode<T>(other) {}
-};
-
 }  // namespace internal
 }  // namespace v8
......
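
Note (illustration, not part of the diff): of the two SloppyTNode constructors removed above, only the one taking a raw compiler::Node* changes anything for callers; TNode<T> itself still allows the implicit subtype-to-supertype conversion, so typed call sites keep compiling unchanged. A hypothetical sketch (the helper name AsObject is made up):

// Illustration only. Implicit upcasts between typed nodes still work with TNode:
TNode<Object> AsObject(TNode<Smi> smi_value) {
  return smi_value;  // Smi is a subtype of Object, so TNode converts implicitly.
}
// Only untyped compiler::Node* values now need an explicit cast, as in the
// UncheckedCast<Int32T>(...) added to the test at the end of this diff.
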
@@ -226,7 +226,7 @@ bool CodeAssembler::IsIntPtrAbsWithOverflowSupported() const {
 }

 #ifdef DEBUG
-void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node,
+void CodeAssembler::GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node,
                                                      const char* location) {
   Label ok(this);
   GotoIf(WordNotEqual(WordAnd(BitcastMaybeObjectToWord(node),
@@ -650,7 +650,7 @@ TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
       value, TruncateKind::kSetOverflowToMin));
 }
 #define DEFINE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
-  TNode<ResType> CodeAssembler::name(SloppyTNode<ArgType> a) { \
+  TNode<ResType> CodeAssembler::name(TNode<ArgType> a) {       \
     return UncheckedCast<ResType>(raw_assembler()->name(a));   \
   }
 CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
@@ -668,14 +668,12 @@ Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
 TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
                                             LoadSensitivity needs_poisoning) {
-  return BitcastWordToTagged(
-      Load(MachineType::Pointer(), base, needs_poisoning));
+  return BitcastWordToTagged(Load<RawPtrT>(base, needs_poisoning));
 }

-TNode<Object> CodeAssembler::LoadFullTagged(Node* base, Node* offset,
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
                                             LoadSensitivity needs_poisoning) {
-  return BitcastWordToTagged(
-      Load(MachineType::Pointer(), base, offset, needs_poisoning));
+  return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
 }

 Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
......
@@ -447,7 +447,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
 #ifdef DEBUG
     if (FLAG_debug_code) {
       if (std::is_same<PreviousType, MaybeObject>::value) {
-        code_assembler_->GenerateCheckMaybeObjectIsObject(node_, location_);
+        code_assembler_->GenerateCheckMaybeObjectIsObject(
+            TNode<MaybeObject>::UncheckedCast(node_), location_);
       }
       TNode<ExternalReference> function = code_assembler_->ExternalConstant(
           ExternalReference::check_object_type());
@@ -464,11 +465,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
       return TNode<A>::UncheckedCast(node_);
     }

-    template <class A>
-    operator SloppyTNode<A>() {
-      return implicit_cast<TNode<A>>(*this);
-    }
-
     Node* node() const { return node_; }

    private:
@@ -519,7 +515,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
 #endif

 #ifdef DEBUG
-  void GenerateCheckMaybeObjectIsObject(Node* node, const char* location);
+  void GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node,
+                                        const char* location);
 #endif

   // Constants.
@@ -759,7 +756,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   TNode<Object> LoadFullTagged(
       Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
   TNode<Object> LoadFullTagged(
-      Node* base, Node* offset,
+      Node* base, TNode<IntPtrT> offset,
       LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);

   Node* LoadFromObject(MachineType type, TNode<Object> object,
@@ -1005,7 +1002,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   // Unary
 #define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
-  TNode<ResType> name(SloppyTNode<ArgType> a);
+  TNode<ResType> name(TNode<ArgType> a);
 CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
 #undef DECLARE_CODE_ASSEMBLER_UNARY_OP
......
@@ -229,12 +229,12 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
       TNode<FixedArray> vector =
           __ Cast(__ LoadFixedArrayElement(result_array, i));
       for (int lane = 0; lane < 4; lane++) {
-        TNode<Smi> lane_value =
-            __ SmiFromInt32(tester.raw_assembler_for_testing()->AddNode(
+        TNode<Smi> lane_value = __ SmiFromInt32(__ UncheckedCast<Int32T>(
+            tester.raw_assembler_for_testing()->AddNode(
                 tester.raw_assembler_for_testing()
                     ->machine()
                     ->I32x4ExtractLane(lane),
-                param));
+                param)));
         __ StoreFixedArrayElement(vector, lane, lane_value,
                                   UNSAFE_SKIP_WRITE_BARRIER);
       }
......