Commit d913f5a1 authored by Ng Zhi An, committed by V8 LUCI CQ

[compiler] Fix -Wshadow warnings

Bug: v8:12244,v8:12245
Change-Id: I678296c3ebf5d78dac7697a25b27c583406e02cb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3269179
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77828}
parent 8a6b7d8b
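
The hunks below silence -Wshadow either by deleting a redundant declaration that shadowed an outer variable or by renaming the inner variable. As a minimal, self-contained illustration (hypothetical names, not taken from the V8 sources), the loop-index case looks like this; compiling with -Wshadow flags the inner index until it is renamed:

// shadow_example.cc -- illustrative sketch only, not part of this change.
// Compile with e.g.: clang++ -std=c++17 -Wshadow -Werror -c shadow_example.cc
#include <cstddef>
#include <vector>

int SumOfPairs(const std::vector<int>& values) {
  int total = 0;
  for (std::size_t i = 0; i < values.size(); ++i) {
    // Before the fix the inner index was also named 'i', shadowing the outer
    // one and producing "declaration shadows a local variable" under -Wshadow.
    for (std::size_t j = 0; j < values.size(); ++j) {  // Renamed inner index.
      total += values[i] + values[j];
    }
  }
  return total;
}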
@@ -662,7 +662,6 @@ bool LoopFinder::HasMarkedExits(LoopTree* loop_tree,
   }
   if (unmarked_exit) {
     if (FLAG_trace_turbo_loop) {
-      Node* loop_node = loop_tree->GetLoopControl(loop);
       PrintF(
           "Cannot peel loop %i. Loop exit without explicit mark: Node %i "
           "(%s) is inside loop, but its use %i (%s) is outside.\n",
@@ -635,34 +635,34 @@ class MachineRepresentationChecker {
       switch (inferrer_->GetRepresentation(node)) {
         case MachineRepresentation::kTagged:
         case MachineRepresentation::kTaggedPointer:
-          for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
-            CheckValueInputIsTagged(node, i);
+          for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
+            CheckValueInputIsTagged(node, j);
           }
           break;
         case MachineRepresentation::kTaggedSigned:
-          for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+          for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
             if (COMPRESS_POINTERS_BOOL) {
-              CheckValueInputIsCompressedOrTagged(node, i);
+              CheckValueInputIsCompressedOrTagged(node, j);
             } else {
-              CheckValueInputIsTagged(node, i);
+              CheckValueInputIsTagged(node, j);
             }
           }
           break;
         case MachineRepresentation::kCompressed:
         case MachineRepresentation::kCompressedPointer:
-          for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
-            CheckValueInputIsCompressedOrTagged(node, i);
+          for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
+            CheckValueInputIsCompressedOrTagged(node, j);
           }
           break;
         case MachineRepresentation::kWord32:
-          for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
-            CheckValueInputForInt32Op(node, i);
+          for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
+            CheckValueInputForInt32Op(node, j);
           }
           break;
         default:
-          for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+          for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
             CheckValueInputRepresentationIs(
-                node, i, inferrer_->GetRepresentation(node));
+                node, j, inferrer_->GetRepresentation(node));
           }
           break;
       }
@@ -678,9 +678,9 @@ class MachineRepresentationChecker {
           // CheckValueInputRepresentationIs(
           //     node, 0, MachineType::PointerRepresentation());  // Pop count
           size_t return_count = inferrer_->call_descriptor()->ReturnCount();
-          for (size_t i = 0; i < return_count; i++) {
-            MachineType type = inferrer_->call_descriptor()->GetReturnType(i);
-            int input_index = static_cast<int>(i + 1);
+          for (size_t j = 0; j < return_count; j++) {
+            MachineType type = inferrer_->call_descriptor()->GetReturnType(j);
+            int input_index = static_cast<int>(j + 1);
             switch (type.representation()) {
               case MachineRepresentation::kTagged:
               case MachineRepresentation::kTaggedPointer:
@@ -280,7 +280,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
       // Setup a mutable reservation size node; will be patched as we fold
       // additional allocations into this new group.
-      Node* size = __ UniqueIntPtrConstant(object_size);
+      Node* reservation_size = __ UniqueIntPtrConstant(object_size);

       // Load allocation top and limit.
       Node* top =
@@ -290,7 +290,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
       // Check if we need to collect garbage before we can start bump pointer
       // allocation (always done for folded allocations).
-      Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
+      Node* check = __ UintLessThan(__ IntAdd(top, reservation_size), limit);
       __ GotoIfNot(check, &call_runtime);
       __ Goto(&done, top);
@@ -298,8 +298,8 @@ Reduction MemoryLowering::ReduceAllocateRaw(
       __ Bind(&call_runtime);
       {
         EnsureAllocateOperator();
-        Node* vfalse = __ BitcastTaggedToWord(
-            __ Call(allocate_operator_.get(), allocate_builtin, size));
+        Node* vfalse = __ BitcastTaggedToWord(__ Call(
+            allocate_operator_.get(), allocate_builtin, reservation_size));
         vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
         __ Goto(&done, vfalse);
       }
@@ -319,8 +319,8 @@ Reduction MemoryLowering::ReduceAllocateRaw(
     control = gasm()->control();

     // Start a new allocation group.
-    AllocationGroup* group =
-        zone()->New<AllocationGroup>(value, allocation_type, size, zone());
+    AllocationGroup* group = zone()->New<AllocationGroup>(
+        value, allocation_type, reservation_size, zone());
     *state_ptr =
         AllocationState::Open(group, object_size, top, effect, zone());
   }
@@ -373,7 +373,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
     ZoneRefUnorderedSet<MapRef>* maps_out) {
   HeapObjectMatcher m(receiver);
   if (m.HasResolvedValue()) {
-    HeapObjectRef receiver = m.Ref(broker);
+    HeapObjectRef ref = m.Ref(broker);
     // We don't use ICs for the Array.prototype and the Object.prototype
     // because the runtime has to be able to intercept them properly, so
     // we better make sure that TurboFan doesn't outsmart the system here
@@ -381,12 +381,12 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
     //
     // TODO(bmeurer): This can be removed once the Array.prototype and
     // Object.prototype have NO_ELEMENTS elements kind.
-    if (!receiver.IsJSObject() ||
-        !broker->IsArrayOrObjectPrototype(receiver.AsJSObject())) {
-      if (receiver.map().is_stable()) {
+    if (!ref.IsJSObject() ||
+        !broker->IsArrayOrObjectPrototype(ref.AsJSObject())) {
+      if (ref.map().is_stable()) {
         // The {receiver_map} is only reliable when we install a stability
         // code dependency.
-        *maps_out = RefSetOf(broker, receiver.map());
+        *maps_out = RefSetOf(broker, ref.map());
         return kUnreliableMaps;
       }
     }
@@ -442,9 +442,9 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
             access.offset == HeapObject::kMapOffset) {
           if (IsSame(receiver, object)) {
             Node* const value = GetValueInput(effect, 1);
-            HeapObjectMatcher m(value);
-            if (m.HasResolvedValue()) {
-              *maps_out = RefSetOf(broker, m.Ref(broker).AsMap());
+            HeapObjectMatcher m2(value);
+            if (m2.HasResolvedValue()) {
+              *maps_out = RefSetOf(broker, m2.Ref(broker).AsMap());
               return result;
             }
           }
@@ -76,15 +76,15 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
       // in this case because we find node1 first, but what we should actually
       // do is return Replace(node2) instead.
       for (size_t j = (i + 1) & mask;; j = (j + 1) & mask) {
-        Node* entry = entries_[j];
-        if (!entry) {
+        Node* other_entry = entries_[j];
+        if (!other_entry) {
           // No collision, {node} is fine.
           return NoChange();
         }
-        if (entry->IsDead()) {
+        if (other_entry->IsDead()) {
           continue;
         }
-        if (entry == node) {
+        if (other_entry == node) {
           // Collision with ourselves, doesn't count as a real collision.
           // Opportunistically clean-up the duplicate entry if we're at the end
           // of a bucket.
@@ -96,11 +96,11 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
           // Otherwise, keep searching for another collision.
           continue;
         }
-        if (NodeProperties::Equals(entry, node)) {
-          Reduction reduction = ReplaceIfTypesMatch(node, entry);
+        if (NodeProperties::Equals(other_entry, node)) {
+          Reduction reduction = ReplaceIfTypesMatch(node, other_entry);
           if (reduction.Changed()) {
             // Overwrite the colliding entry with the actual entry.
-            entries_[i] = entry;
+            entries_[i] = other_entry;
             // Opportunistically clean-up the duplicate entry if we're at the
             // end of a bucket.
             if (!entries_[(j + 1) & mask]) {
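
The remaining renames (size -> reservation_size, receiver -> ref, m -> m2, entry -> other_entry) address the other common shape of the warning, where a new local reuses a name that is already visible in the enclosing scope, such as a function parameter. A simplified stand-alone sketch of that pattern and its fix (made-up names, not the V8 code):

// Illustrative sketch only; compile with -Wshadow as above.
#include <string>

std::string DescribeEntry(int entry) {
  // Before the rename this local was also called 'entry', hiding the
  // parameter for the rest of the function and triggering -Wshadow.
  int other_entry = entry + 1;
  return std::to_string(entry) + " -> " + std::to_string(other_entry);
}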