Commit c8fbe8e8 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Fix cpplint 'runtime/references' warnings

This replaces non-const reference arguments with pointers.

Bug: v8:9429
Change-Id: I5e5d94b020dedf3005f8d74202d31e3a50bdf754
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1690943
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62588}
parent 17b9d872
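Background: cpplint's runtime/references check enforces the Google C++ Style Guide rule that a parameter a function may mutate should be passed by pointer rather than by non-const reference, so the mutation is visible at the call site. Below is a minimal, self-contained sketch of the pattern this change applies throughout; Counter and Observer are illustrative names, not V8 code.

#include <cstdio>

class Counter {
 public:
  void Increment() { ++value_; }
  int value() const { return value_; }

 private:
  int value_ = 0;
};

// Old style, flagged by cpplint:
//   explicit Observer(Counter& counter);  // NOLINT(runtime/references)
class Observer {
 public:
  explicit Observer(Counter* counter) : counter_(counter) {}
  void Step() { counter_->Increment(); }  // was: counter_.Increment()

 private:
  Counter* counter_;  // was: Counter& counter_;
};

int main() {
  Counter counter;
  Observer observer(&counter);  // was: Observer observer(counter)
  observer.Step();
  std::printf("%d\n", counter.value());  // prints 1
}

A side benefit of pointer members over reference members is that the class stays copy-assignable, and the NOLINT suppression comments can be dropped.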
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -171,16 +171,15 @@ struct Heap::StrongRootsList {
 
 class IdleScavengeObserver : public AllocationObserver {
  public:
-  IdleScavengeObserver(Heap& heap,  // NOLINT(runtime/references)
-                       intptr_t step_size)
+  IdleScavengeObserver(Heap* heap, intptr_t step_size)
       : AllocationObserver(step_size), heap_(heap) {}
 
   void Step(int bytes_allocated, Address, size_t) override {
-    heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
+    heap_->ScheduleIdleScavengeIfNeeded(bytes_allocated);
   }
 
  private:
-  Heap& heap_;
+  Heap* heap_;
 };
 
 Heap::Heap()
@@ -1328,28 +1327,27 @@ intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
   return 0;
 }
 
-void ReportDuplicates(
-    int size, std::vector<HeapObject>& objects) {  // NOLINT(runtime/references)
-  if (objects.size() == 0) return;
+void ReportDuplicates(int size, std::vector<HeapObject>* objects) {
+  if (objects->size() == 0) return;
 
-  sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
+  sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) {
     intptr_t c = CompareWords(size, a, b);
     if (c != 0) return c < 0;
     return a < b;
   });
 
   std::vector<std::pair<int, HeapObject>> duplicates;
-  HeapObject current = objects[0];
+  HeapObject current = (*objects)[0];
   int count = 1;
-  for (size_t i = 1; i < objects.size(); i++) {
-    if (CompareWords(size, current, objects[i]) == 0) {
+  for (size_t i = 1; i < objects->size(); i++) {
+    if (CompareWords(size, current, (*objects)[i]) == 0) {
       count++;
     } else {
       if (count > 1) {
         duplicates.push_back(std::make_pair(count - 1, current));
       }
       count = 1;
-      current = objects[i];
+      current = (*objects)[i];
     }
   }
   if (count > 1) {
@@ -1432,7 +1430,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
     }
     for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
          ++it) {
-      ReportDuplicates(it->first, it->second);
+      ReportDuplicates(it->first, &it->second);
     }
   }
 }
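The ReportDuplicates hunks above show the free-function variant of the conversion: the body switches from . to ->, element access becomes (*objects)[i], and the caller now passes an address (&it->second). A self-contained sketch of the same mechanics; PrintStats and its contents are hypothetical, not from this CL.

#include <cstdio>
#include <vector>

// Takes the vector by pointer; callers must pass an address,
// which makes potential mutation obvious at the call site.
void PrintStats(std::vector<int>* values) {
  if (values->empty()) return;       // was: values.empty()
  std::printf("first=%d count=%zu\n",
              (*values)[0],          // was: values[0]
              values->size());       // was: values.size()
}

int main() {
  std::vector<int> values{3, 1, 2};
  PrintStats(&values);  // was: PrintStats(values)
}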
@@ -5016,7 +5014,7 @@ void Heap::SetUpSpaces() {
   if (FLAG_idle_time_scavenge) {
     scavenge_job_.reset(new ScavengeJob());
     idle_scavenge_observer_.reset(new IdleScavengeObserver(
-        *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
+        this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
     new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
   }
 
@@ -5025,12 +5023,12 @@ void Heap::SetUpSpaces() {
 
   if (FLAG_stress_marking > 0) {
     stress_marking_percentage_ = NextStressMarkingLimit();
-    stress_marking_observer_ = new StressMarkingObserver(*this);
+    stress_marking_observer_ = new StressMarkingObserver(this);
     AddAllocationObserversToAllSpaces(stress_marking_observer_,
                                       stress_marking_observer_);
   }
 
   if (FLAG_stress_scavenge > 0) {
-    stress_scavenge_observer_ = new StressScavengeObserver(*this);
+    stress_scavenge_observer_ = new StressScavengeObserver(this);
     new_space()->AddAllocationObserver(stress_scavenge_observer_);
   }
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -37,14 +37,14 @@ using IncrementalMarkingMarkingVisitor =
 
 void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
                                         size_t size) {
-  Heap* heap = incremental_marking_.heap();
+  Heap* heap = incremental_marking_->heap();
   VMState<GC> state(heap->isolate());
   RuntimeCallTimerScope runtime_timer(
       heap->isolate(),
       RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
-  incremental_marking_.AdvanceOnAllocation();
+  incremental_marking_->AdvanceOnAllocation();
   // AdvanceIncrementalMarkingOnAllocation can start incremental marking.
-  incremental_marking_.EnsureBlackAllocated(addr, size);
+  incremental_marking_->EnsureBlackAllocated(addr, size);
 }
 
 IncrementalMarking::IncrementalMarking(
@@ -64,8 +64,8 @@ IncrementalMarking::IncrementalMarking(
       black_allocation_(false),
       finalize_marking_completed_(false),
       request_type_(NONE),
-      new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
-      old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
+      new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
+      old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
   DCHECK_NOT_NULL(marking_worklist_);
   SetState(STOPPED);
 }
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -255,16 +255,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
  private:
   class Observer : public AllocationObserver {
    public:
-    Observer(
-        IncrementalMarking& incremental_marking,  // NOLINT(runtime/references)
-        intptr_t step_size)
+    Observer(IncrementalMarking* incremental_marking, intptr_t step_size)
         : AllocationObserver(step_size),
           incremental_marking_(incremental_marking) {}
 
     void Step(int bytes_allocated, Address, size_t) override;
 
    private:
-    IncrementalMarking& incremental_marking_;
+    IncrementalMarking* incremental_marking_;
   };
 
   void StartMarking();
--- a/src/heap/stress-marking-observer.cc
+++ b/src/heap/stress-marking-observer.cc
@@ -9,14 +9,14 @@ namespace v8 {
 namespace internal {
 
 // TODO(majeski): meaningful step_size
-StressMarkingObserver::StressMarkingObserver(Heap& heap)
+StressMarkingObserver::StressMarkingObserver(Heap* heap)
     : AllocationObserver(64), heap_(heap) {}
 
 void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
                                  size_t size) {
-  heap_.StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
-                                                          kNoGCCallbackFlags);
-  heap_.incremental_marking()->EnsureBlackAllocated(soon_object, size);
+  heap_->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
+                                                           kNoGCCallbackFlags);
+  heap_->incremental_marking()->EnsureBlackAllocated(soon_object, size);
 }
 
 }  // namespace internal
--- a/src/heap/stress-marking-observer.h
+++ b/src/heap/stress-marking-observer.h
@@ -12,12 +12,12 @@ namespace internal {
 
 class StressMarkingObserver : public AllocationObserver {
  public:
-  explicit StressMarkingObserver(Heap& heap);  // NOLINT(runtime/references)
+  explicit StressMarkingObserver(Heap* heap);
 
   void Step(int bytes_allocated, Address soon_object, size_t size) override;
 
  private:
-  Heap& heap_;
+  Heap* heap_;
 };
 
 }  // namespace internal
--- a/src/heap/stress-scavenge-observer.cc
+++ b/src/heap/stress-scavenge-observer.cc
@@ -13,7 +13,7 @@ namespace v8 {
 namespace internal {
 
 // TODO(majeski): meaningful step_size
-StressScavengeObserver::StressScavengeObserver(Heap& heap)
+StressScavengeObserver::StressScavengeObserver(Heap* heap)
     : AllocationObserver(64),
       heap_(heap),
       has_requested_gc_(false),
@@ -21,22 +21,22 @@ StressScavengeObserver::StressScavengeObserver(Heap& heap)
   limit_percentage_ = NextLimit();
 
   if (FLAG_trace_stress_scavenge && !FLAG_fuzzer_gc_analysis) {
-    heap_.isolate()->PrintWithTimestamp(
+    heap_->isolate()->PrintWithTimestamp(
         "[StressScavenge] %d%% is the new limit\n", limit_percentage_);
   }
 }
 
 void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
                                   size_t size) {
-  if (has_requested_gc_ || heap_.new_space()->Capacity() == 0) {
+  if (has_requested_gc_ || heap_->new_space()->Capacity() == 0) {
     return;
   }
 
   double current_percent =
-      heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+      heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
 
   if (FLAG_trace_stress_scavenge) {
-    heap_.isolate()->PrintWithTimestamp(
+    heap_->isolate()->PrintWithTimestamp(
         "[Scavenge] %.2lf%% of the new space capacity reached\n",
         current_percent);
   }
@@ -49,11 +49,11 @@ void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
 
   if (static_cast<int>(current_percent) >= limit_percentage_) {
     if (FLAG_trace_stress_scavenge) {
-      heap_.isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
+      heap_->isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
     }
 
     has_requested_gc_ = true;
-    heap_.isolate()->stack_guard()->RequestGC();
+    heap_->isolate()->stack_guard()->RequestGC();
   }
 }
 
@@ -63,15 +63,15 @@ bool StressScavengeObserver::HasRequestedGC() const {
 
 void StressScavengeObserver::RequestedGCDone() {
   double current_percent =
-      heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+      heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
   limit_percentage_ = NextLimit(static_cast<int>(current_percent));
 
   if (FLAG_trace_stress_scavenge) {
-    heap_.isolate()->PrintWithTimestamp(
+    heap_->isolate()->PrintWithTimestamp(
         "[Scavenge] %.2lf%% of the new space capacity reached\n",
         current_percent);
-    heap_.isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
-                                        limit_percentage_);
+    heap_->isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
+                                         limit_percentage_);
   }
 
   has_requested_gc_ = false;
@@ -87,7 +87,7 @@ int StressScavengeObserver::NextLimit(int min) {
     return max;
   }
 
-  return min + heap_.isolate()->fuzzer_rng()->NextInt(max - min + 1);
+  return min + heap_->isolate()->fuzzer_rng()->NextInt(max - min + 1);
 }
 
 }  // namespace internal
--- a/src/heap/stress-scavenge-observer.h
+++ b/src/heap/stress-scavenge-observer.h
@@ -12,7 +12,7 @@ namespace internal {
 
 class StressScavengeObserver : public AllocationObserver {
  public:
-  explicit StressScavengeObserver(Heap& heap);  // NOLINT(runtime/references)
+  explicit StressScavengeObserver(Heap* heap);
 
   void Step(int bytes_allocated, Address soon_object, size_t size) override;
 
@@ -24,7 +24,7 @@ class StressScavengeObserver : public AllocationObserver {
 
   double MaxNewSpaceSizeReached() const;
 
  private:
-  Heap& heap_;
+  Heap* heap_;
   int limit_percentage_;
   bool has_requested_gc_;