Commit 7e53749d authored by ulan's avatar ulan Committed by Commit bot

Make old generation allocation throughput stats independent from the new space...

Make old generation allocation throughput stats independent from the new space allocation throughput.

BUG=

Review URL: https://codereview.chromium.org/1167563005

Cr-Commit-Position: refs/heads/master@{#28880}
parent d869f4a4
...@@ -51,6 +51,7 @@ void GCIdleTimeHandler::HeapState::Print() { ...@@ -51,6 +51,7 @@ void GCIdleTimeHandler::HeapState::Print() {
PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped); PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking); PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking);
PrintF("sweeping_in_progress=%d ", sweeping_in_progress); PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
PrintF("has_low_allocation_rate=%d", has_low_allocation_rate);
PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ", PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
mark_compact_speed_in_bytes_per_ms); mark_compact_speed_in_bytes_per_ms);
PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ", PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
...@@ -60,8 +61,6 @@ void GCIdleTimeHandler::HeapState::Print() { ...@@ -60,8 +61,6 @@ void GCIdleTimeHandler::HeapState::Print() {
PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity); PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ", PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
new_space_allocation_throughput_in_bytes_per_ms); new_space_allocation_throughput_in_bytes_per_ms);
PrintF("current_allocation_throughput=%" V8_PTR_PREFIX "d",
current_allocation_throughput_in_bytes_per_ms);
} }
......
...@@ -181,6 +181,7 @@ class GCIdleTimeHandler { ...@@ -181,6 +181,7 @@ class GCIdleTimeHandler {
bool can_start_incremental_marking; bool can_start_incremental_marking;
bool sweeping_in_progress; bool sweeping_in_progress;
bool sweeping_completed; bool sweeping_completed;
bool has_low_allocation_rate;
size_t mark_compact_speed_in_bytes_per_ms; size_t mark_compact_speed_in_bytes_per_ms;
size_t incremental_marking_speed_in_bytes_per_ms; size_t incremental_marking_speed_in_bytes_per_ms;
size_t final_incremental_mark_compact_speed_in_bytes_per_ms; size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
...@@ -188,7 +189,6 @@ class GCIdleTimeHandler { ...@@ -188,7 +189,6 @@ class GCIdleTimeHandler {
size_t used_new_space_size; size_t used_new_space_size;
size_t new_space_capacity; size_t new_space_capacity;
size_t new_space_allocation_throughput_in_bytes_per_ms; size_t new_space_allocation_throughput_in_bytes_per_ms;
size_t current_allocation_throughput_in_bytes_per_ms;
}; };
GCIdleTimeHandler() GCIdleTimeHandler()
......
...@@ -105,6 +105,7 @@ GCTracer::GCTracer(Heap* heap) ...@@ -105,6 +105,7 @@ GCTracer::GCTracer(Heap* heap)
allocation_duration_since_gc_(0.0), allocation_duration_since_gc_(0.0),
new_space_allocation_in_bytes_since_gc_(0), new_space_allocation_in_bytes_since_gc_(0),
old_generation_allocation_in_bytes_since_gc_(0), old_generation_allocation_in_bytes_since_gc_(0),
combined_mark_compact_speed_cache_(0.0),
start_counter_(0) { start_counter_(0) {
current_ = Event(Event::START, NULL, NULL); current_ = Event(Event::START, NULL, NULL);
current_.end_time = base::OS::TimeCurrentMillis(); current_.end_time = base::OS::TimeCurrentMillis();
...@@ -226,12 +227,14 @@ void GCTracer::Stop(GarbageCollector collector) { ...@@ -226,12 +227,14 @@ void GCTracer::Stop(GarbageCollector collector) {
.cumulative_pure_incremental_marking_duration; .cumulative_pure_incremental_marking_duration;
longest_incremental_marking_step_ = 0.0; longest_incremental_marking_step_ = 0.0;
incremental_mark_compactor_events_.push_front(current_); incremental_mark_compactor_events_.push_front(current_);
combined_mark_compact_speed_cache_ = 0.0;
} else { } else {
DCHECK(current_.incremental_marking_bytes == 0); DCHECK(current_.incremental_marking_bytes == 0);
DCHECK(current_.incremental_marking_duration == 0); DCHECK(current_.incremental_marking_duration == 0);
DCHECK(current_.pure_incremental_marking_duration == 0); DCHECK(current_.pure_incremental_marking_duration == 0);
longest_incremental_marking_step_ = 0.0; longest_incremental_marking_step_ = 0.0;
mark_compactor_events_.push_front(current_); mark_compactor_events_.push_front(current_);
combined_mark_compact_speed_cache_ = 0.0;
} }
// TODO(ernstm): move the code below out of GCTracer. // TODO(ernstm): move the code below out of GCTracer.
...@@ -292,10 +295,9 @@ void GCTracer::AddAllocation(double current_ms) { ...@@ -292,10 +295,9 @@ void GCTracer::AddAllocation(double current_ms) {
allocation_time_ms_ = current_ms; allocation_time_ms_ = current_ms;
new_space_allocation_events_.push_front(AllocationEvent( new_space_allocation_events_.push_front(AllocationEvent(
allocation_duration_since_gc_, new_space_allocation_in_bytes_since_gc_)); allocation_duration_since_gc_, new_space_allocation_in_bytes_since_gc_));
allocation_events_.push_front( old_generation_allocation_events_.push_front(
AllocationEvent(allocation_duration_since_gc_, AllocationEvent(allocation_duration_since_gc_,
new_space_allocation_in_bytes_since_gc_ + old_generation_allocation_in_bytes_since_gc_));
old_generation_allocation_in_bytes_since_gc_));
allocation_duration_since_gc_ = 0; allocation_duration_since_gc_ = 0;
new_space_allocation_in_bytes_since_gc_ = 0; new_space_allocation_in_bytes_since_gc_ = 0;
old_generation_allocation_in_bytes_since_gc_ = 0; old_generation_allocation_in_bytes_since_gc_ = 0;
...@@ -560,8 +562,8 @@ intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const { ...@@ -560,8 +562,8 @@ intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
} }
if (durations == 0.0) return 0; if (durations == 0.0) return 0;
// Make sure the result is at least 1.
return static_cast<intptr_t>(bytes / durations); return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
} }
...@@ -576,8 +578,8 @@ intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond() const { ...@@ -576,8 +578,8 @@ intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond() const {
} }
if (durations == 0.0) return 0; if (durations == 0.0) return 0;
// Make sure the result is at least 1.
return static_cast<intptr_t>(bytes / durations); return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
} }
...@@ -592,8 +594,8 @@ intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const { ...@@ -592,8 +594,8 @@ intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
} }
if (durations == 0.0) return 0; if (durations == 0.0) return 0;
// Make sure the result is at least 1.
return static_cast<intptr_t>(bytes / durations); return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
} }
...@@ -609,47 +611,77 @@ intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() ...@@ -609,47 +611,77 @@ intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
} }
if (durations == 0.0) return 0; if (durations == 0.0) return 0;
// Make sure the result is at least 1.
return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
return static_cast<intptr_t>(bytes / durations);
double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
if (combined_mark_compact_speed_cache_ > 0)
return combined_mark_compact_speed_cache_;
const double kEpsilon = 1;
double speed1 =
static_cast<double>(IncrementalMarkingSpeedInBytesPerMillisecond());
double speed2 = static_cast<double>(
FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
if (speed1 + speed2 < kEpsilon) {
// No data for the incremental marking speed.
// Return the non-incremental mark-compact speed.
combined_mark_compact_speed_cache_ =
static_cast<double>(MarkCompactSpeedInBytesPerMillisecond());
} else {
// Combine the speed of incremental step and the speed of the final step.
// 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
combined_mark_compact_speed_cache_ = speed1 * speed2 / (speed1 + speed2);
}
return combined_mark_compact_speed_cache_;
} }
size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const { size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
double time_ms) const {
size_t bytes = new_space_allocation_in_bytes_since_gc_; size_t bytes = new_space_allocation_in_bytes_since_gc_;
double durations = allocation_duration_since_gc_; double durations = allocation_duration_since_gc_;
AllocationEventBuffer::const_iterator iter = AllocationEventBuffer::const_iterator iter =
new_space_allocation_events_.begin(); new_space_allocation_events_.begin();
const size_t max_bytes = static_cast<size_t>(-1); const size_t max_bytes = static_cast<size_t>(-1);
while (iter != new_space_allocation_events_.end() && while (iter != new_space_allocation_events_.end() &&
bytes < max_bytes - bytes) { bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
bytes += iter->allocation_in_bytes_; bytes += iter->allocation_in_bytes_;
durations += iter->duration_; durations += iter->duration_;
++iter; ++iter;
} }
if (durations == 0.0) return 0; if (durations == 0.0) return 0;
// Make sure the result is at least 1.
return static_cast<size_t>(bytes / durations + 0.5); return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
} }
size_t GCTracer::AllocationThroughputInBytesPerMillisecond( size_t GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
double time_ms) const { double time_ms) const {
size_t bytes = new_space_allocation_in_bytes_since_gc_ + size_t bytes = old_generation_allocation_in_bytes_since_gc_;
old_generation_allocation_in_bytes_since_gc_;
double durations = allocation_duration_since_gc_; double durations = allocation_duration_since_gc_;
AllocationEventBuffer::const_iterator iter = allocation_events_.begin(); AllocationEventBuffer::const_iterator iter =
old_generation_allocation_events_.begin();
const size_t max_bytes = static_cast<size_t>(-1); const size_t max_bytes = static_cast<size_t>(-1);
while (iter != allocation_events_.end() && bytes < max_bytes - bytes && while (iter != old_generation_allocation_events_.end() &&
durations < time_ms) { bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
bytes += iter->allocation_in_bytes_; bytes += iter->allocation_in_bytes_;
durations += iter->duration_; durations += iter->duration_;
++iter; ++iter;
} }
if (durations == 0.0) return 0; if (durations == 0.0) return 0;
// Make sure the result is at least 1.
return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
return static_cast<size_t>(bytes / durations + 0.5);
size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
double time_ms) const {
return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
OldGenerationAllocationThroughputInBytesPerMillisecond(time_ms);
} }
......
...@@ -382,9 +382,20 @@ class GCTracer { ...@@ -382,9 +382,20 @@ class GCTracer {
// Returns 0 if no events have been recorded. // Returns 0 if no events have been recorded.
intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const; intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
// Compute the overall mark compact speed including incremental steps
// and the final mark-compact step.
double CombinedMarkCompactSpeedInBytesPerMillisecond();
// Allocation throughput in the new space in bytes/millisecond. // Allocation throughput in the new space in bytes/millisecond.
// Returns 0 if no allocation events have been recorded. // Returns 0 if no allocation events have been recorded.
size_t NewSpaceAllocationThroughputInBytesPerMillisecond() const; size_t NewSpaceAllocationThroughputInBytesPerMillisecond(
double time_ms = 0) const;
// Allocation throughput in the old generation in bytes/millisecond in the
// last time_ms milliseconds.
// Returns 0 if no allocation events have been recorded.
size_t OldGenerationAllocationThroughputInBytesPerMillisecond(
double time_ms = 0) const;
// Allocation throughput in heap in bytes/millisecond in the last time_ms // Allocation throughput in heap in bytes/millisecond in the last time_ms
// milliseconds. // milliseconds.
...@@ -466,7 +477,7 @@ class GCTracer { ...@@ -466,7 +477,7 @@ class GCTracer {
// RingBuffer for allocation events. // RingBuffer for allocation events.
AllocationEventBuffer new_space_allocation_events_; AllocationEventBuffer new_space_allocation_events_;
AllocationEventBuffer allocation_events_; AllocationEventBuffer old_generation_allocation_events_;
// RingBuffer for context disposal events. // RingBuffer for context disposal events.
ContextDisposalEventBuffer context_disposal_events_; ContextDisposalEventBuffer context_disposal_events_;
...@@ -514,6 +525,8 @@ class GCTracer { ...@@ -514,6 +525,8 @@ class GCTracer {
size_t new_space_allocation_in_bytes_since_gc_; size_t new_space_allocation_in_bytes_since_gc_;
size_t old_generation_allocation_in_bytes_since_gc_; size_t old_generation_allocation_in_bytes_since_gc_;
double combined_mark_compact_speed_cache_;
// Counts how many tracers were started without stopping. // Counts how many tracers were started without stopping.
int start_counter_; int start_counter_;
......
...@@ -470,7 +470,6 @@ void Heap::GarbageCollectionPrologue() { ...@@ -470,7 +470,6 @@ void Heap::GarbageCollectionPrologue() {
} }
CheckNewSpaceExpansionCriteria(); CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter(); UpdateNewSpaceAllocationCounter();
UpdateOldGenerationAllocationCounter();
} }
...@@ -738,8 +737,7 @@ void Heap::GarbageCollectionEpilogue() { ...@@ -738,8 +737,7 @@ void Heap::GarbageCollectionEpilogue() {
new_space_top_after_last_gc_ = new_space()->top(); new_space_top_after_last_gc_ = new_space()->top();
last_gc_time_ = MonotonicallyIncreasingTimeInMs(); last_gc_time_ = MonotonicallyIncreasingTimeInMs();
ReduceNewSpaceSize( ReduceNewSpaceSize();
tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
} }
...@@ -1214,18 +1212,19 @@ bool Heap::PerformGarbageCollection( ...@@ -1214,18 +1212,19 @@ bool Heap::PerformGarbageCollection(
} }
if (collector == MARK_COMPACTOR) { if (collector == MARK_COMPACTOR) {
UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction. // Perform mark-sweep with optional compaction.
MarkCompact(); MarkCompact();
sweep_generation_++; sweep_generation_++;
old_gen_exhausted_ = false; old_gen_exhausted_ = false;
old_generation_size_configured_ = true; old_generation_size_configured_ = true;
// This should be updated before PostGarbageCollectionProcessing, which can
// cause another GC.
old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
} else { } else {
Scavenge(); Scavenge();
} }
// This should be updated before PostGarbageCollectionProcessing, which can
// cause another GC.
old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
UpdateSurvivalStatistics(start_new_space_size); UpdateSurvivalStatistics(start_new_space_size);
ConfigureInitialOldGenerationSize(); ConfigureInitialOldGenerationSize();
...@@ -4509,15 +4508,38 @@ void Heap::MakeHeapIterable() { ...@@ -4509,15 +4508,38 @@ void Heap::MakeHeapIterable() {
} }
bool Heap::HasLowAllocationRate(size_t allocation_rate) { bool Heap::HasLowYoungGenerationAllocationRate() {
static const size_t kLowAllocationRate = 1000; const double high_mutator_utilization = 0.995;
if (allocation_rate == 0) return false; double mutator_speed = static_cast<double>(
return allocation_rate < kLowAllocationRate; tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
double gc_speed =
static_cast<double>(tracer()->ScavengeSpeedInBytesPerMillisecond());
if (mutator_speed == 0 || gc_speed == 0) return false;
double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
return mutator_utilization > high_mutator_utilization;
} }
void Heap::ReduceNewSpaceSize(size_t allocation_rate) { bool Heap::HasLowOldGenerationAllocationRate() {
if (!FLAG_predictable && HasLowAllocationRate(allocation_rate)) { const double high_mutator_utilization = 0.995;
double mutator_speed = static_cast<double>(
tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
double gc_speed = static_cast<double>(
tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
if (mutator_speed == 0 || gc_speed == 0) return false;
double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
return mutator_utilization > high_mutator_utilization;
}
bool Heap::HasLowAllocationRate() {
return HasLowYoungGenerationAllocationRate() &&
HasLowOldGenerationAllocationRate();
}
void Heap::ReduceNewSpaceSize() {
if (!FLAG_predictable && HasLowAllocationRate()) {
new_space_.Shrink(); new_space_.Shrink();
UncommitFromSpace(); UncommitFromSpace();
} }
...@@ -4573,11 +4595,9 @@ GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { ...@@ -4573,11 +4595,9 @@ GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
heap_state.new_space_capacity = new_space_.Capacity(); heap_state.new_space_capacity = new_space_.Capacity();
heap_state.new_space_allocation_throughput_in_bytes_per_ms = heap_state.new_space_allocation_throughput_in_bytes_per_ms =
tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond(); tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
heap_state.current_allocation_throughput_in_bytes_per_ms = heap_state.has_low_allocation_rate = HasLowAllocationRate();
tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
intptr_t limit = old_generation_allocation_limit_; intptr_t limit = old_generation_allocation_limit_;
if (HasLowAllocationRate( if (heap_state.has_low_allocation_rate) {
heap_state.current_allocation_throughput_in_bytes_per_ms)) {
limit = idle_old_generation_allocation_limit_; limit = idle_old_generation_allocation_limit_;
} }
heap_state.can_start_incremental_marking = heap_state.can_start_incremental_marking =
......
...@@ -2142,9 +2142,11 @@ class Heap { ...@@ -2142,9 +2142,11 @@ class Heap {
void SelectScavengingVisitorsTable(); void SelectScavengingVisitorsTable();
bool HasLowAllocationRate(size_t allocaion_rate); bool HasLowYoungGenerationAllocationRate();
bool HasLowOldGenerationAllocationRate();
bool HasLowAllocationRate();
void ReduceNewSpaceSize(size_t allocaion_rate); void ReduceNewSpaceSize();
bool TryFinalizeIdleIncrementalMarking( bool TryFinalizeIdleIncrementalMarking(
double idle_time_in_ms, size_t size_of_objects, double idle_time_in_ms, size_t size_of_objects,
......
...@@ -677,9 +677,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { ...@@ -677,9 +677,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int total_live_bytes = 0; int total_live_bytes = 0;
bool reduce_memory = bool reduce_memory =
reduce_memory_footprint_ || reduce_memory_footprint_ || heap()->HasLowAllocationRate();
heap()->HasLowAllocationRate(
heap()->tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
if (FLAG_manual_evacuation_candidates_selection) { if (FLAG_manual_evacuation_candidates_selection) {
for (size_t i = 0; i < pages.size(); i++) { for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second; Page* p = pages[i].second;
......
...@@ -5808,6 +5808,7 @@ TEST(OldSpaceAllocationCounter) { ...@@ -5808,6 +5808,7 @@ TEST(OldSpaceAllocationCounter) {
Heap* heap = isolate->heap(); Heap* heap = isolate->heap();
size_t counter1 = heap->OldGenerationAllocationCounter(); size_t counter1 = heap->OldGenerationAllocationCounter();
heap->CollectGarbage(NEW_SPACE); heap->CollectGarbage(NEW_SPACE);
heap->CollectGarbage(NEW_SPACE);
const size_t kSize = 1024; const size_t kSize = 1024;
AllocateInSpace(isolate, kSize, OLD_SPACE); AllocateInSpace(isolate, kSize, OLD_SPACE);
size_t counter2 = heap->OldGenerationAllocationCounter(); size_t counter2 = heap->OldGenerationAllocationCounter();
...@@ -5868,12 +5869,13 @@ TEST(NewSpaceAllocationThroughput2) { ...@@ -5868,12 +5869,13 @@ TEST(NewSpaceAllocationThroughput2) {
int time2 = 200; int time2 = 200;
size_t counter2 = 2000; size_t counter2 = 2000;
tracer->SampleAllocation(time2, counter2, 0); tracer->SampleAllocation(time2, counter2, 0);
size_t throughput = tracer->AllocationThroughputInBytesPerMillisecond(100); size_t throughput =
tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput); CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
int time3 = 1000; int time3 = 1000;
size_t counter3 = 30000; size_t counter3 = 30000;
tracer->SampleAllocation(time3, counter3, 0); tracer->SampleAllocation(time3, counter3, 0);
throughput = tracer->AllocationThroughputInBytesPerMillisecond(100); throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput); CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
} }
...@@ -5932,12 +5934,14 @@ TEST(OldGenerationAllocationThroughput) { ...@@ -5932,12 +5934,14 @@ TEST(OldGenerationAllocationThroughput) {
int time2 = 200; int time2 = 200;
size_t counter2 = 2000; size_t counter2 = 2000;
tracer->SampleAllocation(time2, 0, counter2); tracer->SampleAllocation(time2, 0, counter2);
size_t throughput = tracer->AllocationThroughputInBytesPerMillisecond(100); size_t throughput =
tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput); CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
int time3 = 1000; int time3 = 1000;
size_t counter3 = 30000; size_t counter3 = 30000;
tracer->SampleAllocation(time3, 0, counter3); tracer->SampleAllocation(time3, 0, counter3);
throughput = tracer->AllocationThroughputInBytesPerMillisecond(100); throughput =
tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput); CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment