Commit bc272e9f authored by ulan, committed by Commit bot

Represent speed in GCTracer functions as double instead of int.

This avoids redundant casts, loss of precision, and potential overflows.
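
To see why doubles help, here is a minimal standalone sketch (not part of
the patch) of the wraparound that the old size_t arithmetic had to detect
with an extra division, and the single cap comparison that suffices once
the math is done in double:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Old style: unsigned multiply wraps modulo 2^64, so overflow must be
      // probed by dividing back and comparing.
      size_t speed = SIZE_MAX / 2;    // artificially huge bytes/ms
      size_t idle_ms = 4;
      size_t step = speed * idle_ms;  // wraps silently
      std::printf("wrapped: %d\n", step / speed != idle_ms);  // prints 1

      // New style: double cannot wrap here; one comparison against a cap
      // (an illustrative 1e9 below) replaces the overflow probe.
      double dstep = static_cast<double>(speed) * idle_ms;
      const double kCap = 1e9;
      std::printf("step: %.0f\n", dstep >= kCap ? kCap : dstep);
      return 0;
    }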

BUG=chromium:597310
LOG=NO

Review URL: https://codereview.chromium.org/1841043002

Cr-Commit-Position: refs/heads/master@{#35113}
parent 2e48dc01
@@ -12,7 +12,6 @@ namespace v8 {
 namespace internal {

 const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
-const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
 const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
 const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
 const size_t GCIdleTimeHandler::kMinTimeForOverApproximatingWeakClosureInMs = 1;
@@ -47,82 +46,50 @@ void GCIdleTimeHeapState::Print() {
   PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
 }

 size_t GCIdleTimeHandler::EstimateMarkingStepSize(
-    size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
+    double idle_time_in_ms, double marking_speed_in_bytes_per_ms) {
   DCHECK(idle_time_in_ms > 0);

   if (marking_speed_in_bytes_per_ms == 0) {
     marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
   }

-  size_t marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
-  if (marking_step_size / marking_speed_in_bytes_per_ms != idle_time_in_ms) {
-    // In the case of an overflow we return maximum marking step size.
+  double marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
+  if (marking_step_size >= kMaximumMarkingStepSize) {
     return kMaximumMarkingStepSize;
   }
-
-  if (marking_step_size > kMaximumMarkingStepSize)
-    return kMaximumMarkingStepSize;
-
   return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
 }

-size_t GCIdleTimeHandler::EstimateMarkCompactTime(
-    size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
-  // TODO(hpayer): Be more precise about the type of mark-compact event. It
-  // makes a huge difference if compaction is happening.
-  if (mark_compact_speed_in_bytes_per_ms == 0) {
-    mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
-  }
-  size_t result = size_of_objects / mark_compact_speed_in_bytes_per_ms;
-  return Min(result, kMaxMarkCompactTimeInMs);
-}
-
-size_t GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
+double GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
     size_t size_of_objects,
-    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+    double final_incremental_mark_compact_speed_in_bytes_per_ms) {
   if (final_incremental_mark_compact_speed_in_bytes_per_ms == 0) {
     final_incremental_mark_compact_speed_in_bytes_per_ms =
         kInitialConservativeFinalIncrementalMarkCompactSpeed;
   }
-  size_t result =
+  double result =
       size_of_objects / final_incremental_mark_compact_speed_in_bytes_per_ms;
-  return Min(result, kMaxFinalIncrementalMarkCompactTimeInMs);
-}
-
-bool GCIdleTimeHandler::ShouldDoMarkCompact(
-    size_t idle_time_in_ms, size_t size_of_objects,
-    size_t mark_compact_speed_in_bytes_per_ms) {
-  return idle_time_in_ms >= kMaxScheduledIdleTime &&
-         idle_time_in_ms >=
-             EstimateMarkCompactTime(size_of_objects,
-                                     mark_compact_speed_in_bytes_per_ms);
+  return Min<double>(result, kMaxFinalIncrementalMarkCompactTimeInMs);
 }

 bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
     int contexts_disposed, double contexts_disposal_rate) {
   return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
          contexts_disposal_rate < kHighContextDisposalRate;
 }

 bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
-    size_t idle_time_in_ms, size_t size_of_objects,
-    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+    double idle_time_in_ms, size_t size_of_objects,
+    double final_incremental_mark_compact_speed_in_bytes_per_ms) {
   return idle_time_in_ms >=
          EstimateFinalIncrementalMarkCompactTime(
              size_of_objects,
              final_incremental_mark_compact_speed_in_bytes_per_ms);
 }

 bool GCIdleTimeHandler::ShouldDoOverApproximateWeakClosure(
-    size_t idle_time_in_ms) {
+    double idle_time_in_ms) {
   // TODO(jochen): Estimate the time it will take to build the object groups.
   return idle_time_in_ms >= kMinTimeForOverApproximatingWeakClosureInMs;
 }
......
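
The net effect on EstimateMarkingStepSize is easiest to see in isolation.
A hedged sketch of the new control flow, with stand-in constants (the real
kMaximumMarkingStepSize lives in gc-idle-time-handler.h and is not shown in
this diff; kConservativeTimeRatio is the 0.9 defined above):

    #include <cstddef>

    const double kMaxStepSketch = 700.0 * 1024 * 1024;  // assumed cap
    const double kConservativeTimeRatioSketch = 0.9;

    size_t EstimateMarkingStepSizeSketch(double idle_time_in_ms,
                                         double speed_in_bytes_per_ms) {
      // double arithmetic cannot wrap, so a single >= test against the cap
      // replaces the old wraparound probe plus the separate cap check.
      double step = speed_in_bytes_per_ms * idle_time_in_ms;
      if (step >= kMaxStepSketch) {
        return static_cast<size_t>(kMaxStepSketch);
      }
      return static_cast<size_t>(step * kConservativeTimeRatioSketch);
    }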
@@ -90,9 +90,6 @@ class GCIdleTimeHandler {
   static const size_t kInitialConservativeFinalIncrementalMarkCompactSpeed =
       2 * MB;

-  // Maximum mark-compact time returned by EstimateMarkCompactTime.
-  static const size_t kMaxMarkCompactTimeInMs;
-
   // Maximum final incremental mark-compact time returned by
   // EstimateFinalIncrementalMarkCompactTime.
   static const size_t kMaxFinalIncrementalMarkCompactTimeInMs;
@@ -130,27 +127,20 @@ class GCIdleTimeHandler {
   void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }

-  static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
-                                        size_t marking_speed_in_bytes_per_ms);
-
-  static size_t EstimateMarkCompactTime(
-      size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
-
-  static size_t EstimateFinalIncrementalMarkCompactTime(
-      size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
+  static size_t EstimateMarkingStepSize(double idle_time_in_ms,
+                                        double marking_speed_in_bytes_per_ms);

-  static bool ShouldDoMarkCompact(size_t idle_time_in_ms,
-                                  size_t size_of_objects,
-                                  size_t mark_compact_speed_in_bytes_per_ms);
+  static double EstimateFinalIncrementalMarkCompactTime(
+      size_t size_of_objects, double mark_compact_speed_in_bytes_per_ms);

   static bool ShouldDoContextDisposalMarkCompact(int context_disposed,
                                                  double contexts_disposal_rate);

   static bool ShouldDoFinalIncrementalMarkCompact(
-      size_t idle_time_in_ms, size_t size_of_objects,
-      size_t final_incremental_mark_compact_speed_in_bytes_per_ms);
+      double idle_time_in_ms, size_t size_of_objects,
+      double final_incremental_mark_compact_speed_in_bytes_per_ms);

-  static bool ShouldDoOverApproximateWeakClosure(size_t idle_time_in_ms);
+  static bool ShouldDoOverApproximateWeakClosure(double idle_time_in_ms);

  private:
   GCIdleTimeAction NothingOrDone(double idle_time_in_ms);
......
@@ -467,8 +467,7 @@ void GCTracer::PrintNVP() const {
           "external_weak_global_handles=%.2f "
           "steps_count=%d "
           "steps_took=%.1f "
-          "scavenge_throughput=%" V8_PTR_PREFIX
-          "d "
+          "scavenge_throughput=%.f "
           "total_size_before=%" V8_PTR_PREFIX
           "d "
           "total_size_after=%" V8_PTR_PREFIX
@@ -574,8 +573,7 @@ void GCTracer::PrintNVP() const {
           "finalization_steps_count=%d "
           "finalization_steps_took=%.1f "
           "finalization_longest_step=%.1f "
-          "incremental_marking_throughput=%" V8_PTR_PREFIX
-          "d "
+          "incremental_marking_throughput=%.f "
           "total_size_before=%" V8_PTR_PREFIX
           "d "
           "total_size_after=%" V8_PTR_PREFIX
@@ -599,7 +597,7 @@ void GCTracer::PrintNVP() const {
           "semi_space_copy_rate=%.1f%% "
           "new_space_allocation_throughput=%.1f "
           "context_disposal_rate=%.1f "
-          "compaction_speed=%" V8_PTR_PREFIX "d\n",
+          "compaction_speed=%.f\n",
           heap_->isolate()->time_millis_since_init(), duration,
           spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
           current_.scopes[Scope::MC_CLEAR],
@@ -667,8 +665,8 @@ void GCTracer::PrintNVP() const {
   }
 }

-int GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
-                           const BytesAndDuration& initial, double time_ms) {
+double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+                              const BytesAndDuration& initial, double time_ms) {
   BytesAndDuration sum = buffer.Sum(
       [time_ms](BytesAndDuration a, BytesAndDuration b) {
         if (time_ms != 0 && a.second >= time_ms) return a;
@@ -678,30 +676,30 @@ int GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
   uint64_t bytes = sum.first;
   double durations = sum.second;
   if (durations == 0.0) return 0;
-  double speed = bytes / durations + 0.5;
+  double speed = bytes / durations;
   const int max_speed = 1024 * MB;
   const int min_speed = 1;
   if (speed >= max_speed) return max_speed;
   if (speed <= min_speed) return min_speed;
-  return static_cast<int>(speed);
+  return speed;
 }

-int GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
+double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
   return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
 }

-intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
+double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
   if (cumulative_incremental_marking_duration_ == 0.0) return 0;
   // We haven't completed an entire round of incremental marking, yet.
   // Use data from GCTracer instead of data from event buffers.
   if (recorded_incremental_marking_steps_.Count() == 0) {
-    return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
-                                 cumulative_pure_incremental_marking_duration_);
+    return cumulative_incremental_marking_bytes_ /
+           cumulative_pure_incremental_marking_duration_;
   }
   return AverageSpeed(recorded_incremental_marking_steps_);
 }

-intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
+double GCTracer::ScavengeSpeedInBytesPerMillisecond(
     ScavengeSpeedMode mode) const {
   if (mode == kForAllObjects) {
     return AverageSpeed(recorded_scavenges_total_);
@@ -710,16 +708,15 @@ intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
   }
 }

-intptr_t GCTracer::CompactionSpeedInBytesPerMillisecond() const {
+double GCTracer::CompactionSpeedInBytesPerMillisecond() const {
   return AverageSpeed(recorded_compactions_);
 }

-intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
+double GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
   return AverageSpeed(recorded_mark_compacts_);
 }

-intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
-    const {
+double GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const {
   return AverageSpeed(recorded_incremental_mark_compacts_);
 }
@@ -727,15 +724,13 @@ double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
   if (combined_mark_compact_speed_cache_ > 0)
     return combined_mark_compact_speed_cache_;
   const double kMinimumMarkingSpeed = 0.5;
-  double speed1 =
-      static_cast<double>(IncrementalMarkingSpeedInBytesPerMillisecond());
-  double speed2 = static_cast<double>(
-      FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+  double speed1 = IncrementalMarkingSpeedInBytesPerMillisecond();
+  double speed2 = FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
   if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {
     // No data for the incremental marking speed.
     // Return the non-incremental mark-compact speed.
     combined_mark_compact_speed_cache_ =
-        static_cast<double>(MarkCompactSpeedInBytesPerMillisecond());
+        MarkCompactSpeedInBytesPerMillisecond();
   } else {
     // Combine the speed of incremental step and the speed of the final step.
     // 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
@@ -766,13 +761,11 @@ double GCTracer::AllocationThroughputInBytesPerMillisecond(
          OldGenerationAllocationThroughputInBytesPerMillisecond(time_ms);
 }

-size_t GCTracer::CurrentAllocationThroughputInBytesPerMillisecond() const {
+double GCTracer::CurrentAllocationThroughputInBytesPerMillisecond() const {
   return AllocationThroughputInBytesPerMillisecond(kThroughputTimeFrameMs);
 }

-size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
+double GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
     const {
   return OldGenerationAllocationThroughputInBytesPerMillisecond(
       kThroughputTimeFrameMs);
......
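
The comment retained in CombinedMarkCompactSpeedInBytesPerMillisecond above
is the harmonic combination of the two phase speeds: a byte is processed
once by incremental steps and once by the final step, so the times add as
reciprocals. A tiny worked sketch:

    #include <cstdio>

    // 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
    double CombinedSpeed(double speed1, double speed2) {
      return speed1 * speed2 / (speed1 + speed2);
    }

    int main() {
      // 100 bytes/ms of marking and 300 bytes/ms of finalization combine
      // to 75 bytes/ms overall.
      std::printf("%.1f\n", CombinedSpeed(100.0, 300.0));  // 75.0
      return 0;
    }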
@@ -286,25 +286,25 @@ class GCTracer {
   // Compute the average incremental marking speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const;
+  double IncrementalMarkingSpeedInBytesPerMillisecond() const;

   // Compute the average scavenge speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t ScavengeSpeedInBytesPerMillisecond(
+  double ScavengeSpeedInBytesPerMillisecond(
       ScavengeSpeedMode mode = kForAllObjects) const;

   // Compute the average compaction speed in bytes/millisecond.
   // Returns 0 if not enough events have been recorded.
-  intptr_t CompactionSpeedInBytesPerMillisecond() const;
+  double CompactionSpeedInBytesPerMillisecond() const;

   // Compute the average mark-sweep speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
+  double MarkCompactSpeedInBytesPerMillisecond() const;

   // Compute the average incremental mark-sweep finalize speed in
   // bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
+  double FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;

   // Compute the overall mark compact speed including incremental steps
   // and the final mark-compact step.
@@ -329,12 +329,12 @@ class GCTracer {
   // Allocation throughput in heap in bytes/milliseconds in the last
   // kThroughputTimeFrameMs seconds.
   // Returns 0 if no allocation events have been recorded.
-  size_t CurrentAllocationThroughputInBytesPerMillisecond() const;
+  double CurrentAllocationThroughputInBytesPerMillisecond() const;

   // Allocation throughput in old generation in bytes/milliseconds in the last
   // kThroughputTimeFrameMs seconds.
   // Returns 0 if no allocation events have been recorded.
-  size_t CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
+  double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;

   // Computes the context disposal rate in milliseconds. It takes the time
   // frame of the first recorded context disposal to the current time and
@@ -356,9 +356,9 @@ class GCTracer {
   // Returns the average speed of the events in the buffer.
   // If the buffer is empty, the result is 0.
   // Otherwise, the result is between 1 byte/ms and 1 GB/ms.
-  static int AverageSpeed(const RingBuffer<BytesAndDuration>& buffer);
-  static int AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
-                          const BytesAndDuration& initial, double time_ms);
+  static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer);
+  static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+                             const BytesAndDuration& initial, double time_ms);

  private:
   // Print one detailed trace line in name=value format.
......
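
The header comments above still promise a non-empty result between 1 byte/ms
and 1 GB/ms; per the .cc hunk, the bounds stay the integer constants 1 and
1024 * MB even though the value now flows through as a double. A sketch of
that clamp in isolation:

    // Bounds copied from GCTracer::AverageSpeed in the .cc hunk above.
    double ClampAverageSpeed(double speed) {
      const double kMaxSpeed = 1024.0 * 1024 * 1024;  // 1 GB/ms
      const double kMinSpeed = 1.0;                   // 1 byte/ms
      if (speed >= kMaxSpeed) return kMaxSpeed;
      if (speed <= kMinSpeed) return kMinSpeed;
      return speed;  // no longer truncated through static_cast<int>
    }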
@@ -1354,9 +1354,8 @@ bool Heap::PerformGarbageCollection(
   Relocatable::PostGarbageCollectionProcessing(isolate_);

   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
-  double mutator_speed = static_cast<double>(
-      tracer()
-          ->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond());
+  double mutator_speed =
+      tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
   intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
@@ -4135,8 +4134,8 @@ static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
 double Heap::YoungGenerationMutatorUtilization() {
   double mutator_speed = static_cast<double>(
       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
-  double gc_speed = static_cast<double>(
-      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
+  double gc_speed =
+      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
   if (FLAG_trace_mutator_utilization) {
     PrintIsolate(isolate(),
@@ -4215,7 +4214,7 @@ void Heap::ReduceNewSpaceSize() {
   // TODO(ulan): Unify this constant with the similar constant in
   // GCIdleTimeHandler once the change is merged to 4.5.
   static const size_t kLowAllocationThroughput = 1000;
-  const size_t allocation_throughput =
+  const double allocation_throughput =
       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();

   if (FLAG_predictable) return;
@@ -4244,21 +4243,20 @@ void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
 bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
   size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
-  size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
-      static_cast<size_t>(
-          tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+  double final_incremental_mark_compact_speed_in_bytes_per_ms =
+      tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
   if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
       (!incremental_marking()->finalize_marking_completed() &&
        mark_compact_collector()->marking_deque()->IsEmpty() &&
        gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
-           static_cast<size_t>(idle_time_in_ms)))) {
+           idle_time_in_ms))) {
     FinalizeIncrementalMarking(
         "Idle notification: finalize incremental marking");
     return true;
   } else if (incremental_marking()->IsComplete() ||
              (mark_compact_collector()->marking_deque()->IsEmpty() &&
               gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
-                  static_cast<size_t>(idle_time_in_ms), size_of_objects,
+                  idle_time_in_ms, size_of_objects,
                  final_incremental_mark_compact_speed_in_bytes_per_ms))) {
     CollectAllGarbage(current_gc_flags_,
                       "idle notification: finalize incremental marking");
......
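
ComputeMutatorUtilization, named in the hunk header above, is the consumer
of the two speeds that no longer need casting. A hedged sketch of the usual
derivation (the function body is not part of this diff): for the same byte
count, mutator time scales as 1/mutator_speed and GC time as 1/gc_speed, so

    // utilization = mutator_time / (mutator_time + gc_time)
    //             = gc_speed / (mutator_speed + gc_speed)
    double MutatorUtilizationSketch(double mutator_speed, double gc_speed) {
      if (mutator_speed == 0 || gc_speed == 0) return 1.0;  // assumed guard
      return gc_speed / (mutator_speed + gc_speed);
    }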
@@ -1024,11 +1024,10 @@ double IncrementalMarking::AdvanceIncrementalMarking(
   if (step_size_in_bytes == 0) {
     step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
-        static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
-        static_cast<size_t>(
-            heap()
-                ->tracer()
-                ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
+        GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
+        heap()
+            ->tracer()
+            ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
   }

   double remaining_time_in_ms = 0.0;
......
@@ -644,15 +644,15 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
   } else {
-    const intptr_t estimated_compaction_speed =
+    const double estimated_compaction_speed =
         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
     if (estimated_compaction_speed != 0) {
       // Estimate the target fragmentation based on traced compaction speed
       // and a goal for a single page.
-      const intptr_t estimated_ms_per_area =
-          1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
-      *target_fragmentation_percent =
-          100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
+      const double estimated_ms_per_area =
+          1 + area_size / estimated_compaction_speed;
+      *target_fragmentation_percent = static_cast<int>(
+          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
       if (*target_fragmentation_percent <
           kTargetFragmentationPercentForReduceMemory) {
         *target_fragmentation_percent =
@@ -3058,7 +3058,7 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
   const double kTargetCompactionTimeInMs = 1;
   const int kNumSweepingTasks = 3;

-  intptr_t compaction_speed =
+  double compaction_speed =
       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();

   const int available_cores = Max(
@@ -3067,8 +3067,8 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
       kNumSweepingTasks - 1);
   int tasks;
   if (compaction_speed > 0) {
-    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
-                                 compaction_speed / kTargetCompactionTimeInMs);
+    tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
+                                 kTargetCompactionTimeInMs);
   } else {
     tasks = pages;
   }
@@ -3135,7 +3135,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   DCHECK_GE(job.NumberOfPages(), 1);

   // Used for trace summary.
-  intptr_t compaction_speed = 0;
+  double compaction_speed = 0;
   if (FLAG_trace_evacuation) {
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }
@@ -3158,7 +3158,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         isolate(),
         "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d "
         "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
-        "d compaction_speed=%" V8_PTR_PREFIX "d\n",
+        "d compaction_speed=%.f\n",
         isolate()->time_millis_since_init(),
         FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
         abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
......
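
A worked instance of the task-count estimate in
NumberOfParallelCompactionTasks above, with invented inputs (the function
additionally bounds the result by available cores and page count):

    #include <cstdio>

    int main() {
      const double kTargetCompactionTimeInMs = 1;   // from the diff above
      double live_bytes = 4.0 * 1024 * 1024;        // assumed: 4 MB live
      double compaction_speed = 1.0 * 1024 * 1024;  // assumed: 1 MB/ms
      // One base task plus enough extra tasks to finish in the target time.
      int tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
                                       kTargetCompactionTimeInMs);
      std::printf("tasks=%d\n", tasks);  // tasks=5
      return 0;
    }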
@@ -23,8 +23,8 @@ void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
       static_cast<double>(base::Time::kMillisecondsPerSecond);
   double start_ms = heap->MonotonicallyIncreasingTimeInMs();
   double idle_time_in_ms = deadline_in_ms - start_ms;
-  size_t scavenge_speed_in_bytes_per_ms =
-      static_cast<size_t>(heap->tracer()->ScavengeSpeedInBytesPerMillisecond());
+  double scavenge_speed_in_bytes_per_ms =
+      heap->tracer()->ScavengeSpeedInBytesPerMillisecond();
   size_t new_space_size = heap->new_space()->Size();
   size_t new_space_capacity = heap->new_space()->Capacity();
@@ -42,9 +42,8 @@ void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
   }
 }

 bool ScavengeJob::ReachedIdleAllocationLimit(
-    size_t scavenge_speed_in_bytes_per_ms, size_t new_space_size,
+    double scavenge_speed_in_bytes_per_ms, size_t new_space_size,
     size_t new_space_capacity) {
   if (scavenge_speed_in_bytes_per_ms == 0) {
     scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
@@ -52,27 +51,24 @@ bool ScavengeJob::ReachedIdleAllocationLimit(

   // Set the allocation limit to the number of bytes we can scavenge in an
   // average idle task.
-  size_t allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
+  double allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
   // Keep the limit smaller than the new space capacity.
   allocation_limit =
-      Min(allocation_limit,
-          static_cast<size_t>(new_space_capacity *
-                              kMaxAllocationLimitAsFractionOfNewSpace));
+      Min<double>(allocation_limit,
+                  new_space_capacity * kMaxAllocationLimitAsFractionOfNewSpace);
   // Adjust the limit to take into account bytes that will be allocated until
-  // the next check.
-  allocation_limit = allocation_limit < kBytesAllocatedBeforeNextIdleTask
-                         ? 0
-                         : allocation_limit - kBytesAllocatedBeforeNextIdleTask;
-  // Keep the limit large enough to avoid scavenges in tiny new space.
-  allocation_limit = Max(allocation_limit, kMinAllocationLimit);
+  // the next check and keep the limit large enough to avoid scavenges in tiny
+  // new space.
+  allocation_limit =
+      Max<double>(allocation_limit - kBytesAllocatedBeforeNextIdleTask,
+                  kMinAllocationLimit);

   return allocation_limit <= new_space_size;
 }

 bool ScavengeJob::EnoughIdleTimeForScavenge(
-    double idle_time_in_ms, size_t scavenge_speed_in_bytes_per_ms,
+    double idle_time_in_ms, double scavenge_speed_in_bytes_per_ms,
     size_t new_space_size) {
   if (scavenge_speed_in_bytes_per_ms == 0) {
     scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
......
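
To make the rewritten ReachedIdleAllocationLimit arithmetic concrete, a
standalone sketch with invented constants (the real ones are declared in
scavenge-job.h and are not shown in this diff):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Illustrative stand-ins, not V8's values.
      const double kAverageIdleTimeMs = 5;
      const double kMaxFractionOfNewSpace = 0.5;
      const double kBytesAllocatedBeforeNextIdleTask = 64 * 1024;
      const double kMinAllocationLimit = 32 * 1024;

      double scavenge_speed = 200 * 1024.0;           // bytes/ms
      double new_space_capacity = 8 * 1024 * 1024.0;  // 8 MB

      // Bytes we can scavenge in one average idle task.
      double limit = kAverageIdleTimeMs * scavenge_speed;
      // Keep the limit below a fraction of new space capacity.
      limit = std::min(limit, new_space_capacity * kMaxFractionOfNewSpace);
      // Leave room for allocation before the next check, but never drop
      // below the minimum; the Max<double> in the diff folds both steps.
      limit = std::max(limit - kBytesAllocatedBeforeNextIdleTask,
                       kMinAllocationLimit);
      std::printf("limit=%.0f bytes\n", limit);  // 958464
      return 0;
    }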
@@ -47,12 +47,12 @@ class ScavengeJob {
   void NotifyIdleTask() { idle_task_pending_ = false; }
   bool IdleTaskRescheduled() { return idle_task_rescheduled_; }

-  static bool ReachedIdleAllocationLimit(size_t scavenge_speed_in_bytes_per_ms,
+  static bool ReachedIdleAllocationLimit(double scavenge_speed_in_bytes_per_ms,
                                          size_t new_space_size,
                                          size_t new_space_capacity);

   static bool EnoughIdleTimeForScavenge(double idle_time_ms,
-                                        size_t scavenge_speed_in_bytes_per_ms,
+                                        double scavenge_speed_in_bytes_per_ms,
                                         size_t new_space_size);

   // If we haven't recorded any scavenger events yet, we use a conservative
......
@@ -6325,14 +6325,14 @@ TEST(OldGenerationAllocationThroughput) {
   int time2 = 200;
   size_t counter2 = 2000;
   tracer->SampleAllocation(time2, 0, counter2);
-  size_t throughput =
-      tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
+  size_t throughput = static_cast<size_t>(
+      tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
   CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
   int time3 = 1000;
   size_t counter3 = 30000;
   tracer->SampleAllocation(time3, 0, counter3);
-  throughput =
-      tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
+  throughput = static_cast<size_t>(
+      tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
   CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
 }
@@ -6349,7 +6349,8 @@ TEST(AllocationThroughput) {
   int time2 = 200;
   size_t counter2 = 2000;
   tracer->SampleAllocation(time2, counter2, counter2);
-  size_t throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+  size_t throughput = static_cast<size_t>(
+      tracer->AllocationThroughputInBytesPerMillisecond(100));
   CHECK_EQ(2 * (counter2 - counter1) / (time2 - time1), throughput);
   int time3 = 1000;
   size_t counter3 = 30000;
......
@@ -74,43 +74,6 @@ TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
 }

-TEST(GCIdleTimeHandler, EstimateMarkCompactTimeInitial) {
-  size_t size = 100 * MB;
-  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, 0);
-  EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeMarkCompactSpeed,
-            time);
-}
-
-TEST(GCIdleTimeHandler, EstimateMarkCompactTimeNonZero) {
-  size_t size = 100 * MB;
-  size_t speed = 1 * MB;
-  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
-  EXPECT_EQ(size / speed, time);
-}
-
-TEST(GCIdleTimeHandler, EstimateMarkCompactTimeMax) {
-  size_t size = std::numeric_limits<size_t>::max();
-  size_t speed = 1;
-  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
-  EXPECT_EQ(GCIdleTimeHandler::kMaxMarkCompactTimeInMs, time);
-}
-
-TEST_F(GCIdleTimeHandlerTest, ShouldDoMarkCompact) {
-  size_t idle_time_ms = GCIdleTimeHandler::kMaxScheduledIdleTime;
-  EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_ms, 0, 0));
-}
-
-TEST_F(GCIdleTimeHandlerTest, DontDoMarkCompact) {
-  size_t idle_time_ms = 1;
-  EXPECT_FALSE(GCIdleTimeHandler::ShouldDoMarkCompact(
-      idle_time_ms, kSizeOfObjects, kMarkingSpeed));
-}
-
 TEST_F(GCIdleTimeHandlerTest, ShouldDoFinalIncrementalMarkCompact) {
   size_t idle_time_ms = 16;
   EXPECT_TRUE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
......
@@ -37,11 +37,11 @@ TEST(GCTracer, AverageSpeed) {
     buffer.Push(MakeBytesAndDuration(i + 1, 1));
   }
   EXPECT_EQ(
-      static_cast<int>(sum * 1.0 / buffer.kSize + 0.5),
+      sum * 1.0 / buffer.kSize,
       GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), buffer.kSize));
   buffer.Push(MakeBytesAndDuration(100, 1));
   EXPECT_EQ(
-      static_cast<int>((sum * 1.0 - 1 + 100) / buffer.kSize + 0.5),
+      (sum * 1.0 - 1 + 100) / buffer.kSize,
       GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), buffer.kSize));
 }
......
@@ -71,7 +71,7 @@ TEST(ScavengeJob, AllocationLimitHighScavengeSpeed) {
   EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
       scavenge_speed, expected_size - 1, kNewSpaceCapacity));
   EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
-      scavenge_speed, expected_size, kNewSpaceCapacity));
+      scavenge_speed, expected_size + 1, kNewSpaceCapacity));
 }
......