Commit 58312643 authored by ulan, committed by Commit bot

[heap] Use size_t in free list and evacuation candidate selection.

BUG=chromium:652721

Review-Url: https://codereview.chromium.org/2406363002
Cr-Commit-Position: refs/heads/master@{#40250}
parent e89eef30
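The rationale behind the int-to-size_t migration (chromium:652721): live-byte totals, free-byte counts, and evacuation quotas are non-negative byte counts that can exceed INT_MAX on large heaps, and overflowing a signed int is undefined behavior in C++. A minimal standalone sketch (not V8 code) of the failure mode that size_t avoids:

// Minimal sketch (not V8 code) of why byte counts belong in size_t.
// With int, a live-byte total past INT_MAX overflows (undefined behavior),
// and subtractions can silently produce negative "sizes".
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kMB = 1024 * 1024;
  size_t total_live_bytes = 0;
  // Summing 3000 pages of 1 MB each stays well-defined in size_t;
  // the same loop with an int accumulator overflows past 2047 MB.
  for (int page = 0; page < 3000; page++) {
    total_live_bytes += 1 * kMB;
  }
  std::printf("total=%zu bytes\n", total_live_bytes);  // %zu matches size_t
  return 0;
}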
@@ -141,19 +141,20 @@ const int kMinUInt16 = 0;
 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
 const int kMinUInt32 = 0;
-const int kCharSize = sizeof(char);      // NOLINT
-const int kShortSize = sizeof(short);    // NOLINT
-const int kIntSize = sizeof(int);        // NOLINT
-const int kInt32Size = sizeof(int32_t);  // NOLINT
-const int kInt64Size = sizeof(int64_t);  // NOLINT
-const int kFloatSize = sizeof(float);    // NOLINT
-const int kDoubleSize = sizeof(double);  // NOLINT
-const int kIntptrSize = sizeof(intptr_t);  // NOLINT
-const int kPointerSize = sizeof(void*);    // NOLINT
+const int kCharSize = sizeof(char);
+const int kShortSize = sizeof(short);  // NOLINT
+const int kIntSize = sizeof(int);
+const int kInt32Size = sizeof(int32_t);
+const int kInt64Size = sizeof(int64_t);
+const int kSizetSize = sizeof(size_t);
+const int kFloatSize = sizeof(float);
+const int kDoubleSize = sizeof(double);
+const int kIntptrSize = sizeof(intptr_t);
+const int kPointerSize = sizeof(void*);
 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
 const int kRegisterSize = kPointerSize + kPointerSize;
 #else
 const int kRegisterSize = kPointerSize;
 #endif
 const int kPCOnStackSize = kRegisterSize;
 const int kFPOnStackSize = kRegisterSize;
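The new kSizetSize constant joins V8's family of sizeof constants. As a purely illustrative sketch (hypothetical usage, not from this CL), such constants usually back compile-time layout checks; the pointer-size relation asserted below holds on mainstream V8 targets but is an assumption, hence the check:

// Hypothetical usage sketch, not V8 code: size constants feed
// compile-time layout checks instead of hard-coded 4s and 8s.
#include <cstddef>

const int kSizetSize = sizeof(size_t);  // mirrors the new constant
const int kPointerSize = sizeof(void*);

// Assumption: size_t is pointer-sized on the targets in question.
// If a port breaks this, the build fails here rather than at runtime.
static_assert(sizeof(size_t) == sizeof(void*),
              "expected size_t to be pointer-sized");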
@@ -575,22 +575,21 @@ const char* AllocationSpaceName(AllocationSpace space) {
   return NULL;
 }
 
 void MarkCompactCollector::ComputeEvacuationHeuristics(
-    int area_size, int* target_fragmentation_percent,
-    int* max_evacuated_bytes) {
+    size_t area_size, int* target_fragmentation_percent,
+    size_t* max_evacuated_bytes) {
   // For memory reducing and optimize for memory mode we directly define both
   // constants.
   const int kTargetFragmentationPercentForReduceMemory = 20;
-  const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
+  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
   const int kTargetFragmentationPercentForOptimizeMemory = 20;
-  const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
+  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
 
   // For regular mode (which is latency critical) we define less aggressive
   // defaults to start and switch to a trace-based (using compaction speed)
   // approach as soon as we have enough samples.
   const int kTargetFragmentationPercent = 70;
-  const int kMaxEvacuatedBytes = 4 * MB;
+  const size_t kMaxEvacuatedBytes = 4 * MB;
 
   // Time to take for a single area (=payload of page). Used as soon as there
   // exist enough compaction speed samples.
   const float kTargetMsPerArea = .5;
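The elided remainder of ComputeEvacuationHeuristics chooses among these constant pairs per heap mode. A rough sketch of that selection shape (simplified and not the actual elided body; the real function also has a trace-based branch driven by compaction speed):

// Rough sketch (simplified, not the actual elided body) of how the mode
// constants above are plausibly consumed: pick a (percent, byte-quota)
// pair per heap mode.
#include <cstddef>

enum class HeapMode { kReduceMemory, kOptimizeMemory, kRegular };

void PickHeuristics(HeapMode mode, int* target_fragmentation_percent,
                    size_t* max_evacuated_bytes) {
  const size_t MB = 1024 * 1024;
  switch (mode) {
    case HeapMode::kReduceMemory:
      *target_fragmentation_percent = 20;
      *max_evacuated_bytes = 12 * MB;
      break;
    case HeapMode::kOptimizeMemory:
      *target_fragmentation_percent = 20;
      *max_evacuated_bytes = 6 * MB;
      break;
    case HeapMode::kRegular:  // latency-critical default
      *target_fragmentation_percent = 70;
      *max_evacuated_bytes = 4 * MB;
      break;
  }
}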
@@ -629,10 +628,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
 
   int number_of_pages = space->CountTotalPages();
-  int area_size = space->AreaSize();
+  size_t area_size = space->AreaSize();
 
   // Pairs of (live_bytes_in_page, page).
-  typedef std::pair<int, Page*> LiveBytesPagePair;
+  typedef std::pair<size_t, Page*> LiveBytesPagePair;
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);
@@ -651,7 +650,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   }
 
   int candidate_count = 0;
-  int total_live_bytes = 0;
+  size_t total_live_bytes = 0;
 
   const bool reduce_memory = heap()->ShouldReduceMemory();
   if (FLAG_manual_evacuation_candidates_selection) {
@@ -687,12 +686,12 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   // them starting with the page with the most free memory, adding them to the
   // set of evacuation candidates as long as both conditions (fragmentation
   // and quota) hold.
-  int max_evacuated_bytes;
+  size_t max_evacuated_bytes;
   int target_fragmentation_percent;
   ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                               &max_evacuated_bytes);
-  const intptr_t free_bytes_threshold =
+  const size_t free_bytes_threshold =
       target_fragmentation_percent * (area_size / 100);
 
   // Sort pages from the most free to the least free, then select
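Note the evaluation order in the threshold above: area_size / 100 is computed first, which keeps the intermediate value small so the product cannot overflow even for very large areas, at the cost of rounding area_size down to a multiple of 100. As a standalone sketch (the helper name is invented for illustration):

// Sketch (assumed helper, not from the CL) of the threshold computation.
// Dividing area_size by 100 before multiplying keeps the intermediate
// value small; percent * area_size / 100 could overflow for huge areas.
#include <cstddef>

size_t FreeBytesThreshold(int target_fragmentation_percent, size_t area_size) {
  // Rounds area_size down to a multiple of 100 before scaling.
  return static_cast<size_t>(target_fragmentation_percent) * (area_size / 100);
}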
@@ -705,8 +704,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
               return a.first < b.first;
             });
   for (size_t i = 0; i < pages.size(); i++) {
-    int live_bytes = pages[i].first;
-    int free_bytes = area_size - live_bytes;
+    size_t live_bytes = pages[i].first;
+    DCHECK_GE(area_size, live_bytes);
+    size_t free_bytes = area_size - live_bytes;
     if (FLAG_always_compact ||
         ((free_bytes >= free_bytes_threshold) &&
          ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
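The new DCHECK_GE is not decoration: with unsigned arithmetic, area_size - live_bytes wraps around instead of going negative, so a page whose live bytes somehow exceeded the area size would masquerade as nearly SIZE_MAX bytes free and be selected eagerly. A small demonstration (not V8 code):

// Sketch (not V8 code) of the pitfall the added DCHECK_GE guards against:
// size_t subtraction wraps instead of going negative, so inconsistent
// inputs would make a page look almost infinitely free.
#include <cstddef>
#include <cstdio>

int main() {
  size_t area_size = 500 * 1024;
  size_t live_bytes = 600 * 1024;              // inconsistent input
  size_t free_bytes = area_size - live_bytes;  // wraps to ~SIZE_MAX
  std::printf("free_bytes=%zu\n", free_bytes);
  return 0;
}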
@@ -715,10 +715,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     }
     if (FLAG_trace_fragmentation_verbose) {
       PrintIsolate(isolate(),
-                   "compaction-selection-page: space=%s free_bytes_page=%d "
+                   "compaction-selection-page: space=%s free_bytes_page=%zu "
                    "fragmentation_limit_kb=%" V8PRIdPTR
-                   " fragmentation_limit_percent=%d sum_compaction_kb=%d "
-                   "compaction_limit_kb=%d\n",
+                   " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
+                   "compaction_limit_kb=%zu\n",
                    AllocationSpaceName(space->identity()), free_bytes / KB,
                    free_bytes_threshold / KB, target_fragmentation_percent,
                    total_live_bytes / KB, max_evacuated_bytes / KB);
@@ -726,7 +726,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   }
 
   // How many pages we will allocate for the evacuated objects
   // in the worst case: ceil(total_live_bytes / area_size)
-  int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+  int estimated_new_pages =
+      static_cast<int>((total_live_bytes + area_size - 1) / area_size);
   DCHECK_LE(estimated_new_pages, candidate_count);
   int estimated_released_pages = candidate_count - estimated_new_pages;
 
   // Avoid (compact -> expand) cycles.
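The expression implements ceiling division in pure integer arithmetic; since total_live_bytes is now size_t, the result is narrowed back to int for the page count. A worked example:

// Sketch of the ceiling division used above, with a worked example:
// (total_live_bytes + area_size - 1) / area_size == ceil(total / area).
#include <cstddef>
#include <cstdio>

int main() {
  size_t area_size = 500 * 1024;  // hypothetical page payload size
  size_t total_live_bytes = 1260 * 1024;
  // 1260 / 500 = 2.52, so three destination pages are needed.
  size_t pages = (total_live_bytes + area_size - 1) / area_size;
  std::printf("estimated_new_pages=%zu\n", pages);  // prints 3
  return 0;
}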
@@ -741,7 +742,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   if (FLAG_trace_fragmentation) {
     PrintIsolate(isolate(),
                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
-                 "total_live_bytes=%d\n",
+                 "total_live_bytes=%zu\n",
                  AllocationSpaceName(space->identity()), reduce_memory,
                  candidate_count, total_live_bytes / KB);
   }
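The format-string churn in these trace hunks is forced by the type change: the printf family performs no runtime argument checking, and passing a size_t where %d expects an int is undefined behavior; %zu is the matching length modifier. For instance:

// Sketch (not V8 code): a printf-family format must match the argument
// type exactly, so the trace strings above move from %d to %zu.
#include <cstddef>
#include <cstdio>

int main() {
  size_t total_live_bytes = 12u * 1024 * 1024;
  std::printf("total_live_bytes=%zu\n", total_live_bytes);  // correct
  // std::printf("total_live_bytes=%d\n", total_live_bytes);  // UB: wrong type
  return 0;
}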
@@ -3354,7 +3355,8 @@ int MarkCompactCollector::Sweeper::RawSweep(
     DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
     Address free_end = object->address();
     if (free_end != free_start) {
-      int size = static_cast<int>(free_end - free_start);
+      CHECK_GT(free_end, free_start);
+      size_t size = static_cast<size_t>(free_end - free_start);
       if (free_space_mode == ZAP_FREE_SPACE) {
         memset(free_start, 0xcc, size);
       }
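The CHECK_GT guards the cast: subtracting two addresses yields a signed difference, and casting a negative difference to size_t would wrap to a huge value. The pattern, as a standalone sketch:

// Sketch of the guarded pointer-difference pattern: the difference of two
// pointers is signed (ptrdiff_t), so the order check makes the cast to
// size_t lossless instead of wrapping on a negative difference.
#include <cassert>
#include <cstddef>

size_t RegionSize(const char* start, const char* end) {
  assert(end > start);  // stands in for V8's CHECK_GT
  return static_cast<size_t>(end - start);
}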
@@ -3363,7 +3365,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
                                   free_start, size);
         max_freed_bytes = Max(freed_bytes, max_freed_bytes);
       } else {
-        p->heap()->CreateFillerObjectAt(free_start, size,
+        p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                         ClearRecordedSlots::kNo);
       }
     }
@@ -3385,7 +3387,8 @@ int MarkCompactCollector::Sweeper::RawSweep(
   p->ClearLiveness();
 
   if (free_start != p->area_end()) {
-    int size = static_cast<int>(p->area_end() - free_start);
+    CHECK_GT(p->area_end(), free_start);
+    size_t size = static_cast<size_t>(p->area_end() - free_start);
     if (free_space_mode == ZAP_FREE_SPACE) {
       memset(free_start, 0xcc, size);
     }
@@ -3394,7 +3397,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
                                 free_start, size);
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   } else {
-    p->heap()->CreateFillerObjectAt(free_start, size,
+    p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                     ClearRecordedSlots::kNo);
   }
 }
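Where a size_t byte count flows back into an API that still takes int, such as CreateFillerObjectAt here, the CL narrows with an explicit static_cast. A checked variant of that narrowing (hypothetical helper, not part of the CL):

// Sketch (assumed helper, not V8's) of a checked narrowing for the spots
// where a size_t byte count crosses back into an int-taking API.
#include <cassert>
#include <cstddef>
#include <limits>

int ToIntChecked(size_t value) {
  assert(value <= static_cast<size_t>(std::numeric_limits<int>::max()));
  return static_cast<int>(value);
}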
@@ -3856,7 +3859,8 @@ void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
                                                          Page* page) {
   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
-  int to_sweep = page->area_size() - page->LiveBytes();
+  DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes()));
+  size_t to_sweep = page->area_size() - page->LiveBytes();
   if (space != NEW_SPACE)
     heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
 }
@@ -503,9 +503,9 @@ class MarkCompactCollector {
   bool WillBeDeoptimized(Code* code);
 
-  void ComputeEvacuationHeuristics(int area_size,
+  void ComputeEvacuationHeuristics(size_t area_size,
                                    int* target_fragmentation_percent,
-                                   int* max_evacuated_bytes);
+                                   size_t* max_evacuated_bytes);
 
   void VisitAllObjects(HeapObjectVisitor* visitor);
@@ -279,6 +279,7 @@ intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
     added += category->available();
     category->Relink();
   });
+  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
   return added;
 }
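The added DCHECK_EQ cross-checks what appears to be a recomputed value against a cached per-page counter, the usual invariant for incrementally maintained statistics. In miniature (not V8 code):

// Sketch (not V8 code) of the invariant the new DCHECK_EQ asserts: a
// cached byte counter must stay equal to the value recomputed by walking
// the underlying structure.
#include <cassert>
#include <cstddef>
#include <vector>

struct FreeList {
  std::vector<size_t> chunks;   // sizes of free chunks
  size_t cached_available = 0;  // updated on every add/remove

  void Add(size_t size) {
    chunks.push_back(size);
    cached_available += size;
  }
  size_t Recompute() const {
    size_t sum = 0;
    for (size_t s : chunks) sum += s;
    return sum;
  }
  void CheckCounter() const { assert(Recompute() == cached_available); }
};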
@@ -170,6 +170,10 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
 }
 
 void SimulateFullSpace(v8::internal::PagedSpace* space) {
+  i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
+  }
   space->EmptyAllocationInfo();
   space->ResetFreeList();
   space->ClearStats();
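The test helper now drains the concurrent sweeper before wiping the free list and stats; otherwise the background sweeper could keep repopulating the free list the helper just cleared. The general pattern, sketched outside V8:

// Sketch (not V8 code) of the pattern the helper now follows: finish
// background work before mutating the state it touches.
#include <atomic>
#include <thread>

struct Sweeper {
  std::thread worker;
  std::atomic<bool> in_progress{false};

  bool sweeping_in_progress() const { return in_progress.load(); }

  void EnsureSweepingCompleted() {
    if (worker.joinable()) worker.join();  // block until the sweeper is done
    in_progress.store(false);
  }
};

int main() {
  Sweeper sweeper;
  sweeper.in_progress.store(true);
  sweeper.worker = std::thread([] { /* sweep pages, refill free lists */ });
  // Mirroring SimulateFullSpace: drain the sweeper before resetting state
  // it may still be writing to.
  if (sweeper.sweeping_in_progress()) sweeper.EnsureSweepingCompleted();
  // ... now safe to reset free lists and stats ...
  return 0;
}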