Commit a89d41f0 authored by mlippautz, committed by Commit bot

[heap] Remove store_buffer_counter from MemoryChunk

It is only used locally when exempting pages from the store buffer. This use
case can be replaced with a hashmap at the call site.

BUG=chromium:524425
LOG=N
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/1593583002

Cr-Commit-Position: refs/heads/master@{#33324}
parent b0119907
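
The pattern the change adopts can be sketched in isolation roughly like this: instead of resetting and bumping a counter field on every MemoryChunk, the sampling loop tallies hits in a hash map that is local to the call and flips a page to scan-on-scavenge once its tally reaches the threshold. The names Chunk, ChunkOf, and ExemptPopularChunks below are placeholders for illustration only; V8's actual code uses its internal HashMap and MemoryChunk, as the store-buffer.cc hunks below show.

// Minimal sketch, not V8 code: tally sampled pointers per chunk in a map that
// is local to the call, and exempt chunks whose tally reaches the threshold.
#include <cstdint>
#include <unordered_map>
#include <vector>

struct Chunk {
  bool scan_on_scavenge = false;
};

// Placeholder for mapping a slot address back to its containing chunk
// (the role MemoryChunk::FromAnyPointerAddress plays in V8).
Chunk* ChunkOf(uintptr_t addr, std::vector<Chunk>& chunks) {
  return &chunks[addr % chunks.size()];
}

void ExemptPopularChunks(const std::vector<uintptr_t>& sampled_slots,
                         std::vector<Chunk>& chunks, int threshold) {
  std::unordered_map<Chunk*, int> counts;  // replaces the per-chunk field
  for (uintptr_t addr : sampled_slots) {
    Chunk* chunk = ChunkOf(addr, chunks);
    int& counter = counts[chunk];  // value-initialized to 0 on first lookup
    if (counter >= threshold) {
      chunk->scan_on_scavenge = true;  // exempt this page from the buffer
    }
    ++counter;
  }
}
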
@@ -396,7 +396,7 @@ class MemoryChunk {
       + 2 * kPointerSize  // base::VirtualMemory reservation_
       + kPointerSize      // Address owner_
       + kPointerSize      // Heap* heap_
-      + kIntSize;         // int store_buffer_counter_
+      + kIntSize;         // int progress_bar_

   static const size_t kSlotsBufferOffset =
       kLiveBytesOffset + kIntSize;  // int live_byte_count_
@@ -408,7 +408,6 @@ class MemoryChunk {
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
-      + kIntSize          // int progress_bar_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord parallel_sweeping_
@@ -420,7 +419,7 @@ class MemoryChunk {
   // We add some more space to the computed header size to amount for missing
   // alignment requirements in our computation.
   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
-  static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
+  static const size_t kHeaderSize = kMinHeaderSize;

   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
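
A plausible reading of why the extra kIntSize pad can be dropped (an inference, not stated in the CL): on a 64-bit build the old field order left progress_bar_ as a lone 4-byte int between the 8-byte write_barrier_counter_ and the pointer-sized high_water_mark_, so the compiler inserted 4 bytes of padding that the plain size sum in kMinHeaderSize did not count, and the + kIntSize on kHeaderSize papered over that. With progress_bar_ moved up into store_buffer_counter_'s old slot, it pairs with live_byte_count_ and no lone int remains, so the summed sizes match the real layout.

// Inferred 64-bit layout around the change (sizes in bytes, not from the CL):
//   before: ... write_barrier_counter_(8) | progress_bar_(4) | <pad 4> | high_water_mark_(8) ...
//   after:  ... progress_bar_(4) | live_byte_count_(4) | ... | write_barrier_counter_(8) | high_water_mark_(8) ...
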
@@ -519,11 +518,6 @@ class MemoryChunk {
   }

   inline void set_scan_on_scavenge(bool scan);
-  int store_buffer_counter() { return store_buffer_counter_; }
-  void set_store_buffer_counter(int counter) {
-    store_buffer_counter_ = counter;
-  }
-
   bool Contains(Address addr) {
     return addr >= area_start() && addr < area_end();
   }
@@ -751,17 +745,14 @@ class MemoryChunk {
   // in a fixed array.
   Address owner_;
   Heap* heap_;
-  // Used by the store buffer to keep track of which pages to mark scan-on-
-  // scavenge.
-  int store_buffer_counter_;
+  // Used by the incremental marker to keep track of the scanning progress in
+  // large objects that have a progress bar and are scanned in increments.
+  int progress_bar_;
   // Count of bytes marked black on page.
   int live_byte_count_;
   SlotsBuffer* slots_buffer_;
   SkipList* skip_list_;
   intptr_t write_barrier_counter_;
-  // Used by the incremental marker to keep track of the scanning progress in
-  // large objects that have a progress bar and are scanned in increments.
-  int progress_bar_;
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
   AtomicValue<intptr_t> high_water_mark_;
@@ -182,11 +182,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
 // Sample the store buffer to see if some pages are taking up a lot of space
 // in the store buffer.
 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  while ((chunk = it.next()) != NULL) {
-    chunk->set_store_buffer_counter(0);
-  }
+  HashMap store_buffer_counts(HashMap::PointersMatch, 16);
   bool created_new_scan_on_scavenge_pages = false;
   MemoryChunk* previous_chunk = NULL;
   for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
@@ -197,12 +193,16 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
     } else {
       containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
     }
-    int old_counter = containing_chunk->store_buffer_counter();
+    HashMap::Entry* e = store_buffer_counts.LookupOrInsert(
+        containing_chunk,
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(containing_chunk) >>
+                              kPageSizeBits));
+    intptr_t old_counter = bit_cast<intptr_t>(e->value);
     if (old_counter >= threshold) {
       containing_chunk->set_scan_on_scavenge(true);
       created_new_scan_on_scavenge_pages = true;
     }
-    containing_chunk->set_store_buffer_counter(old_counter + 1);
+    (*bit_cast<intptr_t*>(&e->value))++;
     previous_chunk = containing_chunk;
   }
   if (created_new_scan_on_scavenge_pages) {
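
One detail in the new ExemptPopularPages worth calling out: the hash map's value slot is a void*, and the per-chunk counter is kept directly in that slot by reinterpreting its bits as an intptr_t (the bit_cast calls above), so no per-entry allocation is needed and the map is simply discarded when the function returns. A rough standalone illustration of the idiom, using a hypothetical Entry struct and plain reinterpret_cast rather than V8's HashMap::Entry and bit_cast:

// Illustrative only; Entry here is a stand-in, not V8's HashMap::Entry.
#include <cstdint>

struct Entry {
  void* key = nullptr;
  void* value = nullptr;  // doubles as an intptr_t counter, starting at 0
};

intptr_t ReadCounter(const Entry& e) {
  return reinterpret_cast<intptr_t>(e.value);
}

void IncrementCounter(Entry& e) {
  e.value = reinterpret_cast<void*>(ReadCounter(e) + 1);
}
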