Commit 5dc254f6 authored by mlippautz, committed by Commit bot

Revert "[heap] Optimize NewSpace::AllocatedSinceLastGC"

Also revert "[heap] Force inlining of AllocatedSinceLastGC"

This is a speculative revert to see if it actually impacts the benchmarks in
question.

This reverts commit 723e120b.
This reverts commit 08dbdd40.

BUG=chromium:605524
LOG=N
TBR=ulan@chromium.org

Review-Url: https://codereview.chromium.org/1932883002
Cr-Commit-Position: refs/heads/master@{#35881}
parent 9cf856a8
@@ -460,14 +460,6 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
             static_cast<size_t>(byte_size / kPointerSize));
 }
 
-void Heap::UpdateNewSpaceAllocationCounter() {
-  new_space_allocation_counter_ = NewSpaceAllocationCounter();
-}
-
-size_t Heap::NewSpaceAllocationCounter() {
-  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
-}
-
 template <Heap::FindMementoMode mode>
 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   // Check if there is potentially a memento behind the object. If
@@ -1258,8 +1258,13 @@ class Heap {
     return static_cast<intptr_t>(total);
   }
 
-  V8_INLINE void UpdateNewSpaceAllocationCounter();
-  V8_INLINE size_t NewSpaceAllocationCounter();
+  void UpdateNewSpaceAllocationCounter() {
+    new_space_allocation_counter_ = NewSpaceAllocationCounter();
+  }
+
+  size_t NewSpaceAllocationCounter() {
+    return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
+  }
 
   // This should be used only for testing.
   void set_new_space_allocation_counter(size_t new_value) {
@@ -242,23 +242,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
 bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
 bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
-size_t NewSpace::AllocatedSinceLastGC() {
-  const intptr_t age_mark_offset = Page::OffsetInPage(to_space_.age_mark());
-  const intptr_t top_offset = Page::OffsetInPage(allocation_info_.top());
-  const intptr_t age_mark_delta =
-      age_mark_offset >= Page::kObjectStartOffset
-          ? age_mark_offset - Page::kObjectStartOffset
-          : Page::kAllocatableMemory;
-  const intptr_t top_delta = top_offset >= Page::kObjectStartOffset
-                                 ? top_offset - Page::kObjectStartOffset
-                                 : Page::kAllocatableMemory;
-  DCHECK((allocated_since_last_gc_ > 0) ||
-         (Page::FromAllocationAreaAddress(allocation_info_.top()) ==
-          Page::FromAllocationAreaAddress(to_space_.age_mark())));
-  return static_cast<size_t>(allocated_since_last_gc_ + top_delta -
-                             age_mark_delta);
-}
-
 // --------------------------------------------------------------------------
 // AllocationResult
 
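For context (not part of the reverted change itself): the fast path removed above derives the bytes allocated since the last GC from a running whole-page counter plus the in-page offsets of the age mark and the allocation top. Below is a minimal standalone sketch of that arithmetic; the constants standing in for Page::kObjectStartOffset and Page::kAllocatableMemory are made up for illustration.

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for Page::kObjectStartOffset / Page::kAllocatableMemory.
constexpr intptr_t kObjectStartOffset = 256;
constexpr intptr_t kAllocatableMemory = 500 * 1024;

// Same shape as the removed fast path: whole pages consumed since the last GC
// (tracked incrementally in AddFreshPage) corrected by the in-page offsets of
// the age mark and the current allocation top.
intptr_t AllocatedSinceLastGC(intptr_t allocated_since_last_gc,
                              intptr_t age_mark_offset, intptr_t top_offset) {
  const intptr_t age_mark_delta = age_mark_offset >= kObjectStartOffset
                                      ? age_mark_offset - kObjectStartOffset
                                      : kAllocatableMemory;
  const intptr_t top_delta = top_offset >= kObjectStartOffset
                                 ? top_offset - kObjectStartOffset
                                 : kAllocatableMemory;
  return allocated_since_last_gc + top_delta - age_mark_delta;
}

int main() {
  // Age mark 1 KB into its page, top 65 KB into the current page, two fresh
  // pages opened in between: (page - 1 KB) + page + 65 KB bytes allocated.
  intptr_t bytes =
      AllocatedSinceLastGC(2 * kAllocatableMemory, kObjectStartOffset + 1024,
                           kObjectStartOffset + 65 * 1024);
  assert(bytes == 2 * kAllocatableMemory + 64 * 1024);
  return 0;
}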
@@ -1497,7 +1497,6 @@ bool NewSpace::AddFreshPage() {
   int remaining_in_page = static_cast<int>(limit - top);
   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
   pages_used_++;
-  allocated_since_last_gc_ += Page::kAllocatableMemory;
   UpdateAllocationInfo();
 
   return true;
@@ -548,10 +548,6 @@ class MemoryChunk {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
   }
 
-  static intptr_t OffsetInPage(Address a) {
-    return reinterpret_cast<intptr_t>(a) & kPageAlignmentMask;
-  }
-
   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
 
   static inline void UpdateHighWaterMark(Address mark) {
@@ -2529,7 +2525,6 @@ class NewSpace : public Space {
         from_space_(heap, kFromSpace),
         reservation_(),
         pages_used_(0),
-        allocated_since_last_gc_(0),
         top_on_previous_step_(0),
         allocated_histogram_(nullptr),
         promoted_histogram_(nullptr) {}
@@ -2601,7 +2596,42 @@ class NewSpace : public Space {
   // Return the available bytes without growing.
   intptr_t Available() override { return Capacity() - Size(); }
 
-  V8_INLINE size_t AllocatedSinceLastGC();
+  size_t AllocatedSinceLastGC() {
+    bool seen_age_mark = false;
+    Address age_mark = to_space_.age_mark();
+    Page* current_page = to_space_.first_page();
+    Page* age_mark_page = Page::FromAddress(age_mark);
+    Page* last_page = Page::FromAddress(top() - kPointerSize);
+    if (age_mark_page == last_page) {
+      if (top() - age_mark >= 0) {
+        return top() - age_mark;
+      }
+      // Top was reset at some point, invalidating this metric.
+      return 0;
+    }
+    while (current_page != last_page) {
+      if (current_page == age_mark_page) {
+        seen_age_mark = true;
+        break;
+      }
+      current_page = current_page->next_page();
+    }
+    if (!seen_age_mark) {
+      // Top was reset at some point, invalidating this metric.
+      return 0;
+    }
+    intptr_t allocated = age_mark_page->area_end() - age_mark;
+    DCHECK_EQ(current_page, age_mark_page);
+    current_page = age_mark_page->next_page();
+    while (current_page != last_page) {
+      allocated += Page::kAllocatableMemory;
+      current_page = current_page->next_page();
+    }
+    allocated += top() - current_page->area_start();
+    DCHECK_LE(0, allocated);
+    DCHECK_LE(allocated, Size());
+    return static_cast<size_t>(allocated);
+  }
 
   bool ReplaceWithEmptyPage(Page* page) {
     // This method is called after flipping the semispace.
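For context: the version being restored above walks the to-space page list instead of keeping a counter, summing the tail of the page holding the age mark, Page::kAllocatableMemory for every page in between, and the used head of the page holding top. Below is a rough standalone model of that walk; the Page struct and sizes are invented for illustration and are not V8's.

#include <cassert>
#include <cstdint>
#include <vector>

// Toy page: a [start, end) range of allocatable addresses, linked to the next page.
struct Page {
  intptr_t start;
  intptr_t end;
  Page* next;
  bool Contains(intptr_t a) const { return a >= start && a < end; }
};

// Mirrors the structure of the restored walk: same-page fast case, then
// tail of the age-mark page + full intermediate pages + head of the top page.
intptr_t AllocatedSinceLastGC(Page* first_page, intptr_t age_mark, intptr_t top) {
  Page* age_mark_page = nullptr;
  Page* top_page = nullptr;
  for (Page* p = first_page; p != nullptr; p = p->next) {
    if (p->Contains(age_mark)) age_mark_page = p;
    if (p->Contains(top - 1)) top_page = p;
  }
  if (age_mark_page == top_page) return top - age_mark;
  intptr_t allocated = age_mark_page->end - age_mark;  // tail of the mark page
  for (Page* p = age_mark_page->next; p != top_page; p = p->next) {
    allocated += p->end - p->start;                    // full pages in between
  }
  allocated += top - top_page->start;                  // head of the top page
  return allocated;
}

int main() {
  // Three 1000-byte pages; age mark 300 bytes into page 0, top 200 bytes into
  // page 2: 700 + 1000 + 200 = 1900 bytes allocated since the mark was set.
  std::vector<Page> pages = {
      {0, 1000, nullptr}, {1000, 2000, nullptr}, {2000, 3000, nullptr}};
  pages[0].next = &pages[1];
  pages[1].next = &pages[2];
  assert(AllocatedSinceLastGC(&pages[0], 300, 2200) == 1900);
  return 0;
}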
@@ -2641,10 +2671,7 @@ class NewSpace : public Space {
   // Get the age mark of the inactive semispace.
   Address age_mark() { return from_space_.age_mark(); }
   // Set the age mark in the active semispace.
-  void set_age_mark(Address mark) {
-    to_space_.set_age_mark(mark);
-    allocated_since_last_gc_ = 0;
-  }
+  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
   // The allocation top and limit address.
   Address* allocation_top_address() { return allocation_info_.top_address(); }
@@ -2765,7 +2792,6 @@
   SemiSpace from_space_;
   base::VirtualMemory reservation_;
   int pages_used_;
-  intptr_t allocated_since_last_gc_;
 
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
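For context on the heap.h / heap-inl.h hunks: NewSpaceAllocationCounter is intended to be a monotonically increasing byte count, i.e. a snapshot folded in at GC time plus whatever AllocatedSinceLastGC reports for the current cycle. Below is a hedged sketch of that pattern in isolation; the class and field names are illustrative, not V8's API.

#include <cassert>
#include <cstddef>

// Toy model of the counter pattern restored in heap.h: a snapshot taken at GC
// time plus the bytes allocated since. Names and numbers are illustrative.
class ToyHeap {
 public:
  size_t allocated_since_last_gc = 0;  // stand-in for NewSpace::AllocatedSinceLastGC()

  size_t NewSpaceAllocationCounter() const {
    return new_space_allocation_counter_ + allocated_since_last_gc;
  }

  // Called when a GC starts: fold the live delta into the snapshot. In V8 the
  // delta then drops back toward zero because the age mark is moved up to the
  // current top; in this toy model we simply reset it explicitly.
  void UpdateNewSpaceAllocationCounter() {
    new_space_allocation_counter_ = NewSpaceAllocationCounter();
    allocated_since_last_gc = 0;
  }

 private:
  size_t new_space_allocation_counter_ = 0;  // snapshot as of the last GC
};

int main() {
  ToyHeap heap;
  heap.allocated_since_last_gc = 4096;     // mutator allocates 4 KB
  assert(heap.NewSpaceAllocationCounter() == 4096);
  heap.UpdateNewSpaceAllocationCounter();  // GC happens
  heap.allocated_since_last_gc = 1024;     // 1 KB allocated after the GC
  assert(heap.NewSpaceAllocationCounter() == 4096 + 1024);  // keeps growing
  return 0;
}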