Commit a9e6bbba authored by hpayer's avatar hpayer Committed by Commit bot

[heap] Reland concurrent store buffer processing.

BUG=chromium:648973, chromium:648568

Review-Url: https://codereview.chromium.org/2493083003
Cr-Commit-Position: refs/heads/master@{#40928}
parent 8ddc260d
...@@ -439,7 +439,7 @@ void Heap::GarbageCollectionPrologue() { ...@@ -439,7 +439,7 @@ void Heap::GarbageCollectionPrologue() {
} }
CheckNewSpaceExpansionCriteria(); CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter(); UpdateNewSpaceAllocationCounter();
store_buffer()->MoveEntriesToRememberedSet(); store_buffer()->MoveAllEntriesToRememberedSet();
} }
...@@ -5874,10 +5874,10 @@ void Heap::CheckHandleCount() { ...@@ -5874,10 +5874,10 @@ void Heap::CheckHandleCount() {
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) { void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
if (!InNewSpace(object)) { if (!InNewSpace(object)) {
store_buffer()->MoveEntriesToRememberedSet();
Address slot_addr = reinterpret_cast<Address>(slot); Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr); Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE); DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr); RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr); RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
} }
...@@ -5886,8 +5886,8 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) { ...@@ -5886,8 +5886,8 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
void Heap::ClearRecordedSlotRange(Address start, Address end) { void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start); Page* page = Page::FromAddress(start);
if (!page->InNewSpace()) { if (!page->InNewSpace()) {
store_buffer()->MoveEntriesToRememberedSet();
DCHECK_EQ(page->owner()->identity(), OLD_SPACE); DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end, RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
SlotSet::PREFREE_EMPTY_BUCKETS); SlotSet::PREFREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end, RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end,
......
...@@ -16,62 +16,135 @@ namespace v8 { ...@@ -16,62 +16,135 @@ namespace v8 {
namespace internal { namespace internal {
StoreBuffer::StoreBuffer(Heap* heap) StoreBuffer::StoreBuffer(Heap* heap)
: heap_(heap), : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) {
top_(nullptr), for (int i = 0; i < kStoreBuffers; i++) {
start_(nullptr), start_[i] = nullptr;
limit_(nullptr), limit_[i] = nullptr;
virtual_memory_(nullptr) {} lazy_top_[i] = nullptr;
}
task_running_ = false;
}
void StoreBuffer::SetUp() { void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer // Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of // aligned to 2x the size. This lets us use a bit test to detect the end of
// the area. // the area.
virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 2); virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
uintptr_t start_as_int = uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address()); reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_ = reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize)); start_[0] =
limit_ = start_ + (kStoreBufferSize / kPointerSize); reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
start_[1] = limit_[0];
limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
Address* vm_limit = reinterpret_cast<Address*>( Address* vm_limit = reinterpret_cast<Address*>(
reinterpret_cast<char*>(virtual_memory_->address()) + reinterpret_cast<char*>(virtual_memory_->address()) +
virtual_memory_->size()); virtual_memory_->size());
DCHECK(start_ <= vm_limit);
DCHECK(limit_ <= vm_limit);
USE(vm_limit); USE(vm_limit);
DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferMask) == 0); for (int i = 0; i < kStoreBuffers; i++) {
DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address());
DCHECK(start_[i] <= vm_limit);
DCHECK(limit_[i] <= vm_limit);
DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
}
if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_), if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize, kStoreBufferSize * kStoreBuffers,
false)) { // Not executable. false)) { // Not executable.
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp"); V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
} }
top_ = start_; current_ = 0;
top_ = start_[current_];
} }
void StoreBuffer::TearDown() { void StoreBuffer::TearDown() {
delete virtual_memory_; delete virtual_memory_;
top_ = start_ = limit_ = nullptr; top_ = nullptr;
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
limit_[i] = nullptr;
lazy_top_[i] = nullptr;
}
} }
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->MoveEntriesToRememberedSet(); isolate->heap()->store_buffer()->FlipStoreBuffers();
isolate->counters()->store_buffer_overflows()->Increment(); isolate->counters()->store_buffer_overflows()->Increment();
} }
void StoreBuffer::MoveEntriesToRememberedSet() { void StoreBuffer::FlipStoreBuffers() {
if (top_ == start_) return; base::LockGuard<base::Mutex> guard(&mutex_);
DCHECK(top_ <= limit_); int other = (current_ + 1) % kStoreBuffers;
for (Address* current = start_; current < top_; current++) { MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
current_ = other;
top_ = start_[current_];
if (!task_running_) {
task_running_ = true;
Task* task = new Task(heap_->isolate(), this);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
}
}
void StoreBuffer::MoveEntriesToRememberedSet(int index) {
if (!lazy_top_[index]) return;
DCHECK_GE(index, 0);
DCHECK_LT(index, kStoreBuffers);
for (Address* current = start_[index]; current < lazy_top_[index];
current++) {
DCHECK(!heap_->code_space()->Contains(*current)); DCHECK(!heap_->code_space()->Contains(*current));
Address addr = *current; Address addr = *current;
Page* page = Page::FromAnyPointerAddress(heap_, addr); Page* page = Page::FromAnyPointerAddress(heap_, addr);
RememberedSet<OLD_TO_NEW>::Insert(page, addr); if (IsDeletionAddress(addr)) {
current++;
Address end = *current;
DCHECK(!IsDeletionAddress(end));
addr = UnmarkDeletionAddress(addr);
if (end) {
RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end,
SlotSet::PREFREE_EMPTY_BUCKETS);
} else {
RememberedSet<OLD_TO_NEW>::Remove(page, addr);
}
} else {
DCHECK(!IsDeletionAddress(addr));
RememberedSet<OLD_TO_NEW>::Insert(page, addr);
}
}
lazy_top_[index] = nullptr;
}
void StoreBuffer::MoveAllEntriesToRememberedSet() {
base::LockGuard<base::Mutex> guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
MoveEntriesToRememberedSet(current_);
top_ = start_[current_];
}
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
base::LockGuard<base::Mutex> guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
task_running_ = false;
}
void StoreBuffer::DeleteEntry(Address start, Address end) {
if (top_ + sizeof(Address) * 2 > limit_[current_]) {
StoreBufferOverflow(heap_->isolate());
} }
top_ = start_; *top_ = MarkDeletionAddress(start);
top_++;
*top_ = end;
top_++;
} }
} // namespace internal } // namespace internal
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include "src/allocation.h" #include "src/allocation.h"
#include "src/base/logging.h" #include "src/base/logging.h"
#include "src/base/platform/platform.h" #include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
#include "src/globals.h" #include "src/globals.h"
#include "src/heap/slot-set.h" #include "src/heap/slot-set.h"
...@@ -15,11 +16,17 @@ namespace v8 { ...@@ -15,11 +16,17 @@ namespace v8 {
namespace internal { namespace internal {
// Intermediate buffer that accumulates old-to-new stores from the generated // Intermediate buffer that accumulates old-to-new stores from the generated
// code. On buffer overflow the slots are moved to the remembered set. // code. Moreover, it stores invalid old-to-new slots with two entries.
// The first is a tagged address of the start of the invalid range, the second
// one is the end address of the invalid range or null if there is just one slot
// that needs to be removed from the remembered set. On buffer overflow the
// slots are moved to the remembered set.
class StoreBuffer { class StoreBuffer {
public: public:
static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2); static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferMask = kStoreBufferSize - 1; static const int kStoreBufferMask = kStoreBufferSize - 1;
static const int kStoreBuffers = 2;
static const intptr_t kDeletionTag = 1;
static void StoreBufferOverflow(Isolate* isolate); static void StoreBufferOverflow(Isolate* isolate);
...@@ -30,17 +37,92 @@ class StoreBuffer { ...@@ -30,17 +37,92 @@ class StoreBuffer {
// Used to add entries from generated code. // Used to add entries from generated code.
inline Address* top_address() { return reinterpret_cast<Address*>(&top_); } inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
void MoveEntriesToRememberedSet(); // Moves entries from a specific store buffer to the remembered set. This
// method takes a lock.
void MoveEntriesToRememberedSet(int index);
// This method ensures that all used store buffer entries are transferred to
// the remembered set.
void MoveAllEntriesToRememberedSet();
inline bool IsDeletionAddress(Address address) const {
return reinterpret_cast<intptr_t>(address) & kDeletionTag;
}
inline Address MarkDeletionAddress(Address address) {
return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) |
kDeletionTag);
}
inline Address UnmarkDeletionAddress(Address address) {
return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) &
~kDeletionTag);
}
// If we only want to delete a single slot, end should be set to null which
// will be written into the second field. When processing the store buffer
// the more efficient Remove method will be called in this case.
void DeleteEntry(Address start, Address end = nullptr);
// Used by the concurrent processing thread to transfer entries from the
// store buffer to the remembered set.
void ConcurrentlyProcessStoreBuffer();
bool Empty() {
for (int i = 0; i < kStoreBuffers; i++) {
if (lazy_top_[i]) {
return false;
}
}
return top_ == start_[current_];
}
private: private:
// There are two store buffers. If one store buffer fills up, the main thread
// publishes the top pointer of the store buffer that needs processing in its
// global lazy_top_ field. After that it starts the concurrent processing
// thread. The concurrent processing thread uses the pointer in lazy_top_.
// It will grab the given mutex and transfer its entries to the remembered
// set. If the concurrent thread does not make progress, the main thread will
// perform the work.
// Important: there is an ordering constraint. The store buffer with the
// older entries has to be processed first.
class Task : public CancelableTask {
public:
Task(Isolate* isolate, StoreBuffer* store_buffer)
: CancelableTask(isolate), store_buffer_(store_buffer) {}
virtual ~Task() {}
private:
void RunInternal() override {
store_buffer_->ConcurrentlyProcessStoreBuffer();
}
StoreBuffer* store_buffer_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
void FlipStoreBuffers();
Heap* heap_; Heap* heap_;
Address* top_; Address* top_;
// The start and the limit of the buffer that contains store slots // The start and the limit of the buffer that contains store slots
// added from the generated code. // added from the generated code. We have two chunks of store buffers.
Address* start_; // Whenever one fills up, we notify a concurrent processing thread and
Address* limit_; // use the other empty one in the meantime.
Address* start_[kStoreBuffers];
Address* limit_[kStoreBuffers];
// At most one lazy_top_ pointer is set at any time.
Address* lazy_top_[kStoreBuffers];
base::Mutex mutex_;
// We only want to have at most one concurrent processing task running.
bool task_running_;
// Points to the current buffer in use.
int current_;
base::VirtualMemory* virtual_memory_; base::VirtualMemory* virtual_memory_;
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment