Commit d800644c authored by machenbach's avatar machenbach Committed by Commit bot

Revert of [heap] Concurrent store buffer processing. (patchset #13 id:260001...

Revert of [heap] Concurrent store buffer processing. (patchset #13 id:260001 of https://codereview.chromium.org/2453673003/ )

Reason for revert:
Seems to block rolling:
https://codereview.chromium.org/2447393005/

Original issue's description:
> [heap] Concurrent store buffer processing.
>
> BUG=chromium:648973, chromium:648568

TBR=ulan@chromium.org,mlippautz@chromium.org,hpayer@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:648973, chromium:648568

Review-Url: https://codereview.chromium.org/2449853010
Cr-Commit-Position: refs/heads/master@{#40650}
parent a017715d
...@@ -440,7 +440,7 @@ void Heap::GarbageCollectionPrologue() { ...@@ -440,7 +440,7 @@ void Heap::GarbageCollectionPrologue() {
} }
CheckNewSpaceExpansionCriteria(); CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter(); UpdateNewSpaceAllocationCounter();
store_buffer()->MoveAllEntriesToRememberedSet(); store_buffer()->MoveEntriesToRememberedSet();
} }
...@@ -5906,15 +5906,11 @@ void Heap::CheckHandleCount() { ...@@ -5906,15 +5906,11 @@ void Heap::CheckHandleCount() {
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) { void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
if (!InNewSpace(object)) { if (!InNewSpace(object)) {
store_buffer()->MoveEntriesToRememberedSet();
Address slot_addr = reinterpret_cast<Address>(slot); Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr); Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE); DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
if (gc_state_ == NOT_IN_GC) { RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
store_buffer()->DeleteEntry(slot_addr);
} else {
DCHECK(store_buffer()->Empty());
RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
}
RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr); RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
} }
} }
...@@ -5922,14 +5918,10 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) { ...@@ -5922,14 +5918,10 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
void Heap::ClearRecordedSlotRange(Address start, Address end) { void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start); Page* page = Page::FromAddress(start);
if (!page->InNewSpace()) { if (!page->InNewSpace()) {
store_buffer()->MoveEntriesToRememberedSet();
DCHECK_EQ(page->owner()->identity(), OLD_SPACE); DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
if (gc_state_ == NOT_IN_GC) { RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
store_buffer()->DeleteEntry(start, end); SlotSet::PREFREE_EMPTY_BUCKETS);
} else {
DCHECK(store_buffer()->Empty());
RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
SlotSet::PREFREE_EMPTY_BUCKETS);
}
RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end, RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end,
SlotSet::FREE_EMPTY_BUCKETS); SlotSet::FREE_EMPTY_BUCKETS);
} }
......
...@@ -16,135 +16,62 @@ namespace v8 { ...@@ -16,135 +16,62 @@ namespace v8 {
namespace internal { namespace internal {
StoreBuffer::StoreBuffer(Heap* heap) StoreBuffer::StoreBuffer(Heap* heap)
: heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) { : heap_(heap),
for (int i = 0; i < kStoreBuffers; i++) { top_(nullptr),
start_[i] = nullptr; start_(nullptr),
limit_[i] = nullptr; limit_(nullptr),
lazy_top_[i] = nullptr; virtual_memory_(nullptr) {}
}
task_running_ = false;
}
void StoreBuffer::SetUp() { void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer // Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of // aligned to 2x the size. This lets us use a bit test to detect the end of
// the area. // the area.
virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3); virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 2);
uintptr_t start_as_int = uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address()); reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_[0] = start_ = reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize)); limit_ = start_ + (kStoreBufferSize / kPointerSize);
limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
start_[1] = limit_[0];
limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
Address* vm_limit = reinterpret_cast<Address*>( Address* vm_limit = reinterpret_cast<Address*>(
reinterpret_cast<char*>(virtual_memory_->address()) + reinterpret_cast<char*>(virtual_memory_->address()) +
virtual_memory_->size()); virtual_memory_->size());
DCHECK(start_ <= vm_limit);
DCHECK(limit_ <= vm_limit);
USE(vm_limit); USE(vm_limit);
for (int i = 0; i < kStoreBuffers; i++) { DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferMask) == 0);
DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address());
DCHECK(start_[i] <= vm_limit);
DCHECK(limit_[i] <= vm_limit);
DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
}
if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]), if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
kStoreBufferSize * kStoreBuffers, kStoreBufferSize,
false)) { // Not executable. false)) { // Not executable.
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp"); V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
} }
current_ = 0; top_ = start_;
top_ = start_[current_];
} }
void StoreBuffer::TearDown() { void StoreBuffer::TearDown() {
delete virtual_memory_; delete virtual_memory_;
top_ = nullptr; top_ = start_ = limit_ = nullptr;
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
limit_[i] = nullptr;
lazy_top_[i] = nullptr;
}
} }
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->FlipStoreBuffers(); isolate->heap()->store_buffer()->MoveEntriesToRememberedSet();
isolate->counters()->store_buffer_overflows()->Increment(); isolate->counters()->store_buffer_overflows()->Increment();
} }
void StoreBuffer::FlipStoreBuffers() { void StoreBuffer::MoveEntriesToRememberedSet() {
base::LockGuard<base::Mutex> guard(&mutex_); if (top_ == start_) return;
int other = (current_ + 1) % kStoreBuffers; DCHECK(top_ <= limit_);
MoveEntriesToRememberedSet(other); for (Address* current = start_; current < top_; current++) {
lazy_top_[current_] = top_;
current_ = other;
top_ = start_[current_];
if (!task_running_) {
task_running_ = true;
Task* task = new Task(heap_->isolate(), this);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
}
}
void StoreBuffer::MoveEntriesToRememberedSet(int index) {
if (!lazy_top_[index]) return;
DCHECK_GE(index, 0);
DCHECK_LT(index, kStoreBuffers);
for (Address* current = start_[index]; current < lazy_top_[index];
current++) {
DCHECK(!heap_->code_space()->Contains(*current)); DCHECK(!heap_->code_space()->Contains(*current));
Address addr = *current; Address addr = *current;
Page* page = Page::FromAnyPointerAddress(heap_, addr); Page* page = Page::FromAnyPointerAddress(heap_, addr);
if (IsDeletionAddress(addr)) { RememberedSet<OLD_TO_NEW>::Insert(page, addr);
current++;
Address end = *current;
DCHECK(!IsDeletionAddress(end));
addr = UnmarkDeletionAddress(addr);
if (end) {
RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end,
SlotSet::PREFREE_EMPTY_BUCKETS);
} else {
RememberedSet<OLD_TO_NEW>::Remove(page, addr);
}
} else {
DCHECK(!IsDeletionAddress(addr));
RememberedSet<OLD_TO_NEW>::Insert(page, addr);
}
}
lazy_top_[index] = nullptr;
}
void StoreBuffer::MoveAllEntriesToRememberedSet() {
base::LockGuard<base::Mutex> guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
MoveEntriesToRememberedSet(current_);
top_ = start_[current_];
}
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
base::LockGuard<base::Mutex> guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
task_running_ = false;
}
void StoreBuffer::DeleteEntry(Address start, Address end) {
if (top_ + sizeof(Address) * 2 > limit_[current_]) {
StoreBufferOverflow(heap_->isolate());
} }
*top_ = MarkDeletionAddress(start); top_ = start_;
top_++;
*top_ = end;
top_++;
} }
} // namespace internal } // namespace internal
......
...@@ -8,7 +8,6 @@ ...@@ -8,7 +8,6 @@
#include "src/allocation.h" #include "src/allocation.h"
#include "src/base/logging.h" #include "src/base/logging.h"
#include "src/base/platform/platform.h" #include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
#include "src/globals.h" #include "src/globals.h"
#include "src/heap/slot-set.h" #include "src/heap/slot-set.h"
...@@ -16,17 +15,11 @@ namespace v8 { ...@@ -16,17 +15,11 @@ namespace v8 {
namespace internal { namespace internal {
// Intermediate buffer that accumulates old-to-new stores from the generated // Intermediate buffer that accumulates old-to-new stores from the generated
// code. Moreover, it stores invalid old-to-new slots with two entries. // code. On buffer overflow the slots are moved to the remembered set.
// The first is a tagged address of the start of the invalid range, the second
// one is the end address of the invalid range or null if there is just one slot
// that needs to be removed from the remembered set. On buffer overflow the
// slots are moved to the remembered set.
class StoreBuffer { class StoreBuffer {
public: public:
static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2); static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferMask = kStoreBufferSize - 1; static const int kStoreBufferMask = kStoreBufferSize - 1;
static const int kStoreBuffers = 2;
static const intptr_t kDeletionTag = 1;
static void StoreBufferOverflow(Isolate* isolate); static void StoreBufferOverflow(Isolate* isolate);
...@@ -37,92 +30,17 @@ class StoreBuffer { ...@@ -37,92 +30,17 @@ class StoreBuffer {
// Used to add entries from generated code. // Used to add entries from generated code.
inline Address* top_address() { return reinterpret_cast<Address*>(&top_); } inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
// Moves entries from a specific store buffer to the remembered set. This void MoveEntriesToRememberedSet();
// method takes a lock.
void MoveEntriesToRememberedSet(int index);
// This method ensures that all used store buffer entries are transfered to
// the remembered set.
void MoveAllEntriesToRememberedSet();
inline bool IsDeletionAddress(Address address) const {
return reinterpret_cast<intptr_t>(address) & kDeletionTag;
}
inline Address MarkDeletionAddress(Address address) {
return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) |
kDeletionTag);
}
inline Address UnmarkDeletionAddress(Address address) {
return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) &
~kDeletionTag);
}
// If we only want to delete a single slot, end should be set to null which
// will be written into the second field. When processing the store buffer
// the more efficient Remove method will be called in this case.
void DeleteEntry(Address start, Address end = nullptr);
// Used by the concurrent processing thread to transfer entries from the
// store buffer to the remembered set.
void ConcurrentlyProcessStoreBuffer();
bool Empty() {
for (int i = 0; i < kStoreBuffers; i++) {
if (lazy_top_[i]) {
return false;
}
}
return top_ == start_[current_];
}
private: private:
// There are two store buffers. If one store buffer fills up, the main thread
// publishes the top pointer of the store buffer that needs processing in its
// global lazy_top_ field. After that it starts the concurrent processing
// thread. The concurrent processing thread uses the pointer in lazy_top_.
// It will grab the given mutex and transfer its entries to the remembered
// set. If the concurrent thread does not make progress, the main thread will
// perform the work.
// Important: there is an ordering constraint. The store buffer with the
// older entries has to be processed first.
class Task : public CancelableTask {
public:
Task(Isolate* isolate, StoreBuffer* store_buffer)
: CancelableTask(isolate), store_buffer_(store_buffer) {}
virtual ~Task() {}
private:
void RunInternal() override {
store_buffer_->ConcurrentlyProcessStoreBuffer();
}
StoreBuffer* store_buffer_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
void FlipStoreBuffers();
Heap* heap_; Heap* heap_;
Address* top_; Address* top_;
// The start and the limit of the buffer that contains store slots // The start and the limit of the buffer that contains store slots
// added from the generated code. We have two chunks of store buffers. // added from the generated code.
// Whenever one fills up, we notify a concurrent processing thread and Address* start_;
// use the other empty one in the meantime. Address* limit_;
Address* start_[kStoreBuffers];
Address* limit_[kStoreBuffers];
// At most one lazy_top_ pointer is set at any time.
Address* lazy_top_[kStoreBuffers];
base::Mutex mutex_;
// We only want to have at most one concurrent processing task running.
bool task_running_;
// Points to the current buffer in use.
int current_;
base::VirtualMemory* virtual_memory_; base::VirtualMemory* virtual_memory_;
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment