Commit 26bc5906 authored by hpayer, committed by Commit bot

[heap] Reland: Take page lock when scavenging old to new references in Scavenger.

BUG=v8:5807

Review-Url: https://codereview.chromium.org/2826593004
Cr-Commit-Position: refs/heads/master@{#44697}
parent 2e4b86b0
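Why the page lock must be recursive: the scavenger now holds a page's mutex while it visits that page's old-to-new slots (the Heap::Scavenge and RememberedSet hunks below), and code reached from scavenging, such as the ArrayBufferTracker entry points, locks the same page mutex again on the same thread. With a plain mutex that nested acquisition would self-deadlock, hence the switch to base::RecursiveMutex throughout. A minimal standalone sketch of the hazard, using std::recursive_mutex in place of v8::base::RecursiveMutex; all names are illustrative:

#include <iostream>
#include <mutex>

struct Page {
  std::recursive_mutex mutex;  // stand-in for MemoryChunk::mutex_
};

void UnregisterBuffer(Page* page) {
  // Nested acquisition on the same thread: fine for a recursive mutex,
  // self-deadlock for a plain std::mutex.
  std::lock_guard<std::recursive_mutex> guard(page->mutex);
  std::cout << "buffer unregistered under nested page lock\n";
}

void ScavengePage(Page* page) {
  std::lock_guard<std::recursive_mutex> guard(page->mutex);
  // Scavenging may reach code (e.g. array-buffer tracking) that locks
  // the same page again:
  UnregisterBuffer(page);
}

int main() {
  Page page;
  ScavengePage(&page);
  return 0;
}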
@@ -17,7 +17,7 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
   size_t length = NumberToSize(buffer->byte_length());
   Page* page = Page::FromAddress(buffer->address());
   {
-    base::LockGuard<base::Mutex> guard(page->mutex());
+    base::LockGuard<base::RecursiveMutex> guard(page->mutex());
     LocalArrayBufferTracker* tracker = page->local_tracker();
     if (tracker == nullptr) {
       page->AllocateLocalTracker();
@@ -39,7 +39,7 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
   Page* page = Page::FromAddress(buffer->address());
   size_t length = 0;
   {
-    base::LockGuard<base::Mutex> guard(page->mutex());
+    base::LockGuard<base::RecursiveMutex> guard(page->mutex());
     LocalArrayBufferTracker* tracker = page->local_tracker();
     DCHECK_NOT_NULL(tracker);
     length = tracker->Remove(buffer);
...
@@ -130,7 +130,7 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
 bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
   Page* page = Page::FromAddress(buffer->address());
   {
-    base::LockGuard<base::Mutex> guard(page->mutex());
+    base::LockGuard<base::RecursiveMutex> guard(page->mutex());
     LocalArrayBufferTracker* tracker = page->local_tracker();
     if (tracker == nullptr) return false;
     return tracker->IsTracked(buffer);
...
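Taken together, the three hunks above put every ArrayBufferTracker entry point behind the page's recursive lock before it touches the lazily allocated local tracker. A standalone sketch of that skeleton, with std types and illustrative names standing in for V8's:

#include <cstddef>
#include <iostream>
#include <memory>
#include <mutex>
#include <set>

struct Buffer { std::size_t length; };

struct PageState {
  std::recursive_mutex mutex;                  // stand-in for page->mutex()
  std::unique_ptr<std::set<Buffer*>> tracker;  // lazily allocated
};

void RegisterNew(PageState* page, Buffer* buffer) {
  std::lock_guard<std::recursive_mutex> guard(page->mutex);
  if (!page->tracker) page->tracker.reset(new std::set<Buffer*>());
  page->tracker->insert(buffer);
}

bool IsTracked(PageState* page, Buffer* buffer) {
  std::lock_guard<std::recursive_mutex> guard(page->mutex);
  if (!page->tracker) return false;
  return page->tracker->count(buffer) != 0;
}

int main() {
  PageState page;
  Buffer buf{16};
  RegisterNew(&page, &buf);
  std::cout << std::boolalpha << IsTracked(&page, &buf) << "\n";  // true
  return 0;
}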
@@ -1729,12 +1729,14 @@ void Heap::Scavenge() {
   {
     // Copy objects reachable from the old generation.
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
-    RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
-      return Scavenger::CheckAndScavengeObject(this, addr);
-    });
+    RememberedSet<OLD_TO_NEW>::Iterate(
+        this, SYNCHRONIZED, [this](Address addr) {
+          return Scavenger::CheckAndScavengeObject(this, addr);
+        });
     RememberedSet<OLD_TO_NEW>::IterateTyped(
-        this, [this](SlotType type, Address host_addr, Address addr) {
+        this, SYNCHRONIZED,
+        [this](SlotType type, Address host_addr, Address addr) {
           return UpdateTypedSlotHelper::UpdateTypedSlot(
               isolate(), type, addr, [this](Object** addr) {
                 // We expect that objects referenced by code are long living.
...
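The lambdas above return a SlotCallbackResult that tells the remembered set whether to keep or drop each visited slot. A standalone sketch of that contract (illustrative names; V8's slot sets are more elaborate per-page structures, not vectors):

#include <cstdint>
#include <iostream>
#include <vector>

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

using Address = uintptr_t;

// Visits every recorded slot and erases the ones the callback rejects.
template <typename Callback>
void IterateSlots(std::vector<Address>* slots, Callback callback) {
  auto it = slots->begin();
  while (it != slots->end()) {
    it = (callback(*it) == REMOVE_SLOT) ? slots->erase(it) : it + 1;
  }
}

int main() {
  std::vector<Address> slots = {0x10, 0x20, 0x30};
  // Stand-in predicate: keep only slots at even multiples of 0x20.
  IterateSlots(&slots, [](Address addr) {
    return (addr % 0x20 == 0) ? KEEP_SLOT : REMOVE_SLOT;
  });
  std::cout << slots.size() << " slot(s) kept\n";  // prints "1 slot(s) kept"
  return 0;
}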
@@ -2384,11 +2384,12 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
   {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS);
-    RememberedSet<OLD_TO_NEW>::Iterate(heap(), [this](Address addr) {
-      return CheckAndMarkObject(heap(), addr);
-    });
+    RememberedSet<OLD_TO_NEW>::Iterate(
+        heap(), NON_SYNCHRONIZED,
+        [this](Address addr) { return CheckAndMarkObject(heap(), addr); });
     RememberedSet<OLD_TO_NEW>::IterateTyped(
-        heap(), [this](SlotType type, Address host_addr, Address addr) {
+        heap(), NON_SYNCHRONIZED,
+        [this](SlotType type, Address host_addr, Address addr) {
           return UpdateTypedSlotHelper::UpdateTypedSlot(
               isolate(), type, addr, [this](Object** addr) {
                 return CheckAndMarkObject(heap(),
@@ -3986,7 +3987,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
                                                      AllocationSpace identity) {
   int max_freed = 0;
   {
-    base::LockGuard<base::Mutex> guard(page->mutex());
+    base::LockGuard<base::RecursiveMutex> guard(page->mutex());
     // If this page was already swept in the meantime, we can return here.
     if (page->SweepingDone()) return 0;
     DCHECK_EQ(Page::kSweepingPending,
...
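With the parallel sweeper also taking the page's recursive lock, sweeping a page and scavenging its old-to-new slots can no longer interleave on that page; whichever thread locks first runs to completion. A standalone sketch of that per-page serialization (std types, illustrative names):

#include <iostream>
#include <mutex>
#include <thread>

std::recursive_mutex page_mutex;  // stand-in for page->mutex()

void Sweep() {
  std::lock_guard<std::recursive_mutex> guard(page_mutex);
  std::cout << "sweeping page\n";
}

void Scavenge() {
  std::lock_guard<std::recursive_mutex> guard(page_mutex);
  std::cout << "scavenging old-to-new slots on page\n";
}

int main() {
  // The two phases contend on the same page lock and serialize,
  // in whichever order the threads acquire it.
  std::thread sweeper(Sweep);
  std::thread scavenger(Scavenge);
  sweeper.join();
  scavenger.join();
  return 0;
}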
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {
 
+enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
+
 // TODO(ulan): Investigate performance of de-templatizing this class.
 template <RememberedSetType type>
 class RememberedSet : public AllStatic {
@@ -98,9 +100,13 @@ class RememberedSet : public AllStatic {
   // Iterates and filters the remembered set with the given callback.
   // The callback should take (Address slot) and return SlotCallbackResult.
   template <typename Callback>
-  static void Iterate(Heap* heap, Callback callback) {
-    IterateMemoryChunks(
-        heap, [callback](MemoryChunk* chunk) { Iterate(chunk, callback); });
+  static void Iterate(Heap* heap, RememberedSetIterationMode mode,
+                      Callback callback) {
+    IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
+      if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
+      Iterate(chunk, callback);
+      if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
+    });
   }
 
   // Iterates over all memory chunks that contains non-empty slot sets.
@@ -177,9 +183,12 @@ class RememberedSet : public AllStatic {
   // The callback should take (SlotType slot_type, SlotAddress slot) and return
   // SlotCallbackResult.
   template <typename Callback>
-  static void IterateTyped(Heap* heap, Callback callback) {
-    IterateMemoryChunks(heap, [callback](MemoryChunk* chunk) {
+  static void IterateTyped(Heap* heap, RememberedSetIterationMode mode,
+                           Callback callback) {
+    IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
+      if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
       IterateTyped(chunk, callback);
+      if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
     });
   }
...
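A note on the pattern above: the mode flag makes per-chunk locking the callee's job, so callers like Heap::Scavenge cannot forget it, while the minor mark-compactor above passes NON_SYNCHRONIZED and skips the cost. The explicit Lock()/Unlock() pair could equally be written as a deferred RAII guard; a standalone sketch of that alternative formulation (std types, illustrative names, not the V8 API):

#include <iostream>
#include <mutex>

enum IterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };

struct Chunk {
  std::recursive_mutex mutex;  // stand-in for chunk->mutex()
};

template <typename Callback>
void IterateChunk(Chunk* chunk, IterationMode mode, Callback callback) {
  // Construct the guard unlocked, lock only when the caller asked for
  // synchronization; the destructor unlocks even if the callback throws.
  std::unique_lock<std::recursive_mutex> guard(chunk->mutex, std::defer_lock);
  if (mode == SYNCHRONIZED) guard.lock();
  callback(chunk);
}

int main() {
  Chunk chunk;
  IterateChunk(&chunk, SYNCHRONIZED,
               [](Chunk*) { std::cout << "visited under lock\n"; });
  IterateChunk(&chunk, NON_SYNCHRONIZED,
               [](Chunk*) { std::cout << "visited without lock\n"; });
  return 0;
}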
@@ -535,7 +535,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
-  chunk->mutex_ = new base::Mutex();
+  chunk->mutex_ = new base::RecursiveMutex();
   chunk->available_in_free_list_ = 0;
   chunk->wasted_memory_ = 0;
   chunk->young_generation_bitmap_ = nullptr;
...
@@ -344,7 +344,7 @@ class MemoryChunk {
       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
       + kPointerSize  // SkipList* skip_list_
       + kPointerSize  // AtomicValue high_water_mark_
-      + kPointerSize  // base::Mutex* mutex_
+      + kPointerSize  // base::RecursiveMutex* mutex_
       + kPointerSize  // base::AtomicWord concurrent_sweeping_
       + 2 * kSizetSize  // AtomicNumber free-list statistics
       + kPointerSize  // AtomicValue next_chunk_
@@ -404,7 +404,7 @@ class MemoryChunk {
     return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
   }
 
-  base::Mutex* mutex() { return mutex_; }
+  base::RecursiveMutex* mutex() { return mutex_; }
 
   bool Contains(Address addr) {
     return addr >= area_start() && addr < area_end();
@@ -613,7 +613,7 @@ class MemoryChunk {
   // count highest number of bytes ever allocated on the page.
   base::AtomicValue<intptr_t> high_water_mark_;
 
-  base::Mutex* mutex_;
+  base::RecursiveMutex* mutex_;
 
   base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
...
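The comment block in the header-size computation above is hand-maintained bookkeeping: each field contributes its size to the expected MemoryChunk header size, and the mutex field stays one kPointerSize slot because only a pointer is stored inline, whatever the mutex type. A standalone sketch of guarding such sums with static_assert (illustrative names; the pointer-size equalities hold on common ABIs):

#include <mutex>

struct ChunkHeaderSketch {
  std::recursive_mutex* mutex_;  // pointer-sized, like base::RecursiveMutex*
  void* concurrent_sweeping_;    // stand-in for the next header field
};

static_assert(sizeof(std::recursive_mutex*) == sizeof(void*),
              "mutex pointer occupies one pointer-sized header slot");
static_assert(sizeof(ChunkHeaderSketch) == 2 * sizeof(void*),
              "hand-computed header size must match the real layout");

int main() { return 0; }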