Commit 889b27b8 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Add a simple implementation of Heap::IsPendingAllocation

The new predicate allows a background thread to check whether a given
object was recently allocated and may therefore be unsafe to read from
that background thread.

The current implementation has relatively high overhead as it loads
two pointers per heap space. It will be optimized in the future.
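
For illustration only (not part of this commit): a background task would
consult the predicate before reading an object's fields and defer the
object otherwise. VisitObjectConcurrently, RevisitLater, and ReadFields
are hypothetical names, not V8 API:

  // Minimal usage sketch, assuming a background task that holds a Heap*.
  void VisitObjectConcurrently(Heap* heap, HeapObject object) {
    if (heap->IsPendingAllocation(object)) {
      // The object's fields may still be uninitialized on this thread.
      RevisitLater(object);  // hypothetical deferral mechanism
      return;
    }
    ReadFields(object);  // not pending: safe to read from this thread
  }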

Bug: v8:11148

Change-Id: I2a9dfb2c70de4b8214b8f8a35681a8bab1a63ca8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2532296
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71130}
parent 64206b2d
@@ -563,6 +563,25 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
   (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
 }
 
+bool Heap::IsPendingAllocation(HeapObject object) {
+  // TODO(ulan): Optimize this function to perform 3 loads at most.
+  Address addr = object.address();
+  Address top = new_space_->original_top_acquire();
+  Address limit = new_space_->original_limit_relaxed();
+  if (top <= addr && addr < limit) return true;
+  PagedSpaceIterator spaces(this);
+  for (PagedSpace* space = spaces.Next(); space != nullptr;
+       space = spaces.Next()) {
+    top = space->original_top_acquire();
+    limit = space->original_limit_relaxed();
+    if (top <= addr && addr < limit) return true;
+  }
+  if (addr == lo_space_->pending_object()) return true;
+  if (addr == new_lo_space_->pending_object()) return true;
+  if (addr == code_lo_space_->pending_object()) return true;
+  return false;
+}
+
 void Heap::ExternalStringTable::AddString(String string) {
   DCHECK(string.IsExternalString());
   DCHECK(!Contains(string));
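
Reading the new predicate: for the new space and each paged space, the
half-open window [original_top, original_limit) of the current linear
allocation area is treated as pending; everything below original_top was
already published by PublishPendingAllocations. The three large-object
spaces are covered by an exact address comparison against one
pending_object() slot each, which records the most recently allocated
large object.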
@@ -933,6 +933,18 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
   }
 }
 
+void Heap::PublishPendingAllocations() {
+  new_space_->MoveOriginalTopForward();
+  PagedSpaceIterator spaces(this);
+  for (PagedSpace* space = spaces.Next(); space != nullptr;
+       space = spaces.Next()) {
+    space->MoveOriginalTopForward();
+  }
+  lo_space_->ResetPendingObject();
+  new_lo_space_->ResetPendingObject();
+  code_lo_space_->ResetPendingObject();
+}
+
 namespace {
 
 inline bool MakePretenureDecision(
     AllocationSite site, AllocationSite::PretenureDecision current_decision,
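
A sketch of the intended main-thread protocol around this function
(AllocateAndInitialize is a hypothetical helper, not V8 API): objects are
fully initialized first, then published, and only after publishing does
the predicate return false for them.

  // Main thread only.
  void AllocateAndPublish(Heap* heap) {
    HeapObject obj = AllocateAndInitialize(heap);  // hypothetical helper
    heap->PublishPendingAllocations();
    // From here on, background threads observe
    // heap->IsPendingAllocation(obj) == false and may read obj's fields.
  }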
@@ -1447,6 +1447,16 @@ class Heap {
   void RemoveAllocationObserversFromAllSpaces(
       AllocationObserver* observer, AllocationObserver* new_space_observer);
 
+  // Check if the given object was recently allocated and its fields may appear
+  // as uninitialized to background threads.
+  // This predicate may be invoked from a background thread.
+  inline bool IsPendingAllocation(HeapObject object);
+
+  // Notifies that all previously allocated objects are properly initialized
+  // and ensures that IsPendingAllocation returns false for them. This function
+  // may be invoked only on the main thread.
+  V8_EXPORT_PRIVATE void PublishPendingAllocations();
+
   // ===========================================================================
   // Heap object allocation tracking. ==========================================
   // ===========================================================================
@@ -97,7 +97,8 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id, new NoFreeList()),
       size_(0),
       page_count_(0),
-      objects_size_(0) {}
+      objects_size_(0),
+      pending_object_(0) {}
 
 void LargeObjectSpace::TearDown() {
   while (!memory_chunk_list_.Empty()) {
@@ -140,6 +141,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
   if (page == nullptr) return AllocationResult::Retry(identity());
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   HeapObject object = page->GetObject();
+  pending_object_.store(object.address(), std::memory_order_release);
   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
       heap()->GCFlagsForIncrementalMarking(),
       kGCCallbackScheduleIdleGarbageCollection);
@@ -441,7 +443,6 @@ OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
 
 NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
     : LargeObjectSpace(heap, NEW_LO_SPACE),
-      pending_object_(0),
       capacity_(capacity) {}
 
 AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
@@ -465,7 +466,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   HeapObject result = page->GetObject();
   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   page->SetFlag(MemoryChunk::TO_PAGE);
-  pending_object_.store(result.address(), std::memory_order_relaxed);
+  pending_object_.store(result.address(), std::memory_order_release);
 #ifdef ENABLE_MINOR_MC
   if (FLAG_minor_mc) {
     page->AllocateYoungGenerationBitmap();
@@ -112,6 +112,16 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   void Print() override;
 #endif
 
+  // The last allocated object that is not guaranteed to be initialized when the
+  // concurrent marker visits it.
+  Address pending_object() {
+    return pending_object_.load(std::memory_order_acquire);
+  }
+
+  void ResetPendingObject() {
+    pending_object_.store(0, std::memory_order_release);
+  }
+
  protected:
   LargeObjectSpace(Heap* heap, AllocationSpace id);
@@ -123,6 +133,7 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   int page_count_;  // number of chunks
   std::atomic<size_t> objects_size_;  // size of objects
   base::Mutex allocation_mutex_;
+  std::atomic<Address> pending_object_;
 
  private:
   friend class LargeObjectSpaceObjectIterator;
@@ -165,16 +176,7 @@ class NewLargeObjectSpace : public LargeObjectSpace {
   void SetCapacity(size_t capacity);
 
-  // The last allocated object that is not guaranteed to be initialized when the
-  // concurrent marker visits it.
-  Address pending_object() {
-    return pending_object_.load(std::memory_order_relaxed);
-  }
-
-  void ResetPendingObject() { pending_object_.store(0); }
-
  private:
-  std::atomic<Address> pending_object_;
   size_t capacity_;
 };
@@ -275,6 +275,9 @@ void PagedSpace::SetTopAndLimit(Address top, Address limit) {
          Page::FromAddress(top) == Page::FromAddress(limit - 1));
   BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   allocation_info_.Reset(top, limit);
+  // The order of the following two stores is important.
+  original_limit_.store(limit, std::memory_order_relaxed);
+  original_top_.store(top, std::memory_order_release);
 }
 
 size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
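
Why the order of the two stores matters: IsPendingAllocation acquire-loads
original_top_ and then relaxed-loads original_limit_. Storing the limit
first and release-storing the top second guarantees that a reader which
observes the new top cannot pair it with a stale limit. A standalone model
of the pairing, using plain std::atomic in place of the V8 types:

  #include <atomic>
  #include <cstdint>

  std::atomic<uintptr_t> original_top{0};
  std::atomic<uintptr_t> original_limit{0};

  // Writer (main thread): mirrors PagedSpace::SetTopAndLimit. The relaxed
  // limit store must come before the release store to top.
  void SetTopAndLimitModel(uintptr_t top, uintptr_t limit) {
    original_limit.store(limit, std::memory_order_relaxed);
    original_top.store(top, std::memory_order_release);
  }

  // Reader (background thread): if the acquire load observes the new top,
  // the limit store that preceded it is guaranteed to be visible as well.
  bool InPendingWindowModel(uintptr_t addr) {
    uintptr_t top = original_top.load(std::memory_order_acquire);
    uintptr_t limit = original_limit.load(std::memory_order_relaxed);
    return top <= addr && addr < limit;
  }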
@@ -300,6 +300,20 @@ class V8_EXPORT_PRIVATE PagedSpace
   void SetLinearAllocationArea(Address top, Address limit);
 
+  Address original_top_acquire() {
+    return original_top_.load(std::memory_order_acquire);
+  }
+
+  Address original_limit_relaxed() {
+    return original_limit_.load(std::memory_order_relaxed);
+  }
+
+  void MoveOriginalTopForward() {
+    DCHECK_GE(top(), original_top_);
+    DCHECK_LE(top(), original_limit_);
+    original_top_.store(top(), std::memory_order_release);
+  }
+
  private:
   class ConcurrentAllocationMutex {
    public:
@@ -401,6 +415,11 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
 
+  // The top and the limit at the time of setting the linear allocation area.
+  // These values can be accessed by background tasks.
+  std::atomic<Address> original_top_;
+  std::atomic<Address> original_limit_;
+
   friend class IncrementalMarking;
   friend class MarkCompactCollector;
@@ -7248,6 +7248,56 @@ UNINITIALIZED_HEAP_TEST(CodeLargeObjectSpace64k) {
   isolate->Dispose();
 }
 
+TEST(IsPendingAllocationNewSpace) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  HandleScope handle_scope(isolate);
+  Handle<FixedArray> object = factory->NewFixedArray(5, AllocationType::kYoung);
+  CHECK(heap->IsPendingAllocation(*object));
+  heap->PublishPendingAllocations();
+  CHECK(!heap->IsPendingAllocation(*object));
+}
+
+TEST(IsPendingAllocationNewLOSpace) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  HandleScope handle_scope(isolate);
+  Handle<FixedArray> object = factory->NewFixedArray(
+      FixedArray::kMaxRegularLength + 1, AllocationType::kYoung);
+  CHECK(heap->IsPendingAllocation(*object));
+  heap->PublishPendingAllocations();
+  CHECK(!heap->IsPendingAllocation(*object));
+}
+
+TEST(IsPendingAllocationOldSpace) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  HandleScope handle_scope(isolate);
+  Handle<FixedArray> object = factory->NewFixedArray(5, AllocationType::kOld);
+  CHECK(heap->IsPendingAllocation(*object));
+  heap->PublishPendingAllocations();
+  CHECK(!heap->IsPendingAllocation(*object));
+}
+
+TEST(IsPendingAllocationLOSpace) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  HandleScope handle_scope(isolate);
+  Handle<FixedArray> object = factory->NewFixedArray(
+      FixedArray::kMaxRegularLength + 1, AllocationType::kOld);
+  CHECK(heap->IsPendingAllocation(*object));
+  heap->PublishPendingAllocations();
+  CHECK(!heap->IsPendingAllocation(*object));
+}
+
 TEST(Regress10900) {
   FLAG_always_compact = true;
   CcTest::InitializeVM();
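
Note on the test inputs: a length of 5 keeps the FixedArray in a regular
space, while FixedArray::kMaxRegularLength + 1 exceeds the regular-object
size limit, so those allocations land in the new and old large-object
spaces and exercise the pending_object() path instead of the
linear-allocation-area check.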