Commit d29f54bb authored by Dominik Inführ, committed by Commit Bot

[heap] Add ConcurrentAllocator

Add ConcurrentAllocator, which can be used for concurrent allocation in the old space from a background thread. ConcurrentAllocator does not yet request a GC when an allocation fails; that will be implemented in later CLs.

Bug: v8:10315
Change-Id: I81260ebbd8863c143e93aedb93c66d0e7c28bddb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2144066
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67150}
parent 52412058
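
For orientation, a minimal sketch of the intended call pattern from a background thread, mirroring the cctest added at the end of this CL. BackgroundAllocate is a hypothetical helper, and the allocation size and bail-out handling are illustrative; only LocalHeap, old_space_allocator(), and Allocate() come from this change.

// Sketch: allocating an old-space object from a background thread.
// Assumes FLAG_local_heaps and FLAG_concurrent_allocation are enabled.
void BackgroundAllocate(Heap* heap) {
  // Registers this thread with the heap's safepoint mechanism.
  LocalHeap local_heap(heap);
  ConcurrentAllocator* allocator = local_heap.old_space_allocator();
  AllocationResult result = allocator->Allocate(
      16 * kTaggedSize, AllocationAlignment::kWordAligned,
      AllocationOrigin::kRuntime);
  if (result.IsRetry()) {
    // Allocation failure does not trigger a GC yet (see commit message),
    // so the only option is to bail out.
    return;
  }
  HeapObject object = result.ToObjectChecked();
  USE(object);  // The memory is uninitialized; real callers must set a map.
}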
@@ -2362,6 +2362,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/code-stats.h",
"src/heap/combined-heap.cc",
"src/heap/combined-heap.h",
"src/heap/concurrent-allocator-inl.h",
"src/heap/concurrent-allocator.h",
"src/heap/concurrent-marking.cc",
"src/heap/concurrent-marking.h",
"src/heap/embedder-tracing.cc",
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
#define V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
#include "src/common/globals.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/heap.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
AllocationResult ConcurrentAllocator::Allocate(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin) {
// TODO(dinfuehr): Add support for allocation observers
CHECK(FLAG_concurrent_allocation);
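  // Allocations larger than kMaxLabObjectSize bypass the LAB and are served
  // directly from the space, under its allocation mutex.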
if (object_size > kMaxLabObjectSize) {
auto result = space_->SlowGetLinearAllocationAreaBackground(
object_size, object_size, alignment, origin);
if (result) {
HeapObject object = HeapObject::FromAddress(result->first);
return AllocationResult(object);
} else {
return AllocationResult::Retry(OLD_SPACE);
}
}
return AllocateInLab(object_size, alignment, origin);
}
AllocationResult ConcurrentAllocator::AllocateInLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
AllocationResult allocation;
if (!lab_.IsValid() && !EnsureLab(origin)) {
return AllocationResult::Retry(space_->identity());
}
allocation = lab_.AllocateRawAligned(object_size, alignment);
if (allocation.IsRetry()) {
if (!EnsureLab(origin)) {
return AllocationResult::Retry(space_->identity());
} else {
allocation = lab_.AllocateRawAligned(object_size, alignment);
CHECK(!allocation.IsRetry());
}
}
return allocation;
}
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
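  // Keep the current LAB so that any remaining space can be merged into the
  // new one, or restored if acquiring a new LAB fails.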
LocalAllocationBuffer saved_lab = lab_;
auto result = space_->SlowGetLinearAllocationAreaBackground(
kLabSize, kMaxLabSize, kWordAligned, origin);
if (!result) return false;
HeapObject object = HeapObject::FromAddress(result->first);
lab_ = LocalAllocationBuffer::FromResult(
local_heap_->heap(), AllocationResult(object), result->second);
if (lab_.IsValid()) {
lab_.TryMerge(&saved_lab);
return true;
}
lab_ = saved_lab;
return false;
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CONCURRENT_ALLOCATOR_H_
#define V8_HEAP_CONCURRENT_ALLOCATOR_H_
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
class LocalHeap;
// Concurrent allocator for allocation from background threads/tasks.
// Allocations are served from a TLAB if possible.
class ConcurrentAllocator {
public:
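  // Size limits for the thread-local allocation buffer (LAB): refills request
  // between kLabSize and kMaxLabSize bytes; objects larger than
  // kMaxLabObjectSize are allocated outside the LAB.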
static const int kLabSize = 4 * KB;
static const int kMaxLabSize = 32 * KB;
static const int kMaxLabObjectSize = 2 * KB;
explicit ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space)
: local_heap_(local_heap),
space_(space),
lab_(LocalAllocationBuffer::InvalidBuffer()) {}
inline AllocationResult Allocate(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin);
private:
inline bool EnsureLab(AllocationOrigin origin);
inline AllocationResult AllocateInLab(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin);
LocalHeap* const local_heap_;
PagedSpace* const space_;
LocalAllocationBuffer lab_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_CONCURRENT_ALLOCATOR_H_
@@ -22,7 +22,8 @@ LocalHeap::LocalHeap(Heap* heap,
prev_(nullptr),
next_(nullptr),
handles_(new LocalHandles),
persistent_handles_(std::move(persistent_handles)),
old_space_allocator_(this, heap->old_space()) {
heap_->safepoint()->AddLocalHeap(this);
if (persistent_handles_) {
persistent_handles_->Attach(this);
......
@@ -11,6 +11,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/safepoint.h"
namespace v8 {
@@ -44,6 +45,10 @@ class LocalHeap {
bool IsParked();
Heap* heap() { return heap_; }
ConcurrentAllocator* old_space_allocator() { return &old_space_allocator_; }
private:
enum class ThreadState {
// Threads in this state need to be stopped in a safepoint.
@@ -78,6 +83,8 @@ class LocalHeap {
std::unique_ptr<LocalHandles> handles_;
std::unique_ptr<PersistentHandles> persistent_handles_;
ConcurrentAllocator old_space_allocator_;
friend class Heap;
friend class GlobalSafepoint;
friend class ParkedScope;
......
@@ -11,6 +11,7 @@
#include "src/base/bits.h"
#include "src/base/lsan.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
@@ -20,6 +21,7 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact.h"
@@ -30,6 +32,7 @@
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "src/sanitizer/msan.h"
@@ -2130,6 +2133,97 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
return true;
}
base::Optional<std::pair<Address, size_t>>
PagedSpace::SlowGetLinearAllocationAreaBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!is_local_space() && identity() == OLD_SPACE);
DCHECK_EQ(origin, AllocationOrigin::kRuntime);
base::MutexGuard lock(&allocation_mutex_);
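  // Allocation strategy: first try the free list; if that fails, refill the
  // free list from concurrent sweeping and retry; then contribute to sweeping
  // ourselves and retry once more; finally try to expand the space with a
  // new page.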
auto result = TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
// First try to refill the free list: concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
// Retry the free list allocation.
auto result = TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
Sweeper::FreeSpaceMayContainInvalidatedSlots
invalidated_slots_in_free_space =
Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
const int kMaxPagesToSweep = 1;
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
invalidated_slots_in_free_space);
RefillFreeList();
if (static_cast<size_t>(max_freed) >= min_size_in_bytes)
return TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
(min_size_in_bytes <= free_list_->Available()));
return TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
// TODO(dinfuehr): Complete sweeping here and try allocation again.
return {};
}
base::Optional<std::pair<Address, size_t>>
PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
DCHECK_EQ(identity(), OLD_SPACE);
size_t new_node_size = 0;
FreeSpace new_node =
free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
if (new_node.is_null()) return {};
DCHECK_GE(new_node_size, min_size_in_bytes);
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
// TODO(dinfuehr): Start incremental marking if allocation limit is reached
size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = new_node.address() + used_size_in_bytes;
DCHECK_LE(limit, end);
DCHECK_LE(min_size_in_bytes, limit - start);
if (limit != end) {
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
return std::make_pair(start, used_size_in_bytes);
}
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
......
@@ -19,6 +19,7 @@
#include "src/base/iterator.h"
#include "src/base/list.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -2188,9 +2189,10 @@ class LocalAllocationBuffer {
~LocalAllocationBuffer() { Close(); }
// Convert to C++11 move-semantics once allowed by the style guide.
V8_EXPORT_PRIVATE LocalAllocationBuffer(const LocalAllocationBuffer& other)
V8_NOEXCEPT;
V8_EXPORT_PRIVATE LocalAllocationBuffer& operator=(
const LocalAllocationBuffer& other) V8_NOEXCEPT;
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
@@ -2355,6 +2357,14 @@ class V8_EXPORT_PRIVATE PagedSpace
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space from a background
// thread. Returns the start address and actual size of the allocated area,
// or nothing if the allocation failed.
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
SlowGetLinearAllocationAreaBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
if (size_in_bytes == 0) return 0;
heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
@@ -2576,6 +2586,12 @@ class V8_EXPORT_PRIVATE PagedSpace
V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
Executability executable_;
LocalSpaceKind local_space_kind_;
......
@@ -125,6 +125,7 @@ v8_source_set("cctest_sources") {
"heap/test-alloc.cc",
"heap/test-array-buffer-tracker.cc",
"heap/test-compaction.cc",
"heap/test-concurrent-allocation.cc",
"heap/test-concurrent-marking.cc",
"heap/test-embedder-tracing.cc",
"heap/test-external-string-tracker.cc",
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <memory>
#include "src/api/api.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/handles/local-handles-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
#include "src/heap/safepoint.h"
#include "src/objects/heap-number.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
const int kNumObjects = 100;
const int kObjectSize = 10 * kTaggedSize;
class ConcurrentAllocationThread final : public v8::base::Thread {
public:
explicit ConcurrentAllocationThread(Heap* heap)
: v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
heap_(heap) {}
void Run() override {
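    // Each thread allocates through its own LocalHeap and ConcurrentAllocator,
    // stressing the shared old-space allocation path.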
LocalHeap local_heap(heap_);
ConcurrentAllocator* allocator = local_heap.old_space_allocator();
for (int i = 0; i < kNumObjects; i++) {
AllocationResult result =
allocator->Allocate(kObjectSize, AllocationAlignment::kWordAligned,
AllocationOrigin::kRuntime);
if (result.IsRetry()) {
break;
}
}
}
Heap* heap_;
};
TEST(ConcurrentAllocationInOldSpace) {
CcTest::InitializeVM();
FLAG_local_heaps = true;
FLAG_concurrent_allocation = true;
Isolate* isolate = CcTest::i_isolate();
std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<ConcurrentAllocationThread>(isolate->heap());
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
}
} // namespace internal
} // namespace v8