Commit a731b86e authored by Dominik Inführ, committed by Commit Bot

[heap] Enable concurrent allocation of old space large objects

Allow allocation of large old-space objects through
LocalHeap::AllocateRaw. OldLargeObjectSpace::AllocateRawBackground
performs the actual allocation on a background thread.

Bug: v8:10315
Change-Id: I9212f0c6770855dbe33490516aae7056987e192d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2332804
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69195}
parent 75d4b18a
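For illustration, a background allocation through the new path looks roughly as follows. This is a minimal sketch distilled from the test added in this commit; CreateFixedArray is the cctest helper that initializes the raw memory as a FixedArray, and AllocateLargeInBackground is a hypothetical wrapper, not part of the commit.

// Sketch: allocate a large old-space object from a background thread.
// Sizes above kMaxRegularHeapObjectSize now take the
// OldLargeObjectSpace::AllocateRawBackground path inside LocalHeap.
void AllocateLargeInBackground(Heap* heap) {
  LocalHeap local_heap(heap);  // Registers this thread with the heap.
  const int kSize = kMaxRegularHeapObjectSize * 2;
  Address address = local_heap.AllocateRawOrFail(
      kSize, AllocationType::kOld, AllocationOrigin::kRuntime,
      AllocationAlignment::kWordAligned);
  CreateFixedArray(heap, address, kSize);  // Initialize the new object.
  local_heap.Safepoint();  // Yield to a main-thread GC if one is pending.
}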
@@ -4,6 +4,8 @@
#include "src/heap/large-spaces.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
#include "src/heap/incremental-marking.h"
@@ -140,6 +142,30 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
  return object;
}

AllocationResult OldLargeObjectSpace::AllocateRawBackground(
    LocalHeap* local_heap, int object_size) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGenerationBackground(object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Retry(identity());
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject object = page->GetObject();
  heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
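  // While incremental marking is in its black-allocation phase, mark the
  // new object black immediately so the concurrent marker treats it as live.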
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }

  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
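  // Publish the fully initialized page before handing the object out to
  // concurrent threads.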
  page->InitializationMemoryFence();
  return object;
}

LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
                                               Executability executable) {
  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
@@ -147,7 +173,10 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
-  AddPage(page, object_size);
+  {
+    base::MutexGuard guard(&allocation_mutex_);
+    AddPage(page, object_size);
+  }
  HeapObject object = page->GetObject();
......
@@ -11,6 +11,7 @@
#include <unordered_map>
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
@@ -21,6 +22,7 @@ namespace v8 {
namespace internal {
class Isolate;
class LocalHeap;
class LargePage : public MemoryChunk {
public:
@@ -115,9 +117,10 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
  LargePage* AllocateLargePage(int object_size, Executability executable);

-  size_t size_;          // allocated bytes
+  std::atomic<size_t> size_;  // allocated bytes
  int page_count_;       // number of chunks
-  size_t objects_size_;  // size of objects
+  std::atomic<size_t> objects_size_;  // size of objects
+  base::Mutex allocation_mutex_;

 private:
  friend class LargeObjectSpaceObjectIterator;
@@ -132,6 +135,9 @@ class OldLargeObjectSpace : public LargeObjectSpace {
  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawBackground(LocalHeap* local_heap, int object_size);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();
......
@@ -37,9 +37,11 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
  bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
  CHECK_EQ(type, AllocationType::kOld);
-  CHECK(!large_object);
-  return old_space_allocator()->AllocateRaw(size_in_bytes, alignment, origin);
+  if (large_object)
+    return heap()->lo_space()->AllocateRawBackground(this, size_in_bytes);
+  else
+    return old_space_allocator()->AllocateRaw(size_in_bytes, alignment, origin);
}
Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
......
@@ -104,6 +104,67 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
  isolate->Dispose();
}

class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
 public:
  explicit LargeObjectConcurrentAllocationThread(Heap* heap,
                                                 std::atomic<int>* pending)
      : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
        heap_(heap),
        pending_(pending) {}

  void Run() override {
    LocalHeap local_heap(heap_);
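    // Twice the regular-object limit, so every allocation below is forced
    // onto the large-object path.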
    const size_t kLargeObjectSize = kMaxRegularHeapObjectSize * 2;

    for (int i = 0; i < kNumIterations; i++) {
      Address address = local_heap.AllocateRawOrFail(
          kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
          AllocationAlignment::kWordAligned);
      CreateFixedArray(heap_, address, kLargeObjectSize);
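      // Honor a pending safepoint request so the main thread can run a GC.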
      local_heap.Safepoint();
    }

    pending_->fetch_sub(1);
  }

  Heap* heap_;
  std::atomic<int>* pending_;
};

UNINITIALIZED_TEST(ConcurrentAllocationInLargeSpace) {
  FLAG_max_old_space_size = 32;
  FLAG_concurrent_allocation = true;
  FLAG_local_heaps = true;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  std::vector<std::unique_ptr<LargeObjectConcurrentAllocationThread>> threads;

  const int kThreads = 4;
  std::atomic<int> pending(kThreads);

  for (int i = 0; i < kThreads; i++) {
    auto thread = std::make_unique<LargeObjectConcurrentAllocationThread>(
        i_isolate->heap(), &pending);
    CHECK(thread->Start());
    threads.push_back(std::move(thread));
  }
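  // Keep pumping the platform's message loop so main-thread tasks can run
  // while the background threads allocate.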
  while (pending > 0) {
    v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(), isolate);
  }

  for (auto& thread : threads) {
    thread->Join();
  }

  isolate->Dispose();
}
const int kWhiteIterations = 1000;
class ConcurrentBlackAllocationThread final : public v8::base::Thread {
......