Commit ec1046f9 authored by mlippautz, committed by Commit bot

Revert of [heap] Divide available memory upon compaction tasks (patchset #9 id:340001 of https://codereview.chromium.org/1382003002/ )

Reason for revert:
Failing tests: https://chromegw.corp.google.com/i/client.v8/builders/V8%20Linux%20-%20arm64%20-%20sim%20-%20nosnap%20-%20debug%20-%202/builds/3804/steps/Check/logs/DontLeakGlobalObjects

Original issue's description:
> [heap] Divide available memory upon compaction tasks
>
> - Fairly (round-robin) divide the available memory among compaction tasks.
> - Enforce an upper limit of memory per task, since dividing is O(n) for n
>   free-space nodes.
> - Refill from the free lists managed by the sweeper once a compaction space
>   runs empty.
>
> Assumption behind dividing the memory: the free lists are sparse when
> compaction starts (i.e., only a few nodes are available), except for memory
> reducer GCs, which run in idle time anyway (so they are less of a problem).
>
> BUG=chromium:524425
> LOG=N
>
> Committed: https://crrev.com/30236c052ba9266fc55412a8fd63b17f683ff40b
> Cr-Commit-Position: refs/heads/master@{#31234}

TBR=ulan@chromium.org,hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1406533002

Cr-Commit-Position: refs/heads/master@{#31235}
parent 30236c05
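
Before the diff itself, here is a minimal, self-contained C++ sketch of the scheme described in the reverted CL above: free-list memory is handed out round-robin to a set of compaction tasks until each holds roughly a fixed limit of bytes or the donor list runs dry. Every name in it (SimpleFreeList, FreeNode, DivideAmongTasks) is an illustrative stand-in rather than a V8 API; the real logic, removed again by this revert, is PagedSpace::DivideUponCompactionSpaces and FreeList::TryRemoveMemory in the hunks below.

#include <cstdint>
#include <cstdio>
#include <list>
#include <vector>

// One chunk of free memory; only its size matters for this model.
struct FreeNode { int64_t size; };

// A toy free list: a bag of nodes plus a running total of available bytes.
struct SimpleFreeList {
  std::list<FreeNode> nodes;
  int64_t available = 0;

  void Add(int64_t size) {
    nodes.push_back({size});
    available += size;
  }

  // Best-effort removal: hand out the front node, capped at hint bytes; any
  // surplus carried by that node goes back into the list (cf. the leftover
  // handling in FreeList::TryRemoveMemory further down).
  bool TryRemove(int64_t hint, FreeNode* out) {
    if (nodes.empty()) return false;
    FreeNode node = nodes.front();
    nodes.pop_front();
    if (node.size > hint) {
      nodes.push_back({node.size - hint});  // give back the leftover
      node.size = hint;
    }
    available -= node.size;
    *out = node;
    return true;
  }
};

// Round-robin division with a per-task limit, mirroring the loop structure of
// PagedSpace::DivideUponCompactionSpaces in the diff below.
void DivideAmongTasks(SimpleFreeList* donor,
                      std::vector<SimpleFreeList>* tasks, int64_t limit) {
  bool memory_available = true;
  bool tasks_need_memory = true;
  while (memory_available && tasks_need_memory) {
    tasks_need_memory = false;
    for (SimpleFreeList& task : *tasks) {
      if (task.available >= limit) continue;  // this task already has enough
      tasks_need_memory = true;
      FreeNode node;
      if (!donor->TryRemove(limit - task.available, &node)) {
        memory_available = false;  // donor exhausted; stop dividing
        break;
      }
      task.Add(node.size);
    }
  }
}

int main() {
  SimpleFreeList donor;
  donor.Add(700 * 1024);
  donor.Add(300 * 1024);
  donor.Add(80 * 1024);
  std::vector<SimpleFreeList> tasks(4);
  // 500 KB per task, loosely modeled on kCompactionMemoryWanted below.
  DivideAmongTasks(&donor, &tasks, 500 * 1024);
  int index = 0;
  for (const SimpleFreeList& task : tasks) {
    std::printf("task %d: %lld bytes\n", index++,
                static_cast<long long>(task.available));
  }
  return 0;
}
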
@@ -566,9 +566,9 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
RefillFreeList(heap()->paged_space(OLD_SPACE));
RefillFreeList(heap()->paged_space(CODE_SPACE));
RefillFreeList(heap()->paged_space(MAP_SPACE));
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
@@ -588,6 +588,26 @@ bool MarkCompactCollector::IsSweepingCompleted() {
}
void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
FreeList* free_list;
if (space == heap()->old_space()) {
free_list = free_list_old_space_.get();
} else if (space == heap()->code_space()) {
free_list = free_list_code_space_.get();
} else if (space == heap()->map_space()) {
free_list = free_list_map_space_.get();
} else {
// Any PagedSpace might invoke RefillFreeList; only the old-generation spaces
// (old, code, and map space) have sweeper-managed free lists to refill from.
return;
}
intptr_t added = space->free_list()->Concatenate(free_list);
space->accounting_stats_.IncreaseCapacity(added);
}
void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(old_start) ==
@@ -3378,10 +3398,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
}
heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
num_tasks);
heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
num_tasks);
compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
heap()->old_space());
compaction_spaces_for_tasks[0]
->Get(CODE_SPACE)
->MoveOverFreeMemory(heap()->code_space());
compaction_in_progress_ = true;
// Kick off parallel tasks.
@@ -3393,7 +3414,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
// Contribute on the main thread. Counter and signal are in principle not needed.
concurrent_compaction_tasks_active_++;
EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
pending_compaction_tasks_semaphore_.Signal();
WaitUntilCompactionCompleted();
@@ -466,6 +466,8 @@ class MarkCompactCollector {
// return true if the sweeper threads are done processing the pages.
bool IsSweepingCompleted();
void RefillFreeList(PagedSpace* space);
// Checks if sweeping is in progress right now on any space.
bool sweeping_in_progress() { return sweeping_in_progress_; }
@@ -510,20 +512,6 @@ class MarkCompactCollector {
// address range.
void RemoveObjectSlots(Address start_slot, Address end_slot);
//
// Free lists filled by sweeper and consumed by corresponding spaces
// (including compaction spaces).
//
base::SmartPointer<FreeList>& free_list_old_space() {
return free_list_old_space_;
}
base::SmartPointer<FreeList>& free_list_code_space() {
return free_list_code_space_;
}
base::SmartPointer<FreeList>& free_list_map_space() {
return free_list_map_space_;
}
private:
class CompactionTask;
class SweeperTask;
@@ -982,101 +982,6 @@ void PagedSpace::TearDown() {
}
void PagedSpace::AddMemory(Address start, intptr_t size) {
accounting_stats_.ExpandSpace(static_cast<int>(size));
Free(start, static_cast<int>(size));
}
FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
if (free_space != nullptr) {
accounting_stats_.DecreaseCapacity(free_space->size());
}
return free_space;
}
void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
int num, intptr_t limit) {
DCHECK_GT(num, 0);
DCHECK(other != nullptr);
if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
EmptyAllocationInfo();
bool memory_available = true;
bool spaces_need_memory = true;
FreeSpace* node = nullptr;
CompactionSpace* current_space = nullptr;
// Iterate over spaces and memory as long as we have memory and there are
// spaces in need of some.
while (memory_available && spaces_need_memory) {
spaces_need_memory = false;
// Round-robin over all spaces.
for (int i = 0; i < num; i++) {
current_space = other[i]->Get(identity());
if (current_space->free_list()->available() < limit) {
// Space has not reached its limit. Try to get some memory.
spaces_need_memory = true;
node = TryRemoveMemory(limit - current_space->free_list()->available());
if (node != nullptr) {
CHECK(current_space->identity() == identity());
current_space->AddMemory(node->address(), node->size());
} else {
memory_available = false;
break;
}
}
}
}
}
void PagedSpace::RefillFreeList() {
MarkCompactCollector* collector = heap()->mark_compact_collector();
FreeList* free_list = nullptr;
if (this == heap()->old_space()) {
free_list = collector->free_list_old_space().get();
} else if (this == heap()->code_space()) {
free_list = collector->free_list_code_space().get();
} else if (this == heap()->map_space()) {
free_list = collector->free_list_map_space().get();
} else {
// Any PagedSpace might invoke RefillFreeList; everything except the
// old-generation spaces is filtered out here.
return;
}
DCHECK(free_list != nullptr);
intptr_t added = free_list_.Concatenate(free_list);
accounting_stats_.IncreaseCapacity(added);
}
void CompactionSpace::RefillFreeList() {
MarkCompactCollector* collector = heap()->mark_compact_collector();
FreeList* free_list = nullptr;
if (identity() == OLD_SPACE) {
free_list = collector->free_list_old_space().get();
} else if (identity() == CODE_SPACE) {
free_list = collector->free_list_code_space().get();
} else {
// Compaction spaces only represent old or code space.
UNREACHABLE();
}
DCHECK(free_list != nullptr);
intptr_t refilled = 0;
while (refilled < kCompactionMemoryWanted) {
FreeSpace* node =
free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
if (node == nullptr) return;
refilled += node->size();
AddMemory(node->address(), node->size());
}
}
void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to
@@ -1089,7 +994,8 @@ void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
intptr_t added = free_list_.Concatenate(other->free_list());
// Moved memory is not recorded as allocated memory, but rather increases and
// decreases capacity of the corresponding spaces.
// decreases capacity of the corresponding spaces. Used size and waste size
// are maintained by the receiving space upon allocating and freeing blocks.
other->accounting_stats_.DecreaseCapacity(added);
accounting_stats_.IncreaseCapacity(added);
}
@@ -1098,19 +1004,16 @@ void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Unmerged fields:
// area_size_
// allocation_info_
// end_of_unswept_pages_
// unswept_free_bytes_
// anchor_
MoveOverFreeMemory(other);
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
other->accounting_stats_.Clear();
// The linear allocation area of {other} should be destroyed now.
DCHECK(other->top() == nullptr);
DCHECK(other->limit() == nullptr);
DCHECK(other->end_of_unswept_pages_ == nullptr);
other->accounting_stats_.Reset();
AccountCommitted(other->CommittedMemory());
@@ -2489,28 +2392,6 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
base::LockGuard<base::Mutex> guard(&mutex_);
FreeSpace* node = nullptr;
int node_size = 0;
// Try to find a node that fits exactly.
node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
// If no node could be found, get as much memory as possible.
if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
if (node != nullptr) {
// Give back leftovers that were not required by {hint_size_in_bytes}.
intptr_t aligned_size = RoundUp(hint_size_in_bytes, kPointerSize);
intptr_t left_over = node_size - aligned_size;
if (left_over > 0) {
Free(node->address() + aligned_size, static_cast<int>(left_over));
node->set_size(static_cast<int>(aligned_size));
}
}
return node;
}
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2746,7 +2627,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
if (collector->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
collector->RefillFreeList(this);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2754,7 +2635,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// If sweeping is still in progress try to sweep pages on the main thread.
int free_chunk = collector->SweepInParallel(this, size_in_bytes);
RefillFreeList();
collector->RefillFreeList(this);
if (free_chunk >= size_in_bytes) {
HeapObject* object = free_list_.Allocate(size_in_bytes);
// We should be able to allocate an object here since we just freed that
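
A second self-contained sketch, under the assumption of an 8-byte word, models the leftover handling performed by the deleted FreeList::TryRemoveMemory above: the hint is rounded up to the word size, only that much is handed to the caller, and any surplus carried by the chosen node is freed back into the list. The helper names here are hypothetical and exist only for illustration.

#include <cstdint>
#include <cstdio>

// Stand-in for kPointerSize on a 64-bit build (an assumption of this sketch).
constexpr int64_t kWordSize = 8;

int64_t RoundUpToWord(int64_t value) {
  return (value + kWordSize - 1) & ~(kWordSize - 1);
}

// Splits a node of node_size bytes for a request of hint_size bytes. Returns
// the number of bytes handed out and reports the surplus via *leftover; in the
// real code the surplus is Free()d back into the free list.
int64_t TakeWithLeftover(int64_t node_size, int64_t hint_size,
                         int64_t* leftover) {
  int64_t aligned = RoundUpToWord(hint_size);
  int64_t surplus = node_size - aligned;
  if (surplus > 0) {
    *leftover = surplus;
    return aligned;
  }
  *leftover = 0;
  return node_size;  // the node is no bigger than the (aligned) request
}

int main() {
  int64_t leftover = 0;
  int64_t taken = TakeWithLeftover(/*node_size=*/4096, /*hint_size=*/1500,
                                   &leftover);
  // 1500 rounds up to 1504, so 2592 bytes would go back to the free list.
  std::printf("taken=%lld leftover=%lld\n", static_cast<long long>(taken),
              static_cast<long long>(leftover));
  return 0;
}
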
@@ -19,7 +19,6 @@
namespace v8 {
namespace internal {
class CompactionSpaceCollection;
class Isolate;
// -----------------------------------------------------------------------------
@@ -1730,14 +1729,6 @@ class FreeList {
// The size should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
// The method tries to find a {FreeSpace} node of at least {size_in_bytes}
// size in the free list category exactly matching the size. If no suitable
// node could be found, the method falls back to retrieving a {FreeSpace}
// from the large or huge free list category.
//
// Can be used concurrently.
MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
bool IsEmpty() {
return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
large_list_.IsEmpty() && huge_list_.IsEmpty();
@@ -1792,6 +1783,7 @@ class FreeList {
return nullptr;
}
PagedSpace* owner_;
Heap* heap_;
base::Mutex mutex_;
@@ -1850,8 +1842,6 @@ STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
class PagedSpace : public Space {
public:
static const intptr_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
@@ -2053,26 +2043,15 @@ class PagedSpace : public Space {
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return area_size_; }
virtual bool is_local() { return false; }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
intptr_t limit = kCompactionMemoryWanted);
void MoveOverFreeMemory(PagedSpace* other);
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
virtual bool is_local() { return false; }
protected:
void AddMemory(Address start, intptr_t size);
FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
void MoveOverFreeMemory(PagedSpace* other);
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
@@ -2133,9 +2112,6 @@ class PagedSpace : public Space {
friend class MarkCompactCollector;
friend class PageIterator;
// Used in cctest.
friend class HeapTester;
};
@@ -2813,13 +2789,11 @@ class CompactionSpace : public PagedSpace {
Free(start, size_in_bytes);
}
virtual bool is_local() override { return true; }
virtual void RefillFreeList() override;
virtual bool is_local() { return true; }
protected:
// The space is temporary and not included in any snapshots.
virtual bool snapshotable() override { return false; }
virtual bool snapshotable() { return false; }
};
@@ -10,19 +10,17 @@
// Tests that should have access to private methods of {v8::internal::Heap}.
// Those tests need to be defined using HEAP_TEST(Name) { ... }.
#define HEAP_TEST_METHODS(V) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
V(GCFlags) \
V(MarkCompactCollector) \
V(NoPromotion) \
V(NumberStringCacheSize) \
V(ObjectGroups) \
V(Promotion) \
V(Regression39128) \
V(ResetWeakHandle) \
V(StressHandles) \
V(TestSizeOfObjects) \
#define HEAP_TEST_METHODS(V) \
V(GCFlags) \
V(MarkCompactCollector) \
V(NoPromotion) \
V(NumberStringCacheSize) \
V(ObjectGroups) \
V(Promotion) \
V(Regression39128) \
V(ResetWeakHandle) \
V(StressHandles) \
V(TestSizeOfObjects) \
V(WriteBarriersInCopyJSObject)
@@ -54,25 +52,6 @@ class HeapTester {
/* test-api.cc */
static void ResetWeakHandle(bool global_gc);
/* test-spaces.cc */
static CompactionSpaceCollection** InitializeCompactionSpaces(Heap* heap,
int num_spaces);
static void DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
int num_spaces);
static void MergeCompactionSpaces(PagedSpace* space,
CompactionSpaceCollection** spaces,
int num_spaces);
static void AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
AllocationSpace id, int num_spaces,
int num_objects, int object_size);
static void CompactionStats(CompactionSpaceCollection** spaces,
AllocationSpace id, int num_spaces,
intptr_t* capacity, intptr_t* size);
static void TestCompactionSpaceDivide(int num_additional_objects,
int object_size,
int num_compaction_spaces,
int additional_capacity_in_bytes);
};
} // namespace internal
@@ -34,7 +34,7 @@
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap-tester.h"
using namespace v8::internal;
@@ -463,8 +463,8 @@ TEST(CompactionSpaceUsingExternalMemory) {
CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_scope(isolate, allocator);
CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
CompactionSpace* compaction_space =
new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
CHECK(compaction_space != NULL);
CHECK(compaction_space->SetUp());
@@ -503,11 +503,17 @@ TEST(CompactionSpaceUsingExternalMemory) {
// We expect two pages to be reachable from old_space in the end.
const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
Object* chunk =
old_space->AllocateRawUnaligned(static_cast<int>(rest)).ToObjectChecked();
CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
CHECK(chunk != nullptr);
CHECK(chunk->IsHeapObject());
CHECK_EQ(compaction_space->CountTotalPages(), 0);
CHECK_EQ(compaction_space->Capacity(), 0);
// Make the rest of memory available for compaction.
old_space->DivideUponCompactionSpaces(&collection, 1, rest);
compaction_space->AddExternalMemory(HeapObject::cast(chunk)->address(),
static_cast<int>(rest));
CHECK_EQ(compaction_space->CountTotalPages(), 0);
CHECK_EQ(compaction_space->Capacity(), rest);
while (num_rest_objects-- > 0) {
@@ -524,7 +530,7 @@ TEST(CompactionSpaceUsingExternalMemory) {
old_space->MergeCompactionSpace(compaction_space);
CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
delete collection;
delete compaction_space;
delete old_space;
allocator->TearDown();
@@ -532,157 +538,6 @@
}
CompactionSpaceCollection** HeapTester::InitializeCompactionSpaces(
Heap* heap, int num_spaces) {
CompactionSpaceCollection** spaces =
new CompactionSpaceCollection*[num_spaces];
for (int i = 0; i < num_spaces; i++) {
spaces[i] = new CompactionSpaceCollection(heap);
}
return spaces;
}
void HeapTester::DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
int num_spaces) {
for (int i = 0; i < num_spaces; i++) {
delete spaces[i];
}
delete[] spaces;
}
void HeapTester::MergeCompactionSpaces(PagedSpace* space,
CompactionSpaceCollection** spaces,
int num_spaces) {
AllocationSpace id = space->identity();
for (int i = 0; i < num_spaces; i++) {
space->MergeCompactionSpace(spaces[i]->Get(id));
CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(), 0);
CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Capacity(), 0);
CHECK_EQ(spaces[i]->Get(id)->Waste(), 0);
}
}
void HeapTester::AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
AllocationSpace id, int num_spaces,
int num_objects, int object_size) {
for (int i = 0; i < num_spaces; i++) {
for (int j = 0; j < num_objects; j++) {
spaces[i]->Get(id)->AllocateRawUnaligned(object_size).ToObjectChecked();
}
spaces[i]->Get(id)->EmptyAllocationInfo();
CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(),
num_objects * object_size);
CHECK_GE(spaces[i]->Get(id)->accounting_stats_.Capacity(),
spaces[i]->Get(id)->accounting_stats_.Size());
}
}
void HeapTester::CompactionStats(CompactionSpaceCollection** spaces,
AllocationSpace id, int num_spaces,
intptr_t* capacity, intptr_t* size) {
*capacity = 0;
*size = 0;
for (int i = 0; i < num_spaces; i++) {
*capacity += spaces[i]->Get(id)->accounting_stats_.Capacity();
*size += spaces[i]->Get(id)->accounting_stats_.Size();
}
}
void HeapTester::TestCompactionSpaceDivide(int num_additional_objects,
int object_size,
int num_compaction_spaces,
int additional_capacity_in_bytes) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
CHECK(old_space != nullptr);
CHECK(old_space->SetUp());
old_space->AllocateRawUnaligned(object_size).ToObjectChecked();
old_space->EmptyAllocationInfo();
intptr_t rest_capacity = old_space->accounting_stats_.Capacity() -
old_space->accounting_stats_.Size();
intptr_t capacity_for_compaction_space =
rest_capacity / num_compaction_spaces;
int num_objects_in_compaction_space =
static_cast<int>(capacity_for_compaction_space) / object_size +
num_additional_objects;
CHECK_GT(num_objects_in_compaction_space, 0);
intptr_t initial_old_space_capacity = old_space->accounting_stats_.Capacity();
CompactionSpaceCollection** spaces =
InitializeCompactionSpaces(heap, num_compaction_spaces);
old_space->DivideUponCompactionSpaces(spaces, num_compaction_spaces,
capacity_for_compaction_space);
intptr_t compaction_capacity = 0;
intptr_t compaction_size = 0;
CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
&compaction_capacity, &compaction_size);
intptr_t old_space_capacity = old_space->accounting_stats_.Capacity();
intptr_t old_space_size = old_space->accounting_stats_.Size();
// Compaction space memory is subtracted from the original space's capacity.
CHECK_EQ(old_space_capacity,
initial_old_space_capacity - compaction_capacity);
CHECK_EQ(compaction_size, 0);
AllocateInCompactionSpaces(spaces, OLD_SPACE, num_compaction_spaces,
num_objects_in_compaction_space, object_size);
// Old space size and capacity should be the same as after dividing.
CHECK_EQ(old_space->accounting_stats_.Size(), old_space_size);
CHECK_EQ(old_space->accounting_stats_.Capacity(), old_space_capacity);
CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
&compaction_capacity, &compaction_size);
MergeCompactionSpaces(old_space, spaces, num_compaction_spaces);
CHECK_EQ(old_space->accounting_stats_.Capacity(),
old_space_capacity + compaction_capacity);
CHECK_EQ(old_space->accounting_stats_.Size(),
old_space_size + compaction_size);
// We check against the expected end capacity.
CHECK_EQ(old_space->accounting_stats_.Capacity(),
initial_old_space_capacity + additional_capacity_in_bytes);
DestroyCompactionSpaces(spaces, num_compaction_spaces);
delete old_space;
}
HEAP_TEST(CompactionSpaceDivideSinglePage) {
const int kObjectSize = KB;
const int kCompactionSpaces = 4;
// Since the bound for objects is tight and the dividing is best effort, we
// subtract some objects to make sure we still fit in the initial page.
// A CHECK makes sure that the overall number of allocated objects stays
// > 0.
const int kAdditionalObjects = -10;
const int kAdditionalCapacityRequired = 0;
TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
kAdditionalCapacityRequired);
}
HEAP_TEST(CompactionSpaceDivideMultiplePages) {
const int kObjectSize = KB;
const int kCompactionSpaces = 4;
// Allocate half a page of objects to ensure that we need one more page per
// compaction space.
const int kAdditionalObjects = (Page::kPageSize / kObjectSize / 2);
const int kAdditionalCapacityRequired =
Page::kAllocatableMemory * kCompactionSpaces;
TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
kAdditionalCapacityRequired);
}
TEST(LargeObjectSpace) {
v8::V8::Initialize();