Commit acf49293 authored by Ulan Degenbaev's avatar Ulan Degenbaev Committed by Commit Bot

[heap] Replace concurrent marking deque with work-stealing worklist.

BUG=chromium:694255
TBR=mlippautz@chromium.org

Change-Id: I8eaec556d187453bd0d1cfbd0a12c0e81306862c
Reviewed-on: https://chromium-review.googlesource.com/548597
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46224}
parent 0d833cb9
......@@ -1572,7 +1572,6 @@ v8_source_set("v8_base") {
"src/heap/array-buffer-tracker.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/concurrent-marking-deque.h",
"src/heap/concurrent-marking.cc",
"src/heap/concurrent-marking.h",
"src/heap/embedder-tracing.cc",
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CONCURRENT_MARKING_DEQUE_
#define V8_HEAP_CONCURRENT_MARKING_DEQUE_
#include <deque>
#include "src/base/platform/mutex.h"
namespace v8 {
namespace internal {
class Heap;
class Isolate;
class HeapObject;
enum class MarkingThread { kMain, kConcurrent };
enum class TargetDeque { kShared, kBailout };
// The concurrent marking deque supports deque operations for two threads:
// main and concurrent. It is implemented using two deques: shared and bailout.
//
// The concurrent thread can use the push and pop operations with the
// MarkingThread::kConcurrent argument. All other operations are intended
// to be used by the main thread only.
//
// The interface of the concurrent marking deque for the main thread matches
// that of the sequential marking deque, so they can be easily switched
// at compile time without updating the main thread call-sites.
//
// The shared deque is shared between the main thread and the concurrent
// thread, so both threads can push to and pop from the shared deque.
// The bailout deque stores objects that cannot be processed by the concurrent
// thread. Only the concurrent thread can push to it and only the main thread
// can pop from it.
class ConcurrentMarkingDeque {
 public:
  // The heap parameter is needed to match the interface
  // of the sequential marking deque; it is intentionally unused here.
  explicit ConcurrentMarkingDeque(Heap* heap) {}

  // Pushes the object into the specified deque assuming that the function is
  // called on the specified thread. The main thread can push only to the
  // shared deque. The concurrent thread can push to both deques.
  // Always returns true; the bool result exists only to match the sequential
  // marking deque, which can fail when full. The |thread| argument is
  // currently unused: both deques are internally lock-protected.
  bool Push(HeapObject* object, MarkingThread thread = MarkingThread::kMain,
            TargetDeque target = TargetDeque::kShared) {
    switch (target) {
      case TargetDeque::kShared:
        shared_deque_.Push(object);
        break;
      case TargetDeque::kBailout:
        bailout_deque_.Push(object);
        break;
    }
    return true;
  }

  // Pops an object from the bailout or shared deque assuming that the function
  // is called on the specified thread. The main thread first tries to pop the
  // bailout deque. If the deque is empty then it tries the shared deque.
  // If the shared deque is also empty, then the function returns nullptr.
  // The concurrent thread pops only from the shared deque.
  HeapObject* Pop(MarkingThread thread = MarkingThread::kMain) {
    if (thread == MarkingThread::kMain) {
      HeapObject* result = bailout_deque_.Pop();
      if (result != nullptr) return result;
    }
    return shared_deque_.Pop();
  }

  // All the following operations can be used only by the main thread.

  // Drops all entries from both deques.
  void Clear() {
    bailout_deque_.Clear();
    shared_deque_.Clear();
  }

  // Never full: the backing std::deque grows without bound.
  bool IsFull() { return false; }

  // NOTE(review): IsEmpty/Size query the two deques under separate locks,
  // so the combined result is not an atomic snapshot while the concurrent
  // thread is running — main-thread-only usage (per the comment above) is
  // what makes these results meaningful.
  bool IsEmpty() { return bailout_deque_.IsEmpty() && shared_deque_.IsEmpty(); }

  int Size() { return bailout_deque_.Size() + shared_deque_.Size(); }

  // Calls the specified callback on each element of the deques and replaces
  // the element with the result of the callback. If the callback returns
  // nullptr then the element is removed from the deque.
  // The callback must accept HeapObject* and return HeapObject*.
  template <typename Callback>
  void Update(Callback callback) {
    bailout_deque_.Update(callback);
    shared_deque_.Update(callback);
  }

  // These empty functions are needed to match the interface
  // of the sequential marking deque.
  void SetUp() {}
  void TearDown() {}
  void StartUsing() {}
  void StopUsing() {}
  void ClearOverflowed() {}
  void SetOverflowed() {}
  bool overflowed() const { return false; }

 private:
  // Simple, slow, and thread-safe deque that forwards all operations to
  // a lock-protected std::deque.
  class Deque {
   public:
    // Writes one byte of the padding; presumably to keep the padding member
    // from being flagged as unused — the value itself is irrelevant.
    Deque() { cache_padding_[0] = 0; }
    // Note: forwards the (void) result of clear(); equivalent to calling
    // clear() and falling off the end.
    void Clear() {
      base::LockGuard<base::Mutex> guard(&mutex_);
      return deque_.clear();
    }
    bool IsEmpty() {
      base::LockGuard<base::Mutex> guard(&mutex_);
      return deque_.empty();
    }
    int Size() {
      base::LockGuard<base::Mutex> guard(&mutex_);
      return static_cast<int>(deque_.size());
    }
    // LIFO access: Push appends to the back, Pop removes from the back.
    void Push(HeapObject* object) {
      base::LockGuard<base::Mutex> guard(&mutex_);
      deque_.push_back(object);
    }
    // Returns nullptr when the deque is empty.
    HeapObject* Pop() {
      base::LockGuard<base::Mutex> guard(&mutex_);
      if (deque_.empty()) return nullptr;
      HeapObject* result = deque_.back();
      deque_.pop_back();
      return result;
    }
    // Rebuilds the deque under the lock, keeping callback(object) for each
    // element and dropping elements for which the callback returns nullptr.
    template <typename Callback>
    void Update(Callback callback) {
      base::LockGuard<base::Mutex> guard(&mutex_);
      std::deque<HeapObject*> new_deque;
      for (auto object : deque_) {
        HeapObject* new_object = callback(object);
        if (new_object) {
          new_deque.push_back(new_object);
        }
      }
      deque_.swap(new_deque);
    }

   private:
    base::Mutex mutex_;
    std::deque<HeapObject*> deque_;
    // Ensure that two deques do not share the same cache line.
    static int const kCachePadding = 64;
    char cache_padding_[kCachePadding];
  };

  Deque bailout_deque_;
  Deque shared_deque_;

  DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkingDeque);
};
} // namespace internal
} // namespace v8
#endif  // V8_HEAP_CONCURRENT_MARKING_DEQUE_
......@@ -7,12 +7,12 @@
#include <stack>
#include <unordered_map>
#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/locked-queue-inl.h"
#include "src/utils-inl.h"
......@@ -48,8 +48,9 @@ class ConcurrentMarkingVisitor final
public:
using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
explicit ConcurrentMarkingVisitor(ConcurrentMarkingDeque* deque)
: deque_(deque) {}
explicit ConcurrentMarkingVisitor(Worklist* shared, Worklist* bailout,
int task_id)
: shared_(shared, task_id), bailout_(bailout, task_id) {}
bool ShouldVisit(HeapObject* object) {
return ObjectMarking::GreyToBlack<AccessMode::ATOMIC>(
......@@ -118,7 +119,7 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitCode(Map* map, Code* object) {
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_.Push(object);
return 0;
}
......@@ -133,7 +134,7 @@ class ConcurrentMarkingVisitor final
VisitMapPointer(object, object->map_slot());
BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
// Aging of bytecode arrays is done on the main thread.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_.Push(object);
}
return 0;
}
......@@ -148,7 +149,7 @@ class ConcurrentMarkingVisitor final
int VisitMap(Map* map, Map* object) {
// TODO(ulan): implement iteration of strong fields.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_.Push(object);
return 0;
}
......@@ -160,7 +161,7 @@ class ConcurrentMarkingVisitor final
Context::BodyDescriptorWeak::IterateBody(object, size, this);
// TODO(ulan): implement proper weakness for normalized map cache
// and remove this bailout.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_.Push(object);
}
return 0;
}
......@@ -172,26 +173,26 @@ class ConcurrentMarkingVisitor final
VisitMapPointer(object, object->map_slot());
SharedFunctionInfo::BodyDescriptorWeak::IterateBody(object, size, this);
// Resetting of IC age counter is done on the main thread.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_.Push(object);
}
return 0;
}
int VisitTransitionArray(Map* map, TransitionArray* object) {
// TODO(ulan): implement iteration of strong fields.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_.Push(object);
return 0;
}
int VisitWeakCell(Map* map, WeakCell* object) {
// TODO(ulan): implement iteration of strong fields.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_.Push(object);
return 0;
}
int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
// TODO(ulan): implement iteration of strong fields.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_.Push(object);
return 0;
}
......@@ -205,7 +206,7 @@ class ConcurrentMarkingVisitor final
#endif
if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kShared);
shared_.Push(object);
}
}
......@@ -245,47 +246,50 @@ class ConcurrentMarkingVisitor final
return MarkingState::Internal(object);
}
ConcurrentMarkingDeque* deque_;
WorklistView shared_;
WorklistView bailout_;
SlotSnapshot slot_snapshot_;
};
class ConcurrentMarking::Task : public CancelableTask {
public:
Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
base::Semaphore* on_finish)
base::Semaphore* on_finish, int task_id)
: CancelableTask(isolate),
concurrent_marking_(concurrent_marking),
on_finish_(on_finish) {}
on_finish_(on_finish),
task_id_(task_id) {}
virtual ~Task() {}
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override {
concurrent_marking_->Run();
concurrent_marking_->Run(task_id_);
on_finish_->Signal();
}
ConcurrentMarking* concurrent_marking_;
base::Semaphore* on_finish_;
int task_id_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
ConcurrentMarking::ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque)
ConcurrentMarking::ConcurrentMarking(Heap* heap, Worklist* shared,
Worklist* bailout)
: heap_(heap),
pending_task_semaphore_(0),
deque_(deque),
visitor_(new ConcurrentMarkingVisitor(deque_)),
shared_(shared),
bailout_(bailout),
is_task_pending_(false) {
// The runtime flag should be set only if the compile time flag was set.
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
#endif
}
ConcurrentMarking::~ConcurrentMarking() { delete visitor_; }
void ConcurrentMarking::Run() {
void ConcurrentMarking::Run(int task_id) {
ConcurrentMarkingVisitor visitor(shared_, bailout_, task_id);
double time_ms = heap_->MonotonicallyIncreasingTimeInMs();
size_t bytes_marked = 0;
base::Mutex* relocation_mutex = heap_->relocation_mutex();
......@@ -293,18 +297,24 @@ void ConcurrentMarking::Run() {
TimedScope scope(&time_ms);
while (true) {
base::LockGuard<base::Mutex> guard(relocation_mutex);
HeapObject* object = deque_->Pop(MarkingThread::kConcurrent);
if (object == nullptr) break;
HeapObject* object;
if (!shared_->Pop(task_id, &object)) break;
Address new_space_top = heap_->new_space()->original_top();
Address new_space_limit = heap_->new_space()->original_limit();
Address addr = object->address();
if (new_space_top <= addr && addr < new_space_limit) {
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
bailout_->Push(task_id, object);
} else {
Map* map = object->synchronized_map();
bytes_marked += visitor_->Visit(map, object);
bytes_marked += visitor.Visit(map, object);
}
}
{
// Take the lock to synchronize with worklist update after
// young generation GC.
base::LockGuard<base::Mutex> guard(relocation_mutex);
bailout_->FlushToGlobal(task_id);
}
}
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp("concurrently marked %dKB in %.2fms\n",
......@@ -314,10 +324,12 @@ void ConcurrentMarking::Run() {
}
void ConcurrentMarking::StartTask() {
const int kConcurrentMarkingTaskId = 1;
if (!FLAG_concurrent_marking) return;
is_task_pending_ = true;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new Task(heap_->isolate(), this, &pending_task_semaphore_),
new Task(heap_->isolate(), this, &pending_task_semaphore_,
kConcurrentMarkingTaskId),
v8::Platform::kShortRunningTask);
}
......
......@@ -13,15 +13,13 @@
namespace v8 {
namespace internal {
class ConcurrentMarkingDeque;
class ConcurrentMarkingVisitor;
class Heap;
class Isolate;
class Worklist;
class ConcurrentMarking {
public:
ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque_);
~ConcurrentMarking();
ConcurrentMarking(Heap* heap, Worklist* shared_, Worklist* bailout_);
void StartTask();
void WaitForTaskToComplete();
......@@ -30,11 +28,11 @@ class ConcurrentMarking {
private:
class Task;
void Run();
void Run(int task_id);
Heap* heap_;
base::Semaphore pending_task_semaphore_;
ConcurrentMarkingDeque* deque_;
ConcurrentMarkingVisitor* visitor_;
Worklist* shared_;
Worklist* bailout_;
bool is_task_pending_;
};
......
......@@ -5763,10 +5763,12 @@ bool Heap::SetUp() {
incremental_marking_->set_marking_worklist(
mark_compact_collector_->marking_worklist());
#ifdef V8_CONCURRENT_MARKING
concurrent_marking_ =
new ConcurrentMarking(this, mark_compact_collector_->marking_worklist());
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(this, marking_worklist->shared(),
marking_worklist->bailout());
#else
concurrent_marking_ = new ConcurrentMarking(this, nullptr);
concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr);
#endif
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
......
......@@ -143,7 +143,7 @@ void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj));
if (ObjectMarking::GreyToBlack<kAtomicity>(obj, marking_state(obj))) {
#ifdef V8_CONCURRENT_MARKING
marking_worklist()->Push(obj, MarkingThread::kMain, TargetDeque::kBailout);
marking_worklist()->PushBailout(obj);
#else
if (!marking_worklist()->Push(obj)) {
ObjectMarking::BlackToGrey<kAtomicity>(obj, marking_state(obj));
......
......@@ -10,11 +10,11 @@
#include "src/base/bits.h"
#include "src/base/platform/condition-variable.h"
#include "src/cancelable-task.h"
#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/marking.h"
#include "src/heap/sequential-marking-deque.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
......@@ -382,7 +382,71 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
#ifdef V8_CONCURRENT_MARKING
using MarkingWorklist = ConcurrentMarkingDeque;
// Wrapper for the shared and bailout worklists.
class MarkingWorklist {
 public:
  // Task id used for every main-thread access to the underlying worklists.
  static const int kMainThread = 0;
  // The heap parameter is not used but needed to match the sequential case.
  explicit MarkingWorklist(Heap* heap) {}

  // Pushes onto the shared worklist on behalf of the main thread.
  bool Push(HeapObject* object) { return shared_.Push(kMainThread, object); }

  // Pushes onto the bailout worklist on behalf of the main thread.
  bool PushBailout(HeapObject* object) {
    return bailout_.Push(kMainThread, object);
  }

  // Pops preferring bailout entries over shared ones; returns nullptr when
  // the main thread sees both worklists as empty.
  HeapObject* Pop() {
    HeapObject* result;
    if (bailout_.Pop(kMainThread, &result)) return result;
    if (shared_.Pop(kMainThread, &result)) return result;
    return nullptr;
  }

  // Drops all entries from both worklists.
  void Clear() {
    bailout_.Clear();
    shared_.Clear();
  }

  // Never full: the worklists grow on demand.
  bool IsFull() { return false; }

  // Empty iff the main thread's local segments and the global pools of both
  // worklists have no entries. Other tasks' private segments are not
  // inspected.
  bool IsEmpty() {
    return bailout_.IsLocalEmpty(kMainThread) &&
           shared_.IsLocalEmpty(kMainThread) &&
           bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty();
  }

  // NOTE(review): counts only the main thread's local segments; entries in
  // the global pools are not included — confirm callers only need a local
  // estimate, not a precise total.
  int Size() {
    return static_cast<int>(bailout_.LocalSize(kMainThread) +
                            shared_.LocalSize(kMainThread));
  }

  // Calls the specified callback on each element of the deques and replaces
  // the element with the result of the callback. If the callback returns
  // nullptr then the element is removed from the deque.
  // The callback must accept HeapObject* and return HeapObject*.
  template <typename Callback>
  void Update(Callback callback) {
    bailout_.Update(callback);
    shared_.Update(callback);
  }

  // Direct access for the concurrent marker, which addresses the worklists
  // with its own task id.
  Worklist* shared() { return &shared_; }
  Worklist* bailout() { return &bailout_; }

  // These empty functions are needed to match the interface
  // of the sequential marking deque.
  void SetUp() {}
  void TearDown() { Clear(); }
  void StartUsing() {}
  void StopUsing() {}
  void ClearOverflowed() {}
  void SetOverflowed() {}
  bool overflowed() const { return false; }

 private:
  Worklist shared_;
  Worklist bailout_;
};
#else
using MarkingWorklist = SequentialMarkingDeque;
#endif
......
......@@ -82,6 +82,11 @@ class Worklist {
private_push_segment_[task_id]->IsEmpty();
}
// Returns true when the shared global pool of segments is empty.
// Per-task private segments are not considered; callers needing a full
// emptiness check use IsGlobalEmpty() below. Takes the worklist lock.
bool IsGlobalPoolEmpty() {
  base::LockGuard<base::Mutex> guard(&lock_);
  return global_pool_.empty();
}
bool IsGlobalEmpty() {
for (int i = 0; i < kMaxNumTasks; i++) {
if (!IsLocalEmpty(i)) return false;
......@@ -125,10 +130,16 @@ class Worklist {
global_pool_[i] = global_pool_.back();
global_pool_.pop_back();
delete segment;
--i;
}
}
}
// Publishes both private segments of |task_id| (push and pop side) to the
// global pool so other tasks can steal their entries.
// PublishPopSegmentToGlobal locks internally; PublishPushSegmentToGlobal is
// presumed to do the same — its body is defined elsewhere (TODO confirm).
void FlushToGlobal(int task_id) {
  PublishPushSegmentToGlobal(task_id);
  PublishPopSegmentToGlobal(task_id);
}
private:
FRIEND_TEST(Worklist, SegmentCreate);
FRIEND_TEST(Worklist, SegmentPush);
......@@ -149,14 +160,12 @@ class Worklist {
// Appends |object| to the segment; fails with false once the
// fixed-capacity segment has no room left.
bool Push(HeapObject* object) {
  if (!IsFull()) {
    objects_[index_] = object;
    index_++;
    return true;
  }
  return false;
}
// Removes the most recently pushed entry into |*object| (LIFO order);
// fails with false when the segment is empty.
bool Pop(HeapObject** object) {
  if (!IsEmpty()) {
    index_--;
    *object = objects_[index_];
    return true;
  }
  return false;
}
......@@ -195,6 +204,14 @@ class Worklist {
}
}
// Donates the private pop segment of |task_id| to the global pool, replacing
// it with a fresh empty segment so other tasks can steal its entries.
// No-op when the pop segment is already empty. Takes the worklist lock.
V8_NOINLINE void PublishPopSegmentToGlobal(int task_id) {
  base::LockGuard<base::Mutex> guard(&lock_);
  if (!private_pop_segment_[task_id]->IsEmpty()) {
    global_pool_.push_back(private_pop_segment_[task_id]);
    // Ownership of the published segment moves to global_pool_; the task
    // continues with a newly allocated empty segment.
    private_pop_segment_[task_id] = new Segment();
  }
}
V8_NOINLINE bool StealPopSegmentFromGlobal(int task_id) {
base::LockGuard<base::Mutex> guard(&lock_);
if (global_pool_.empty()) return false;
......
......@@ -1005,7 +1005,6 @@
'heap/array-buffer-tracker.h',
'heap/code-stats.cc',
'heap/code-stats.h',
'heap/concurrent-marking-deque.h',
'heap/concurrent-marking.cc',
'heap/concurrent-marking.h',
'heap/embedder-tracing.cc',
......
......@@ -9,6 +9,7 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/worklist.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
......@@ -18,9 +19,12 @@ TEST(ConcurrentMarking) {
if (!i::FLAG_concurrent_marking) return;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
ConcurrentMarkingDeque deque(heap);
deque.Push(heap->undefined_value());
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(heap, &deque);
Worklist shared, bailout;
for (int i = 0; i <= Worklist::kSegmentCapacity; i++) {
shared.Push(0, heap->undefined_value());
}
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout);
concurrent_marking->StartTask();
concurrent_marking->WaitForTaskToComplete();
delete concurrent_marking;
......
......@@ -101,7 +101,6 @@ v8_executable("unittests") {
"eh-frame-iterator-unittest.cc",
"eh-frame-writer-unittest.cc",
"heap/bitmap-unittest.cc",
"heap/concurrent-marking-deque-unittest.cc",
"heap/embedder-tracing-unittest.cc",
"heap/gc-idle-time-handler-unittest.cc",
"heap/gc-tracer-unittest.cc",
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdlib.h>

#include <memory>

#include "src/globals.h"
#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
class ConcurrentMarkingDequeTest : public TestWithIsolate {
public:
ConcurrentMarkingDequeTest() {
marking_deque_ = new ConcurrentMarkingDeque(i_isolate()->heap());
object_ = i_isolate()->heap()->undefined_value();
}
~ConcurrentMarkingDequeTest() { delete marking_deque_; }
ConcurrentMarkingDeque* marking_deque() { return marking_deque_; }
HeapObject* object() { return object_; }
private:
ConcurrentMarkingDeque* marking_deque_;
HeapObject* object_;
DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkingDequeTest);
};
// A freshly constructed deque reports empty and a total size of zero.
TEST_F(ConcurrentMarkingDequeTest, Empty) {
  EXPECT_TRUE(marking_deque()->IsEmpty());
  EXPECT_EQ(0, marking_deque()->Size());
}
// An object pushed with the defaults (main thread, shared target) lands on
// the shared deque, which the concurrent thread is allowed to pop.
TEST_F(ConcurrentMarkingDequeTest, SharedDeque) {
  marking_deque()->Push(object());
  EXPECT_FALSE(marking_deque()->IsEmpty());
  EXPECT_EQ(1, marking_deque()->Size());
  EXPECT_EQ(object(), marking_deque()->Pop(MarkingThread::kConcurrent));
}
// An object pushed to the bailout deque counts toward Size() but is not
// visible to the concurrent thread: only the main thread pops bailout
// entries, so a concurrent-thread Pop yields nullptr here.
TEST_F(ConcurrentMarkingDequeTest, BailoutDeque) {
  marking_deque()->Push(object(), MarkingThread::kConcurrent,
                        TargetDeque::kBailout);
  EXPECT_FALSE(marking_deque()->IsEmpty());
  EXPECT_EQ(1, marking_deque()->Size());
  EXPECT_EQ(nullptr, marking_deque()->Pop(MarkingThread::kConcurrent));
}
} // namespace internal
} // namespace v8
......@@ -168,19 +168,26 @@ TEST(Worklist, GlobalUpdateNull) {
TEST(Worklist, GlobalUpdate) {
Worklist worklist;
WorklistView worklist_view(&worklist, 0);
HeapObject* objectA;
HeapObject* objectA = nullptr;
objectA = reinterpret_cast<HeapObject*>(&objectA);
HeapObject* objectB;
HeapObject* objectB = nullptr;
objectB = reinterpret_cast<HeapObject*>(&objectB);
HeapObject* objectC = nullptr;
objectC = reinterpret_cast<HeapObject*>(&objectC);
for (size_t i = 0; i < Worklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view.Push(objectA));
}
for (size_t i = 0; i < Worklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view.Push(objectB));
}
EXPECT_TRUE(worklist_view.Push(objectA));
worklist.Update([objectB](HeapObject* object) { return objectB; });
for (size_t i = 0; i < Worklist::kSegmentCapacity + 1; i++) {
worklist.Update([objectA, objectC](HeapObject* object) {
return (object == objectA) ? nullptr : objectC;
});
for (size_t i = 0; i < Worklist::kSegmentCapacity; i++) {
HeapObject* object;
EXPECT_TRUE(worklist_view.Pop(&object));
EXPECT_EQ(object, objectB);
EXPECT_EQ(object, objectC);
}
}
......
......@@ -98,7 +98,6 @@
'eh-frame-iterator-unittest.cc',
'eh-frame-writer-unittest.cc',
'heap/bitmap-unittest.cc',
'heap/concurrent-marking-deque-unittest.cc',
'heap/embedder-tracing-unittest.cc',
'heap/gc-idle-time-handler-unittest.cc',
'heap/gc-tracer-unittest.cc',
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment