Commit d9a03631 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Track transition arrays using worklists.

This allows handling transition arrays in concurrent marking.

Bug: chromium:694255
Change-Id: I28196fccbf03bfba7d7dada1884813be372ddb54
Reviewed-on: https://chromium-review.googlesource.com/610961
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47303}
parent 622852e5
......@@ -74,13 +74,13 @@ class ConcurrentMarkingVisitor final
public:
using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
explicit ConcurrentMarkingVisitor(
ConcurrentMarking::MarkingWorklist* shared,
ConcurrentMarking::MarkingWorklist* bailout,
ConcurrentMarking::WeakCellWorklist* weak_cells, int task_id)
explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
ConcurrentMarking::MarkingWorklist* bailout,
WeakObjects* weak_objects, int task_id)
: shared_(shared, task_id),
bailout_(bailout, task_id),
weak_cells_(weak_cells, task_id) {}
weak_objects_(weak_objects),
task_id_(task_id) {}
bool ShouldVisit(HeapObject* object) {
return marking_state_.GreyToBlack(object);
......@@ -217,10 +217,30 @@ class ConcurrentMarkingVisitor final
return 0;
}
int VisitTransitionArray(Map* map, TransitionArray* object) {
// TODO(ulan): implement iteration of strong fields.
bailout_.Push(object);
return 0;
// Visits a TransitionArray during concurrent marking: marks its strong
// fields (map slot, prototype transitions, keys, and non-map targets) and
// defers the weak handling by pushing the array onto the per-task
// transition_arrays worklist, which the main thread drains in
// ClearFullMapTransitions().
// Returns the visited size in bytes, or 0 if the object was already black.
int VisitTransitionArray(Map* map, TransitionArray* array) {
  if (!ShouldVisit(array)) return 0;
  VisitMapPointer(array, array->map_slot());
  // Visit strong references.
  if (array->HasPrototypeTransitions()) {
    VisitPointer(array, array->GetPrototypeTransitionsSlot());
  }
  int num_transitions = array->number_of_entries();
  for (int i = 0; i < num_transitions; ++i) {
    VisitPointer(array, array->GetKeySlot(i));
    // A TransitionArray can hold maps or (transitioning StoreIC) handlers.
    // Maps have custom weak handling; handlers (which in turn weakly point
    // to maps) are marked strongly for now, and will be cleared during
    // compaction when the maps they refer to are dead.
    Object* target = array->GetRawTarget(i);
    if (target->IsHeapObject()) {
      // Named |target_map| to avoid shadowing the |map| parameter, which is
      // still needed for the SizeOf computation below.
      Map* target_map = HeapObject::cast(target)->synchronized_map();
      if (target_map->instance_type() != MAP_TYPE) {
        VisitPointer(array, array->GetTargetSlot(i));
      }
    }
  }
  weak_objects_->transition_arrays.Push(task_id_, array);
  return TransitionArray::BodyDescriptor::SizeOf(map, array);
}
int VisitWeakCell(Map* map, WeakCell* object) {
......@@ -237,7 +257,7 @@ class ConcurrentMarkingVisitor final
// If we do not know about liveness of values of weak cells, we have to
// process them when we know the liveness of the whole transitive
// closure.
weak_cells_.Push(object);
weak_objects_->weak_cells.Push(task_id_, object);
}
}
return WeakCell::BodyDescriptor::SizeOf(map, object);
......@@ -295,8 +315,9 @@ class ConcurrentMarkingVisitor final
}
ConcurrentMarking::MarkingWorklist::View shared_;
ConcurrentMarking::MarkingWorklist::View bailout_;
ConcurrentMarking::WeakCellWorklist::View weak_cells_;
WeakObjects* weak_objects_;
ConcurrentMarkingState marking_state_;
int task_id_;
SlotSnapshot slot_snapshot_;
};
......@@ -325,11 +346,11 @@ class ConcurrentMarking::Task : public CancelableTask {
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout,
WeakCellWorklist* weak_cells)
WeakObjects* weak_objects)
: heap_(heap),
shared_(shared),
bailout_(bailout),
weak_cells_(weak_cells),
weak_objects_(weak_objects),
pending_task_count_(0) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
......@@ -343,7 +364,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
void ConcurrentMarking::Run(int task_id, TaskInterrupt* interrupt) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
ConcurrentMarkingVisitor visitor(shared_, bailout_, weak_cells_, task_id);
ConcurrentMarkingVisitor visitor(shared_, bailout_, weak_objects_, task_id);
double time_ms;
size_t total_bytes_marked = 0;
if (FLAG_trace_concurrent_marking) {
......@@ -386,7 +407,8 @@ void ConcurrentMarking::Run(int task_id, TaskInterrupt* interrupt) {
base::LockGuard<base::Mutex> guard(&interrupt->lock);
bailout_->FlushToGlobal(task_id);
}
weak_cells_->FlushToGlobal(task_id);
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
is_pending_[task_id] = false;
......
......@@ -16,7 +16,7 @@ namespace internal {
class Heap;
class Isolate;
class WeakCell;
struct WeakObjects;
class ConcurrentMarking {
public:
......@@ -33,10 +33,9 @@ class ConcurrentMarking {
static const int kTasks = 4;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
using WeakCellWorklist = Worklist<WeakCell*, 64 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout, WeakCellWorklist* weak_cells);
MarkingWorklist* bailout, WeakObjects* weak_objects);
void ScheduleTasks();
void EnsureCompleted();
......@@ -60,7 +59,7 @@ class ConcurrentMarking {
Heap* heap_;
MarkingWorklist* shared_;
MarkingWorklist* bailout_;
WeakCellWorklist* weak_cells_;
WeakObjects* weak_objects_;
TaskInterrupt task_interrupt_[kTasks + 1];
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
......
......@@ -177,7 +177,6 @@ Heap::Heap()
set_native_contexts_list(NULL);
set_allocation_sites_list(Smi::kZero);
set_encountered_weak_collections(Smi::kZero);
set_encountered_transition_arrays(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
......@@ -2738,11 +2737,7 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
// Transition arrays are tenured. When black allocation is on we have to
// add the transition array to the list of encountered_transition_arrays.
if (incremental_marking()->black_allocation()) {
array->set_next_link(encountered_transition_arrays(),
UPDATE_WEAK_WRITE_BARRIER);
set_encountered_transition_arrays(array);
} else {
array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
mark_compact_collector()->AddTransitionArray(array);
}
return array;
}
......@@ -5965,7 +5960,7 @@ bool Heap::SetUp() {
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
mark_compact_collector_->weak_cells());
mark_compact_collector_->weak_objects());
} else {
concurrent_marking_ =
new ConcurrentMarking(this, nullptr, nullptr, nullptr);
......
......@@ -748,13 +748,6 @@ class Heap {
}
void IterateEncounteredWeakCollections(RootVisitor* visitor);
void set_encountered_transition_arrays(Object* transition_array) {
encountered_transition_arrays_ = transition_array;
}
Object* encountered_transition_arrays() const {
return encountered_transition_arrays_;
}
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
......@@ -2291,8 +2284,6 @@ class Heap {
// contains Smi(0) while marking is not active.
Object* encountered_weak_collections_;
Object* encountered_transition_arrays_;
List<GCCallbackPair> gc_epilogue_callbacks_;
List<GCCallbackPair> gc_prologue_callbacks_;
......
......@@ -481,7 +481,7 @@ void MinorMarkCompactCollector::SetUp() {}
void MarkCompactCollector::TearDown() {
AbortCompaction();
weak_cells_.Clear();
AbortWeakObjects();
marking_worklist()->TearDown();
}
......@@ -1008,8 +1008,7 @@ void MarkCompactCollector::Prepare() {
heap()->incremental_marking()->AbortBlackAllocation();
ClearMarkbits();
AbortWeakCollections();
AbortWeakCells();
AbortTransitionArrays();
AbortWeakObjects();
AbortCompaction();
heap_->local_embedder_heap_tracer()->AbortTracing();
marking_worklist()->Clear();
......@@ -2798,6 +2797,9 @@ void MarkCompactCollector::ClearNonLiveReferences() {
MarkDependentCodeForDeoptimization(dependent_code_list);
ClearWeakCollections();
DCHECK(weak_objects_.weak_cells.IsGlobalEmpty());
DCHECK(weak_objects_.transition_arrays.IsGlobalEmpty());
}
......@@ -2894,10 +2896,8 @@ void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
}
void MarkCompactCollector::ClearFullMapTransitions() {
HeapObject* undefined = heap()->undefined_value();
Object* obj = heap()->encountered_transition_arrays();
while (obj != Smi::kZero) {
TransitionArray* array = TransitionArray::cast(obj);
TransitionArray* array;
while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
int num_transitions = array->number_of_entries();
if (num_transitions > 0) {
Map* map = array->GetTarget(0);
......@@ -2912,10 +2912,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
TrimDescriptorArray(parent, descriptors);
}
}
obj = array->next_link();
array->set_next_link(undefined, SKIP_WRITE_BARRIER);
}
heap()->set_encountered_transition_arrays(Smi::kZero);
}
bool MarkCompactCollector::CompactTransitionArray(
......@@ -3088,7 +3085,7 @@ void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
DependentCode* dependent_code_head =
DependentCode::cast(heap->empty_fixed_array());
WeakCell* weak_cell;
while (weak_cells_.Pop(kMainThread, &weak_cell)) {
while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
// We do not insert cleared weak cells into the list, so the value
// cannot be a Smi here.
HeapObject* value = HeapObject::cast(weak_cell->value());
......@@ -3139,17 +3136,9 @@ void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
*dependent_code_list = dependent_code_head;
}
void MarkCompactCollector::AbortWeakCells() { weak_cells_.Clear(); }
void MarkCompactCollector::AbortTransitionArrays() {
HeapObject* undefined = heap()->undefined_value();
Object* obj = heap()->encountered_transition_arrays();
while (obj != Smi::kZero) {
TransitionArray* array = TransitionArray::cast(obj);
obj = array->next_link();
array->set_next_link(undefined, SKIP_WRITE_BARRIER);
}
heap()->set_encountered_transition_arrays(Smi::kZero);
// Discards all weak objects recorded during marking without processing
// them. Invoked when a marking cycle is abandoned, e.g. from TearDown()
// and from Prepare() when aborting incremental marking.
void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.weak_cells.Clear();
weak_objects_.transition_arrays.Clear();
}
void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
......
......@@ -477,6 +477,12 @@ class MajorNonAtomicMarkingState final
}
};
// Weak objects encountered during marking. The worklists are segmented
// (segment size 64) and are shared between the main-thread collector
// (pushing/popping under kMainThread) and the concurrent marking tasks
// (pushing under their task ids and flushing to the global pool).
struct WeakObjects {
// WeakCells whose values' liveness is only known once the transitive
// closure of the marking has been computed.
Worklist<WeakCell*, 64> weak_cells;
// TransitionArrays with weakly-held targets; drained by the main thread
// in ClearFullMapTransitions().
Worklist<TransitionArray*, 64> transition_arrays;
};
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
......@@ -587,8 +593,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
ConcurrentMarkingWorklist bailout_;
};
using WeakCellWorklist = Worklist<WeakCell*, 64 /* segment size */>;
class RootMarkingVisitor;
class CustomRootBodyMarkingVisitor;
......@@ -738,10 +742,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
WeakCellWorklist* weak_cells() { return &weak_cells_; }
// Accessor for the collector-owned weak-object worklists; also handed to
// ConcurrentMarking during Heap::SetUp so marker tasks can record weak
// objects they encounter.
WeakObjects* weak_objects() { return &weak_objects_; }
void AddWeakCell(WeakCell* weak_cell) {
weak_cells_.Push(kMainThread, weak_cell);
weak_objects_.weak_cells.Push(kMainThread, weak_cell);
}
// Records a TransitionArray encountered by the main-thread marker so that
// its weakly-held targets are processed in ClearFullMapTransitions().
void AddTransitionArray(TransitionArray* array) {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
Sweeper& sweeper() { return sweeper_; }
......@@ -878,9 +886,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// transition.
void ClearWeakCellsAndSimpleMapTransitions(
DependentCode** dependent_code_list);
void AbortWeakCells();
void AbortTransitionArrays();
void AbortWeakObjects();
// Starts sweeping of spaces by contributing on the main thread and setting
// up other pages for sweeping. Does not start sweeper tasks.
......@@ -939,7 +945,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool have_code_to_deoptimize_;
MarkingWorklist marking_worklist_;
WeakCellWorklist weak_cells_;
WeakObjects weak_objects_;
// Candidates for pages that should be evacuated.
std::vector<Page*> evacuation_candidates_;
......
......@@ -210,13 +210,7 @@ int MarkingVisitor<ConcreteVisitor>::VisitTransitionArray(
visitor->VisitPointer(array, array->GetTargetSlot(i));
}
}
// Enqueue the array in linked list of encountered transition arrays if it is
// not already in the list.
if (array->next_link()->IsUndefined(heap_->isolate())) {
array->set_next_link(heap_->encountered_transition_arrays(),
UPDATE_WEAK_WRITE_BARRIER);
heap_->set_encountered_transition_arrays(array);
}
collector_->AddTransitionArray(array);
return TransitionArray::BodyDescriptor::SizeOf(map, array);
}
......
......@@ -513,8 +513,6 @@ void TransitionArray::TransitionArrayVerify() {
VerifyPointer(e);
}
CHECK_LE(LengthFor(number_of_transitions()), length());
CHECK(next_link()->IsUndefined(GetIsolate()) || next_link()->IsSmi() ||
next_link()->IsTransitionArray());
}
void JSArgumentsObject::JSArgumentsObjectVerify() {
......
......@@ -687,7 +687,6 @@ void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
os << "\n - capacity: " << length();
for (int i = 0; i < length(); i++) {
os << "\n [" << i << "]: " << Brief(get(i));
if (i == kNextLinkIndex) os << " (next link)";
if (i == kPrototypeTransitionsIndex) os << " (prototype transitions)";
if (i == kTransitionLengthIndex) os << " (number of transitions)";
}
......
......@@ -40,14 +40,6 @@ TransitionArray* TransitionArray::cast(Object* object) {
}
Object* TransitionArray::next_link() { return get(kNextLinkIndex); }
void TransitionArray::set_next_link(Object* next, WriteBarrierMode mode) {
return set(kNextLinkIndex, next, mode);
}
bool TransitionArray::HasPrototypeTransitions() {
return get(kPrototypeTransitionsIndex) != Smi::kZero;
}
......
......@@ -519,8 +519,6 @@ Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
}
void TransitionArray::Zap() {
// Do not zap the next link that is used by GC.
STATIC_ASSERT(kNextLinkIndex + 1 == kPrototypeTransitionsIndex);
MemsetPointer(data_start() + kPrototypeTransitionsIndex,
GetHeap()->the_hole_value(),
length() - kPrototypeTransitionsIndex);
......
......@@ -235,10 +235,6 @@ class TransitionArray : public FixedArray {
void Sort();
// This field should be used only by the GC.
inline void set_next_link(Object* next, WriteBarrierMode mode);
inline Object* next_link();
#if defined(DEBUG) || defined(OBJECT_PRINT)
// For our gdb macros.
void Print();
......@@ -281,10 +277,9 @@ class TransitionArray : public FixedArray {
int value);
// Layout for full transition arrays.
static const int kNextLinkIndex = 0;
static const int kPrototypeTransitionsIndex = 1;
static const int kTransitionLengthIndex = 2;
static const int kFirstIndex = 3;
static const int kPrototypeTransitionsIndex = 0;
static const int kTransitionLengthIndex = 1;
static const int kFirstIndex = 2;
// Layout of map transition entries in full transition arrays.
static const int kTransitionKey = 0;
......
......@@ -9,6 +9,7 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/worklist.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
......@@ -30,9 +31,9 @@ TEST(ConcurrentMarking) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
ConcurrentMarking::MarkingWorklist shared, bailout;
ConcurrentMarking::WeakCellWorklist weak_cells;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout, &weak_cells);
new ConcurrentMarking(heap, &shared, &bailout, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->EnsureCompleted();
......@@ -44,9 +45,9 @@ TEST(ConcurrentMarkingReschedule) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
ConcurrentMarking::MarkingWorklist shared, bailout;
ConcurrentMarking::WeakCellWorklist weak_cells;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout, &weak_cells);
new ConcurrentMarking(heap, &shared, &bailout, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->EnsureCompleted();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment