Commit b04632d5 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Support concurrent marking of code objects.

The main change is to track typed slots locally in
each concurrent marking thread.

This generalizes the old LiveBytesMap to MemoryChunkData, which
now contains the live bytes and the typed slots.

With that in place it is straightforward to mark code concurrently.

Bug: v8:8459
Change-Id: I103fff0ad39beadea5151a1d8519f5d3c6602e58
Reviewed-on: https://chromium-review.googlesource.com/c/1337747
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58072}
parent 67f30185
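Before the diff, a minimal self-contained sketch of the per-task bookkeeping this change introduces. The types below are simplified stand-ins built from standard containers, not the actual V8 classes (the real MemoryChunkDataMap also uses MemoryChunk::Hasher, and TypedSlots comes from the newly included src/heap/slot-set.h); the two helper functions are hypothetical and only mirror the IncrementLiveBytes and RecordRelocSlot changes shown in the diff.

```cpp
// Simplified model of MemoryChunkData: each concurrent marking task keeps,
// per memory chunk, the live bytes it has marked plus a lazily allocated
// buffer of typed slots. All of this is task-local, so no synchronization
// is needed while marking; the main thread merges it later.
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <vector>

struct MemoryChunk;  // Opaque stand-in for v8::internal::MemoryChunk.

// Stand-in for TypedSlots: a growable list of (slot type, host offset, offset).
struct TypedSlots {
  struct Entry { int slot_type; uint32_t host_offset; uint32_t offset; };
  std::vector<Entry> entries;
  void Insert(int slot_type, uint32_t host_offset, uint32_t offset) {
    entries.push_back({slot_type, host_offset, offset});
  }
};

struct MemoryChunkData {
  intptr_t live_bytes = 0;
  std::unique_ptr<TypedSlots> typed_slots;  // Allocated on first typed slot.
};

using MemoryChunkDataMap = std::unordered_map<MemoryChunk*, MemoryChunkData>;

// Task-local updates, mirroring IncrementLiveBytes and RecordRelocSlot below.
inline void RecordLiveBytes(MemoryChunkDataMap& local, MemoryChunk* chunk,
                            intptr_t by) {
  local[chunk].live_bytes += by;
}

inline void RecordTypedSlot(MemoryChunkDataMap& local, MemoryChunk* chunk,
                            int slot_type, uint32_t host_offset,
                            uint32_t offset) {
  MemoryChunkData& data = local[chunk];
  if (!data.typed_slots) data.typed_slots.reset(new TypedSlots());
  data.typed_slots->Insert(slot_type, host_offset, offset);
}
```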
@@ -31,8 +31,8 @@ namespace internal {
class ConcurrentMarkingState final
: public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
public:
explicit ConcurrentMarkingState(LiveBytesMap* live_bytes)
: live_bytes_(live_bytes) {}
explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
: memory_chunk_data_(memory_chunk_data) {}
Bitmap* bitmap(const MemoryChunk* chunk) {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
@@ -42,14 +42,14 @@ class ConcurrentMarkingState final
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
(*live_bytes_)[chunk] += by;
(*memory_chunk_data_)[chunk].live_bytes += by;
}
// The live_bytes and SetLiveBytes methods of the marking state are
// not used by the concurrent marker.
private:
LiveBytesMap* live_bytes_;
MemoryChunkDataMap* memory_chunk_data_;
};
// Helper class for storing in-object slot addresses and values.
@@ -78,15 +78,16 @@ class ConcurrentMarkingVisitor final
explicit ConcurrentMarkingVisitor(
ConcurrentMarking::MarkingWorklist* shared,
ConcurrentMarking::MarkingWorklist* bailout, LiveBytesMap* live_bytes,
WeakObjects* weak_objects,
ConcurrentMarking::MarkingWorklist* bailout,
MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
bool embedder_tracing_enabled)
: shared_(shared, task_id),
bailout_(bailout, task_id),
weak_objects_(weak_objects),
embedder_objects_(embedder_objects, task_id),
marking_state_(live_bytes),
marking_state_(memory_chunk_data),
memory_chunk_data_(memory_chunk_data),
task_id_(task_id),
embedder_tracing_enabled_(embedder_tracing_enabled) {}
@@ -166,7 +167,28 @@ class ConcurrentMarkingVisitor final
// Weak list pointers should be ignored during marking. The lists are
// reconstructed after GC.
void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
ObjectSlot end) override {}
ObjectSlot end) final {}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
RecordRelocSlot(host, rinfo, object);
if (!marking_state_.IsBlackOrGrey(object)) {
if (host->IsWeakObject(object)) {
weak_objects_->weak_objects_in_code.Push(task_id_,
std::make_pair(object, host));
} else {
MarkObject(object);
}
}
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
RecordRelocSlot(host, rinfo, target);
MarkObject(target);
}
void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
for (int i = 0; i < snapshot.number_of_slots(); i++) {
@@ -287,15 +309,6 @@ class ConcurrentMarkingVisitor final
return VisitLeftTrimmableArray(map, object);
}
// ===========================================================================
// Code object ===============================================================
// ===========================================================================
int VisitCode(Map map, Code object) {
bailout_.Push(object);
return 0;
}
// ===========================================================================
// Side-effectful visitation.
// ===========================================================================
@@ -491,11 +504,24 @@ class ConcurrentMarkingVisitor final
return slot_snapshot_;
}
void RecordRelocSlot(Code host, RelocInfo* rinfo, Object* target) {
auto info =
MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
if (info.should_record) {
MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
if (!data.typed_slots) {
data.typed_slots.reset(new TypedSlots());
}
data.typed_slots->Insert(info.slot_type, info.host_offset, info.offset);
}
}
ConcurrentMarking::MarkingWorklist::View shared_;
ConcurrentMarking::MarkingWorklist::View bailout_;
WeakObjects* weak_objects_;
ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
ConcurrentMarkingState marking_state_;
MemoryChunkDataMap* memory_chunk_data_;
int task_id_;
SlotSnapshot slot_snapshot_;
bool embedder_tracing_enabled_;
@@ -580,7 +606,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
ConcurrentMarkingVisitor visitor(
shared_, bailout_, &task_state->live_bytes, weak_objects_,
shared_, bailout_, &task_state->memory_chunk_data, weak_objects_,
embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse());
double time_ms;
size_t marked_bytes = 0;
@@ -658,6 +684,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
weak_objects_->weak_references.FlushToGlobal(task_id);
weak_objects_->js_weak_cells.FlushToGlobal(task_id);
weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
@@ -769,28 +796,36 @@ bool ConcurrentMarking::IsStopped() {
return pending_task_count_ == 0;
}
void ConcurrentMarking::FlushLiveBytes(
void ConcurrentMarking::FlushMemoryChunkData(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
for (int i = 1; i <= task_count_; i++) {
LiveBytesMap& live_bytes = task_state_[i].live_bytes;
for (auto pair : live_bytes) {
MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
for (auto& pair : memory_chunk_data) {
// ClearLiveness sets the live bytes to zero.
// Pages with zero live bytes might be already unmapped.
if (pair.second != 0) {
marking_state->IncrementLiveBytes(pair.first, pair.second);
MemoryChunk* memory_chunk = pair.first;
MemoryChunkData& data = pair.second;
if (data.live_bytes) {
marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
}
if (data.typed_slots) {
RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
std::move(data.typed_slots));
}
}
live_bytes.clear();
memory_chunk_data.clear();
task_state_[i].marked_bytes = 0;
}
total_marked_bytes_ = 0;
}
void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
for (int i = 1; i <= task_count_; i++) {
if (task_state_[i].live_bytes.count(chunk)) {
task_state_[i].live_bytes[chunk] = 0;
auto it = task_state_[i].memory_chunk_data.find(chunk);
if (it != task_state_[i].memory_chunk_data.end()) {
it->second.live_bytes = 0;
it->second.typed_slots.reset();
}
}
}
......
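To make the flush step above concrete, here is a rough continuation of the simplified sketch from the commit header. GlobalMarkingState and its two methods are illustrative stand-ins for MajorNonAtomicMarkingState::IncrementLiveBytes and RememberedSet<OLD_TO_OLD>::MergeTyped, not the real API.

```cpp
// After the tasks stop, the main thread folds each task-local map into the
// global marking state: live bytes are added to the per-chunk counters and
// any buffered typed slots are merged into the OLD_TO_OLD typed set.
struct GlobalMarkingState {
  std::unordered_map<MemoryChunk*, intptr_t> live_bytes;
  std::unordered_map<MemoryChunk*, TypedSlots> old_to_old_typed;

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    live_bytes[chunk] += by;
  }
  void MergeTyped(MemoryChunk* chunk, std::unique_ptr<TypedSlots> slots) {
    auto& dst = old_to_old_typed[chunk].entries;
    dst.insert(dst.end(), slots->entries.begin(), slots->entries.end());
  }
};

inline void FlushMemoryChunkData(MemoryChunkDataMap& task_local,
                                 GlobalMarkingState* global) {
  for (auto& pair : task_local) {
    MemoryChunk* chunk = pair.first;
    MemoryChunkData& data = pair.second;
    if (data.live_bytes) global->IncrementLiveBytes(chunk, data.live_bytes);
    if (data.typed_slots) {
      global->MergeTyped(chunk, std::move(data.typed_slots));
    }
  }
  task_local.clear();
}
```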
@@ -11,6 +11,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
#include "src/utils.h"
@@ -24,8 +25,13 @@ class Isolate;
class MajorNonAtomicMarkingState;
struct WeakObjects;
using LiveBytesMap =
std::unordered_map<MemoryChunk*, intptr_t, MemoryChunk::Hasher>;
struct MemoryChunkData {
intptr_t live_bytes;
std::unique_ptr<TypedSlots> typed_slots;
};
using MemoryChunkDataMap =
std::unordered_map<MemoryChunk*, MemoryChunkData, MemoryChunk::Hasher>;
class ConcurrentMarking {
public:
@@ -75,11 +81,11 @@ class ConcurrentMarking {
bool Stop(StopRequest stop_request);
void RescheduleTasksIfNeeded();
// Flushes the local live bytes into the given marking state.
void FlushLiveBytes(MajorNonAtomicMarkingState* marking_state);
// Flushes memory chunk data using the given marking state.
void FlushMemoryChunkData(MajorNonAtomicMarkingState* marking_state);
// This function is called for a new space page that was cleared after
// scavenge and is going to be re-used.
void ClearLiveness(MemoryChunk* chunk);
void ClearMemoryChunkData(MemoryChunk* chunk);
int TaskCount() { return task_count_; }
@@ -98,8 +104,7 @@ class ConcurrentMarking {
// The main thread sets this flag to true when it wants the concurrent
// marker to give up the worker thread.
std::atomic<bool> preemption_request;
LiveBytesMap live_bytes;
MemoryChunkDataMap memory_chunk_data;
size_t marked_bytes = 0;
char cache_line_padding[64];
};
......
@@ -5615,8 +5615,14 @@ void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
slot_type = OBJECT_SLOT;
}
}
RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, host.ptr(), slot_type,
addr);
Address host_addr = host.ptr();
uintptr_t offset = addr - source_page->address();
uintptr_t host_offset = host_addr - source_page->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
DCHECK_LT(host_offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
static_cast<uint32_t>(host_offset),
static_cast<uint32_t>(offset));
}
void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
......
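Both the generational barrier above and the mark-compact path later in the diff now convert absolute addresses into 32-bit chunk-relative offsets before inserting typed slots. A tiny illustrative helper, assuming a stand-in bound in place of TypedSlotSet::kMaxOffset (whose real value lives in slot-set.h):

```cpp
#include <cassert>
#include <cstdint>

// Stand-in bound; the real limit is TypedSlotSet::kMaxOffset.
constexpr uintptr_t kMaxTypedSlotOffset = uintptr_t{1} << 31;

// Convert an absolute slot (or host) address into the 32-bit chunk-relative
// offset expected by the new InsertTyped signature.
inline uint32_t ToChunkOffset(uintptr_t addr, uintptr_t chunk_base) {
  uintptr_t offset = addr - chunk_base;
  assert(offset < kMaxTypedSlotOffset);
  return static_cast<uint32_t>(offset);
}
```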
@@ -248,10 +248,12 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
collector_->RecordRelocSlot(host, rinfo, object);
if (!host->IsWeakObject(object)) {
MarkObject(host, object);
} else if (!marking_state()->IsBlackOrGrey(object)) {
collector_->AddWeakObjectInCode(object, host);
if (!marking_state()->IsBlackOrGrey(object)) {
if (host->IsWeakObject(object)) {
collector_->AddWeakObjectInCode(object, host);
} else {
MarkObject(host, object);
}
}
}
......
@@ -796,7 +796,8 @@ void MarkCompactCollector::FinishConcurrentMarking(
// marking. It is safe to call this function when tasks are already finished.
if (FLAG_parallel_marking || FLAG_concurrent_marking) {
heap()->concurrent_marking()->Stop(stop_request);
heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
heap()->concurrent_marking()->FlushMemoryChunkData(
non_atomic_marking_state());
}
}
@@ -2182,8 +2183,11 @@ bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
}
void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
Object* target) {
MarkCompactCollector::RecordRelocSlotInfo
MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
Object* target) {
RecordRelocSlotInfo result;
result.should_record = false;
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(host.ptr());
if (target_page->IsEvacuationCandidate() &&
@@ -2201,8 +2205,26 @@ void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
slot_type = OBJECT_SLOT;
}
}
RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, host.ptr(), slot_type,
addr);
Address host_addr = host.ptr();
uintptr_t offset = addr - source_page->address();
uintptr_t host_offset = host_addr - source_page->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
DCHECK_LT(host_offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
result.should_record = true;
result.memory_chunk = source_page;
result.slot_type = slot_type;
result.host_offset = static_cast<uint32_t>(host_offset);
result.offset = static_cast<uint32_t>(offset);
}
return result;
}
void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
Object* target) {
auto info = PrepareRecordRelocSlot(host, rinfo, target);
if (info.should_record) {
RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
info.host_offset, info.offset);
}
}
@@ -3931,7 +3953,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
if (FLAG_concurrent_marking) {
// Ensure that concurrent marker does not track pages that are
// going to be unmapped.
heap()->concurrent_marking()->ClearLiveness(p);
heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
}
......
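The motivation for splitting RecordRelocSlot into PrepareRecordRelocSlot plus a separate insertion step is that the two callers now diverge: the main thread still inserts directly into the shared remembered set, while the concurrent visitor buffers into its task-local MemoryChunkData. A sketch of both paths, continuing the simplified stand-in types from the earlier sketches (the function names here are illustrative only):

```cpp
// PrepareRecordRelocSlot computes what would be recorded without touching any
// shared remembered set, so the result can either be inserted directly (main
// thread) or buffered into the task-local MemoryChunkData (concurrent task).
struct RecordRelocSlotInfo {
  bool should_record = false;
  MemoryChunk* memory_chunk = nullptr;
  int slot_type = 0;
  uint32_t host_offset = 0;
  uint32_t offset = 0;
};

// Main-thread path: insert straight into the shared typed remembered set.
inline void RecordRelocSlotOnMainThread(const RecordRelocSlotInfo& info,
                                        GlobalMarkingState* global) {
  if (!info.should_record) return;
  auto slots = std::make_unique<TypedSlots>();
  slots->Insert(info.slot_type, info.host_offset, info.offset);
  global->MergeTyped(info.memory_chunk, std::move(slots));
}

// Concurrent path: buffer task-locally; FlushMemoryChunkData merges it later.
inline void RecordRelocSlotConcurrently(const RecordRelocSlotInfo& info,
                                        MemoryChunkDataMap& task_local) {
  if (!info.should_record) return;
  RecordTypedSlot(task_local, info.memory_chunk, info.slot_type,
                  info.host_offset, info.offset);
}
```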
@@ -625,7 +625,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
static bool IsOnEvacuationCandidate(MaybeObject obj);
void RecordRelocSlot(Code host, RelocInfo* rinfo, Object* target);
struct RecordRelocSlotInfo {
bool should_record;
MemoryChunk* memory_chunk;
SlotType slot_type;
uint32_t host_offset;
uint32_t offset;
};
static RecordRelocSlotInfo PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
Object* target);
static void RecordRelocSlot(Code host, RelocInfo* rinfo, Object* target);
V8_INLINE static void RecordSlot(HeapObject* object, ObjectSlot slot,
HeapObject* target);
V8_INLINE static void RecordSlot(HeapObject* object, HeapObjectSlot slot,
......
@@ -190,23 +190,24 @@ class RememberedSet : public AllStatic {
// Given a page and a typed slot in that page, this function adds the slot
// to the remembered set.
static void InsertTyped(Page* page, Address host_addr, SlotType slot_type,
Address slot_addr) {
TypedSlotSet* slot_set = page->typed_slot_set<type>();
static void InsertTyped(MemoryChunk* memory_chunk, SlotType slot_type,
uint32_t host_offset, uint32_t offset) {
TypedSlotSet* slot_set = memory_chunk->typed_slot_set<type>();
if (slot_set == nullptr) {
slot_set = page->AllocateTypedSlotSet<type>();
}
if (host_addr == kNullAddress) {
host_addr = page->address();
slot_set = memory_chunk->AllocateTypedSlotSet<type>();
}
uintptr_t offset = slot_addr - page->address();
uintptr_t host_offset = host_addr - page->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
DCHECK_LT(host_offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
slot_set->Insert(slot_type, static_cast<uint32_t>(host_offset),
static_cast<uint32_t>(offset));
}
static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> slots) {
TypedSlotSet* slot_set = page->typed_slot_set<type>();
if (slot_set == nullptr) {
slot_set = page->AllocateTypedSlotSet<type>();
}
slot_set->Merge(slots.get());
}
// Given a page and a range of typed slots in that page, this function removes
// the slots from the remembered set.
static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
......
@@ -253,7 +253,7 @@ void ScavengerCollector::CollectGarbage() {
// going to be unmapped.
for (Page* p :
PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
heap_->concurrent_marking()->ClearLiveness(p);
heap_->concurrent_marking()->ClearMemoryChunkData(p);
}
}
......
@@ -2260,7 +2260,7 @@ void NewSpace::ResetLinearAllocationArea() {
for (Page* p : to_space_) {
marking_state->ClearLiveness(p);
// Concurrent marking may have local live bytes for this page.
heap()->concurrent_marking()->ClearLiveness(p);
heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
......