Commit 77866f69 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Add Isolate::shared_heap_isolate() and has_shared_heap()

Isolate::shared_isolate() was used in many locations to check for the
shared heap feature. Now that we also have shared_space_isolate(),
checking shared_isolate() isn't sufficient anymore.

This CL replaces many invocations of this method with either
has_shared_heap() or shared_heap_isolate(). These methods work for
both shared_isolate() and shared_space_isolate(). As soon as we remove
the shared isolate, we can remove them again.

Bug: v8:13267
Change-Id: I68a3588aca2a12e204450c2b99635dd158d12111
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3899316
Reviewed-by: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83260}
parent cf60ee8e
@@ -4132,11 +4132,14 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
if (HasFlagThatRequiresSharedHeap() && v8_flags.shared_space) {
if (process_wide_shared_space_isolate_) {
attach_to_shared_space_isolate = process_wide_shared_space_isolate_;
owns_shareable_data_ = false;
} else {
process_wide_shared_space_isolate_ = this;
is_shared_space_isolate_ = true;
DCHECK(owns_shareable_data_);
}
attach_to_shared_space_isolate = process_wide_shared_space_isolate_;
}
CHECK_IMPLIES(is_shared_space_isolate_, V8_CAN_CREATE_SHARED_HEAP_BOOL);
@@ -4229,6 +4232,11 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
AttachToSharedIsolate();
AttachToSharedSpaceIsolate(attach_to_shared_space_isolate);
// Ensure that we use at most one of shared_isolate() and
// shared_space_isolate().
DCHECK_IMPLIES(shared_isolate(), !shared_space_isolate());
DCHECK_IMPLIES(shared_space_isolate(), !shared_isolate());
// SetUp the object heap.
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp(main_thread_local_heap());
@@ -4241,9 +4249,11 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
string_forwarding_table_ = std::make_shared<StringForwardingTable>(this);
} else {
// Only refer to shared string table after attaching to the shared isolate.
DCHECK_NOT_NULL(shared_isolate());
string_table_ = shared_isolate()->string_table_;
string_forwarding_table_ = shared_isolate()->string_forwarding_table_;
DCHECK(has_shared_heap());
DCHECK(!is_shared());
DCHECK(!is_shared_space_isolate());
string_table_ = shared_heap_isolate()->string_table_;
string_forwarding_table_ = shared_heap_isolate()->string_forwarding_table_;
}
if (V8_SHORT_BUILTIN_CALLS_BOOL && v8_flags.short_builtin_calls) {
@@ -4290,9 +4300,9 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
isolate_data_.shared_external_pointer_table_ = new ExternalPointerTable();
shared_external_pointer_table().Init(this);
} else {
DCHECK_NOT_NULL(shared_isolate());
DCHECK(has_shared_heap());
isolate_data_.shared_external_pointer_table_ =
shared_isolate()->isolate_data_.shared_external_pointer_table_;
shared_heap_isolate()->isolate_data_.shared_external_pointer_table_;
}
#endif // V8_COMPRESS_POINTERS
@@ -5955,7 +5965,7 @@ void Isolate::DetachFromSharedIsolate() {
void Isolate::AttachToSharedSpaceIsolate(Isolate* shared_space_isolate) {
DCHECK(!shared_space_isolate_.has_value());
shared_space_isolate_ = shared_space_isolate;
if (shared_space_isolate) {
if (shared_space_isolate && shared_space_isolate != this) {
shared_space_isolate->global_safepoint()->AppendClient(this);
}
}
@@ -5963,7 +5973,7 @@ void Isolate::AttachToSharedSpaceIsolate(Isolate* shared_space_isolate) {
void Isolate::DetachFromSharedSpaceIsolate() {
DCHECK(shared_space_isolate_.has_value());
Isolate* shared_space_isolate = shared_space_isolate_.value();
if (shared_space_isolate) {
if (shared_space_isolate && shared_space_isolate != this) {
shared_space_isolate->global_safepoint()->RemoveClient(this);
}
shared_space_isolate_.reset();
@@ -1996,10 +1996,25 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
DCHECK(shared_isolate->is_shared());
DCHECK_NULL(shared_isolate_);
DCHECK(!attached_to_shared_isolate_);
DCHECK(!v8_flags.shared_space);
shared_isolate_ = shared_isolate;
owns_shareable_data_ = false;
}
// Returns true when this isolate supports allocation in shared spaces.
bool has_shared_heap() const {
return shared_isolate() || shared_space_isolate();
}
// Returns the isolate that owns the shared spaces.
Isolate* shared_heap_isolate() {
DCHECK(has_shared_heap());
Isolate* isolate =
shared_isolate() ? shared_isolate() : shared_space_isolate();
DCHECK_NOT_NULL(isolate);
return isolate;
}
GlobalSafepoint* global_safepoint() const { return global_safepoint_.get(); }
bool owns_shareable_data() { return owns_shareable_data_; }
@@ -2009,7 +2024,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// TODO(pthier): Unify with owns_shareable_data() once the flag
// --shared-string-table is removed.
bool OwnsStringTables() {
return !v8_flags.shared_string_table || is_shared();
return !v8_flags.shared_string_table || is_shared() ||
is_shared_space_isolate();
}
#if USE_SIMULATOR
@@ -12,7 +12,8 @@ namespace internal {
// TODO(v8:12547): Currently the shared isolate owns all the conveyors. Change
// the owner to the main isolate once the shared isolate is removed.
SharedObjectConveyorHandles::SharedObjectConveyorHandles(Isolate* isolate)
: persistent_handles_(isolate->shared_isolate()->NewPersistentHandles()) {}
: persistent_handles_(
isolate->shared_heap_isolate()->NewPersistentHandles()) {}
uint32_t SharedObjectConveyorHandles::Persist(HeapObject shared_object) {
DCHECK(shared_object.IsShared());
@@ -2036,7 +2036,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
DisallowGarbageCollection no_gc;
Heap* roots = allocation_type == AllocationType::kMap
? isolate()->heap()
: isolate()->shared_isolate()->heap();
: isolate()->shared_heap_isolate()->heap();
result.set_map_after_allocation(ReadOnlyRoots(roots).meta_map(),
SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
@@ -122,7 +122,7 @@ AllocationResult HeapAllocator::AllocateRawWithRetryOrFailSlowPath(
// We need always_allocate() to be true both on the client- and
// server-isolate. It is used in both code paths.
AlwaysAllocateScope shared_scope(
heap_->isolate()->shared_isolate()->heap());
heap_->isolate()->shared_heap_isolate()->heap());
AlwaysAllocateScope client_scope(heap_);
result = AllocateRaw(size, allocation, origin, alignment);
} else {
@@ -120,7 +120,7 @@ void LocalHeap::SetUp() {
std::make_unique<ConcurrentAllocator>(this, heap_->code_space());
DCHECK_NULL(shared_old_space_allocator_);
if (heap_->isolate()->shared_isolate()) {
if (heap_->isolate()->has_shared_heap()) {
shared_old_space_allocator_ =
std::make_unique<ConcurrentAllocator>(this, heap_->shared_old_space());
}
@@ -4109,7 +4109,7 @@ void MarkCompactCollector::EvacuateEpilogue() {
namespace {
ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
if (v8_flags.shared_string_table && heap->isolate()->shared_isolate()) {
if (v8_flags.shared_string_table && heap->isolate()->has_shared_heap()) {
return new ConcurrentAllocator(nullptr, heap->shared_old_space());
}
@@ -4876,7 +4876,10 @@ class RememberedSetUpdatingItem : public UpdatingItem {
: heap_(heap),
marking_state_(marking_state),
chunk_(chunk),
updating_mode_(updating_mode) {}
updating_mode_(updating_mode),
record_old_to_shared_slots_(
heap->isolate()->has_shared_heap() &&
!heap->isolate()->is_shared_space_isolate()) {}
~RememberedSetUpdatingItem() override = default;
void Process() override {
@@ -4990,7 +4993,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
void UpdateUntypedPointers() {
const bool has_shared_isolate = this->heap_->isolate()->shared_isolate();
const PtrComprCageBase cage_base = heap_->isolate();
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
// Marking bits are cleared already when the page is already swept. This
@@ -5005,12 +5007,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
InvalidatedSlotsFilter::OldToNew(chunk_, liveness_check);
int slots = RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, &filter, has_shared_isolate, cage_base](MaybeObjectSlot slot) {
[this, &filter, cage_base](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
SlotCallbackResult result = CheckAndUpdateOldToNewSlot(slot);
// A new space string might have been promoted into the shared heap
// during GC.
if (has_shared_isolate) {
if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
}
return result;
@@ -5036,12 +5038,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[this, has_shared_isolate, &filter, cage_base](MaybeObjectSlot slot) {
[this, &filter, cage_base](MaybeObjectSlot slot) {
if (filter.IsValid(slot.address())) {
UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
// A string might have been promoted into the shared heap during
// GC.
if (has_shared_isolate) {
if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
}
}
@@ -5106,7 +5108,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
void UpdateTypedPointers() {
const bool has_shared_isolate = heap_->isolate()->shared_isolate();
if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
nullptr) {
CHECK_NE(chunk_->owner(), heap_->map_space());
@@ -5115,14 +5116,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return CheckAndUpdateOldToNewSlot(slot);
};
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk_,
[this, has_shared_isolate, &check_and_update_old_to_new_slot_fn](
SlotType slot_type, Address slot) {
chunk_, [this, &check_and_update_old_to_new_slot_fn](
SlotType slot_type, Address slot) {
SlotCallbackResult result = UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
// A new space string might have been promoted into the shared heap
// during GC.
if (has_shared_isolate) {
if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedTyped(chunk_, slot_type, slot);
}
return result;
@@ -5133,7 +5133,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
nullptr)) {
CHECK_NE(chunk_->owner(), heap_->map_space());
RememberedSet<OLD_TO_OLD>::IterateTyped(
chunk_, [this, has_shared_isolate](SlotType slot_type, Address slot) {
chunk_, [this](SlotType slot_type, Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
PtrComprCageBase cage_base = heap_->isolate();
@@ -5145,7 +5145,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return KEEP_SLOT;
});
// A string might have been promoted into the shared heap during GC.
if (has_shared_isolate) {
if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedTyped(chunk_, slot_type, slot);
}
return result;
@@ -5158,6 +5158,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
MarkingState* marking_state_;
MemoryChunk* chunk_;
RememberedSetUpdatingMode updating_mode_;
const bool record_old_to_shared_slots_;
};
std::unique_ptr<UpdatingItem>
@@ -598,7 +598,7 @@ Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
namespace {
ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
if (v8_flags.shared_string_table && heap->isolate()->shared_isolate()) {
if (v8_flags.shared_string_table && heap->isolate()->has_shared_heap()) {
return new ConcurrentAllocator(nullptr, heap->shared_old_space());
}
return nullptr;