Commit a20499e0 authored by Igor Sheludko, committed by V8 LUCI CQ

[ext-code-space] Prepare GC for making code space external

... by explicitly passing the pointer compression cage base value to various
IsXXX() and map() calls, in order to avoid using an incorrect auto-computed
cage base value when applied to objects allocated in the external code space.
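
For example, a minimal sketch of the pattern applied throughout this CL (the
|isolate| and |object| names are illustrative):

  // Previously the implicit form |object.map()| auto-computed the cage base
  // from |object|'s own address, which yields a wrong value for an object
  // allocated in the external code space. Now the main cage base is derived
  // from the Isolate once and passed down explicitly:
  PtrComprCageBase cage_base(isolate);
  Map map = object.map(cage_base);
  if (object.IsFreeSpaceOrFiller(cage_base)) return;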

This CL also introduces an IsCodeObject(HeapObject) predicate, which checks
the IS_EXECUTABLE bit in the page header's flags.
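
In sketch form it is the helper added to heap-write-barrier-inl.h below, where
InCodeSpace() tests the new kIsExecutableBit page flag:

  inline bool IsCodeObject(HeapObject object) {
    heap_internals::MemoryChunk* chunk =
        heap_internals::MemoryChunk::FromHeapObject(object);
    // Pages belonging to code spaces have IS_EXECUTABLE set in their flags.
    return chunk->InCodeSpace();
  }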

Bug: v8:11880
Change-Id: Ib44398c3125392e46e939044a9bd27e09d7944d5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3229368
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77459}
parent bd4ea1e1
......@@ -762,15 +762,13 @@ struct SlotTraits {
using TMaybeObjectSlot = CompressedMaybeObjectSlot;
using THeapObjectSlot = CompressedHeapObjectSlot;
using TOffHeapObjectSlot = OffHeapCompressedObjectSlot;
// TODO(v8:11880): switch to OffHeapCompressedObjectSlot.
using TCodeObjectSlot = CompressedObjectSlot;
using TCodeObjectSlot = OffHeapCompressedObjectSlot;
#else
using TObjectSlot = FullObjectSlot;
using TMaybeObjectSlot = FullMaybeObjectSlot;
using THeapObjectSlot = FullHeapObjectSlot;
using TOffHeapObjectSlot = OffHeapFullObjectSlot;
// TODO(v8:11880): switch to OffHeapFullObjectSlot.
using TCodeObjectSlot = FullObjectSlot;
using TCodeObjectSlot = OffHeapFullObjectSlot;
#endif
};
......
......@@ -124,7 +124,8 @@ void Object::ObjectVerify(Isolate* isolate) {
} else {
HeapObject::cast(*this).HeapObjectVerify(isolate);
}
CHECK(!IsConstructor() || IsCallable());
PtrComprCageBase cage_base(isolate);
CHECK(!IsConstructor(cage_base) || IsCallable(cage_base));
}
void Object::VerifyPointer(Isolate* isolate, Object p) {
......@@ -322,7 +323,8 @@ void HeapObject::VerifyHeapPointer(Isolate* isolate, Object p) {
void HeapObject::VerifyCodePointer(Isolate* isolate, Object p) {
CHECK(p.IsHeapObject());
CHECK(IsValidCodeObject(isolate->heap(), HeapObject::cast(p)));
CHECK(HeapObject::cast(p).IsCode());
PtrComprCageBase cage_base(isolate);
CHECK(HeapObject::cast(p).IsCode(cage_base));
}
void Symbol::SymbolVerify(Isolate* isolate) {
......
......@@ -14,12 +14,16 @@ namespace v8 {
namespace internal {
// Verify write barrier offsets match the real offsets.
STATIC_ASSERT(BasicMemoryChunk::Flag::IS_EXECUTABLE ==
heap_internals::MemoryChunk::kIsExecutableBit);
STATIC_ASSERT(BasicMemoryChunk::Flag::INCREMENTAL_MARKING ==
heap_internals::MemoryChunk::kMarkingBit);
STATIC_ASSERT(BasicMemoryChunk::Flag::FROM_PAGE ==
heap_internals::MemoryChunk::kFromPageBit);
STATIC_ASSERT(BasicMemoryChunk::Flag::TO_PAGE ==
heap_internals::MemoryChunk::kToPageBit);
STATIC_ASSERT(BasicMemoryChunk::Flag::READ_ONLY_HEAP ==
heap_internals::MemoryChunk::kReadOnlySpaceBit);
STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
heap_internals::MemoryChunk::kFlagsOffset);
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
......
......@@ -42,11 +42,14 @@ V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
V8_WARN_UNUSED_RESULT inline bool IsValidCodeObject(Heap* heap,
HeapObject object) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return third_party_heap::Heap::IsValidCodeObject(object);
}
return heap->ContainsCode(object);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
return heap->ContainsCode(object);
} else {
return ReadOnlyHeap::Contains(object) || heap->ContainsCode(object);
}
}
} // namespace internal
......
......@@ -230,7 +230,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
data_container->SetCodeAndEntryPoint(isolate_, raw_code);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) raw_code.ObjectVerify(isolate_);
if (FLAG_verify_heap) HeapObject::VerifyCodePointer(isolate_, raw_code);
#endif
// Flush the instruction cache before changing the permissions.
......
......@@ -44,6 +44,7 @@ namespace heap_internals {
struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = kSizetSize;
static constexpr uintptr_t kHeapOffset = kSizetSize + kUIntptrSize;
static constexpr uintptr_t kIsExecutableBit = uintptr_t{1} << 0;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
......@@ -78,6 +79,8 @@ struct MemoryChunk {
V8_INLINE bool InReadOnlySpace() const {
return GetFlags() & kReadOnlySpaceBit;
}
V8_INLINE bool InCodeSpace() const { return GetFlags() & kIsExecutableBit; }
};
inline void GenerationalBarrierInternal(HeapObject object, Address slot,
......@@ -204,6 +207,12 @@ inline bool IsReadOnlyHeapObject(HeapObject object) {
return chunk->InReadOnlySpace();
}
inline bool IsCodeObject(HeapObject object) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
return chunk->InCodeSpace();
}
base::Optional<Heap*> WriteBarrier::GetHeapIfMarking(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return {};
heap_internals::MemoryChunk* chunk =
......
......@@ -9,7 +9,6 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/maybe-object.h"
#include "src/objects/slots-inl.h"
#include "src/objects/slots.h"
namespace v8 {
namespace internal {
......
......@@ -345,7 +345,7 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
HeapObject object = HeapObject::FromAddress(current);
if (!object.IsFreeSpaceOrFiller()) object.Iterate(this);
if (!object.IsFreeSpaceOrFiller(cage_base())) object.Iterate(this);
current += object.Size();
}
}
......@@ -1945,21 +1945,22 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
size_t objects_processed = 0;
bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
Isolate* isolate = heap()->isolate();
PtrComprCageBase cage_base(isolate);
while (local_marking_worklists()->Pop(&object) ||
local_marking_worklists()->PopOnHold(&object)) {
// Left trimming may result in grey or black filler objects on the marking
// worklist. Ignore these objects.
if (object.IsFreeSpaceOrFiller()) {
if (object.IsFreeSpaceOrFiller(cage_base)) {
// Due to copying mark bits and the fact that grey and black have their
// first bit set, one word fillers are always black.
DCHECK_IMPLIES(
object.map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
marking_state()->IsBlack(object));
DCHECK_IMPLIES(object.map(cage_base) ==
ReadOnlyRoots(isolate).one_pointer_filler_map(),
marking_state()->IsBlack(object));
// Other fillers may be black or grey depending on the color of the object
// that was trimmed.
DCHECK_IMPLIES(
object.map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
marking_state()->IsBlackOrGrey(object));
DCHECK_IMPLIES(object.map(cage_base) !=
ReadOnlyRoots(isolate).one_pointer_filler_map(),
marking_state()->IsBlackOrGrey(object));
continue;
}
DCHECK(object.IsHeapObject());
......@@ -1969,7 +1970,7 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
}
Map map = object.map(isolate);
Map map = object.map(cage_base);
if (is_per_context_mode) {
Address context;
if (native_context_inferrer_.Infer(isolate, map, object, &context)) {
......@@ -4060,7 +4061,8 @@ class RememberedSetUpdatingItem : public UpdatingItem {
slot.address() - CodeDataContainer::kCodeOffset);
DCHECK(host.IsCodeDataContainer(cage_base));
return UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(
host, cage_base, code_cage_base, CodeObjectSlot(slot));
host, cage_base, code_cage_base,
CodeObjectSlot(slot.address()));
},
SlotSet::FREE_EMPTY_BUCKETS);
chunk_->ReleaseSlotSet<OLD_TO_CODE>();
......@@ -5344,9 +5346,10 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
void MinorMarkCompactCollector::DrainMarkingWorklist() {
MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
PtrComprCageBase cage_base(isolate());
HeapObject object;
while (marking_worklist.Pop(&object)) {
DCHECK(!object.IsFreeSpaceOrFiller());
DCHECK(!object.IsFreeSpaceOrFiller(cage_base));
DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(non_atomic_marking_state()->IsGrey(object));
......@@ -5357,6 +5360,7 @@ void MinorMarkCompactCollector::DrainMarkingWorklist() {
void MinorMarkCompactCollector::TraceFragmentation() {
NewSpace* new_space = heap()->new_space();
PtrComprCageBase cage_base(isolate());
const std::array<size_t, 4> free_size_class_limits = {0, 1024, 2048, 4096};
size_t free_bytes_of_class[free_size_class_limits.size()] = {0};
size_t live_bytes = 0;
......@@ -5378,7 +5382,7 @@ void MinorMarkCompactCollector::TraceFragmentation() {
free_bytes_index++;
}
}
Map map = object.map(kAcquireLoad);
Map map = object.map(cage_base, kAcquireLoad);
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
......
......@@ -32,8 +32,8 @@ HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
const int obj_size = obj.Size();
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller()) {
if (obj.IsCode()) {
if (!obj.IsFreeSpaceOrFiller(cage_base())) {
if (obj.IsCode(cage_base())) {
DCHECK_EQ(space_->identity(), CODE_SPACE);
DCHECK_CODEOBJECT_SIZE(obj_size, space_);
} else {
......
......@@ -31,7 +31,12 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
cur_end_(kNullAddress),
space_(space),
page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {
current_page_(page_range_.begin())
#if V8_COMPRESS_POINTERS
,
cage_base_(heap->isolate())
#endif // V8_COMPRESS_POINTERS
{
space_->MakeLinearAllocationAreaIterable();
heap->mark_compact_collector()->EnsureSweepingCompleted();
}
......@@ -43,7 +48,12 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
cur_end_(kNullAddress),
space_(space),
page_range_(page),
current_page_(page_range_.begin()) {
current_page_(page_range_.begin())
#if V8_COMPRESS_POINTERS
,
cage_base_(heap->isolate())
#endif // V8_COMPRESS_POINTERS
{
space_->MakeLinearAllocationAreaIterable();
heap->mark_compact_collector()->EnsureSweepingCompleted();
#ifdef DEBUG
......
......@@ -47,6 +47,16 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
// Returns nullptr when the iteration has ended.
inline HeapObject Next() override;
// The pointer compression cage base value used for decompression of all
// tagged values except references to Code objects.
PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
#else
return PtrComprCageBase{};
#endif // V8_COMPRESS_POINTERS
}
private:
// Fast (inlined) path of next().
inline HeapObject FromCurrentPage();
......@@ -60,6 +70,9 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
PagedSpace* space_;
PageRange page_range_;
PageRange::iterator current_page_;
#if V8_COMPRESS_POINTERS
const PtrComprCageBase cage_base_;
#endif // V8_COMPRESS_POINTERS
};
class V8_EXPORT_PRIVATE PagedSpace
......
......@@ -130,11 +130,12 @@ size_t Page::AvailableInFreeList() {
namespace {
// Skips filler starting from the given filler until the end address.
// Returns the first address after the skipped fillers.
Address SkipFillers(HeapObject filler, Address end) {
Address SkipFillers(PtrComprCageBase cage_base, HeapObject filler,
Address end) {
Address addr = filler.address();
while (addr < end) {
filler = HeapObject::FromAddress(addr);
CHECK(filler.IsFreeSpaceOrFiller());
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
addr = filler.address() + filler.Size();
}
return addr;
......@@ -152,9 +153,10 @@ size_t Page::ShrinkToHighWaterMark() {
// or the area_end.
HeapObject filler = HeapObject::FromAddress(HighWaterMark());
if (filler.address() == area_end()) return 0;
CHECK(filler.IsFreeSpaceOrFiller());
PtrComprCageBase cage_base(heap()->isolate());
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
// Ensure that no objects were allocated in [filler, area_end) region.
DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
DCHECK_EQ(area_end(), SkipFillers(cage_base, filler, area_end()));
// Ensure that no objects will be allocated on this page.
DCHECK_EQ(0u, AvailableInFreeList());
......@@ -181,7 +183,7 @@ size_t Page::ShrinkToHighWaterMark() {
heap()->memory_allocator()->PartialFreeMemory(
this, address() + size() - unused, unused, area_end() - unused);
if (filler.address() != area_end()) {
CHECK(filler.IsFreeSpaceOrFiller());
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
CHECK_EQ(filler.address() + filler.Size(), area_end());
}
}
......
......@@ -9,7 +9,6 @@
#include "src/common/ptr-compr-inl.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/maybe-object-inl.h"
namespace v8 {
......@@ -159,7 +158,7 @@ void CompressedHeapObjectSlot::store(HeapObjectReference value) const {
HeapObject CompressedHeapObjectSlot::ToHeapObject() const {
Tagged_t value = *location();
DCHECK_EQ(value & kHeapObjectTagMask, kHeapObjectTag);
DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(value));
return HeapObject::cast(Object(DecompressTaggedPointer(address(), value)));
}
......
......@@ -132,6 +132,7 @@ class OffHeapCompressedObjectSlot
static constexpr bool kCanBeWeak = false;
OffHeapCompressedObjectSlot() : SlotBase(kNullAddress) {}
explicit OffHeapCompressedObjectSlot(Address ptr) : SlotBase(ptr) {}
explicit OffHeapCompressedObjectSlot(const uint32_t* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
......
......@@ -743,7 +743,24 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots(PtrComprCageBase cage_base) const {
#endif
}
DEF_GETTER(HeapObject, map, Map) {
Map HeapObject::map() const {
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// TODO(v8:11880): Ensure that cage friendly version is used for the cases
// when this could be a Code object. Replace this with
// DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeObject(*this));
Isolate* isolate;
if (GetIsolateFromHeapObject(*this, &isolate)) {
PtrComprCageBase cage_base(isolate);
return HeapObject::map(cage_base);
}
// If the Isolate can't be obtained then the heap object is a read-only
// one and therefore not a Code object, so fallback to auto-computing cage
// base value.
}
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map(cage_base);
}
Map HeapObject::map(PtrComprCageBase cage_base) const {
return map_word(cage_base, kRelaxedLoad).ToMap();
}
......@@ -819,7 +836,25 @@ ObjectSlot HeapObject::map_slot() const {
return ObjectSlot(MapField::address(*this));
}
DEF_RELAXED_GETTER(HeapObject, map_word, MapWord) {
MapWord HeapObject::map_word(RelaxedLoadTag tag) const {
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// TODO(v8:11880): Ensure that cage friendly version is used for the cases
// when this could be a Code object. Replace this with
// DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeObject(*this));
Isolate* isolate;
if (GetIsolateFromHeapObject(*this, &isolate)) {
PtrComprCageBase cage_base(isolate);
return HeapObject::map_word(cage_base, tag);
}
// If the Isolate can't be obtained then the heap object is a read-only
// one and therefore not a Code object, so fallback to auto-computing cage
// base value.
}
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map_word(cage_base, tag);
}
MapWord HeapObject::map_word(PtrComprCageBase cage_base,
RelaxedLoadTag tag) const {
return MapField::Relaxed_Load_Map_Word(cage_base, *this);
}
......@@ -827,7 +862,15 @@ void HeapObject::set_map_word(MapWord map_word, RelaxedStoreTag) {
MapField::Relaxed_Store_Map_Word(*this, map_word);
}
DEF_ACQUIRE_GETTER(HeapObject, map_word, MapWord) {
MapWord HeapObject::map_word(AcquireLoadTag tag) const {
// This method is never used for Code objects and thus it is fine to use
// auto-computed cage base value.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeObject(*this));
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map_word(cage_base, tag);
}
MapWord HeapObject::map_word(PtrComprCageBase cage_base,
AcquireLoadTag tag) const {
return MapField::Acquire_Load_No_Unpack(cage_base, *this);
}
......
......@@ -144,8 +144,9 @@ void FullHeapObjectSlot::store(HeapObjectReference value) const {
}
HeapObject FullHeapObjectSlot::ToHeapObject() const {
DCHECK((*location() & kHeapObjectTagMask) == kHeapObjectTag);
return HeapObject::cast(Object(*location()));
TData value = *location();
DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(value));
return HeapObject::cast(Object(value));
}
void FullHeapObjectSlot::StoreHeapObject(HeapObject value) const {
......
......@@ -269,6 +269,7 @@ class UnalignedSlot : public SlotBase<UnalignedSlot<T>, T, 1> {
class OffHeapFullObjectSlot : public FullObjectSlot {
public:
OffHeapFullObjectSlot() : FullObjectSlot() {}
explicit OffHeapFullObjectSlot(Address ptr) : FullObjectSlot(ptr) {}
explicit OffHeapFullObjectSlot(const Address* ptr) : FullObjectSlot(ptr) {}
inline Object operator*() const = delete;
......
......@@ -769,7 +769,8 @@ class IndexedReferencesExtractor : public ObjectVisitorWithCageBases {
private:
template <typename TSlot>
V8_INLINE void VisitSlotImpl(PtrComprCageBase cage_base, TSlot slot) {
int field_index = static_cast<int>(MaybeObjectSlot(slot) - parent_start_);
int field_index =
static_cast<int>(MaybeObjectSlot(slot.address()) - parent_start_);
if (generator_->visited_fields_[field_index]) {
generator_->visited_fields_[field_index] = false;
} else {
......@@ -1665,8 +1666,9 @@ bool V8HeapExplorer::IterateAndExtractReferences(
}
bool V8HeapExplorer::IsEssentialObject(Object object) {
ReadOnlyRoots roots(heap_);
return object.IsHeapObject() && !object.IsOddball() &&
Isolate* isolate = heap_->isolate();
ReadOnlyRoots roots(isolate);
return object.IsHeapObject() && !object.IsOddball(isolate) &&
object != roots.empty_byte_array() &&
object != roots.empty_fixed_array() &&
object != roots.empty_weak_fixed_array() &&
......