// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_HEAP_PAGE_H_
#define V8_HEAP_CPPGC_HEAP_PAGE_H_

#include <atomic>

#include "src/base/atomic-utils.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/object-start-bitmap.h"

namespace cppgc {
namespace internal {

class BaseSpace;
class NormalPageSpace;
class LargePageSpace;
class HeapBase;
class PageBackend;

class V8_EXPORT_PRIVATE BasePage {
 public:
  static inline BasePage* FromPayload(void*);
  static inline const BasePage* FromPayload(const void*);

  static BasePage* FromInnerAddress(const HeapBase*, void*);
  static const BasePage* FromInnerAddress(const HeapBase*, const void*);

  static void Destroy(BasePage*);

  BasePage(const BasePage&) = delete;
  BasePage& operator=(const BasePage&) = delete;

  HeapBase* heap() const { return heap_; }

  BaseSpace* space() { return space_; }
  const BaseSpace* space() const { return space_; }
  void set_space(BaseSpace* space) { space_ = space; }

  bool is_large() const { return type_ == PageType::kLarge; }

  Address PayloadStart();
  ConstAddress PayloadStart() const;
  Address PayloadEnd();
  ConstAddress PayloadEnd() const;

  // |address| must refer to a real object.
  template <AccessMode = AccessMode::kNonAtomic>
  HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
  template <AccessMode = AccessMode::kNonAtomic>
  const HeapObjectHeader& ObjectHeaderFromInnerAddress(
      const void* address) const;

  // |address| is guaranteed to point into the page, but not necessarily into
  // an allocated object's payload. Returns nullptr when |address| points into
  // a free list entry and the valid header otherwise.
  HeapObjectHeader* TryObjectHeaderFromInnerAddress(void* address) const;
  const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
      const void* address) const;
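
  // Informal usage sketch for the two lookup flavors above; |page| and
  // |inner_ptr| are hypothetical names, not part of this header:
  //
  //   // |inner_ptr| is known to point into a live object:
  //   HeapObjectHeader& header =
  //       page->ObjectHeaderFromInnerAddress(inner_ptr);
  //
  //   // |inner_ptr| may point into a free list entry:
  //   if (HeapObjectHeader* maybe_header =
  //           page->TryObjectHeaderFromInnerAddress(inner_ptr)) {
  //     // |maybe_header| is the header of the enclosing object.
  //   }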

  // SynchronizedLoad and SynchronizedStore are used to sync pages after they
  // are allocated. std::atomic_thread_fence is sufficient in practice but is
  // not recognized by tsan. Atomic load and store of the |type_| field are
  // added for tsan builds.
  void SynchronizedLoad() const {
#if defined(THREAD_SANITIZER)
    v8::base::AsAtomicPtr(&type_)->load(std::memory_order_acquire);
#endif
  }
  void SynchronizedStore() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
#if defined(THREAD_SANITIZER)
    v8::base::AsAtomicPtr(&type_)->store(type_, std::memory_order_release);
#endif
  }
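
  // Informal pairing sketch (|page| is a hypothetical BasePage*): the
  // allocating thread publishes a fully initialized page via
  //   page->SynchronizedStore();
  // and a concurrent reader, such as a marking thread resolving an inner
  // address, first performs
  //   page->SynchronizedLoad();
  // as ObjectHeaderFromInnerAddress() does below.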

 protected:
  enum class PageType : uint8_t { kNormal, kLarge };
  BasePage(HeapBase*, BaseSpace*, PageType);

 private:
  HeapBase* heap_;
  BaseSpace* space_;
  PageType type_;
};

class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
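  // Iterator over the HeapObjectHeaders of a page. |lab_start|/|lab_size|
  // describe a linear allocation buffer, which contains no initialized object
  // headers; both the constructor and operator++ skip over that range.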
  template <typename T>
  class IteratorImpl : v8::base::iterator<std::forward_iterator_tag, T> {
   public:
    explicit IteratorImpl(T* p, ConstAddress lab_start = nullptr,
                          size_t lab_size = 0)
        : p_(p), lab_start_(lab_start), lab_size_(lab_size) {
      DCHECK(p);
      DCHECK_EQ(0, (lab_size & (sizeof(T) - 1)));
      if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
        p_ += (lab_size_ / sizeof(T));
      }
    }

    T& operator*() { return *p_; }
    const T& operator*() const { return *p_; }

    bool operator==(IteratorImpl other) const { return p_ == other.p_; }
    bool operator!=(IteratorImpl other) const { return !(*this == other); }

    IteratorImpl& operator++() {
      const size_t size = p_->GetSize();
      DCHECK_EQ(0, (size & (sizeof(T) - 1)));
      p_ += (size / sizeof(T));
      if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
        p_ += (lab_size_ / sizeof(T));
      }
      return *this;
    }
    IteratorImpl operator++(int) {
      IteratorImpl temp(*this);
      ++(*this);
      return temp;
    }

    T* base() const { return p_; }

   private:
    T* p_;
    ConstAddress lab_start_;
    size_t lab_size_;
  };

 public:
  using iterator = IteratorImpl<HeapObjectHeader>;
  using const_iterator = IteratorImpl<const HeapObjectHeader>;

  // Allocates a new page in the detached state.
  static NormalPage* Create(PageBackend*, NormalPageSpace*);
  // Destroys and frees the page. The page must be detached from the
  // corresponding space (i.e. be swept when called).
  static void Destroy(NormalPage*);
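
  // Informal lifecycle sketch (|backend| and |space| are hypothetical
  // pointers; how the page is attached to and detached from its space is
  // outside this header):
  //   NormalPage* page = NormalPage::Create(backend, space);
  //   ... link |page| into |space|, allocate from it, sweep it ...
  //   NormalPage::Destroy(page);  // |page| must be detached again here.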

  static NormalPage* From(BasePage* page) {
    DCHECK(!page->is_large());
    return static_cast<NormalPage*>(page);
  }
  static const NormalPage* From(const BasePage* page) {
    return From(const_cast<BasePage*>(page));
  }

  iterator begin();
  const_iterator begin() const;

  iterator end() {
    return iterator(reinterpret_cast<HeapObjectHeader*>(PayloadEnd()));
  }
  const_iterator end() const {
    return const_iterator(
        reinterpret_cast<const HeapObjectHeader*>(PayloadEnd()));
  }
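
  // Informal usage sketch (|page| is a hypothetical NormalPage*): begin() and
  // end() make a page range-iterable, e.g.
  //   for (HeapObjectHeader& header : *page) { /* visit |header| */ }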

  Address PayloadStart();
  ConstAddress PayloadStart() const;
  Address PayloadEnd();
  ConstAddress PayloadEnd() const;

  static size_t PayloadSize();

  bool PayloadContains(ConstAddress address) const {
    return (PayloadStart() <= address) && (address < PayloadEnd());
  }

  PlatformAwareObjectStartBitmap& object_start_bitmap() {
    return object_start_bitmap_;
  }
  const PlatformAwareObjectStartBitmap& object_start_bitmap() const {
    return object_start_bitmap_;
  }

 private:
  NormalPage(HeapBase* heap, BaseSpace* space);
  ~NormalPage();

  PlatformAwareObjectStartBitmap object_start_bitmap_;
};

class V8_EXPORT_PRIVATE LargePage final : public BasePage {
 public:
  // Returns the allocation size required for a payload of size |size|.
  static size_t AllocationSize(size_t size);
  // Allocates a new page in the detached state.
  static LargePage* Create(PageBackend*, LargePageSpace*, size_t);
  // Destroys and frees the page. The page must be detached from the
  // corresponding space (i.e. be swept when called).
  static void Destroy(LargePage*);

  static LargePage* From(BasePage* page) {
    DCHECK(page->is_large());
    return static_cast<LargePage*>(page);
  }
  static const LargePage* From(const BasePage* page) {
    return From(const_cast<BasePage*>(page));
  }

  HeapObjectHeader* ObjectHeader();
  const HeapObjectHeader* ObjectHeader() const;

  Address PayloadStart();
  ConstAddress PayloadStart() const;
  Address PayloadEnd();
  ConstAddress PayloadEnd() const;

  size_t PayloadSize() const { return payload_size_; }
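  // Note: the payload of a large page starts with the object's
  // HeapObjectHeader, so ObjectSize() below is PayloadSize() minus the header.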
  size_t ObjectSize() const {
    DCHECK_GT(payload_size_, sizeof(HeapObjectHeader));
    return payload_size_ - sizeof(HeapObjectHeader);
  }

  bool PayloadContains(ConstAddress address) const {
    return (PayloadStart() <= address) && (address < PayloadEnd());
  }

 private:
  LargePage(HeapBase* heap, BaseSpace* space, size_t);
  ~LargePage();

  size_t payload_size_;
};

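// As the computation below implies, a payload address is mapped back to its
// page by masking with kPageBaseMask to get the page-aligned reservation base
// and then stepping over the leading guard page (kGuardPageSize bytes), right
// after which the BasePage object is placed.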
// static
BasePage* BasePage::FromPayload(void* payload) {
  return reinterpret_cast<BasePage*>(
      (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
}

// static
const BasePage* BasePage::FromPayload(const void* payload) {
  return reinterpret_cast<const BasePage*>(
      (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
       kPageBaseMask) +
      kGuardPageSize);
}

template <AccessMode mode = AccessMode::kNonAtomic>
const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
                                                         const void* address) {
  if (page->is_large()) {
    return LargePage::From(page)->ObjectHeader();
  }
  const PlatformAwareObjectStartBitmap& bitmap =
      NormalPage::From(page)->object_start_bitmap();
  const HeapObjectHeader* header =
      bitmap.FindHeader<mode>(static_cast<ConstAddress>(address));
  DCHECK_LT(address, reinterpret_cast<ConstAddress>(header) +
                         header->GetSize<AccessMode::kAtomic>());
  return header;
}

template <AccessMode mode>
HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
  return const_cast<HeapObjectHeader&>(
      ObjectHeaderFromInnerAddress<mode>(const_cast<const void*>(address)));
}

template <AccessMode mode>
const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
    const void* address) const {
  // This method might be called for |address| found via a Trace method of
  // another object. If |address| is on a newly allocated page, there will
  // be no sync between the page allocation and a concurrent marking thread,
  // resulting in a race with page initialization (specifically with writing
  // the page's |type_| field). This can occur when tracing a Member holding a
  // reference to a mixin type.
  SynchronizedLoad();
  const HeapObjectHeader* header =
      ObjectHeaderFromInnerAddressImpl<mode>(this, address);
  DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
  return *header;
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_HEAP_PAGE_H_