// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_INL_H_
#define V8_HEAP_HEAP_INL_H_

#include <cmath>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap other than src/heap/heap.h and its
// write barrier here!
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"

#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
#include "src/objects/feedback-vector.h"

// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/sanitizer/msan.h"
#include "src/strings/string-hasher.h"
#include "src/zone/zone-list-inl.h"

namespace v8 {
namespace internal {

AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::ToInt(object_));
}

HeapObject AllocationResult::ToObjectChecked() {
  CHECK(!IsRetry());
  return HeapObject::cast(object_);
}

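// Translates a Heap* back into its owning Isolate*: the expression below only
// computes the byte offset of the Isolate's heap_ field (using a dummy
// Isolate* at address 16 that is never used to access real memory) and
// subtracts that offset from `this`.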
Isolate* Heap::isolate() {
  return reinterpret_cast<Isolate*>(
      reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}

int64_t Heap::external_memory() {
  return isolate()->isolate_data()->external_memory_;
}

void Heap::update_external_memory(int64_t delta) {
  isolate()->isolate_data()->external_memory_ += delta;
}

void Heap::update_external_memory_concurrently_freed(uintptr_t freed) {
  external_memory_concurrently_freed_ += freed;
}

void Heap::account_external_memory_concurrently_freed() {
  isolate()->isolate_data()->external_memory_ -=
      external_memory_concurrently_freed_;
  external_memory_concurrently_freed_ = 0;
}

RootsTable& Heap::roots_table() { return isolate()->roots_table(); }

#define ROOT_ACCESSOR(Type, name, CamelName)                           \
  Type Heap::name() {                                                  \
    return Type::cast(Object(roots_table()[RootIndex::k##CamelName])); \
  }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define ROOT_ACCESSOR(type, name, CamelName)                                   \
  void Heap::set_##name(type value) {                                          \
    /* The deserializer makes use of the fact that these common roots are */   \
    /* never in new space and never on a page that is being compacted.    */   \
    DCHECK_IMPLIES(deserialization_complete(),                                 \
                   !RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
    DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName),   \
                   IsImmovable(HeapObject::cast(value)));                      \
    roots_table()[RootIndex::k##CamelName] = value.ptr();                      \
  }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

void Heap::SetRootMaterializedObjects(FixedArray objects) {
  roots_table()[RootIndex::kMaterializedObjects] = objects.ptr();
}

void Heap::SetRootScriptList(Object value) {
  roots_table()[RootIndex::kScriptList] = value.ptr();
}

void Heap::SetRootStringTable(StringTable value) {
  roots_table()[RootIndex::kStringTable] = value.ptr();
}

void Heap::SetMessageListeners(TemplateList value) {
  roots_table()[RootIndex::kMessageListeners] = value.ptr();
}

void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
  DCHECK(hash_table.IsObjectHashTable() || hash_table.IsUndefined(isolate()));
  roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = hash_table.ptr();
}

PagedSpace* Heap::paged_space(int idx) {
  DCHECK_NE(idx, LO_SPACE);
  DCHECK_NE(idx, NEW_SPACE);
  DCHECK_NE(idx, CODE_LO_SPACE);
  DCHECK_NE(idx, NEW_LO_SPACE);
  return static_cast<PagedSpace*>(space_[idx]);
}

Space* Heap::space(int idx) { return space_[idx]; }

Address* Heap::NewSpaceAllocationTopAddress() {
  return new_space_->allocation_top_address();
}

Address* Heap::NewSpaceAllocationLimitAddress() {
  return new_space_->allocation_limit_address();
}

Address* Heap::OldSpaceAllocationTopAddress() {
  return old_space_->allocation_top_address();
}

Address* Heap::OldSpaceAllocationLimitAddress() {
  return old_space_->allocation_limit_address();
}

void Heap::UpdateNewSpaceAllocationCounter() {
  new_space_allocation_counter_ = NewSpaceAllocationCounter();
}

size_t Heap::NewSpaceAllocationCounter() {
  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}

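// Slow-path raw allocation: picks the target space from the requested
// AllocationType and the object size (large objects go to the corresponding
// large-object space) and returns a retry result if the chosen space fails
// or the allocation timeout fires.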
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
                                   AllocationOrigin origin,
                                   AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK_EQ(gc_state_, NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
      return AllocationResult::Retry();
    }
  }
#endif
#ifdef DEBUG
  IncrementObjectCounters();
#endif

  bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;

  HeapObject object;
  AllocationResult allocation;

  if (FLAG_single_generation && type == AllocationType::kYoung)
    type = AllocationType::kOld;

  if (AllocationType::kYoung == type) {
    if (large_object) {
      if (FLAG_young_generation_large_objects) {
        allocation = new_lo_space_->AllocateRaw(size_in_bytes);
      } else {
        // If young generation large objects are disabled we have to tenure
        // the allocation and violate the given allocation type. This could be
        // dangerous. We may want to remove
        // FLAG_young_generation_large_objects and avoid patching.
        allocation = lo_space_->AllocateRaw(size_in_bytes);
      }
    } else {
      allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
    }
  } else if (AllocationType::kOld == type) {
    if (large_object) {
      allocation = lo_space_->AllocateRaw(size_in_bytes);
    } else {
      allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
    }
  } else if (AllocationType::kCode == type) {
    if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
      allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
    } else {
      allocation = code_lo_space_->AllocateRaw(size_in_bytes);
    }
  } else if (AllocationType::kMap == type) {
    allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
  } else if (AllocationType::kReadOnly == type) {
#ifdef V8_USE_SNAPSHOT
    DCHECK(isolate_->serializer_enabled());
#endif
    DCHECK(!large_object);
    DCHECK(CanAllocateInReadOnlySpace());
    DCHECK_EQ(AllocationOrigin::kRuntime, origin);
    allocation =
        read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
  } else {
    UNREACHABLE();
  }

  if (allocation.To(&object)) {
    if (AllocationType::kCode == type) {
      // Unprotect the memory chunk of the object if it was not unprotected
      // already.
      UnprotectAndRegisterMemoryChunk(object);
      ZapCodeObject(object.address(), size_in_bytes);
      if (!large_object) {
        MemoryChunk::FromHeapObject(object)
            ->GetCodeObjectRegistry()
            ->RegisterNewlyAllocatedCodeObject(object.address());
      }
    }
    OnAllocationEvent(object, size_in_bytes);
  }

  return allocation;
}

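// Templated allocation helper: small, word-aligned young-generation requests
// that fit in the current linear allocation area are bump-pointer allocated
// directly from the new-space top; everything else falls back to the slow
// path selected by `mode` (light retry vs. retry-or-fail).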
template <Heap::AllocationRetryMode mode>
HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
                                 AllocationOrigin origin,
                                 AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK_EQ(gc_state_, NOT_IN_GC);
  Heap* heap = isolate()->heap();
  Address* top = heap->NewSpaceAllocationTopAddress();
  Address* limit = heap->NewSpaceAllocationLimitAddress();
  if (allocation == AllocationType::kYoung &&
      alignment == AllocationAlignment::kWordAligned &&
      size < kMaxRegularHeapObjectSize &&
      (*limit - *top >= static_cast<unsigned>(size)) &&
      V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
                FLAG_gc_interval == 0)) {
    DCHECK(IsAligned(size, kTaggedSize));
    HeapObject obj = HeapObject::FromAddress(*top);
    *top += size;
    heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
    return obj;
  }
  switch (mode) {
    case kLightRetry:
      return AllocateRawWithLightRetrySlowPath(size, allocation, origin,
                                               alignment);
    case kRetryOrFail:
      return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
                                                alignment);
  }
  UNREACHABLE();
}

void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
  for (auto& tracker : allocation_trackers_) {
    tracker->AllocationEvent(object.address(), size_in_bytes);
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(object);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAllocationsHash();
    }
  } else if (FLAG_fuzzer_gc_analysis) {
    ++allocations_count_;
  } else if (FLAG_trace_allocation_stack_interval > 0) {
    ++allocations_count_;
    if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
      isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
    }
  }
}

bool Heap::CanAllocateInReadOnlySpace() {
  return read_only_space()->writable();
}

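// Folds the object's offset within its page and the id of its owning space
// into the running allocations hash (see the --verify-predictable handling in
// OnAllocationEvent above); the 32-bit value is mixed in as two 16-bit halves
// via StringHasher::AddCharacterCore.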
void Heap::UpdateAllocationsHash(HeapObject object) {
  Address object_address = object.address();
  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
  AllocationSpace allocation_space = memory_chunk->owner_identity();

  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
  uint32_t value =
      static_cast<uint32_t>(object_address - memory_chunk->address()) |
      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);

  UpdateAllocationsHash(value);
}

void Heap::UpdateAllocationsHash(uint32_t value) {
  uint16_t c1 = static_cast<uint16_t>(value);
  uint16_t c2 = static_cast<uint16_t>(value >> 16);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}

void Heap::RegisterExternalString(String string) {
  DCHECK(string.IsExternalString());
  DCHECK(!string.IsThinString());
  external_string_table_.AddString(string);
}

void Heap::FinalizeExternalString(String string) {
  DCHECK(string.IsExternalString());
  Page* page = Page::FromHeapObject(string);
  ExternalString ext_string = ExternalString::cast(string);

  page->DecrementExternalBackingStoreBytes(
      ExternalBackingStoreType::kExternalString,
      ext_string.ExternalPayloadSize());

  ext_string.DisposeResource();
}

Address Heap::NewSpaceTop() { return new_space_->top(); }

bool Heap::InYoungGeneration(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InYoungGeneration(HeapObject::cast(object));
}

// static
bool Heap::InYoungGeneration(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InYoungGeneration(heap_object);
}

// static
bool Heap::InYoungGeneration(HeapObject heap_object) {
  bool result = MemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
#ifdef DEBUG
  // If in the young generation, then check we're either not in the middle of
  // GC or the object is in to-space.
  if (result) {
    // If the object is in the young generation, then it's not in RO_SPACE so
    // this is safe.
    Heap* heap = Heap::FromWritableHeapObject(heap_object);
    DCHECK_IMPLIES(heap->gc_state_ == NOT_IN_GC, InToPage(heap_object));
  }
#endif
  return result;
}

// static
bool Heap::InFromPage(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InFromPage(HeapObject::cast(object));
}

// static
bool Heap::InFromPage(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InFromPage(heap_object);
}

// static
bool Heap::InFromPage(HeapObject heap_object) {
  return MemoryChunk::FromHeapObject(heap_object)->IsFromPage();
}

// static
bool Heap::InToPage(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InToPage(HeapObject::cast(object));
}

// static
bool Heap::InToPage(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InToPage(heap_object);
}

// static
bool Heap::InToPage(HeapObject heap_object) {
  return MemoryChunk::FromHeapObject(heap_object)->IsToPage();
}

bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }

// static
Heap* Heap::FromWritableHeapObject(HeapObject obj) {
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
  // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
  // find a heap. The exception is when the ReadOnlySpace is writeable, during
  // bootstrapping, so explicitly allow this case.
  SLOW_DCHECK(chunk->IsWritable());
  Heap* heap = chunk->heap();
  SLOW_DCHECK(heap != nullptr);
  return heap;
}

bool Heap::ShouldBePromoted(Address old_address) {
  Page* page = Page::FromAddress(old_address);
  Address age_mark = new_space_->age_mark();
  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         (!page->ContainsLimit(age_mark) || old_address < age_mark);
}

void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  DCHECK(IsAligned(byte_size, kTaggedSize));
  CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
}

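// Looks for an AllocationMemento directly behind `object` and returns it, or
// a null memento if there is none or it cannot be trusted (it would cross a
// page boundary, lies below the new-space age mark, or races with the current
// allocation top). The amount of checking depends on `mode`.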
template <Heap::FindMementoMode mode>
AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
  Address object_address = object.address();
  Address memento_address = object_address + object.SizeFromMap(map);
  Address last_memento_word_address = memento_address + kTaggedSize;
  // If the memento would be on another page, bail out immediately.
  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
    return AllocationMemento();
  }
  HeapObject candidate = HeapObject::FromAddress(memento_address);
  ObjectSlot candidate_map_slot = candidate.map_slot();
  // This fast check may peek at an uninitialized word. However, the slow check
  // below (memento_address == top) ensures that this is safe. Mark the word as
  // initialized to silence MemorySanitizer warnings.
  MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
  if (!candidate_map_slot.contains_value(
          ReadOnlyRoots(this).allocation_memento_map().ptr())) {
    return AllocationMemento();
  }

  // Bail out if the memento is below the age mark, which can happen when
  // mementos survived because a page got moved within new space.
  Page* object_page = Page::FromAddress(object_address);
  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
    Address age_mark =
        reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
    if (!object_page->Contains(age_mark)) {
      return AllocationMemento();
    }
    // Do an exact check in the case where the age mark is on the same page.
    if (object_address < age_mark) {
      return AllocationMemento();
    }
  }

  AllocationMemento memento_candidate = AllocationMemento::cast(candidate);

  // Depending on what the memento is used for, we might need to perform
  // additional checks.
  Address top;
  switch (mode) {
    case Heap::kForGC:
      return memento_candidate;
    case Heap::kForRuntime:
      if (memento_candidate.is_null()) return AllocationMemento();
      // Either the object is the last object in the new space, or there is
      // another object of at least word size (the header map word) following
      // it, so it suffices to compare ptr and top here.
      top = NewSpaceTop();
      DCHECK(memento_address == top ||
             memento_address + HeapObject::kHeaderSize <= top ||
             !Page::OnSamePage(memento_address, top - 1));
      if ((memento_address != top) && memento_candidate.IsValid()) {
        return memento_candidate;
      }
      return AllocationMemento();
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
}

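// Records pretenuring feedback for `object`: if a valid AllocationMemento
// trails the object, the counter for its AllocationSite is incremented in the
// caller-provided feedback map. The site is deliberately not dereferenced
// here; all checks are postponed until the feedback is merged.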
void Heap::UpdateAllocationSite(Map map, HeapObject object,
                                PretenuringFeedbackMap* pretenuring_feedback) {
  DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
#ifdef DEBUG
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
  DCHECK_IMPLIES(chunk->IsToPage(),
                 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
  DCHECK_IMPLIES(!chunk->InYoungGeneration(),
                 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
#endif
  if (!FLAG_allocation_site_pretenuring ||
      !AllocationSite::CanTrack(map.instance_type())) {
    return;
  }
  AllocationMemento memento_candidate =
      FindAllocationMemento<kForGC>(map, object);
  if (memento_candidate.is_null()) return;

  // Entering cached feedback is used in the parallel case. We are not allowed
  // to dereference the allocation site and rather have to postpone all checks
  // till actually merging the data.
  Address key = memento_candidate.GetAllocationSiteUnchecked();
  (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}

void Heap::ExternalStringTable::AddString(String string) {
  DCHECK(string.IsExternalString());
  DCHECK(!Contains(string));

  if (InYoungGeneration(string)) {
    young_strings_.push_back(string);
  } else {
    old_strings_.push_back(string);
  }
}

Oddball Heap::ToBoolean(bool condition) {
  ReadOnlyRoots roots(this);
  return condition ? roots.true_value() : roots.false_value();
}

int Heap::NextScriptId() {
  int last_id = last_script_id().value();
  if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
  last_id++;
  set_last_script_id(Smi::FromInt(last_id));
  return last_id;
}

int Heap::NextDebuggingId() {
  int last_id = last_debugging_id().value();
  if (last_id == DebugInfo::DebuggingIdBits::kMax) {
    last_id = DebugInfo::kNoDebuggingId;
  }
  last_id++;
  set_last_debugging_id(Smi::FromInt(last_id));
  return last_id;
}

int Heap::GetNextTemplateSerialNumber() {
  int next_serial_number = next_template_serial_number().value() + 1;
  set_next_template_serial_number(Smi::FromInt(next_serial_number));
  return next_serial_number;
}

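// Worked example for the computation below (a sketch): with a 16 MB max
// semi-space, 16 MB / 512 = 32768 entries, which Min() clamps to 0x4000
// (16384); assuming twice the initial cache size is smaller than that, the
// returned FixedArray length is 2 * 16384 = 32768.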
int Heap::MaxNumberToStringCacheSize() const {
  // Compute the size of the number string cache based on the max newspace size.
  // The number string cache has a minimum size based on twice the initial cache
  // size to ensure that it is bigger after being made 'full size'.
  size_t number_string_cache_size = max_semi_space_size_ / 512;
  number_string_cache_size =
      Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
          Min<size_t>(0x4000u, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the number
  // of entries.
  return static_cast<int>(number_string_cache_size * 2);
}

void Heap::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                              size_t amount) {
  base::CheckedIncrement(&backing_store_bytes_, amount);
  // TODO(mlippautz): Implement interrupt for global memory allocations that can
  // trigger garbage collections.
}

void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                              size_t amount) {
  base::CheckedDecrement(&backing_store_bytes_, amount);
}

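// While an AlwaysAllocateScope is active, Heap::always_allocate() returns
// true; for example, the allocation-timeout check in AllocateRaw above is
// skipped inside such a scope.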
AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
  heap_->always_allocate_scope_count_++;
}

AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
    : AlwaysAllocateScope(isolate->heap()) {}

AlwaysAllocateScope::~AlwaysAllocateScope() {
  heap_->always_allocate_scope_count_--;
}

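// RAII scope that makes all code pages writable: when write protection of
// code memory is enabled, the constructor switches the code space and every
// code large-object page to read+write, and the destructor restores the
// default code permissions.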
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory()) {
    heap_->increment_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetReadAndWritable();
    LargePage* page = heap_->code_lo_space()->first_page();
    while (page != nullptr) {
      DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
      CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
      page->SetReadAndWritable();
      page = page->next_page();
    }
  }
}

CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
  if (heap_->write_protect_code_memory()) {
    heap_->decrement_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetDefaultCodePermissions();
    LargePage* page = heap_->code_lo_space()->first_page();
    while (page != nullptr) {
      DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
      CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
      page->SetDefaultCodePermissions();
      page = page->next_page();
    }
  }
}

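// Cheaper variant used while collecting freshly allocated code pages: instead
// of unprotecting every code page up front, it enables a registry so that
// chunks unprotected on demand (see UnprotectAndRegisterMemoryChunk in
// AllocateRaw above) are tracked and re-protected when the scope ends. It is
// a no-op while a CodeSpaceMemoryModificationScope is active.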
CodePageCollectionMemoryModificationScope::
    CodePageCollectionMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory() &&
      !heap_->code_space_memory_modification_scope_depth()) {
    heap_->EnableUnprotectedMemoryChunksRegistry();
  }
}

CodePageCollectionMemoryModificationScope::
    ~CodePageCollectionMemoryModificationScope() {
  if (heap_->write_protect_code_memory() &&
      !heap_->code_space_memory_modification_scope_depth()) {
    heap_->ProtectUnprotectedMemoryChunks();
    heap_->DisableUnprotectedMemoryChunksRegistry();
  }
}

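// Per-chunk scope: temporarily makes a single executable chunk (code space or
// code large-object space) read+write and restores the default permissions on
// destruction. A sketched usage, assuming `code` is a Code object that is
// about to be patched:
//
//   CodePageMemoryModificationScope scope(MemoryChunk::FromHeapObject(code));
//   // ... write into the code object ...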
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
    MemoryChunk* chunk)
    : chunk_(chunk),
      scope_active_(chunk_->heap()->write_protect_code_memory() &&
                    chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
  if (scope_active_) {
    DCHECK(chunk_->owner_identity() == CODE_SPACE ||
           (chunk_->owner_identity() == CODE_LO_SPACE));
    chunk_->SetReadAndWritable();
  }
}

CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
  if (scope_active_) {
    chunk_->SetDefaultCodePermissions();
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_INL_H_