// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


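// The pages of a paged space form a circular, doubly linked list threaded
// through the space's anchor page, so iteration is complete once the next
// page would be the anchor again.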
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// NewSpacePageIterator


NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
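// Returns the next non-filler object on the current page, or NULL once the
// end of the page is reached.  The unused gap between the space's linear
// allocation top and limit is skipped.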
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    // TODO(hpayer): Remove the debugging code.
    if (cur_addr_ > cur_end_) {
      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
                                                      obj_size);
    }

    if (!obj->IsFiller()) {
      DCHECK_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace
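// Sets up a freshly committed MemoryChunk as a Page of |owner|: the usable
// area is added to the owner's capacity and handed to its free list, and the
// incremental marker's page flags are initialized.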
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


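// Sets or clears the SCAN_ON_SCAVENGE flag while keeping the heap's count of
// scan-on-scavenge pages in sync and refreshing the incremental marker's
// page flags.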
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


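// Returns the chunk containing |addr|.  Masking the address down to the page
// boundary only yields a chunk header for regular pages; for an address
// inside a large object the masked location is not a chunk header (owner()
// is NULL), so the large object space is searched instead.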
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


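// Records the highest allocation top ever observed on the chunk that
// contains |mark|.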
void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldSpaceState),
      old_iterator_(heap->old_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}


Page* Page::next_page() {
  DCHECK(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  DCHECK(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
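// Illustrative example (addresses made up): with top == 0x1000 and
// limit == 0x1020, a 16-byte request returns the object at 0x1000 and bumps
// top to 0x1010; a subsequent 32-byte request would overshoot the limit and
// returns NULL so the caller can take the slow path.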
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
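// The fast path below bumps the linear allocation pointer.  If that fails,
// the free list is consulted, then SlowAllocateRaw; when all of them come up
// empty, Retry is returned so the caller can collect garbage and try again.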
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


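// Only 32-bit hosts need the explicitly aligned path: with 64-bit pointers a
// word-aligned allocation is already double aligned.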
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


// -----------------------------------------------------------------------------
// NewSpace


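// Bump-pointer allocation in to-space.  If the remaining linear area cannot
// hold the request plus any alignment filler, the slow path is taken.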
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address old_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(old_top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes, alignment);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  // The slow path above ultimately goes through AllocateRaw, so this suffices.
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address old_top = allocation_info_.top();

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes, kWordAligned);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // The slow path above ultimately goes through AllocateRaw, so this suffices.
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


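// An estimate of how many object bytes can still be allocated in this space,
// derived from the memory allocator's remaining capacity.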
intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}

}
}  // namespace v8::internal

#endif  // V8_HEAP_SPACES_INL_H_