// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/objects/hash-table.h"
#include "src/objects/string-table.h"

namespace v8 {
namespace internal {

using v8::MemoryPressureLevel;

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V)                                                    \
  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
  /* The first 32 entries are most often used in the startup snapshot and   */ \
  /* can use a shorter representation in the serialization format.          */ \
  V(Map, free_space_map, FreeSpaceMap)                                         \
  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
  V(Oddball, uninitialized_value, UninitializedValue)                          \
  V(Oddball, undefined_value, UndefinedValue)                                  \
  V(Oddball, the_hole_value, TheHoleValue)                                     \
  V(Oddball, null_value, NullValue)                                            \
  V(Oddball, true_value, TrueValue)                                            \
  V(Oddball, false_value, FalseValue)                                          \
  V(String, empty_string, empty_string)                                        \
  V(Map, meta_map, MetaMap)                                                    \
  V(Map, byte_array_map, ByteArrayMap)                                         \
  V(Map, fixed_array_map, FixedArrayMap)                                       \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
  V(Map, hash_table_map, HashTableMap)                                         \
  V(Map, symbol_map, SymbolMap)                                                \
  V(Map, one_byte_string_map, OneByteStringMap)                                \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
  V(Map, scope_info_map, ScopeInfoMap)                                         \
  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
  V(Map, code_map, CodeMap)                                                    \
  V(Map, function_context_map, FunctionContextMap)                             \
  V(Map, cell_map, CellMap)                                                    \
  V(Map, weak_cell_map, WeakCellMap)                                           \
  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
  V(Map, foreign_map, ForeignMap)                                              \
  V(Map, heap_number_map, HeapNumberMap)                                       \
  V(Map, transition_array_map, TransitionArrayMap)                             \
  V(Map, feedback_vector_map, FeedbackVectorMap)                               \
  V(ScopeInfo, empty_scope_info, EmptyScopeInfo)                               \
  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
  /* Entries beyond the first 32                                            */ \
  /* The roots above this line should be boring from a GC point of view.    */ \
  /* This means they are never in new space and never on a page that is     */ \
  /* being compacted.                                                       */ \
  /* Oddballs */                                                               \
  V(Oddball, arguments_marker, ArgumentsMarker)                                \
  V(Oddball, exception, Exception)                                             \
  V(Oddball, termination_exception, TerminationException)                      \
  V(Oddball, optimized_out, OptimizedOut)                                      \
  V(Oddball, stale_register, StaleRegister)                                    \
  /* Context maps */                                                           \
  V(Map, native_context_map, NativeContextMap)                                 \
  V(Map, module_context_map, ModuleContextMap)                                 \
  V(Map, eval_context_map, EvalContextMap)                                     \
  V(Map, script_context_map, ScriptContextMap)                                 \
  V(Map, block_context_map, BlockContextMap)                                   \
  V(Map, catch_context_map, CatchContextMap)                                   \
  V(Map, with_context_map, WithContextMap)                                     \
  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
  V(Map, script_context_table_map, ScriptContextTableMap)                      \
  /* Maps */                                                                   \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
  V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
  V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap)          \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
  V(Map, message_object_map, JSMessageObjectMap)                               \
  V(Map, external_map, ExternalMap)                                            \
  V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
  V(Map, module_info_map, ModuleInfoMap)                                       \
  V(Map, no_closures_cell_map, NoClosuresCellMap)                              \
  V(Map, one_closure_cell_map, OneClosureCellMap)                              \
  V(Map, many_closures_cell_map, ManyClosuresCellMap)                          \
  /* String maps */                                                            \
  V(Map, native_source_string_map, NativeSourceStringMap)                      \
  V(Map, string_map, StringMap)                                                \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
  V(Map, cons_string_map, ConsStringMap)                                       \
  V(Map, thin_one_byte_string_map, ThinOneByteStringMap)                       \
  V(Map, thin_string_map, ThinStringMap)                                       \
  V(Map, sliced_string_map, SlicedStringMap)                                   \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                   \
  V(Map, external_string_map, ExternalStringMap)                               \
  V(Map, external_string_with_one_byte_data_map,                               \
    ExternalStringWithOneByteDataMap)                                          \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
  V(Map, short_external_string_map, ShortExternalStringMap)                    \
  V(Map, short_external_string_with_one_byte_data_map,                         \
    ShortExternalStringWithOneByteDataMap)                                     \
  V(Map, internalized_string_map, InternalizedStringMap)                       \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap)      \
  V(Map, external_internalized_string_with_one_byte_data_map,                  \
    ExternalInternalizedStringWithOneByteDataMap)                              \
  V(Map, external_one_byte_internalized_string_map,                            \
    ExternalOneByteInternalizedStringMap)                                      \
  V(Map, short_external_internalized_string_map,                               \
    ShortExternalInternalizedStringMap)                                        \
  V(Map, short_external_internalized_string_with_one_byte_data_map,            \
    ShortExternalInternalizedStringWithOneByteDataMap)                         \
  V(Map, short_external_one_byte_internalized_string_map,                      \
    ShortExternalOneByteInternalizedStringMap)                                 \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
  /* Array element maps */                                                     \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                            \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                          \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                            \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
  /* Canonical empty values */                                                 \
  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)        \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)      \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)        \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)    \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
    EmptyFixedUint8ClampedArray)                                               \
  V(Script, empty_script, EmptyScript)                                         \
  V(Cell, undefined_cell, UndefinedCell)                                       \
  V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
    EmptySlowElementDictionary)                                                \
  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
  /* Protectors */                                                             \
  V(PropertyCell, array_protector, ArrayProtector)                             \
  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
  V(Cell, species_protector, SpeciesProtector)                                 \
  V(PropertyCell, string_length_protector, StringLengthProtector)              \
  V(Cell, fast_array_iteration_protector, FastArrayIterationProtector)         \
  V(PropertyCell, array_iterator_protector, ArrayIteratorProtector)            \
  V(PropertyCell, array_buffer_neutering_protector,                            \
    ArrayBufferNeuteringProtector)                                             \
  /* Special numbers */                                                        \
  V(HeapNumber, nan_value, NanValue)                                           \
  V(HeapNumber, hole_nan_value, HoleNanValue)                                  \
  V(HeapNumber, infinity_value, InfinityValue)                                 \
  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
  /* Caches */                                                                 \
  V(FixedArray, number_string_cache, NumberStringCache)                        \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
  V(FixedArray, string_split_cache, StringSplitCache)                          \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
  /* Lists and dictionaries */                                                 \
  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
  V(NameDictionary, public_symbol_table, PublicSymbolTable)                    \
  V(NameDictionary, api_symbol_table, ApiSymbolTable)                          \
  V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable)           \
  V(Object, script_list, ScriptList)                                           \
  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
  V(FixedArray, materialized_objects, MaterializedObjects)                     \
  V(FixedArray, microtask_queue, MicrotaskQueue)                               \
  V(FixedArray, detached_contexts, DetachedContexts)                           \
  V(ArrayList, retained_maps, RetainedMaps)                                    \
  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
  /* weak_new_space_object_to_code_list is an array of weak cells, where */    \
  /* slots with even indices refer to the weak object, and the subsequent */   \
  /* slots refer to the code with the reference to the weak object. */         \
  V(ArrayList, weak_new_space_object_to_code_list,                             \
    WeakNewSpaceObjectToCodeList)                                              \
  /* List to hold onto feedback vectors that we need for code coverage */      \
  V(Object, code_coverage_list, CodeCoverageList)                              \
  V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
  V(FixedArray, serialized_templates, SerializedTemplates)                     \
  V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes)     \
  /* Configured values */                                                      \
  V(TemplateList, message_listeners, MessageListeners)                         \
  V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo)               \
  V(Code, js_entry_code, JsEntryCode)                                          \
  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
  /* Oddball maps */                                                           \
  V(Map, undefined_map, UndefinedMap)                                          \
  V(Map, the_hole_map, TheHoleMap)                                             \
  V(Map, null_map, NullMap)                                                    \
  V(Map, boolean_map, BooleanMap)                                              \
  V(Map, uninitialized_map, UninitializedMap)                                  \
  V(Map, arguments_marker_map, ArgumentsMarkerMap)                             \
  V(Map, exception_map, ExceptionMap)                                          \
  V(Map, termination_exception_map, TerminationExceptionMap)                   \
  V(Map, optimized_out_map, OptimizedOutMap)                                   \
  V(Map, stale_register_map, StaleRegisterMap)                                 \
  /* per-Isolate map for JSPromiseCapability. */                               \
  /* TODO(caitp): Make this a Struct */                                        \
  V(Map, js_promise_capability_map, JSPromiseCapabilityMap)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V)                                                       \
  V(Smi, stack_limit, StackLimit)                                              \
  V(Smi, real_stack_limit, RealStackLimit)                                     \
  V(Smi, last_script_id, LastScriptId)                                         \
  V(Smi, hash_seed, HashSeed)                                                  \
  /* To distinguish the function templates, so that we can find them in the */ \
  /* function cache of the native context. */                                  \
  V(Smi, next_template_serial_number, NextTemplateSerialNumber)                \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
  V(Smi, construct_stub_create_deopt_pc_offset,                                \
    ConstructStubCreateDeoptPCOffset)                                          \
  V(Smi, construct_stub_invoke_deopt_pc_offset,                                \
    ConstructStubInvokeDeoptPCOffset)                                          \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                 \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)                 \
  V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)

#define ROOT_LIST(V)  \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V)    \
  V(StringTable, string_table, StringTable)
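
// Illustrative note (not from the original header): ROOT_LIST is a V-macro;
// clients define a macro of their own and pass it in as V. For example, with
//   #define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
// the entry V(Map, meta_map, MetaMap) expands to
//   inline Map* meta_map();
// and the ROOT_INDEX_DECLARATION client further below turns the same entry
// into kMetaMapRootIndex.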


// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete and has omissions.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(ByteArrayMap)                       \
  V(BytecodeArrayMap)                   \
  V(FreeSpaceMap)                       \
  V(OnePointerFillerMap)                \
  V(TwoPointerFillerMap)                \
  V(UndefinedValue)                     \
  V(TheHoleValue)                       \
  V(NullValue)                          \
  V(TrueValue)                          \
  V(FalseValue)                         \
  V(UninitializedValue)                 \
  V(CellMap)                            \
  V(GlobalPropertyCellMap)              \
  V(SharedFunctionInfoMap)              \
  V(MetaMap)                            \
  V(HeapNumberMap)                      \
  V(MutableHeapNumberMap)               \
  V(NativeContextMap)                   \
  V(FixedArrayMap)                      \
  V(CodeMap)                            \
  V(ScopeInfoMap)                       \
  V(ModuleInfoMap)                      \
  V(FixedCOWArrayMap)                   \
  V(FixedDoubleArrayMap)                \
  V(WeakCellMap)                        \
  V(TransitionArrayMap)                 \
  V(HashTableMap)                       \
  V(OrderedHashTableMap)                \
  V(EmptyFixedArray)                    \
  V(EmptyByteArray)                     \
  V(EmptyDescriptorArray)               \
  V(ArgumentsMarker)                    \
  V(SymbolMap)                          \
  V(SloppyArgumentsElementsMap)         \
  V(FunctionContextMap)                 \
  V(CatchContextMap)                    \
  V(WithContextMap)                     \
  V(BlockContextMap)                    \
  V(ModuleContextMap)                   \
  V(EvalContextMap)                     \
  V(ScriptContextMap)                   \
  V(UndefinedMap)                       \
  V(TheHoleMap)                         \
  V(NullMap)                            \
  V(BooleanMap)                         \
  V(UninitializedMap)                   \
  V(ArgumentsMarkerMap)                 \
  V(JSMessageObjectMap)                 \
  V(ForeignMap)                         \
  V(NoClosuresCellMap)                  \
  V(OneClosureCellMap)                  \
  V(ManyClosuresCellMap)                \
  V(NanValue)                           \
  V(InfinityValue)                      \
  V(MinusZeroValue)                     \
  V(MinusInfinityValue)                 \
  V(EmptyWeakCell)                      \
  V(empty_string)                       \
  PRIVATE_SYMBOL_LIST(V)

// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
class ConcurrentMarking;
class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
class Scavenger;
class ScavengeJob;
class Space;
class StoreBuffer;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

enum class GarbageCollectionReason {
  kUnknown = 0,
  kAllocationFailure = 1,
  kAllocationLimit = 2,
  kContextDisposal = 3,
  kCountersExtension = 4,
  kDebugger = 5,
  kDeserializer = 6,
  kExternalMemoryPressure = 7,
  kFinalizeMarkingViaStackGuard = 8,
  kFinalizeMarkingViaTask = 9,
  kFullHashtable = 10,
  kHeapProfiler = 11,
  kIdleTask = 12,
  kLastResort = 13,
  kLowMemoryNotification = 14,
  kMakeHeapIterable = 15,
  kMemoryPressure = 16,
  kMemoryReducer = 17,
  kRuntime = 18,
  kSamplingProfiler = 19,
  kSnapshotCreator = 20,
  kTesting = 21
  // If you add new items here, then update the incremental_marking_reason,
  // mark_compact_reason, and scavenge_reason counters in counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

enum class YoungGenerationHandling {
  kRegularScavenge = 0,
  kFastPromotionDuringScavenge = 1,
  // Histogram::InspectConstructionArguments in chromium requires us to have at
  // least three buckets.
  kUnusedBucket = 2,
  // If you add new items here, then update the young_generation_handling in
  // counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

// A queue of objects promoted during scavenge. Each object is accompanied by
// its size to avoid dereferencing a map pointer for scanning. The last page in
// to-space is used for the promotion queue. On conflict during scavenge, the
// promotion queue is allocated externally and all entries are copied to the
// external queue.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(nullptr),
        rear_(nullptr),
        limit_(nullptr),
        emergency_stack_(nullptr),
        heap_(heap) {}

  void Initialize();
  void Destroy();

  inline void SetNewLimit(Address limit);
  inline bool IsBelowPromotionQueue(Address to_space_top);

  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
  inline void remove(HeapObject** target, int32_t* size,
                     bool* was_marked_black);

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == nullptr || emergency_stack_->length() == 0);
  }

 private:
  struct Entry {
    Entry(HeapObject* obj, int32_t size, bool was_marked_black)
        : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}

    HeapObject* obj_;
    int32_t size_ : 31;
    bool was_marked_black_ : 1;
  };

  inline Page* GetHeadPage();

  void RelocateQueueHead();

  // The front of the queue is higher in the memory page chain than the rear.
  struct Entry* front_;
  struct Entry* rear_;
  struct Entry* limit_;

  List<Entry>* emergency_stack_;
  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
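
// Usage sketch (illustrative only, loosely modeled on the scavenge loop):
// promoted objects are enqueued together with their size so they can be
// re-scanned without reloading the map.
//
//   promotion_queue()->insert(target, size, was_marked_black);
//   ...
//   while (!promotion_queue()->is_empty()) {
//     HeapObject* obj;
//     int32_t size;
//     bool was_marked_black;
//     promotion_queue()->remove(&obj, &size, &was_marked_black);
//     IterateAndScavengePromotedObject(obj, size, was_marked_black);
//   }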

class AllocationResult {
 public:
  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  // Implicit constructor from Object*.
  AllocationResult(Object* object)  // NOLINT
      : object_(object) {
    // AllocationResults can't return Smis, which are used to represent
    // failure and the space to retry in.
    CHECK(!object->IsSmi());
  }

  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}

  inline bool IsRetry() { return object_->IsSmi(); }
  inline HeapObject* ToObjectChecked();
  inline AllocationSpace RetrySpace();

  template <typename T>
  bool To(T** obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

 private:
  explicit AllocationResult(AllocationSpace space)
      : object_(Smi::FromInt(static_cast<int>(space))) {}

  Object* object_;
};

STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
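
// Usage sketch (illustrative only): allocation routines return an
// AllocationResult that either wraps the allocated object or encodes the
// space to retry in. AllocateRawFixedArray is a hypothetical caller here.
//
//   AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
//   FixedArray* result = nullptr;
//   if (!allocation.To(&result)) return allocation;  // retry/GC path
//   // ... initialize *result ...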

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif

class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};

// HistogramInfo class for recording a single "bar" of a histogram.  This
// class is used for collecting statistics to print to the log file.
class HistogramInfo : public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};

class Heap {
 public:
  // Declare all the root indices.  This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
        INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
            PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
                PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
                    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
                        STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
                            kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
        kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };

  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT, MINOR_MARK_COMPACT };

  enum UpdateAllocationSiteMode { kGlobal, kCached };

  // Taking this lock prevents the GC from entering a phase that relocates
  // object references.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };
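
  // Usage sketch (illustrative only): callers that must not observe moving
  // objects hold the lock via RAII:
  //
  //   {
  //     Heap::RelocationLock relocation_lock(heap);
  //     // GC relocation phases are blocked while the lock is held.
  //   }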

  // Support for partial snapshots.  After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  typedef List<Chunk> Reservation;

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  static const int kPointerMultiplier = i::kPointerSize / 4;
#endif

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
  V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
  static const double kMaxHeapGrowingFactorMemoryConstrained;
  static const double kMaxHeapGrowingFactorIdle;
  static const double kConservativeHeapGrowingFactor;
  static const double kTargetMutatorUtilization;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;
  static const int kFinalizeIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInWords = 2;

  static const int kMinPromotedPercentForFastPromotionMode = 90;

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  static int GetMaximumFillToAlign(AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  static int GetFillToAlign(Address address, AllocationAlignment alignment);

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  static void FatalProcessOutOfMemory(const char* location,
                                      bool is_heap_oom = false);

  static bool RootIsImmortalImmovable(int root_index);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);

  static bool IsUnmodifiedHeapObject(Object** p);

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
    return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
  }

  static inline GarbageCollector YoungGenerationCollector() {
    return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
  }

  static inline const char* CollectorName(GarbageCollector collector) {
    switch (collector) {
      case SCAVENGER:
        return "Scavenger";
      case MARK_COMPACTOR:
        return "Mark-Compact";
      case MINOR_MARK_COMPACTOR:
        return "Minor Mark-Compact";
    }
    return "Unknown collector";
  }

  V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
                                                    double mutator_speed);

  // Copy block of memory from src to dst. Size of block should be aligned
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Determines a static visitor id based on the given {map} that can then be
  // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
  static int GetStaticVisitorIdForMap(Map* map);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  inline Address* NewSpaceAllocationTopAddress();
  inline Address* NewSpaceAllocationLimitAddress();
  inline Address* OldSpaceAllocationTopAddress();
  inline Address* OldSpaceAllocationLimitAddress();

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // FreeSpace objects have a null map after deserialization. Update the map.
  void RepairFreeListsAfterDeserialization();

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If slots could have been recorded in
  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
  // pass ClearRecordedSlots::kNo.
  V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(Address addr, int size,
                                                     ClearRecordedSlots mode);
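
  // Usage sketch (illustrative only): when an object is shrunk in place, the
  // freed tail becomes a filler so heap iteration stays valid, e.g.
  //   CreateFillerObjectAt(object->address() + new_size, old_size - new_size,
  //                        ClearRecordedSlots::kYes);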

  bool CanMoveObjectStart(HeapObject* object);

  static bool IsImmovable(HeapObject* object);

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(HeapObject* object, int by);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Converts the given boolean condition to JavaScript boolean value.
  inline Oddball* ToBoolean(bool condition);

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }
  void VisitEncounteredWeakCollections(ObjectVisitor* visitor) {
    visitor->VisitPointer(&encountered_weak_collections_);
  }

  void set_encountered_weak_cells(Object* weak_cell) {
    encountered_weak_cells_ = weak_cell;
  }
  Object* encountered_weak_cells() const { return encountered_weak_cells_; }

  void set_encountered_transition_arrays(Object* transition_array) {
    encountered_transition_arrays_ = transition_array;
  }
  Object* encountered_transition_arrays() const {
    return encountered_transition_arrays_;
  }

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  inline HeapState gc_state() { return gc_state_; }
  void SetGCState(HeapState state);

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return NULL;
  template <FindMementoMode mode>
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations, List<Address>* maps);

  //
  // Support for the API.
  //

  bool CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  void MemoryPressureNotification(MemoryPressureLevel level,
                                  bool is_isolate_locked);
  void CheckMemoryPressure();

  void SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
                              void* data);

  double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check new space expansion criteria and expand semispaces if it was hit.
  void CheckNewSpaceExpansionCriteria();

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearNormalizedMapCaches();

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  inline uint32_t HashSeed();

  inline int NextScriptId();

  inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  inline void SetConstructStubCreateDeoptPCOffset(int pc_offset);
  inline void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
  inline void SetGetterStubDeoptPCOffset(int pc_offset);
  inline void SetSetterStubDeoptPCOffset(int pc_offset);
  inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
  inline int GetNextTemplateSerialNumber();

  inline void SetSerializedTemplates(FixedArray* templates);
  inline void SetSerializedGlobalProxySizes(FixedArray* sizes);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }

  int64_t external_memory() { return external_memory_; }
  void update_external_memory(int64_t delta) { external_memory_ += delta; }

  void update_external_memory_concurrently_freed(intptr_t freed) {
    external_memory_concurrently_freed_.Increment(freed);
  }

  void account_external_memory_concurrently_freed() {
    external_memory_ -= external_memory_concurrently_freed_.Value();
    external_memory_concurrently_freed_.SetValue(0);
  }

  void DeoptMarkedAllocationSites();

  inline bool DeoptMaybeTenuredAllocationSites();

  void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
                                             Handle<WeakCell> code);

  void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);

  void CompactWeakFixedArrays();

  void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(size_t used, size_t committed);

  void ActivateMemoryReducerIfNeeded();

  bool ShouldOptimizeForMemoryUsage();

  bool IsLowMemoryDevice() {
    return max_old_generation_size_ <= kMaxOldSpaceSizeLowMemoryDevice;
  }

  bool IsMemoryConstrainedDevice() {
    return max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice;
  }

  bool HighMemoryPressure() {
    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
  }

  size_t HeapLimitForDebugging() {
    const size_t kDebugHeapSizeFactor = 4;
    size_t max_limit = std::numeric_limits<size_t>::max() / 4;
    return Min(max_limit,
               initial_max_old_generation_size_ * kDebugHeapSizeFactor);
  }

  void IncreaseHeapLimitForDebugging() {
    max_old_generation_size_ =
        Max(max_old_generation_size_, HeapLimitForDebugging());
  }

  void RestoreOriginalHeapLimit() {
    // Do not set the limit lower than the live size + some slack.
    size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
    max_old_generation_size_ =
        Min(max_old_generation_size_,
            Max(initial_max_old_generation_size_, min_limit));
  }

  bool IsHeapLimitIncreasedForDebugging() {
    return max_old_generation_size_ == HeapLimitForDebugging();
  }

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
                     size_t max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
  V8_INLINE void CreateObjectStats();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  inline Address NewSpaceTop();

  NewSpace* new_space() { return new_space_; }
  OldSpace* old_space() { return old_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }

  inline PagedSpace* paged_space(int idx);
  inline Space* space(int idx);

  // Returns name of the space.
  const char* GetSpaceName(int idx);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_; }

  MemoryAllocator* memory_allocator() { return memory_allocator_; }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
1041
    return mark_compact_collector_;
1042 1043
  }

1044 1045 1046 1047
  MinorMarkCompactCollector* minor_mark_compact_collector() {
    return minor_mark_compact_collector_;
  }

1048 1049 1050 1051
  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

1052 1053
  // Heap root getters.
#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  // Utility type maps.
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) inline String* name();
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) inline Symbol* name();
  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

1070
#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
1071
  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
1072
  WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
1073 1074 1075
#undef SYMBOL_ACCESSOR

  Object* root(RootListIndex index) { return roots_[index]; }
1076 1077 1078
  Handle<Object> root_handle(RootListIndex index) {
    return Handle<Object>(&roots_[index]);
  }
1079 1080 1081 1082 1083 1084 1085 1086
  template <typename T>
  bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
    Object** const handle_location = bit_cast<Object**>(handle.address());
    if (handle_location >= &roots_[kRootListLength]) return false;
    if (handle_location < &roots_[0]) return false;
    *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
    return true;
  }
1087 1088 1089 1090 1091

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  // Sets the stub_cache_ (only used when expanding the dictionary).
1092
  void SetRootCodeStubs(UnseededNumberDictionary* value);

  void SetRootMaterializedObjects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  void SetRootScriptList(Object* value) {
    roots_[kScriptListRootIndex] = value;
  }

  void SetRootStringTable(StringTable* value) {
    roots_[kStringTableRootIndex] = value;
  }

  void SetRootNoScriptSharedFunctionInfos(Object* value) {
    roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
  }

  void SetMessageListeners(TemplateList* value) {
    roots_[kMessageListenersRootIndex] = value;
  }

  // Set the stack limit in the roots_ array.  Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // The stack limit is thread-dependent. To be able to reproduce the same
  // snapshot blob, we need to reset it before serializing.
  void ClearStackLimits();

  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RegisterStrongRoots(Object** start, Object** end);
  void UnregisterStrongRoots(Object** start);

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);
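
  // Usage sketch (illustrative only): a caller that needs to iterate the heap
  // first forces it into an iterable state, e.g.
  //   CollectAllGarbage(kMakeHeapIterableMask,
  //                     GarbageCollectionReason::kMakeHeapIterable);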

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure();

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list.  Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterate pointers of promoted objects.
  void IterateAndScavengePromotedObject(HeapObject* target, int size,
                                        bool was_marked_black);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Write barrier support for object[offset] = o;
  inline void RecordWrite(Object* object, int offset, Object* o);
1198 1199
  inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
  void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
1200
  void RecordWritesIntoCode(Code* code);
1201 1202
  inline void RecordFixedArrayElements(FixedArray* array, int offset,
                                       int length);
1203

1204
  inline Address* store_buffer_top_address();
1205

1206
  void ClearRecordedSlot(HeapObject* object, Object** slot);
1207
  void ClearRecordedSlotRange(Address start, Address end);
1208

1209 1210
  bool HasRecordedSlot(HeapObject* object, Object** slot);
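
  // Illustrative sketch (kFieldOffset and the raw setter are made-up names):
  // a runtime write of a tagged field is paired with the write barrier, e.g.
  //
  //   obj->WriteField(kFieldOffset, value);         // hypothetical raw write
  //   heap->RecordWrite(obj, kFieldOffset, value);  // record old-to-new slot
  //
  // The slot only ends up in the store buffer when the write creates an
  // old-to-new pointer, which is what the scavenger later has to revisit.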

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  // Start incremental marking and ensure that idle time handler can perform
  // incremental steps.
  void StartIdleIncrementalMarking(GarbageCollectionReason gc_reason);

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  void StartIncrementalMarking(
      int gc_flags, GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void StartIncrementalMarkingIfAllocationLimitIsReached(
      int gc_flags,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);

  bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms,
                                         GarbageCollectionReason gc_reason);

  void RegisterReservationsForBlackAllocation(Reservation* reservations);

  IncrementalMarking* incremental_marking() { return incremental_marking_; }

  // ===========================================================================
  // Concurrent marking API. ===================================================
  // ===========================================================================

  ConcurrentMarking* concurrent_marking() { return concurrent_marking_; }

  // The runtime uses this function to inform the GC of potentially unsafe
  // object layout changes that require special synchronization with the
  // concurrent marker. A layout change is unsafe if
  // - it removes a tagged in-object field, or
  // - it replaces a tagged in-object field with an untagged in-object field.
  void NotifyObjectLayoutChange(HeapObject* object,
                                const DisallowHeapAllocation&);
#ifdef VERIFY_HEAP
  // This function checks that either
  // - the map transition is safe,
  // - or it was communicated to GC using NotifyObjectLayoutChange.
  void VerifyObjectLayoutChange(HeapObject* object, Map* new_map);
#endif
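
  // Illustrative sketch (assumed calling pattern): the runtime announces the
  // change while allocation is disallowed and only then installs the new map:
  //
  //   DisallowHeapAllocation no_allocation;
  //   heap->NotifyObjectLayoutChange(object, no_allocation);
  //   object->synchronized_set_map(new_map);  // setter name assumed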

  // ===========================================================================
  // Embedder heap tracer support. =============================================
  // ===========================================================================

  LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
    return local_embedder_heap_tracer_;
  }
  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
  void TracePossibleWrapper(JSObject* js_object);
  void RegisterExternallyReferencedObject(Object** object);

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String* string);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ===========
  // ===========================================================================

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old space.
  inline bool InOldSpace(Object* object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Slow methods that can be used for verification as they can also be used
  // with off-heap Addresses.
  bool ContainsSlow(Address addr);
  bool InSpaceSlow(Address addr, AllocationSpace space);
  inline bool InNewSpaceSlow(Address address);
  inline bool InOldSpaceSlow(Address address);

  // ===========================================================================
  // Object statistics tracking. ===============================================
  // ===========================================================================

  // Returns the number of buckets used by object statistics tracking during a
  // major GC. Note that the following methods fail gracefully when the bounds
  // are exceeded.
  size_t NumberOfTrackedHeapObjectTypes();

  // Returns object statistics about count and size at the last major GC.
  // Objects are being grouped into buckets that roughly resemble existing
  // instance types.
  size_t ObjectCountAtLastGC(size_t index);
  size_t ObjectSizeAtLastGC(size_t index);

  // Retrieves names of buckets used by object statistics tracking.
  bool GetObjectTypeName(size_t index, const char** object_type,
                         const char** object_sub_type);

  // ===========================================================================
  // Code statistics. ==========================================================
  // ===========================================================================

  // Collect code (Code and BytecodeArray objects) statistics.
  void CollectCodeStatistics();

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  // Returns the maximum amount of memory reserved for the heap.
  size_t MaxReserved() {
    return 2 * max_semi_space_size_ + max_old_generation_size_;
  }
  size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
  size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
  size_t MaxOldGenerationSize() { return max_old_generation_size_; }
  size_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  size_t Capacity();

  // Returns the capacity of the old generation.
  size_t OldGenerationCapacity();

  // Returns the amount of memory currently committed for the heap.
  size_t CommittedMemory();

  // Returns the amount of memory currently committed for the old space.
  size_t CommittedOldGenerationMemory();

  // Returns the amount of executable memory currently committed for the heap.
  size_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  size_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  size_t Available();

  // Returns the size of all objects residing in the heap.
  size_t SizeOfObjects();

  void UpdateSurvivalStatistics(int start_new_space_size);

  inline void IncrementPromotedObjectsSize(size_t object_size) {
    promoted_objects_size_ += object_size;
  }
  inline size_t promoted_objects_size() { return promoted_objects_size_; }

  inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
    semi_space_copied_object_size_ += object_size;
  }
  inline size_t semi_space_copied_object_size() {
    return semi_space_copied_object_size_;
  }

  inline size_t SurvivedNewSpaceObjectSize() {
    return promoted_objects_size_ + semi_space_copied_object_size_;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(size_t survived) {
    survived_last_scavenge_ = survived;
    survived_since_last_expansion_ += survived;
  }

  inline uint64_t PromotedTotalSize() {
    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
  }

  inline void UpdateNewSpaceAllocationCounter();

  inline size_t NewSpaceAllocationCounter();

  // This should be used only for testing.
  void set_new_space_allocation_counter(size_t new_value) {
    new_space_allocation_counter_ = new_value;
  }

  void UpdateOldGenerationAllocationCounter() {
    old_generation_allocation_counter_at_last_gc_ =
        OldGenerationAllocationCounter();
  }

  size_t OldGenerationAllocationCounter() {
    return old_generation_allocation_counter_at_last_gc_ +
           PromotedSinceLastGC();
  }

  // This should be used only for testing.
  void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
    old_generation_allocation_counter_at_last_gc_ = new_value;
  }

  size_t PromotedSinceLastGC() {
    return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
  }

  int gc_count() const { return gc_count_; }

  // Returns the size of objects residing in non-new spaces.
  size_t PromotedSpaceSizeOfObjects();

  // ===========================================================================
  // Prologue/epilogue callback methods.========================================
  // ===========================================================================

  void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);

  void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
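
  // Illustrative registration sketch (the callback name is made up; the
  // signature follows v8::Isolate::GCCallback from include/v8.h):
  //
  //   static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
  //                            v8::GCCallbackFlags flags) { /* ... */ }
  //   ...
  //   heap->AddGCPrologueCallback(OnGCPrologue, kGCTypeMarkSweepCompact);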

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Creates a filler object and returns a heap object immediately after it.
  MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
                                                int filler_size);

  // Creates a filler object if needed for alignment and returns a heap object
  // immediately after it. If any space is left after the returned object,
  // another filler object is created so the over allocated memory is iterable.
  MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
                                              int object_size,
                                              int allocation_size,
                                              AllocationAlignment alignment);
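
  // Worked example (illustrative numbers): for a kDoubleAligned request of a
  // 24-byte object the caller over-allocates by one word, e.g.
  //
  //   HeapObject* obj =
  //       AlignWithFiller(raw, 24, 24 + kPointerSize, kDoubleAligned);
  //
  // If |raw| is not double-aligned, a one-word filler precedes the object;
  // otherwise the spare word becomes a filler after it, keeping the
  // over-allocated memory iterable.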

  // ===========================================================================
  // ArrayBuffer tracking. =====================================================
  // ===========================================================================

  // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
  // in the registration/unregistration APIs. Consider dropping the "New" from
  // "RegisterNewArrayBuffer" because one can re-register a previously
  // unregistered buffer, too, and the name is confusing.
  void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
  void UnregisterArrayBuffer(JSArrayBuffer* buffer);

  // ===========================================================================
  // Allocation site tracking. =================================================
  // ===========================================================================

  // Updates the AllocationSite of a given {object}. If the global pretenuring
  // storage is passed as {pretenuring_feedback}, the memento found count on
  // the corresponding allocation site is immediately updated and an entry
  // in the hash map is created. Otherwise the entry (including the count
  // value) is cached on the local pretenuring feedback.
  template <UpdateAllocationSiteMode mode>
  inline void UpdateAllocationSite(HeapObject* object,
                                   base::HashMap* pretenuring_feedback);

  // Removes an entry from the global pretenuring storage.
  inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);

  // Merges local pretenuring feedback into the global one. Note that this
  // method needs to be called after evacuation, as allocation sites may be
  // evacuated and this method resolves forward pointers accordingly.
  void MergeAllocationSitePretenuringFeedback(
      const base::HashMap& local_pretenuring_feedback);
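
  // Illustrative sketch of the internal pattern (the local map name is made
  // up): an evacuation task gathers feedback in its own hash map and
  // publishes it once evacuation has finished, e.g.
  //
  //   base::HashMap local_feedback;
  //   heap->UpdateAllocationSite<UpdateAllocationSiteMode::kCached>(
  //       object, &local_feedback);
  //   ...
  //   heap->MergeAllocationSitePretenuringFeedback(local_feedback);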

// =============================================================================

#ifdef VERIFY_HEAP
  // Verify the heap is in its normal state before or after a GC.
  void Verify();
#endif

#ifdef DEBUG
  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }

  void Print();
  void PrintHandles();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);
#endif

  static const char* GarbageCollectionReasonToString(
      GarbageCollectionReason gc_reason);

 private:
  class SkipStoreBufferScope;
  class PretenuringScope;

  // External strings table is a place where all external strings are
  // registered.  We need to keep track of such strings to properly
  // finalize them.
  class ExternalStringTable {
   public:
    // Registers an external string.
    inline void AddString(String* string);

    inline void IterateAll(ObjectVisitor* v);
    inline void IterateNewSpaceStrings(ObjectVisitor* v);
    inline void PromoteAllNewSpaceStrings();

    // Restores the internal invariant and gets rid of collected strings. Must
    // be called after each Iterate*() call that modifies the strings.
    void CleanUpAll();
    void CleanUpNewSpaceStrings();

    // Destroys all allocated memory.
    void TearDown();

   private:
    explicit ExternalStringTable(Heap* heap) : heap_(heap) {}

    inline void Verify();

    inline void AddOldString(String* string);

    // Notifies the table that only a prefix of the new list is valid.
    inline void ShrinkNewStrings(int position);

    // To speed up scavenge collections, new space strings are kept
    // separate from old space strings.
    List<Object*> new_space_strings_;
    List<Object*> old_space_strings_;

    Heap* heap_;

    friend class Heap;

    DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
  };

  struct StrongRootsList;

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantStringTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct GCCallbackPair {
    GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
                   bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {}

    bool operator==(const GCCallbackPair& other) const {
      return other.callback == callback;
    }

    v8::Isolate::GCCallback callback;
    GCType gc_type;
    bool pass_isolate;
  };

  typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                        Object** pointer);

  static const int kInitialStringTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

  static const int kRememberedUnmappedPages = 128;

  static const StringTypeTable string_type_table[];
  static const ConstantStringTable constant_string_table[];
  static const StructTable struct_table[];

  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;
  static const int kOldSurvivalRateLowThreshold = 10;

  static const int kMaxMarkCompactsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  static const int kInitialFeedbackCapacity = 256;

  Heap();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Heap* heap, Object** pointer);

  // Selects the proper allocation space based on the pretenuring decision.
  static AllocationSpace SelectSpace(PretenureFlag pretenure) {
    return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
  }

#define ROOT_ACCESSOR(type, name, camel_name) \
  inline void set_##name(type* value);
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  StoreBuffer* store_buffer() { return store_buffer_; }

  void set_current_gc_flags(int flags) {
    current_gc_flags_ = flags;
    DCHECK(!ShouldFinalizeIncrementalMarking() ||
           !ShouldAbortIncrementalMarking());
  }

  inline bool ShouldReduceMemory() const {
    return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
  }

  inline bool ShouldAbortIncrementalMarking() const {
    return (current_gc_flags_ & kAbortIncrementalMarkingMask) != 0;
  }

  inline bool ShouldFinalizeIncrementalMarking() const {
    return (current_gc_flags_ & kFinalizeIncrementalMarkingMask) != 0;
  }

  void PreprocessStackTraces();

  // Checks whether a global GC is necessary
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Make sure there is a filler value behind the top of the new space
  // so that the GC does not confuse some uninitialized/stale memory
  // with the allocation memento of the object at the top.
  void EnsureFillerObjectAtTop();

  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects.  May cause a GC.
  void MakeHeapIterable();

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  bool CollectGarbage(
      GarbageCollector collector, GarbageCollectionReason gc_reason,
      const char* collector_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs garbage collection.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  bool PerformGarbageCollection(
      GarbageCollector collector,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  inline void UpdateOldSpaceLimits();

  // Initializes a JSObject based on its map.
  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                 Map* map);

  // Initializes JSObject body starting at given offset.
  void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);

  void InitializeAllocationMemento(AllocationMemento* memento,
                                   AllocationSite* allocation_site);

  bool CreateInitialMaps();
  void CreateInitialObjects();

  // These Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
  NO_INLINE(void CreateJSEntryStub());
  NO_INLINE(void CreateJSConstructEntryStub());

  void CreateFixedStubs();

  HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit unused semi space.
  bool UncommitFromSpace();

  // Fill in bogus values in from space
  void ZapFromSpace();

  // Deopts all code that contains allocation instructions which are tenured or
  // not tenured. Moreover, it clears the pretenuring allocation site statistics.
  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);

  // Evaluates local pretenuring for the old space and calls
  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
  // the old space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

  // Record statistics before and after garbage collection.
  void ReportStatisticsBeforeGC();
  void ReportStatisticsAfterGC();

  // Creates and installs the full-sized number string cache.
  int FullSizeNumberStringCacheLength();
  // Flush the number to string cache.
  void FlushNumberStringCache();

  void ConfigureInitialOldGenerationSize();

  bool HasLowYoungGenerationAllocationRate();
  bool HasLowOldGenerationAllocationRate();
  double YoungGenerationMutatorUtilization();
  double OldGenerationMutatorUtilization();

  void ReduceNewSpaceSize();

  GCIdleTimeHeapState ComputeHeapState();

  bool PerformIdleTimeAction(GCIdleTimeAction action,
                             GCIdleTimeHeapState heap_state,
                             double deadline_in_ms);

  void IdleNotificationEpilogue(GCIdleTimeAction action,
                                GCIdleTimeHeapState heap_state, double start_ms,
                                double deadline_in_ms);

  inline void UpdateAllocationsHash(HeapObject* object);
  inline void UpdateAllocationsHash(uint32_t value);
  void PrintAlloctionsHash();

  void AddToRingBuffer(const char* string);
  void GetFromRingBuffer(char* buffer);

  void CompactRetainedMaps(ArrayList* retained_maps);

  void CollectGarbageOnMemoryPressure();

  void InvokeOutOfMemoryCallback();

  void ComputeFastPromotionMode(double survival_rate);

  // Attempt to over-approximate the weak closure by marking object groups and
  // implicit references from global handles, but don't atomically complete
  // marking. If we continue to mark incrementally, we might have marked
  // objects that die later.
  void FinalizeIncrementalMarking(GarbageCollectionReason gc_reason);

  // Returns the timer used for a given GC type.
  // - GCScavenger: young generation GC
  // - GCCompactor: full GC
  // - GCFinalizeMC: finalization of incremental full GC
  // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
  // memory reduction
  HistogramTimer* GCTypeTimer(GarbageCollector collector);

  // ===========================================================================
  // Pretenuring. ==============================================================
  // ===========================================================================

  // Pretenuring decisions are made based on feedback collected during new space
  // evacuation. Note that between feedback collection and calling this method
  // objects in old space must not move.
  void ProcessPretenuringFeedback();

  // ===========================================================================
  // Actual GC. ================================================================
  // ===========================================================================

  // Code that should be run before and after each GC.  Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();

  // Performs a major collection in the whole heap.
  void MarkCompact();
  // Performs a minor collection of just the young generation.
  void MinorMarkCompact();

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();
  void MarkCompactEpilogue();

  // Performs a minor collection in new generation.
  void Scavenge();
  void EvacuateYoungGeneration();

  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
  void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);
  void ProcessWeakListRoots(WeakObjectRetainer* retainer);

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  inline size_t OldGenerationSpaceAvailable() {
    if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
    return old_generation_allocation_limit_ -
           static_cast<size_t>(PromotedTotalSize());
  }

  // We allow incremental marking to overshoot the allocation limit for
  // performance reasons. If the overshoot is too large then we are more
  // eager to finalize incremental marking.
  inline bool AllocationLimitOvershotByLargeMargin() {
    // This guards against too eager finalization in small heaps.
    // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
    size_t kMarginForSmallHeaps = 32u * MB;
    if (old_generation_allocation_limit_ >= PromotedTotalSize()) return false;
    uint64_t overshoot = PromotedTotalSize() - old_generation_allocation_limit_;
    // Overshoot margin is 50% of allocation limit or half-way to the max heap
    // with special handling of small heaps.
    uint64_t margin =
        Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
            (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
    return overshoot >= margin;
  }
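
  // Worked example (illustrative numbers only): with a 128 MB allocation
  // limit and a 512 MB max old generation,
  //   margin = Min(Max(128 / 2, 32), (512 - 128) / 2) MB = Min(64, 192) MB,
  // so incremental marking may overshoot the limit by up to 64 MB before it
  // is finalized eagerly.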

  void UpdateTotalGCTime(double duration);

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

  // ===========================================================================
  // Growing strategy. =========================================================
  // ===========================================================================

  // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
  // This constant limits the effect of load RAIL mode on GC.
  // The value is arbitrary and chosen as the largest load time observed in
  // v8 browsing benchmarks.
  static const int kMaxLoadTimeMs = 7000;

  bool ShouldOptimizeForLoadTime();

  // Decrease the allocation limit if the new limit based on the given
  // parameters is lower than the current limit.
  void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                          double mutator_speed);

  // Calculates the allocation limit based on a given growing factor and a
  // given old generation size.
  size_t CalculateOldGenerationAllocationLimit(double factor,
                                               size_t old_gen_size);

  // Sets the allocation limit to trigger the next full garbage collection.
  void SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                       double mutator_speed);

  size_t MinimumAllocationLimitGrowingStep();

  size_t old_generation_allocation_limit() const {
    return old_generation_allocation_limit_;
  }

  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }

  bool CanExpandOldGeneration(size_t size) {
    if (force_oom_) return false;
    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
  }

  bool IsCloseToOutOfMemory(size_t slack) {
    return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
  }

  bool ShouldExpandOldGenerationOnSlowAllocation();

  enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
  IncrementalMarkingLimit IncrementalMarkingLimitReached();

  // ===========================================================================
  // Idle notification. ========================================================
  // ===========================================================================

  bool RecentIdleNotificationHappened();
  void ScheduleIdleScavengeIfNeeded(int bytes_allocated);

  // ===========================================================================
  // HeapIterator helpers. =====================================================
  // ===========================================================================

  void heap_iterator_start() { heap_iterator_depth_++; }

  void heap_iterator_end() { heap_iterator_depth_--; }

  bool in_heap_iterator() { return heap_iterator_depth_ > 0; }

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
  MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
                                                AllocationSite* site = NULL);

  // Allocates a JS Map in the heap.
  MUST_USE_RESULT AllocationResult
  AllocateMap(InstanceType instance_type, int instance_size,
              ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // If allocation_site is non-null, then a memento is emitted after the object
  // that points to the site.
  MUST_USE_RESULT AllocationResult AllocateJSObject(
      JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
      AllocationSite* allocation_site = NULL);

  // Allocates and initializes a new JavaScript object based on a map.
  // Passing an allocation site means that a memento will be created that
  // points to the site.
  MUST_USE_RESULT AllocationResult
  AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
                          AllocationSite* allocation_site = NULL);

  // Allocates a HeapNumber from value.
  MUST_USE_RESULT AllocationResult AllocateHeapNumber(
      MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a byte array of the specified length
  MUST_USE_RESULT AllocationResult
  AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a bytecode array with given contents.
  MUST_USE_RESULT AllocationResult
  AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
                        int parameter_count, FixedArray* constant_pool);

  MUST_USE_RESULT AllocationResult CopyCode(Code* code);

  MUST_USE_RESULT AllocationResult
  CopyBytecodeArray(BytecodeArray* bytecode_array);

  // Allocates a fixed array initialized with undefined values
  MUST_USE_RESULT AllocationResult
  AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocate an uninitialized object.  The memory is non-executable if the
  // hardware and OS allow.  This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationSpace space,
      AllocationAlignment alignment = kWordAligned);
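
  // Illustrative retry-pattern sketch built on top of this choke-point (the
  // surrounding allocator function is hypothetical):
  //
  //   HeapObject* result = nullptr;
  //   AllocationResult allocation = AllocateRaw(size_in_bytes, OLD_SPACE);
  //   if (!allocation.To(&result)) return allocation;  // caller retries/GCs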

  // Allocates a heap object based on the map.
  MUST_USE_RESULT AllocationResult
      Allocate(Map* map, AllocationSpace space,
               AllocationSite* allocation_site = NULL);

  // Allocates a partial map for bootstrapping.
  MUST_USE_RESULT AllocationResult
      AllocatePartialMap(InstanceType instance_type, int instance_size);

  // Allocate a block of memory in the given space (filled with a filler).
  // Used as a fall-back for generated code when the space is full.
  MUST_USE_RESULT AllocationResult
      AllocateFillerObject(int size, bool double_align, AllocationSpace space);

  // Allocate an uninitialized fixed array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedArray(int length, PretenureFlag pretenure);

  // Allocate an uninitialized fixed double array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);

  // Allocate an initialized fixed array with the given filler value.
  MUST_USE_RESULT AllocationResult
      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
                                   Object* filler);

  // Allocates and partially initializes a String.  There are two String
  // encodings: one-byte and two-byte.  These functions allocate a string of
  // the given length and set its map and length fields.  The characters of
  // the string are uninitialized.
  MUST_USE_RESULT AllocationResult
      AllocateRawOneByteString(int length, PretenureFlag pretenure);
  MUST_USE_RESULT AllocationResult
      AllocateRawTwoByteString(int length, PretenureFlag pretenure);

  // Allocates an internalized string in old space based on the character
  // stream.
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
      Vector<const char> str, int chars, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
      Vector<const uint8_t> str, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
      Vector<const uc16> str, uint32_t hash_field);

  template <bool is_one_byte, typename T>
  MUST_USE_RESULT AllocationResult
      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);

  template <typename T>
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
      T t, int chars, uint32_t hash_field);

  // Allocates an uninitialized fixed array. It must be filled by the caller.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it.
  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);

  // Make a copy of src, also grow the copy, and return the copy.
  MUST_USE_RESULT AllocationResult
  CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);

  // Make a copy of src, also grow the copy, and return the copy.
  MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
                                                      int new_len,
                                                      PretenureFlag pretenure);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyFixedArrayWithMap(FixedArray* src, Map* map);

  // Make a copy of src and return it.
  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
      FixedDoubleArray* src);

  // Computes a single character string where the character has the given code.
  // A cache is used for one-byte (Latin1) codes.
  MUST_USE_RESULT AllocationResult
      LookupSingleCharacterStringFromCode(uint16_t code);

  // Allocate a symbol in old space.
  MUST_USE_RESULT AllocationResult AllocateSymbol();

  // Allocates an external array of the specified length and type.
  MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
      int length, ExternalArrayType array_type, void* external_pointer,
      PretenureFlag pretenure);

  // Allocates a fixed typed array of the specified length and type.
  MUST_USE_RESULT AllocationResult
  AllocateFixedTypedArray(int length, ExternalArrayType array_type,
                          bool initialize, PretenureFlag pretenure);

  // Make a copy of src and return it.
  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);

  // Allocates a fixed double array with uninitialized values.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
      int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocate empty fixed array.
  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();

  // Allocate empty scope info.
  MUST_USE_RESULT AllocationResult AllocateEmptyScopeInfo();

  // Allocate empty fixed typed array of given type.
  MUST_USE_RESULT AllocationResult
      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);

  // Allocate a tenured simple cell.
  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);

  // Allocate a tenured JS global property cell initialized with the hole.
  MUST_USE_RESULT AllocationResult AllocatePropertyCell();

  MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);

  MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);

  // Allocates a new utility object in the old generation.
  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);

  // Allocates a new foreign object.
  MUST_USE_RESULT AllocationResult
      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);

  MUST_USE_RESULT AllocationResult
      AllocateCode(int object_size, bool immovable);

  // ===========================================================================

  void set_force_oom(bool value) { force_oom_ = value; }

  // The amount of external memory registered through the API.
  int64_t external_memory_;

  // The limit when to trigger memory pressure from the API.
  int64_t external_memory_limit_;

  // Caches the amount of external memory registered at the last MC.
  int64_t external_memory_at_last_mark_compact_;

  // The amount of memory that has been freed concurrently.
  base::AtomicNumber<intptr_t> external_memory_concurrently_freed_;

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_;

  Object* roots_[kRootListLength];

  size_t code_range_size_;
  size_t max_semi_space_size_;
  size_t initial_semispace_size_;
  size_t max_old_generation_size_;
  size_t initial_max_old_generation_size_;
  size_t initial_old_generation_size_;
  bool old_generation_size_configured_;
  size_t max_executable_size_;
  size_t maximum_committed_;

  // For keeping track of how much data has survived
  // scavenge since last new space expansion.
  size_t survived_since_last_expansion_;

  // ... and since the last scavenge.
  size_t survived_last_scavenge_;

  // This is not the depth of nested AlwaysAllocateScope's but rather a single
  // count, as scopes can be acquired from multiple tasks (read: threads).
  base::AtomicNumber<size_t> always_allocate_scope_count_;

  // Stores the memory pressure level that is set by MemoryPressureNotification
  // and reset by a mark-compact garbage collection.
  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;

  v8::debug::OutOfMemoryCallback out_of_memory_callback_;
  void* out_of_memory_callback_data_;

  // For keeping track of context disposals.
  int contexts_disposed_;

  // The length of the retained_maps array at the time of context disposal.
  // This separates maps in the retained_maps array that were created before
  // and after context disposal.
  int number_of_disposed_maps_;

  int global_ic_age_;

  NewSpace* new_space_;
  OldSpace* old_space_;
  OldSpace* code_space_;
  MapSpace* map_space_;
  LargeObjectSpace* lo_space_;
  // Map from the space id to the space.
  Space* space_[LAST_SPACE + 1];
  HeapState gc_state_;
  int gc_post_processing_depth_;
  Address new_space_top_after_last_gc_;

  // Returns the amount of external memory registered since last global gc.
  uint64_t PromotedExternalMemorySize();

  // How many "runtime allocations" happened.
  uint32_t allocations_count_;

  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_;

  // How many mark-sweep collections happened.
  unsigned int ms_count_;

  // How many GCs happened.
  unsigned int gc_count_;

  // For post mortem debugging.
  int remembered_unmapped_pages_index_;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

#ifdef DEBUG
  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // that remain until the next failure and garbage collection.
  int allocation_timeout_;
#endif  // DEBUG

  // Limit that triggers a global GC on the next (normally caused) GC.  This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
  size_t old_generation_allocation_limit_;

  // Indicates that inline bump-pointer allocation has been globally disabled
  // for all spaces. This is used to disable allocations in generated code.
  bool inline_allocation_disabled_;

  // Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at start.
  Object* native_contexts_list_;
  Object* allocation_sites_list_;

  // List of encountered weak collections (JSWeakMap and JSWeakSet) during
  // marking. It is initialized during marking, destroyed after marking and
  // contains Smi(0) while marking is not active.
  Object* encountered_weak_collections_;

  Object* encountered_weak_cells_;

  Object* encountered_transition_arrays_;

  List<GCCallbackPair> gc_epilogue_callbacks_;
  List<GCCallbackPair> gc_prologue_callbacks_;

  int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

  GCTracer* tracer_;

  size_t promoted_objects_size_;
  double promotion_ratio_;
  double promotion_rate_;
  size_t semi_space_copied_object_size_;
  size_t previous_semi_space_copied_object_size_;
  double semi_space_copied_rate_;
  int nodes_died_in_new_space_;
  int nodes_copied_in_new_space_;
  int nodes_promoted_;

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Last time an idle notification happened.
  double last_idle_notification_time_;

  // Last time a garbage collection happened.
  double last_gc_time_;

  Scavenger* scavenge_collector_;

  MarkCompactCollector* mark_compact_collector_;
  MinorMarkCompactCollector* minor_mark_compact_collector_;

  MemoryAllocator* memory_allocator_;

  StoreBuffer* store_buffer_;

  IncrementalMarking* incremental_marking_;
  ConcurrentMarking* concurrent_marking_;

  GCIdleTimeHandler* gc_idle_time_handler_;

  MemoryReducer* memory_reducer_;

  ObjectStats* live_object_stats_;
  ObjectStats* dead_object_stats_;

  ScavengeJob* scavenge_job_;

  AllocationObserver* idle_scavenge_observer_;

  // This counter is increased before each GC and never reset.
  // To account for the bytes allocated since the last GC, use the
  // NewSpaceAllocationCounter() function.
  size_t new_space_allocation_counter_;

  // This counter is increased before each GC and never reset. To
  // account for the bytes allocated since the last GC, use the
  // OldGenerationAllocationCounter() function.
  size_t old_generation_allocation_counter_at_last_gc_;

  // The size of objects in old generation after the last MarkCompact GC.
  size_t old_generation_size_at_last_gc_;

  // If the --deopt_every_n_garbage_collections flag is set to a positive value,
  // this variable holds the number of garbage collections since the last
  // deoptimization triggered by garbage collection.
  int gcs_since_last_deopt_;

  // The feedback storage is used to store allocation sites (keys) and how often
  // they have been visited (values) by finding a memento behind an object. The
  // storage is only alive temporarily during a GC. The invariant is that all
  // pointers in this map are already fixed, i.e., they do not point to
  // forwarding pointers.
  base::HashMap* global_pretenuring_feedback_;

  char trace_ring_buffer_[kTraceRingBufferSize];
  // If it's not full then the data is from 0 to ring_buffer_end_.  If it's
  // full then the data is from ring_buffer_end_ to the end of the buffer and
  // from 0 to ring_buffer_end_.
  bool ring_buffer_full_;
  size_t ring_buffer_end_;

  // Shared state read by the scavenge collector and set by ScavengeObject.
  PromotionQueue promotion_queue_;

  // Flag is set when the heap has been configured.  The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_;

  // Currently set GC flags that are respected by all GC components.
  int current_gc_flags_;

  // Currently set GC callback flags that are used to pass information between
  // the embedder and V8's GC.
  GCCallbackFlags current_gc_callback_flags_;

  ExternalStringTable external_string_table_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_;

  bool deserialization_complete_;

  StrongRootsList* strong_roots_list_;

  // The depth of HeapIterator nestings.
  int heap_iterator_depth_;

  LocalEmbedderHeapTracer* local_embedder_heap_tracer_;

  bool fast_promotion_mode_;

  // Used for testing purposes.
  bool force_oom_;
  bool delay_sweeper_tasks_for_testing_;

  HeapObject* pending_layout_change_object_;

  // Classes in "heap" can be friends.
  friend class AlwaysAllocateScope;
  friend class ConcurrentMarking;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class HeapIterator;
  friend class IdleScavengeObserver;
  friend class IncrementalMarking;
  friend class IncrementalMarkingJob;
  friend class LargeObjectSpace;
  friend class MarkCompactCollector;
  friend class MinorMarkCompactCollector;
  friend class MarkCompactMarkingVisitor;
  friend class NewSpace;
  friend class ObjectStatsCollector;
  friend class Page;
  friend class PagedSpace;
  friend class Scavenger;
  friend class StoreBuffer;
  friend class TestMemoryAllocatorScope;

  // The allocator interface.
  friend class Factory;

  // The Isolate constructs us.
  friend class Isolate;

  // Used in cctest.
  friend class HeapTester;

  DISALLOW_COPY_AND_ASSIGN(Heap);
};


class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  intptr_t* start_marker;                  //  0
  size_t* new_space_size;                  //  1
  size_t* new_space_capacity;              //  2
  size_t* old_space_size;                  //  3
  size_t* old_space_capacity;              //  4
  size_t* code_space_size;                 //  5
  size_t* code_space_capacity;             //  6
  size_t* map_space_size;                  //  7
  size_t* map_space_capacity;              //  8
  size_t* lo_space_size;                   //  9
  size_t* global_handle_count;             // 10
  size_t* weak_global_handle_count;        // 11
  size_t* pending_global_handle_count;     // 12
  size_t* near_death_global_handle_count;  // 13
  size_t* free_global_handle_count;        // 14
  size_t* memory_allocator_size;           // 15
  size_t* memory_allocator_capacity;       // 16
  size_t* malloced_memory;                 // 17
  size_t* malloced_peak_memory;            // 18
  size_t* objects_per_type;                // 19
  size_t* size_per_type;                   // 20
  int* os_error;                           // 21
  char* last_few_messages;                 // 22
  char* js_stacktrace;                     // 23
  intptr_t* end_marker;                    // 24
};


class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  Heap* heap_;
};


// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end) override;
};


// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end) override;
};


// Space iterator for iterating over all spaces of the heap.  Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
  Space* next();

 private:
  Heap* heap_;
  int counter_;
};
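
// Illustrative usage sketch: the iterator is driven until it returns null,
// e.g.
//
//   AllSpaces spaces(heap);
//   for (Space* space = spaces.next(); space != NULL;
//        space = spaces.next()) {
//     // ... inspect |space| ...
//   }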


// Space iterator for iterating over all old spaces of the heap: Old space
// and code space.  Returns each space in turn, and null when it is done.
class V8_EXPORT_PRIVATE OldSpaces BASE_EMBEDDED {
 public:
  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
  OldSpace* next();

 private:
  Heap* heap_;
  int counter_;
};


// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old space and code space.  Returns each space in turn, and null when
// it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
  PagedSpace* next();

 private:
  Heap* heap_;
  int counter_;
};


class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  virtual ~SpaceIterator();

  bool has_next();
  Space* next();

 private:
  Heap* heap_;
  int current_space_;         // from enum AllocationSpace.
};


// A HeapIterator provides iteration over the whole heap. It
// aggregates the specific iterators for the different spaces as
// these can each iterate over only one space.
//
// HeapIterator ensures there is no allocation during its lifetime
// (using an embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As the implementation of free
// node filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap,
                        HeapObjectsFiltering filtering = kNoFiltering);
  ~HeapIterator();

  HeapObject* next();

 private:
  HeapObject* NextObject();

  DisallowHeapAllocation no_heap_allocation_;

  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  std::unique_ptr<ObjectIterator> object_iterator_;
};
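
// Illustrative usage sketch (assumed, not mandated by this header; |heap|
// and |obj| are hypothetical): next() returns nullptr once the iteration is
// exhausted, and no allocation may happen while the iterator is alive.
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != nullptr;
//        obj = iterator.next()) {
//     // Process |obj|; heap allocation is disallowed inside this loop.
//   }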

// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Return whether this object should be retained. If NULL is returned the
  // object has no references. Otherwise the address of the retained object
  // should be returned, as in some GC situations the object has been moved.
  virtual Object* RetainAs(Object* object) = 0;
};
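
// Minimal sketch of the RetainAs() contract (the subclass below is
// hypothetical and for illustration only): return the object, at its
// possibly new address, to retain it, or NULL to let it be cleared.
//
//   class KeepEverythingRetainer final : public WeakObjectRetainer {
//    public:
//     Object* RetainAs(Object* object) override {
//       return object;  // Retain every object unchanged.
//     }
//   };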

// -----------------------------------------------------------------------------
// Allows observation of allocations.
class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {
    DCHECK(step_size >= kPointerSize);
  }
  virtual ~AllocationObserver() {}

  // Called each time the observed space does an allocation step. This may be
  // more frequent than the step_size we are monitoring (e.g. when there are
  // multiple observers, or when a page or space boundary is encountered).
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      step_size_ = GetNextStepSize();
      bytes_to_next_step_ = step_size_;
    }
  }

 protected:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }

  // Pure virtual method provided by the subclasses that gets called when at
  // least step_size bytes have been allocated. soon_object is the address just
  // allocated (but not yet initialized.) size is the size of the object as
  // requested (i.e. w/o the alignment fillers). Some complexities to be aware
  // of:
  // 1) soon_object will be nullptr in cases where we end up observing an
  //    allocation that happens to be a filler space (e.g. page boundaries).
  // 2) size is the requested size at the time of allocation. Right-trimming
  //    may change the object size dynamically.
  // 3) soon_object may actually be the first object in an allocation-folding
  //    group. In such a case size is the size of the group rather than the
  //    first object.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

  // Subclasses can override this method to make step size dynamic.
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;

 private:
  friend class LargeObjectSpace;
  friend class NewSpace;
  friend class PagedSpace;
  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
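
// Minimal sketch of an AllocationObserver subclass (hypothetical, for
// illustration only): Step() is invoked once at least step_size bytes have
// been allocated in the observed space since the previous step.
//
//   class SampleEveryMegabyte final : public AllocationObserver {
//    public:
//     SampleEveryMegabyte() : AllocationObserver(MB) {}
//
//    protected:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       // soon_object may be nullptr (e.g. a filler at a page boundary).
//     }
//   };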

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_H_