// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/list.h"
#include "src/objects.h"

namespace v8 {
namespace internal {

using v8::MemoryPressureLevel;

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V)                                                    \
  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
  /* The first 32 entries are most often used in the startup snapshot and   */ \
  /* can use a shorter representation in the serialization format.          */ \
  V(Map, free_space_map, FreeSpaceMap)                                         \
  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
  V(Oddball, uninitialized_value, UninitializedValue)                          \
  V(Oddball, undefined_value, UndefinedValue)                                  \
  V(Oddball, the_hole_value, TheHoleValue)                                     \
  V(Oddball, null_value, NullValue)                                            \
  V(Oddball, true_value, TrueValue)                                            \
  V(Oddball, false_value, FalseValue)                                          \
  V(String, empty_string, empty_string)                                        \
  V(Map, meta_map, MetaMap)                                                    \
  V(Map, byte_array_map, ByteArrayMap)                                         \
  V(Map, fixed_array_map, FixedArrayMap)                                       \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
  V(Map, hash_table_map, HashTableMap)                                         \
  V(Map, symbol_map, SymbolMap)                                                \
  V(Map, one_byte_string_map, OneByteStringMap)                                \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
  V(Map, scope_info_map, ScopeInfoMap)                                         \
  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
  V(Map, code_map, CodeMap)                                                    \
  V(Map, function_context_map, FunctionContextMap)                             \
  V(Map, cell_map, CellMap)                                                    \
  V(Map, weak_cell_map, WeakCellMap)                                           \
  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
  V(Map, foreign_map, ForeignMap)                                              \
  V(Map, heap_number_map, HeapNumberMap)                                       \
  V(Map, transition_array_map, TransitionArrayMap)                             \
  V(FixedArray, empty_literals_array, EmptyLiteralsArray)                      \
  V(FixedArray, empty_type_feedback_vector, EmptyTypeFeedbackVector)           \
  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
  /* Entries beyond the first 32                                            */ \
  /* The roots above this line should be boring from a GC point of view.    */ \
  /* This means they are never in new space and never on a page that is     */ \
  /* being compacted.                                                       */ \
  /* Empty scope info */                                                       \
  V(ScopeInfo, empty_scope_info, EmptyScopeInfo)                               \
  /* Oddballs */                                                               \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
  V(Oddball, arguments_marker, ArgumentsMarker)                                \
  V(Oddball, exception, Exception)                                             \
  V(Oddball, termination_exception, TerminationException)                      \
  V(Oddball, optimized_out, OptimizedOut)                                      \
  V(Oddball, stale_register, StaleRegister)                                    \
  /* Context maps */                                                           \
  V(Map, native_context_map, NativeContextMap)                                 \
  V(Map, module_context_map, ModuleContextMap)                                 \
  V(Map, eval_context_map, EvalContextMap)                                     \
  V(Map, script_context_map, ScriptContextMap)                                 \
  V(Map, block_context_map, BlockContextMap)                                   \
  V(Map, catch_context_map, CatchContextMap)                                   \
  V(Map, with_context_map, WithContextMap)                                     \
  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
  V(Map, script_context_table_map, ScriptContextTableMap)                      \
  /* Maps */                                                                   \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
  V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
  V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap)          \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
  V(Map, message_object_map, JSMessageObjectMap)                               \
  V(Map, external_map, ExternalMap)                                            \
  V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
  V(Map, module_info_map, ModuleInfoMap)                                       \
  V(Map, type_feedback_vector_map, TypeFeedbackVectorMap)                      \
  /* String maps */                                                            \
  V(Map, native_source_string_map, NativeSourceStringMap)                      \
  V(Map, string_map, StringMap)                                                \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
  V(Map, cons_string_map, ConsStringMap)                                       \
  V(Map, sliced_string_map, SlicedStringMap)                                   \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                   \
  V(Map, external_string_map, ExternalStringMap)                               \
  V(Map, external_string_with_one_byte_data_map,                               \
    ExternalStringWithOneByteDataMap)                                          \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
  V(Map, short_external_string_map, ShortExternalStringMap)                    \
  V(Map, short_external_string_with_one_byte_data_map,                         \
    ShortExternalStringWithOneByteDataMap)                                     \
  V(Map, internalized_string_map, InternalizedStringMap)                       \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap)      \
  V(Map, external_internalized_string_with_one_byte_data_map,                  \
    ExternalInternalizedStringWithOneByteDataMap)                              \
  V(Map, external_one_byte_internalized_string_map,                            \
    ExternalOneByteInternalizedStringMap)                                      \
  V(Map, short_external_internalized_string_map,                               \
    ShortExternalInternalizedStringMap)                                        \
  V(Map, short_external_internalized_string_with_one_byte_data_map,            \
    ShortExternalInternalizedStringWithOneByteDataMap)                         \
  V(Map, short_external_one_byte_internalized_string_map,                      \
    ShortExternalOneByteInternalizedStringMap)                                 \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
  /* Array element maps */                                                     \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                            \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                          \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                            \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
  V(Map, float32x4_map, Float32x4Map)                                          \
  V(Map, int32x4_map, Int32x4Map)                                              \
  V(Map, uint32x4_map, Uint32x4Map)                                            \
  V(Map, bool32x4_map, Bool32x4Map)                                            \
  V(Map, int16x8_map, Int16x8Map)                                              \
  V(Map, uint16x8_map, Uint16x8Map)                                            \
  V(Map, bool16x8_map, Bool16x8Map)                                            \
  V(Map, int8x16_map, Int8x16Map)                                              \
  V(Map, uint8x16_map, Uint8x16Map)                                            \
  V(Map, bool8x16_map, Bool8x16Map)                                            \
  /* Canonical empty values */                                                 \
  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)        \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)      \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)        \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)    \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
    EmptyFixedUint8ClampedArray)                                               \
  V(Script, empty_script, EmptyScript)                                         \
  V(Cell, undefined_cell, UndefinedCell)                                       \
  V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
    EmptySlowElementDictionary)                                                \
  V(TypeFeedbackVector, dummy_vector, DummyVector)                             \
  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
  /* Protectors */                                                             \
  V(PropertyCell, array_protector, ArrayProtector)                             \
  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
  V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
  V(Cell, species_protector, SpeciesProtector)                                 \
  V(PropertyCell, string_length_protector, StringLengthProtector)              \
  V(Cell, fast_array_iteration_protector, FastArrayIterationProtector)         \
  V(Cell, array_iterator_protector, ArrayIteratorProtector)                    \
  V(PropertyCell, array_buffer_neutering_protector,                            \
    ArrayBufferNeuteringProtector)                                             \
  /* Special numbers */                                                        \
  V(HeapNumber, nan_value, NanValue)                                           \
  V(HeapNumber, hole_nan_value, HoleNanValue)                                  \
  V(HeapNumber, infinity_value, InfinityValue)                                 \
  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
  /* Caches */                                                                 \
  V(FixedArray, number_string_cache, NumberStringCache)                        \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
  V(FixedArray, string_split_cache, StringSplitCache)                          \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
  V(FixedArray, experimental_natives_source_cache,                             \
    ExperimentalNativesSourceCache)                                            \
  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache)           \
  V(FixedArray, experimental_extra_natives_source_cache,                       \
    ExperimentalExtraNativesSourceCache)                                       \
  /* Lists and dictionaries */                                                 \
  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
  V(NameDictionary, public_symbol_table, PublicSymbolTable)                    \
  V(NameDictionary, api_symbol_table, ApiSymbolTable)                          \
  V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable)           \
  V(Object, script_list, ScriptList)                                           \
  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
  V(FixedArray, materialized_objects, MaterializedObjects)                     \
  V(FixedArray, microtask_queue, MicrotaskQueue)                               \
  V(FixedArray, detached_contexts, DetachedContexts)                           \
  V(ArrayList, retained_maps, RetainedMaps)                                    \
  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
  /* weak_new_space_object_to_code_list is an array of weak cells, where */    \
  /* slots with even indices refer to the weak object, and the subsequent */   \
  /* slots refer to the code with the reference to the weak object. */         \
  V(ArrayList, weak_new_space_object_to_code_list,                             \
    WeakNewSpaceObjectToCodeList)                                              \
  V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
  V(FixedArray, serialized_templates, SerializedTemplates)                     \
  V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes)     \
  /* Configured values */                                                      \
  V(TemplateList, message_listeners, MessageListeners)                         \
  V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo)               \
  V(Code, js_entry_code, JsEntryCode)                                          \
  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
  /* Oddball maps */                                                           \
  V(Map, undefined_map, UndefinedMap)                                          \
  V(Map, the_hole_map, TheHoleMap)                                             \
  V(Map, null_map, NullMap)                                                    \
  V(Map, boolean_map, BooleanMap)                                              \
  V(Map, uninitialized_map, UninitializedMap)                                  \
  V(Map, arguments_marker_map, ArgumentsMarkerMap)                             \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap)   \
  V(Map, exception_map, ExceptionMap)                                          \
  V(Map, termination_exception_map, TerminationExceptionMap)                   \
  V(Map, optimized_out_map, OptimizedOutMap)                                   \
  V(Map, stale_register_map, StaleRegisterMap)                                 \
  /* per-Isolate map for JSPromiseCapability. */                               \
  /* TODO(caitp): Make this a Struct */                                        \
  V(Map, js_promise_capability_map, JSPromiseCapabilityMap)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V)                                                       \
  V(Smi, stack_limit, StackLimit)                                              \
  V(Smi, real_stack_limit, RealStackLimit)                                     \
  V(Smi, last_script_id, LastScriptId)                                         \
  V(Smi, hash_seed, HashSeed)                                                  \
  /* To distinguish the function templates, so that we can find them in the */ \
  /* function cache of the native context. */                                  \
  V(Smi, next_template_serial_number, NextTemplateSerialNumber)                \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)           \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                 \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)                 \
  V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)

#define ROOT_LIST(V)  \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V)    \
  V(StringTable, string_table, StringTable)
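
// Illustrative expansion: ROOT_LIST is an X-macro. Passing it a macro of the
// form V(type, name, CamelName) stamps out one entry per root; for example,
// the accessor declarations inside Heap,
//   #define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
//   ROOT_LIST(ROOT_ACCESSOR)
// expand to declarations such as
//   inline Map* free_space_map();
//   inline Oddball* undefined_value();
//   inline String* empty_string();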


// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(ByteArrayMap)                       \
  V(BytecodeArrayMap)                   \
  V(FreeSpaceMap)                       \
  V(OnePointerFillerMap)                \
  V(TwoPointerFillerMap)                \
  V(UndefinedValue)                     \
  V(TheHoleValue)                       \
  V(NullValue)                          \
  V(TrueValue)                          \
  V(FalseValue)                         \
  V(UninitializedValue)                 \
  V(CellMap)                            \
  V(GlobalPropertyCellMap)              \
  V(SharedFunctionInfoMap)              \
  V(MetaMap)                            \
  V(HeapNumberMap)                      \
  V(MutableHeapNumberMap)               \
  V(Float32x4Map)                       \
  V(Int32x4Map)                         \
  V(Uint32x4Map)                        \
  V(Bool32x4Map)                        \
  V(Int16x8Map)                         \
  V(Uint16x8Map)                        \
  V(Bool16x8Map)                        \
  V(Int8x16Map)                         \
  V(Uint8x16Map)                        \
  V(Bool8x16Map)                        \
  V(NativeContextMap)                   \
  V(FixedArrayMap)                      \
  V(CodeMap)                            \
  V(ScopeInfoMap)                       \
  V(ModuleInfoMap)                      \
  V(FixedCOWArrayMap)                   \
  V(FixedDoubleArrayMap)                \
  V(WeakCellMap)                        \
  V(TransitionArrayMap)                 \
  V(NoInterceptorResultSentinel)        \
  V(HashTableMap)                       \
  V(OrderedHashTableMap)                \
  V(EmptyFixedArray)                    \
  V(EmptyByteArray)                     \
  V(EmptyDescriptorArray)               \
  V(ArgumentsMarker)                    \
  V(SymbolMap)                          \
  V(SloppyArgumentsElementsMap)         \
  V(FunctionContextMap)                 \
  V(CatchContextMap)                    \
  V(WithContextMap)                     \
  V(BlockContextMap)                    \
  V(ModuleContextMap)                   \
  V(EvalContextMap)                     \
  V(ScriptContextMap)                   \
  V(UndefinedMap)                       \
  V(TheHoleMap)                         \
  V(NullMap)                            \
  V(BooleanMap)                         \
  V(UninitializedMap)                   \
  V(ArgumentsMarkerMap)                 \
  V(JSMessageObjectMap)                 \
  V(ForeignMap)                         \
  V(NanValue)                           \
  V(InfinityValue)                      \
  V(MinusZeroValue)                     \
  V(MinusInfinityValue)                 \
  V(EmptyWeakCell)                      \
  V(empty_string)                       \
  PRIVATE_SYMBOL_LIST(V)

// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
class Scavenger;
class ScavengeJob;
class Space;
class StoreBuffer;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

enum class GarbageCollectionReason {
  kUnknown = 0,
  kAllocationFailure = 1,
  kAllocationLimit = 2,
  kContextDisposal = 3,
  kCountersExtension = 4,
  kDebugger = 5,
  kDeserializer = 6,
  kExternalMemoryPressure = 7,
  kFinalizeMarkingViaStackGuard = 8,
  kFinalizeMarkingViaTask = 9,
  kFullHashtable = 10,
  kHeapProfiler = 11,
  kIdleTask = 12,
  kLastResort = 13,
  kLowMemoryNotification = 14,
  kMakeHeapIterable = 15,
  kMemoryPressure = 16,
  kMemoryReducer = 17,
  kRuntime = 18,
  kSamplingProfiler = 19,
  kSnapshotCreator = 20,
  kTesting = 21
  // If you add new items here, then update the incremental_marking_reason,
  // mark_compact_reason, and scavenge_reason counters in counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

// A queue of objects promoted during scavenge. Each object is accompanied by
// its size to avoid dereferencing a map pointer for scanning. The last page in
// to-space is used for the promotion queue. On conflict during scavenge, the
// promotion queue is allocated externally and all entries are copied to the
// external queue.
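//
// Usage sketch (illustrative): the scavenger pushes a promoted object with
//   promotion_queue()->insert(target, size, was_marked_black);
// and later drains the queue with
//   promotion_queue()->remove(&target, &size, &was_marked_black);
// until is_empty() returns true.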
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(nullptr),
        rear_(nullptr),
        limit_(nullptr),
        emergency_stack_(nullptr),
        heap_(heap) {}

  void Initialize();
  void Destroy();

  inline void SetNewLimit(Address limit);
  inline bool IsBelowPromotionQueue(Address to_space_top);

  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
  inline void remove(HeapObject** target, int32_t* size,
                     bool* was_marked_black);

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == nullptr || emergency_stack_->length() == 0);
  }

 private:
  struct Entry {
    Entry(HeapObject* obj, int32_t size, bool was_marked_black)
        : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}

    HeapObject* obj_;
    int32_t size_ : 31;
    bool was_marked_black_ : 1;
  };

  inline Page* GetHeadPage();

  void RelocateQueueHead();

  // The front of the queue is higher in the memory page chain than the rear.
  struct Entry* front_;
  struct Entry* rear_;
  struct Entry* limit_;

  List<Entry>* emergency_stack_;
  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};

class AllocationResult {
 public:
  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  // Implicit constructor from Object*.
  AllocationResult(Object* object)  // NOLINT
      : object_(object) {
    // AllocationResults can't return Smis, which are used to represent
    // failure and the space to retry in.
    CHECK(!object->IsSmi());
  }

  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}

  inline bool IsRetry() { return object_->IsSmi(); }
  inline HeapObject* ToObjectChecked();
  inline AllocationSpace RetrySpace();

  template <typename T>
  bool To(T** obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

 private:
  explicit AllocationResult(AllocationSpace space)
      : object_(Smi::FromInt(static_cast<int>(space))) {}

  Object* object_;
};

STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
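
// Usage sketch (names illustrative): allocation routines propagate failure by
// returning the AllocationResult itself, e.g.
//   HeapObject* result = nullptr;
//   AllocationResult allocation = AllocateRaw(size_in_bytes, space);
//   if (!allocation.To(&result)) return allocation;  // caller retries or GCs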

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif

class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};

// HistogramInfo class for recording a single "bar" of a histogram.  This
// class is used for collecting statistics to print to the log file.
class HistogramInfo : public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};

class Heap {
 public:
  // Declare all the root indices.  This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
        INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
            PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
                PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
                    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
                        STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
                            kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
        kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };

  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };

  enum UpdateAllocationSiteMode { kGlobal, kCached };

  // Taking this lock prevents the GC from entering a phase that relocates
  // object references.
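  // Usage sketch (illustrative): the lock is scoped, e.g.
  //   { Heap::RelocationLock relocation_lock(heap); /* touch raw pointers */ }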
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };

  // Support for partial snapshots.  After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  typedef List<Chunk> Reservation;

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  static const int kPointerMultiplier = i::kPointerSize / 4;
#endif
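  // Worked example (illustrative): on a 64-bit, non-Android build
  // i::kPointerSize is 8, so kPointerMultiplier is 2 and every per-device
  // limit below is doubled relative to a 32-bit build (e.g. the high-memory
  // semi-space maximum becomes 16 MB).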

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
  V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
  static const double kMaxHeapGrowingFactorMemoryConstrained;
  static const double kMaxHeapGrowingFactorIdle;
  static const double kConservativeHeapGrowingFactor;
  static const double kTargetMutatorUtilization;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;
  static const int kFinalizeIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInWords = 2;

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  static int GetMaximumFillToAlign(AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  static int GetFillToAlign(Address address, AllocationAlignment alignment);

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  static void FatalProcessOutOfMemory(const char* location,
                                      bool is_heap_oom = false);

  static bool RootIsImmortalImmovable(int root_index);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);

  static bool IsUnmodifiedHeapObject(Object** p);

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
    return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
  }

  static inline GarbageCollector YoungGenerationCollector() {
    return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
  }

  static inline const char* CollectorName(GarbageCollector collector) {
    switch (collector) {
      case SCAVENGER:
        return "Scavenger";
      case MARK_COMPACTOR:
        return "Mark-Compact";
      case MINOR_MARK_COMPACTOR:
        return "Minor Mark-Compact";
    }
    return "Unknown collector";
  }

  V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
                                                    double mutator_speed);
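  // Sketch of the heuristic (details live in the implementation): with
  // R = gc_speed / mutator_speed and MU = kTargetMutatorUtilization, the
  // growing factor is roughly F = R * (1 - MU) / (R * (1 - MU) - MU),
  // clamped to [kMinHeapGrowingFactor, kMaxHeapGrowingFactor].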

  // Copy block of memory from src to dst. Size of block should be aligned
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Determines a static visitor id based on the given {map} that can then be
  // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
  static int GetStaticVisitorIdForMap(Map* map);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  inline Address* NewSpaceAllocationTopAddress();
  inline Address* NewSpaceAllocationLimitAddress();
  inline Address* OldSpaceAllocationTopAddress();
  inline Address* OldSpaceAllocationLimitAddress();

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // FreeSpace objects have a null map after deserialization. Update the map.
  void RepairFreeListsAfterDeserialization();

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If slots could have been recorded in
  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
  // pass ClearRecordedSlots::kNo.
  HeapObject* CreateFillerObjectAt(Address addr, int size,
                                   ClearRecordedSlots mode);
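  // Usage sketch (illustrative): right-trimming an array frees its tail, and
  // the freed range must stay walkable, e.g.
  //   CreateFillerObjectAt(new_end, bytes_freed, ClearRecordedSlots::kYes);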

  bool CanMoveObjectStart(HeapObject* object);

  static bool IsImmovable(HeapObject* object);

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(HeapObject* object, int by);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Converts the given boolean condition to JavaScript boolean value.
  inline Oddball* ToBoolean(bool condition);

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }
  void VisitEncounteredWeakCollections(ObjectVisitor* visitor) {
    visitor->VisitPointer(&encountered_weak_collections_);
  }

  void set_encountered_weak_cells(Object* weak_cell) {
    encountered_weak_cells_ = weak_cell;
  }
  Object* encountered_weak_cells() const { return encountered_weak_cells_; }

  void set_encountered_transition_arrays(Object* transition_array) {
    encountered_transition_arrays_ = transition_array;
  }
  Object* encountered_transition_arrays() const {
    return encountered_transition_arrays_;
  }

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  inline HeapState gc_state() { return gc_state_; }
  void SetGCState(HeapState state);

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return NULL;
  template <FindMementoMode mode>
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations, List<Address>* maps);

  //
  // Support for the API.
  //

  bool CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  void MemoryPressureNotification(MemoryPressureLevel level,
                                  bool is_isolate_locked);
  void CheckMemoryPressure();

  void SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
                              void* data);

  double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check new space expansion criteria and expand semispaces if it was hit.
  void CheckNewSpaceExpansionCriteria();

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearNormalizedMapCaches();

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  inline uint32_t HashSeed();

  inline int NextScriptId();

  inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  inline void SetConstructStubDeoptPCOffset(int pc_offset);
  inline void SetGetterStubDeoptPCOffset(int pc_offset);
  inline void SetSetterStubDeoptPCOffset(int pc_offset);
  inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
  inline int GetNextTemplateSerialNumber();

  inline void SetSerializedTemplates(FixedArray* templates);
  inline void SetSerializedGlobalProxySizes(FixedArray* sizes);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }

  int64_t external_memory() { return external_memory_; }
  void update_external_memory(int64_t delta) { external_memory_ += delta; }

  void update_external_memory_concurrently_freed(intptr_t freed) {
    external_memory_concurrently_freed_.Increment(freed);
  }

  void account_external_memory_concurrently_freed() {
    external_memory_ -= external_memory_concurrently_freed_.Value();
    external_memory_concurrently_freed_.SetValue(0);
  }
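  // Sketch: background work that frees external memory reports it via
  // update_external_memory_concurrently_freed(); the main thread later folds
  // the accumulated count back into external_memory_ by calling
  // account_external_memory_concurrently_freed().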

  void DeoptMarkedAllocationSites();

  inline bool DeoptMaybeTenuredAllocationSites();

  void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
                                             Handle<WeakCell> code);

  void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);

  void CompactWeakFixedArrays();

  void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by the runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(size_t used, size_t committed);

  void ActivateMemoryReducerIfNeeded();

  bool ShouldOptimizeForMemoryUsage();

  bool IsLowMemoryDevice() {
    return max_old_generation_size_ <= kMaxOldSpaceSizeLowMemoryDevice;
  }

  bool IsMemoryConstrainedDevice() {
    return max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice;
  }

  bool HighMemoryPressure() {
    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
  }

  size_t HeapLimitForDebugging() {
    const size_t kDebugHeapSizeFactor = 4;
    size_t max_limit = std::numeric_limits<size_t>::max() / 4;
    return Min(max_limit,
               initial_max_old_generation_size_ * kDebugHeapSizeFactor);
  }

  void IncreaseHeapLimitForDebugging() {
    max_old_generation_size_ =
        Max(max_old_generation_size_, HeapLimitForDebugging());
  }

  void RestoreOriginalHeapLimit() {
    // Do not set the limit lower than the live size + some slack.
    size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
    max_old_generation_size_ =
        Min(max_old_generation_size_,
            Max(initial_max_old_generation_size_, min_limit));
  }

  bool IsHeapLimitIncreasedForDebugging() {
    return max_old_generation_size_ == HeapLimitForDebugging();
  }

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
                     size_t max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
  V8_INLINE void CreateObjectStats();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  inline Address NewSpaceTop();

  NewSpace* new_space() { return new_space_; }
  OldSpace* old_space() { return old_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }

  inline PagedSpace* paged_space(int idx);
  inline Space* space(int idx);

  // Returns name of the space.
  const char* GetSpaceName(int idx);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_; }

  MemoryAllocator* memory_allocator() { return memory_allocator_; }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_;
  }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

  // Heap root getters.
#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  // Utility type maps.
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) inline String* name();
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) inline Symbol* name();
  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
  WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  Object* root(RootListIndex index) { return roots_[index]; }
  Handle<Object> root_handle(RootListIndex index) {
    return Handle<Object>(&roots_[index]);
  }
  template <typename T>
  bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
    Object** const handle_location = bit_cast<Object**>(handle.address());
    if (handle_location >= &roots_[kRootListLength]) return false;
    if (handle_location < &roots_[0]) return false;
    *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
    return true;
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  // Sets the code_stubs root (only used when expanding the dictionary).
  void SetRootCodeStubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  void SetRootMaterializedObjects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  void SetRootScriptList(Object* value) {
    roots_[kScriptListRootIndex] = value;
  }

  void SetRootStringTable(StringTable* value) {
    roots_[kStringTableRootIndex] = value;
1108 1109
  }

1110 1111 1112 1113
  void SetRootNoScriptSharedFunctionInfos(Object* value) {
    roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
  }

  void SetMessageListeners(TemplateList* value) {
    roots_[kMessageListenersRootIndex] = value;
  }

  // Set the stack limit in the roots_ array.  Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // The stack limit is thread-dependent. To be able to reproduce the same
  // snapshot blob, we need to reset it before serializing.
  void ClearStackLimits();

  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RegisterStrongRoots(Object** start, Object** end);
  void UnregisterStrongRoots(Object** start);

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last-resort GC; tries to squeeze out as much garbage as possible.
  void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure();

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list.  Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterates over pointers of promoted objects.
  void IterateAndScavengePromotedObject(HeapObject* target, int size,
                                        bool was_marked_black);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Write barrier support for object[offset] = o;
  inline void RecordWrite(Object* object, int offset, Object* o);
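  // Usage sketch (names are illustrative): after writing a heap object into a
  // field of {host}, record the slot so old-to-new pointers reach the store
  // buffer:
  //   host->set_some_field(value);                        // hypothetical setter
  //   heap->RecordWrite(host, kSomeFieldOffset, value);   // offset assumed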
  inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
  void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
  void RecordWritesIntoCode(Code* code);
  inline void RecordFixedArrayElements(FixedArray* array, int offset,
                                       int length);

  inline Address* store_buffer_top_address();

  void ClearRecordedSlot(HeapObject* object, Object** slot);
  void ClearRecordedSlotRange(Address start, Address end);

  bool HasRecordedSlot(HeapObject* object, Object** slot);

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  // Starts incremental marking and ensures that the idle time handler can
  // perform incremental steps.
  void StartIdleIncrementalMarking(GarbageCollectionReason gc_reason);

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  void StartIncrementalMarking(
      int gc_flags, GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void StartIncrementalMarkingIfAllocationLimitIsReached(
      int gc_flags,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);

  bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms,
                                         GarbageCollectionReason gc_reason);

  void RegisterReservationsForBlackAllocation(Reservation* reservations);

  IncrementalMarking* incremental_marking() { return incremental_marking_; }

  // ===========================================================================
  // Embedder heap tracer support. =============================================
  // ===========================================================================

  LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
    return local_embedder_heap_tracer_;
  }
  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
  void TracePossibleWrapper(JSObject* js_object);
  void RegisterExternallyReferencedObject(Object** object);

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String* string);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ===========
  // ===========================================================================

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old space.
  inline bool InOldSpace(Object* object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Slow methods that can be used for verification as they can also be used
  // with off-heap Addresses.
  bool ContainsSlow(Address addr);
  bool InSpaceSlow(Address addr, AllocationSpace space);
  inline bool InNewSpaceSlow(Address address);
  inline bool InOldSpaceSlow(Address address);

  // ===========================================================================
  // Object statistics tracking. ===============================================
  // ===========================================================================

  // Returns the number of buckets used by object statistics tracking during a
  // major GC. Note that the following methods fail gracefully when the bounds
  // are exceeded.
  size_t NumberOfTrackedHeapObjectTypes();

  // Returns object statistics about count and size at the last major GC.
  // Objects are being grouped into buckets that roughly resemble existing
  // instance types.
  size_t ObjectCountAtLastGC(size_t index);
  size_t ObjectSizeAtLastGC(size_t index);

  // Retrieves names of buckets used by object statistics tracking.
  bool GetObjectTypeName(size_t index, const char** object_type,
                         const char** object_sub_type);
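  // Sketch of dumping the tracked buckets after a major GC (PrintF is assumed
  // from src/utils.h):
  //   for (size_t i = 0; i < heap->NumberOfTrackedHeapObjectTypes(); i++) {
  //     const char* type;
  //     const char* sub_type;
  //     if (heap->GetObjectTypeName(i, &type, &sub_type)) {
  //       PrintF("%s/%s: %zu objects, %zu bytes\n", type, sub_type,
  //              heap->ObjectCountAtLastGC(i), heap->ObjectSizeAtLastGC(i));
  //     }
  //   }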

  // ===========================================================================
  // Code statistics. ==========================================================
  // ===========================================================================

  // Collect code (Code and BytecodeArray objects) statistics.
  void CollectCodeStatistics();

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  // Returns the maximum amount of memory reserved for the heap.
  size_t MaxReserved() {
    return 2 * max_semi_space_size_ + max_old_generation_size_;
  }
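  // Worked example with illustrative numbers: for 16 MB semi-spaces and a
  // 700 MB old generation, MaxReserved() = 2 * 16 MB + 700 MB = 732 MB.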
  size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
  size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
  size_t MaxOldGenerationSize() { return max_old_generation_size_; }
  size_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  size_t Capacity();

  // Returns the capacity of the old generation.
  size_t OldGenerationCapacity();

  // Returns the amount of memory currently committed for the heap.
  size_t CommittedMemory();

  // Returns the amount of memory currently committed for the old space.
  size_t CommittedOldGenerationMemory();

  // Returns the amount of executable memory currently committed for the heap.
  size_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  size_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  size_t Available();

  // Returns the size of all objects residing in the heap.
  size_t SizeOfObjects();

  void UpdateSurvivalStatistics(int start_new_space_size);

  inline void IncrementPromotedObjectsSize(size_t object_size) {
    promoted_objects_size_ += object_size;
  }
  inline size_t promoted_objects_size() { return promoted_objects_size_; }

  inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
    semi_space_copied_object_size_ += object_size;
  }
  inline size_t semi_space_copied_object_size() {
    return semi_space_copied_object_size_;
  }

  inline size_t SurvivedNewSpaceObjectSize() {
    return promoted_objects_size_ + semi_space_copied_object_size_;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(size_t survived) {
    survived_last_scavenge_ = survived;
    survived_since_last_expansion_ += survived;
  }

  inline uint64_t PromotedTotalSize() {
    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
  }

  inline void UpdateNewSpaceAllocationCounter();

  inline size_t NewSpaceAllocationCounter();

  // This should be used only for testing.
  void set_new_space_allocation_counter(size_t new_value) {
    new_space_allocation_counter_ = new_value;
  }

  void UpdateOldGenerationAllocationCounter() {
    old_generation_allocation_counter_at_last_gc_ =
        OldGenerationAllocationCounter();
  }

  size_t OldGenerationAllocationCounter() {
    return old_generation_allocation_counter_at_last_gc_ +
           PromotedSinceLastGC();
  }

  // This should be used only for testing.
  void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
    old_generation_allocation_counter_at_last_gc_ = new_value;
  }

  size_t PromotedSinceLastGC() {
    return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
  }

  int gc_count() const { return gc_count_; }

  // Returns the size of objects residing in non-new spaces.
  size_t PromotedSpaceSizeOfObjects();

  double total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
    if (is_crankshafted) {
      crankshaft_codegen_bytes_generated_ += size;
    } else {
      full_codegen_bytes_generated_ += size;
    }
  }

  // ===========================================================================
  // Prologue/epilogue callback methods.========================================
  // ===========================================================================

  void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);

  void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
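  // Registration sketch (the callback signature follows v8::Isolate::GCCallback;
  // the function name is illustrative):
  //   static void OnFullGC(v8::Isolate* isolate, GCType type,
  //                        GCCallbackFlags flags) {
  //     // e.g. sample counters before every mark-compact.
  //   }
  //   heap->AddGCPrologueCallback(OnFullGC, kGCTypeMarkSweepCompact);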

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Creates a filler object and returns a heap object immediately after it.
  MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
                                                int filler_size);

  // Creates a filler object if needed for alignment and returns a heap object
  // immediately after it. If any space is left after the returned object,
  // another filler object is created so the over allocated memory is iterable.
  MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
                                              int object_size,
                                              int allocation_size,
                                              AllocationAlignment alignment);

  // ===========================================================================
  // ArrayBuffer tracking. =====================================================
  // ===========================================================================

  // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
  // in the registration/unregistration APIs. Consider dropping the "New" from
  // "RegisterNewArrayBuffer" because one can re-register a previously
  // unregistered buffer, too, and the name is confusing.
  void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
  void UnregisterArrayBuffer(JSArrayBuffer* buffer);

  // ===========================================================================
  // Allocation site tracking. =================================================
  // ===========================================================================

  // Updates the AllocationSite of a given {object}. If the global pretenuring
  // storage is passed as {pretenuring_feedback} the memento found count on
  // the corresponding allocation site is immediately updated and an entry
  // in the hash map is created. Otherwise the entry (including the count
  // value) is cached on the local pretenuring feedback.
  template <UpdateAllocationSiteMode mode>
  inline void UpdateAllocationSite(HeapObject* object,
                                   base::HashMap* pretenuring_feedback);

  // Removes an entry from the global pretenuring storage.
  inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);

  // Merges local pretenuring feedback into the global one. Note that this
  // method needs to be called after evacuation, as allocation sites may be
  // evacuated and this method resolves forward pointers accordingly.
  void MergeAllocationSitePretenuringFeedback(
      const base::HashMap& local_pretenuring_feedback);

// =============================================================================

#ifdef VERIFY_HEAP
  // Verify the heap is in its normal state before or after a GC.
  void Verify();
#endif

#ifdef DEBUG
  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }

  void TracePathToObjectFrom(Object* target, Object* root);
  void TracePathToObject(Object* target);
  void TracePathToGlobal();

  void Print();
  void PrintHandles();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);
#endif

  static const char* GarbageCollectionReasonToString(
      GarbageCollectionReason gc_reason);

 private:
  class PretenuringScope;

  // External strings table is a place where all external strings are
  // registered.  We need to keep track of such strings to properly
  // finalize them.
  class ExternalStringTable {
   public:
    // Registers an external string.
    inline void AddString(String* string);

    inline void IterateAll(ObjectVisitor* v);
    inline void IterateNewSpaceStrings(ObjectVisitor* v);

    // Restores internal invariant and gets rid of collected strings. Must be
    // called after each Iterate*() that modified the strings.
    void CleanUpAll();
    void CleanUpNewSpaceStrings();

    // Destroys all allocated memory.
    void TearDown();

   private:
    explicit ExternalStringTable(Heap* heap) : heap_(heap) {}

    inline void Verify();

    inline void AddOldString(String* string);

    // Notifies the table that only a prefix of the new list is valid.
    inline void ShrinkNewStrings(int position);

    // To speed up scavenge collections new space strings are kept
    // separate from old space strings.
    List<Object*> new_space_strings_;
    List<Object*> old_space_strings_;

    Heap* heap_;

    friend class Heap;

    DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
  };

  struct StrongRootsList;

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantStringTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct GCCallbackPair {
    GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
                   bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {}

    bool operator==(const GCCallbackPair& other) const {
      return other.callback == callback;
    }

    v8::Isolate::GCCallback callback;
    GCType gc_type;
    bool pass_isolate;
  };

  typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                        Object** pointer);

  static const int kInitialStringTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

  static const int kRememberedUnmappedPages = 128;

  static const StringTypeTable string_type_table[];
  static const ConstantStringTable constant_string_table[];
  static const StructTable struct_table[];

  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;
  static const int kOldSurvivalRateLowThreshold = 10;

  static const int kMaxMarkCompactsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  static const int kInitialFeedbackCapacity = 256;

  Heap();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Heap* heap, Object** pointer);

  // Selects the proper allocation space based on the pretenuring decision.
  static AllocationSpace SelectSpace(PretenureFlag pretenure) {
    return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
  }

#define ROOT_ACCESSOR(type, name, camel_name) \
  inline void set_##name(type* value);
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  StoreBuffer* store_buffer() { return store_buffer_; }

  void set_current_gc_flags(int flags) {
    current_gc_flags_ = flags;
    DCHECK(!ShouldFinalizeIncrementalMarking() ||
           !ShouldAbortIncrementalMarking());
  }

  inline bool ShouldReduceMemory() const {
    return current_gc_flags_ & kReduceMemoryFootprintMask;
  }

  inline bool ShouldAbortIncrementalMarking() const {
    return current_gc_flags_ & kAbortIncrementalMarkingMask;
  }

  inline bool ShouldFinalizeIncrementalMarking() const {
    return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
  }

  void PreprocessStackTraces();

  // Checks whether a global GC is necessary
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Make sure there is a filler value behind the top of the new space
  // so that the GC does not confuse some uninitialized/stale memory
  // with the allocation memento of the object at the top
  void EnsureFillerObjectAtTop();

  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects.  May cause a GC.
  void MakeHeapIterable();

  // Performs garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  bool CollectGarbage(
      GarbageCollector collector, GarbageCollectionReason gc_reason,
      const char* collector_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a garbage collection.
  // Returns whether there is a chance another major GC could
  // collect more garbage.
  bool PerformGarbageCollection(
      GarbageCollector collector,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  inline void UpdateOldSpaceLimits();

  // Initializes a JSObject based on its map.
  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                 Map* map);

  // Initializes JSObject body starting at given offset.
  void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);

  void InitializeAllocationMemento(AllocationMemento* memento,
                                   AllocationSite* allocation_site);

  bool CreateInitialMaps();
  void CreateInitialObjects();

  // These two Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
  NO_INLINE(void CreateJSEntryStub());
  NO_INLINE(void CreateJSConstructEntryStub());

  void CreateFixedStubs();

  HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit unused semi space.
  bool UncommitFromSpace();

  // Fill in bogus values in from space
  void ZapFromSpace();

  // Deopts all code that contains allocation instructions which are tenured or
  // not tenured. Moreover it clears the pretenuring allocation site statistics.
  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);

  // Evaluates local pretenuring for the old space and calls
  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
  // the old space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

  // Record statistics before and after garbage collection.
  void ReportStatisticsBeforeGC();
  void ReportStatisticsAfterGC();

  // Creates and installs the full-sized number string cache.
  int FullSizeNumberStringCacheLength();
  // Flush the number to string cache.
  void FlushNumberStringCache();

  void ConfigureInitialOldGenerationSize();

  bool HasLowYoungGenerationAllocationRate();
  bool HasLowOldGenerationAllocationRate();
  double YoungGenerationMutatorUtilization();
  double OldGenerationMutatorUtilization();

  void ReduceNewSpaceSize();

  GCIdleTimeHeapState ComputeHeapState();

  bool PerformIdleTimeAction(GCIdleTimeAction action,
                             GCIdleTimeHeapState heap_state,
                             double deadline_in_ms);

  void IdleNotificationEpilogue(GCIdleTimeAction action,
                                GCIdleTimeHeapState heap_state, double start_ms,
                                double deadline_in_ms);

  inline void UpdateAllocationsHash(HeapObject* object);
  inline void UpdateAllocationsHash(uint32_t value);
  void PrintAlloctionsHash();

  void AddToRingBuffer(const char* string);
  void GetFromRingBuffer(char* buffer);

  void CompactRetainedMaps(ArrayList* retained_maps);

  void CollectGarbageOnMemoryPressure();

  void InvokeOutOfMemoryCallback();

  // Attempt to over-approximate the weak closure by marking object groups and
  // implicit references from global handles, but don't atomically complete
  // marking. If we continue to mark incrementally, we might have marked
  // objects that die later.
  void FinalizeIncrementalMarking(GarbageCollectionReason gc_reason);

  // Returns the timer used for a given GC type.
  // - GCScavenger: young generation GC
  // - GCCompactor: full GC
  // - GCFinalizeMC: finalization of incremental full GC
  // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
  // memory reduction
  HistogramTimer* GCTypeTimer(GarbageCollector collector);

  // ===========================================================================
  // Pretenuring. ==============================================================
  // ===========================================================================

  // Pretenuring decisions are made based on feedback collected during new space
  // evacuation. Note that between feedback collection and calling this method,
  // objects in old space must not move.
  void ProcessPretenuringFeedback();

  // ===========================================================================
  // Actual GC. ================================================================
  // ===========================================================================

  // Code that should be run before and after each GC.  Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();

  // Performs a major collection in the whole heap.
  void MarkCompact();
  // Performs a minor collection of just the young generation.
  void MinorMarkCompact();

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();
  void MarkCompactEpilogue();

  // Performs a minor collection in new generation.
  void Scavenge();

  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
  void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);
  void ProcessWeakListRoots(WeakObjectRetainer* retainer);

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  inline size_t OldGenerationSpaceAvailable() {
    if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
    return old_generation_allocation_limit_ -
           static_cast<size_t>(PromotedTotalSize());
  }

  // We allow incremental marking to overshoot the allocation limit for
  // performance reasons. If the overshoot is too large then we are more
  // eager to finalize incremental marking.
  inline bool AllocationLimitOvershotByLargeMargin() {
    // This guards against too eager finalization in small heaps.
    // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
    size_t kMarginForSmallHeaps = 32u * MB;
    if (old_generation_allocation_limit_ >= PromotedTotalSize()) return false;
    uint64_t overshoot = PromotedTotalSize() - old_generation_allocation_limit_;
    // Overshoot margin is 50% of allocation limit or half-way to the max heap
    // with special handling of small heaps.
    uint64_t margin =
        Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
            (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
    return overshoot >= margin;
  }
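  // Worked example with illustrative numbers: for a 100 MB limit, a 300 MB max
  // old generation and 160 MB promoted, the overshoot is 60 MB and the margin
  // is Min(Max(50 MB, 32 MB), (300 MB - 100 MB) / 2) = 50 MB, so the overshoot
  // counts as large.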

  void UpdateTotalGCTime(double duration);

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

  // ===========================================================================
  // Growing strategy. =========================================================
  // ===========================================================================

  // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
  // This constant limits the effect of load RAIL mode on GC.
  // The value is arbitrary and chosen as the largest load time observed in
  // v8 browsing benchmarks.
  static const int kMaxLoadTimeMs = 7000;

  bool ShouldOptimizeForLoadTime();

  // Decrease the allocation limit if the new limit based on the given
  // parameters is lower than the current limit.
  void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                          double mutator_speed);

  // Calculates the allocation limit based on a given growing factor and a
  // given old generation size.
  size_t CalculateOldGenerationAllocationLimit(double factor,
                                               size_t old_gen_size);

  // Sets the allocation limit to trigger the next full garbage collection.
  void SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                       double mutator_speed);

  size_t MinimumAllocationLimitGrowingStep();

  size_t old_generation_allocation_limit() const {
    return old_generation_allocation_limit_;
  }

  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }

  bool CanExpandOldGeneration(int size) {
    if (force_oom_) return false;
    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
  }

  bool IsCloseToOutOfMemory(size_t slack) {
    return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
  }

  bool ShouldExpandOldGenerationOnSlowAllocation();

  enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
  IncrementalMarkingLimit IncrementalMarkingLimitReached();

  // ===========================================================================
  // Idle notification. ========================================================
  // ===========================================================================

  bool RecentIdleNotificationHappened();
  void ScheduleIdleScavengeIfNeeded(int bytes_allocated);

  // ===========================================================================
  // HeapIterator helpers. =====================================================
  // ===========================================================================

  void heap_iterator_start() { heap_iterator_depth_++; }

  void heap_iterator_end() { heap_iterator_depth_--; }

  bool in_heap_iterator() { return heap_iterator_depth_ > 0; }

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
  MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
                                                AllocationSite* site = NULL);

  // Allocates a JS Map in the heap.
  MUST_USE_RESULT AllocationResult
  AllocateMap(InstanceType instance_type, int instance_size,
              ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // If allocation_site is non-null, then a memento is emitted after the object
  // that points to the site.
  MUST_USE_RESULT AllocationResult AllocateJSObject(
      JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
      AllocationSite* allocation_site = NULL);

  // Allocates and initializes a new JavaScript object based on a map.
  // Passing an allocation site means that a memento will be created that
  // points to the site.
  MUST_USE_RESULT AllocationResult
  AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
                          AllocationSite* allocation_site = NULL);

  // Allocates a HeapNumber from value.
  MUST_USE_RESULT AllocationResult
  AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
                     PretenureFlag pretenure = NOT_TENURED);

// Allocates SIMD values from the given lane values.
#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
  AllocationResult Allocate##Type(lane_type lanes[lane_count],             \
                                  PretenureFlag pretenure = NOT_TENURED);
  SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
#undef SIMD_ALLOCATE_DECLARATION

  // Allocates a byte array of the specified length
  MUST_USE_RESULT AllocationResult
  AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a bytecode array with given contents.
  MUST_USE_RESULT AllocationResult
  AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
                        int parameter_count, FixedArray* constant_pool);

  MUST_USE_RESULT AllocationResult CopyCode(Code* code);

  MUST_USE_RESULT AllocationResult
  CopyBytecodeArray(BytecodeArray* bytecode_array);

  // Allocates a fixed array initialized with undefined values
  MUST_USE_RESULT AllocationResult
  AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocates an uninitialized object.  The memory is non-executable if the
  // hardware and OS allow.  This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationSpace space,
      AllocationAlignment alignment = kWordAligned);

  // Allocates a heap object based on the map.
  MUST_USE_RESULT AllocationResult
      Allocate(Map* map, AllocationSpace space,
               AllocationSite* allocation_site = NULL);

  // Allocates a partial map for bootstrapping.
  MUST_USE_RESULT AllocationResult
      AllocatePartialMap(InstanceType instance_type, int instance_size);

  // Allocate a block of memory in the given space (filled with a filler).
  // Used as a fall-back for generated code when the space is full.
  MUST_USE_RESULT AllocationResult
      AllocateFillerObject(int size, bool double_align, AllocationSpace space);

  // Allocate an uninitialized fixed array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedArray(int length, PretenureFlag pretenure);

  // Allocate an uninitialized fixed double array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);

  // Allocate an initialized fixed array with the given filler value.
  MUST_USE_RESULT AllocationResult
      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
                                   Object* filler);

  // Allocates and partially initializes a String.  There are two String
  // encodings: one-byte and two-byte.  These functions allocate a string of
  // the given length and set its map and length fields.  The characters of
  // the string are uninitialized.
  MUST_USE_RESULT AllocationResult
      AllocateRawOneByteString(int length, PretenureFlag pretenure);
  MUST_USE_RESULT AllocationResult
      AllocateRawTwoByteString(int length, PretenureFlag pretenure);

  // Allocates an internalized string in old space based on the character
  // stream.
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
      Vector<const char> str, int chars, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
      Vector<const uint8_t> str, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
      Vector<const uc16> str, uint32_t hash_field);

  template <bool is_one_byte, typename T>
  MUST_USE_RESULT AllocationResult
      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);

  template <typename T>
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
      T t, int chars, uint32_t hash_field);

  // Allocates an uninitialized fixed array. It must be filled by the caller.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it.
  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);

  // Make a copy of src, also grow the copy, and return the copy.
  MUST_USE_RESULT AllocationResult
  CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);

  // Make a copy of src, also grow the copy, and return the copy.
  MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
                                                      int new_len,
                                                      PretenureFlag pretenure);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyFixedArrayWithMap(FixedArray* src, Map* map);

  // Make a copy of src and return it.
  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
      FixedDoubleArray* src);

  // Computes a single character string where the character has code.
  // A cache is used for one-byte (Latin1) codes.
  MUST_USE_RESULT AllocationResult
      LookupSingleCharacterStringFromCode(uint16_t code);

  // Allocate a symbol in old space.
  MUST_USE_RESULT AllocationResult AllocateSymbol();

  // Allocates an external array of the specified length and type.
  MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
      int length, ExternalArrayType array_type, void* external_pointer,
      PretenureFlag pretenure);

  // Allocates a fixed typed array of the specified length and type.
  MUST_USE_RESULT AllocationResult
  AllocateFixedTypedArray(int length, ExternalArrayType array_type,
                          bool initialize, PretenureFlag pretenure);

  // Make a copy of src and return it.
  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);

  // Allocates a fixed double array with uninitialized values.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
      int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocate empty fixed array.
  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();

  // Allocate empty scope info.
  MUST_USE_RESULT AllocationResult AllocateEmptyScopeInfo();

  // Allocate empty fixed typed array of given type.
  MUST_USE_RESULT AllocationResult
      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);

  // Allocate a tenured simple cell.
  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);

  // Allocate a tenured JS global property cell initialized with the hole.
  MUST_USE_RESULT AllocationResult AllocatePropertyCell();

  MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);

  MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);

  // Allocates a new utility object in the old generation.
  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);

  // Allocates a new foreign object.
  MUST_USE_RESULT AllocationResult
      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);

  MUST_USE_RESULT AllocationResult
      AllocateCode(int object_size, bool immovable);

  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);

  MUST_USE_RESULT AllocationResult InternalizeString(String* str);

  // ===========================================================================

  void set_force_oom(bool value) { force_oom_ = value; }

  // The amount of external memory registered through the API.
  int64_t external_memory_;

  // The limit when to trigger memory pressure from the API.
  int64_t external_memory_limit_;

  // Caches the amount of external memory registered at the last MC.
  int64_t external_memory_at_last_mark_compact_;

  // The amount of memory that has been freed concurrently.
  base::AtomicNumber<intptr_t> external_memory_concurrently_freed_;

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_;

  Object* roots_[kRootListLength];

  size_t code_range_size_;
  size_t max_semi_space_size_;
  size_t initial_semispace_size_;
  size_t max_old_generation_size_;
  size_t initial_max_old_generation_size_;
  size_t initial_old_generation_size_;
  bool old_generation_size_configured_;
  size_t max_executable_size_;
  size_t maximum_committed_;

  // For keeping track of how much data has survived
  // scavenge since last new space expansion.
  size_t survived_since_last_expansion_;

  // ... and since the last scavenge.
  size_t survived_last_scavenge_;

  // This is not the depth of nested AlwaysAllocateScope's but rather a single
  // count, as scopes can be acquired from multiple tasks (read: threads).
  base::AtomicNumber<size_t> always_allocate_scope_count_;

  // Stores the memory pressure level that is set by MemoryPressureNotification
  // and reset by a mark-compact garbage collection.
  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;

  v8::debug::OutOfMemoryCallback out_of_memory_callback_;
  void* out_of_memory_callback_data_;

  // For keeping track of context disposals.
  int contexts_disposed_;

  // The length of the retained_maps array at the time of context disposal.
  // This separates maps in the retained_maps array that were created before
  // and after context disposal.
  int number_of_disposed_maps_;

  int global_ic_age_;

  NewSpace* new_space_;
  OldSpace* old_space_;
  OldSpace* code_space_;
  MapSpace* map_space_;
  LargeObjectSpace* lo_space_;
  // Map from the space id to the space.
  Space* space_[LAST_SPACE + 1];
  HeapState gc_state_;
  int gc_post_processing_depth_;
  Address new_space_top_after_last_gc_;

  // Returns the amount of external memory registered since last global gc.
  uint64_t PromotedExternalMemorySize();

  // How many "runtime allocations" happened.
  uint32_t allocations_count_;

  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_;

  // How many mark-sweep collections happened.
  unsigned int ms_count_;

  // How many GCs happened.
  unsigned int gc_count_;

  // For post mortem debugging.
  int remembered_unmapped_pages_index_;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

#ifdef DEBUG
  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // remaining until the next failure and garbage collection.
  int allocation_timeout_;
#endif  // DEBUG

  // Limit that triggers a global GC on the next (normally caused) GC.  This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
  size_t old_generation_allocation_limit_;

  // Indicates that inline bump-pointer allocation has been globally disabled
  // for all spaces. This is used to disable allocations in generated code.
  bool inline_allocation_disabled_;

  // Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at start.
  Object* native_contexts_list_;
  Object* allocation_sites_list_;

  // List of encountered weak collections (JSWeakMap and JSWeakSet) during
  // marking. It is initialized during marking, destroyed after marking and
  // contains Smi(0) while marking is not active.
  Object* encountered_weak_collections_;

  Object* encountered_weak_cells_;

  Object* encountered_transition_arrays_;

  List<GCCallbackPair> gc_epilogue_callbacks_;
  List<GCCallbackPair> gc_prologue_callbacks_;

  // Total RegExp code ever generated
  double total_regexp_code_generated_;

  int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

  GCTracer* tracer_;

  size_t promoted_objects_size_;
  double promotion_ratio_;
  double promotion_rate_;
  size_t semi_space_copied_object_size_;
  size_t previous_semi_space_copied_object_size_;
  double semi_space_copied_rate_;
  int nodes_died_in_new_space_;
  int nodes_copied_in_new_space_;
  int nodes_promoted_;

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Last time an idle notification happened.
  double last_idle_notification_time_;

  // Last time a garbage collection happened.
  double last_gc_time_;

  Scavenger* scavenge_collector_;

  MarkCompactCollector* mark_compact_collector_;

  MemoryAllocator* memory_allocator_;

  StoreBuffer* store_buffer_;

  IncrementalMarking* incremental_marking_;

  GCIdleTimeHandler* gc_idle_time_handler_;

  MemoryReducer* memory_reducer_;

  ObjectStats* live_object_stats_;
  ObjectStats* dead_object_stats_;

  ScavengeJob* scavenge_job_;

  AllocationObserver* idle_scavenge_observer_;

  // These two counters are monotonically increasing and never reset.
  size_t full_codegen_bytes_generated_;
  size_t crankshaft_codegen_bytes_generated_;

  // This counter is increased before each GC and never reset.
  // To account for the bytes allocated since the last GC, use the
  // NewSpaceAllocationCounter() function.
  size_t new_space_allocation_counter_;

  // This counter is increased before each GC and never reset. To
  // account for the bytes allocated since the last GC, use the
  // OldGenerationAllocationCounter() function.
  size_t old_generation_allocation_counter_at_last_gc_;

  // The size of objects in old generation after the last MarkCompact GC.
  size_t old_generation_size_at_last_gc_;

  // If the --deopt_every_n_garbage_collections flag is set to a positive value,
  // this variable holds the number of garbage collections since the last
  // deoptimization triggered by garbage collection.
  int gcs_since_last_deopt_;

  // The feedback storage is used to store allocation sites (keys) and how often
  // they have been visited (values) by finding a memento behind an object. The
  // storage is only alive temporarily during a GC. The invariant is that all
  // pointers in this map are already fixed, i.e., they do not point to
  // forwarding pointers.
  base::HashMap* global_pretenuring_feedback_;

  char trace_ring_buffer_[kTraceRingBufferSize];
  // If it's not full then the data is from 0 to ring_buffer_end_.  If it's
  // full then the data is from ring_buffer_end_ to the end of the buffer and
  // from 0 to ring_buffer_end_.
  bool ring_buffer_full_;
  size_t ring_buffer_end_;

  // Shared state read by the scavenge collector and set by ScavengeObject.
  PromotionQueue promotion_queue_;

  // Flag is set when the heap has been configured.  The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_;

  // Currently set GC flags that are respected by all GC components.
  int current_gc_flags_;

  // Currently set GC callback flags that are used to pass information between
  // the embedder and V8's GC.
  GCCallbackFlags current_gc_callback_flags_;

  ExternalStringTable external_string_table_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_;

  bool deserialization_complete_;

  StrongRootsList* strong_roots_list_;

  // The depth of HeapIterator nestings.
  int heap_iterator_depth_;

  LocalEmbedderHeapTracer* local_embedder_heap_tracer_;

  // Used for testing purposes.
  bool force_oom_;
  bool delay_sweeper_tasks_for_testing_;

  // Classes in "heap" can be friends.
  friend class AlwaysAllocateScope;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class HeapIterator;
  friend class IdleScavengeObserver;
  friend class IncrementalMarking;
  friend class IncrementalMarkingJob;
  friend class LargeObjectSpace;
  friend class MarkCompactCollector;
  friend class MarkCompactMarkingVisitor;
  friend class NewSpace;
  friend class ObjectStatsCollector;
  friend class Page;
  friend class PagedSpace;
  friend class Scavenger;
  friend class StoreBuffer;
  friend class TestMemoryAllocatorScope;

  // The allocator interface.
  friend class Factory;

  // The Isolate constructs us.
  friend class Isolate;

  // Used in cctest.
  friend class HeapTester;

  DISALLOW_COPY_AND_ASSIGN(Heap);
};


class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  intptr_t* start_marker;                  //  0
  size_t* new_space_size;                  //  1
  size_t* new_space_capacity;              //  2
  size_t* old_space_size;                  //  3
  size_t* old_space_capacity;              //  4
  size_t* code_space_size;                 //  5
  size_t* code_space_capacity;             //  6
  size_t* map_space_size;                  //  7
  size_t* map_space_capacity;              //  8
  size_t* lo_space_size;                   //  9
  size_t* global_handle_count;             // 10
  size_t* weak_global_handle_count;        // 11
  size_t* pending_global_handle_count;     // 12
  size_t* near_death_global_handle_count;  // 13
  size_t* free_global_handle_count;        // 14
  size_t* memory_allocator_size;           // 15
  size_t* memory_allocator_capacity;       // 16
  size_t* malloced_memory;                 // 17
  size_t* malloced_peak_memory;            // 18
  size_t* objects_per_type;                // 19
  size_t* size_per_type;                   // 20
  int* os_error;                           // 21
  char* last_few_messages;                 // 22
  char* js_stacktrace;                     // 23
  intptr_t* end_marker;                    // 24
};


class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  Heap* heap_;
};


// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end) override;
};


// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end) override;
};


// Space iterator for iterating over all spaces of the heap.  Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
  Space* next();

 private:
  Heap* heap_;
  int counter_;
};
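
// Usage sketch (an illustration only): iterate until next() returns null.
// OldSpaces and PagedSpaces below follow the same protocol for their
// respective subsets of spaces.
//
//   AllSpaces spaces(heap);
//   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
//     // ... inspect |space| ...
//   }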


// Space iterator for iterating over all old spaces of the heap: Old space
// and code space.  Returns each space in turn, and null when it is done.
class V8_EXPORT_PRIVATE OldSpaces BASE_EMBEDDED {
 public:
  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
  OldSpace* next();

 private:
  Heap* heap_;
  int counter_;
};


// Space iterator for iterating over all the paged spaces of the heap: old
// space, code space and map space.  Returns each space in turn, and null when
// it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
  PagedSpace* next();

 private:
  Heap* heap_;
  int counter_;
};


class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  virtual ~SpaceIterator();

  bool has_next();
  Space* next();

 private:
  Heap* heap_;
  int current_space_;         // from enum AllocationSpace.
};
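
// Usage sketch (an illustration only): SpaceIterator exposes an explicit
// has_next()/next() protocol rather than terminating with null.
//
//   SpaceIterator it(heap);
//   while (it.has_next()) {
//     Space* space = it.next();
//     // ... inspect |space| ...
//   }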


// A HeapIterator provides iteration over the whole heap. It aggregates the
// specific iterators for the different spaces, as each of those can iterate
// over only a single space.
//
// HeapIterator ensures there is no allocation during its lifetime (using an
// embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free list nodes (that is, de-allocated heap objects
// that still remain in the heap). Because the free-node filtering uses GC
// marks, it cannot be used during mark-sweep/mark-compact GC phases. Also, it
// is forbidden to interrupt iteration in this mode, as this will leave heap
// objects marked (and thus, unusable).
class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap,
                        HeapObjectsFiltering filtering = kNoFiltering);
  ~HeapIterator();

  HeapObject* next();

 private:
  struct MakeHeapIterableHelper {
    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
  };

  HeapObject* NextObject();

  // The following two fields need to be declared in this order. Initialization
  // order guarantees that we first make the heap iterable (which may involve
  // allocations) and only then lock it down by not allowing further
  // allocations.
  MakeHeapIterableHelper make_heap_iterable_helper_;
  DisallowHeapAllocation no_heap_allocation_;

  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  std::unique_ptr<ObjectIterator> object_iterator_;
};
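
// Usage sketch (an illustration only): next() returns null once every object
// in every space has been visited. With kFilterUnreachable the caveats above
// about GC marks and uninterrupted iteration apply.
//
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next(); obj != NULL;
//        obj = iterator.next()) {
//     // ... inspect |obj| ...
//   }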

// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Returns whether this object should be retained. If NULL is returned, the
  // object has no references and will be dropped. Otherwise the address of
  // the retained object should be returned, as in some GC situations the
  // object may have been moved.
  virtual Object* RetainAs(Object* object) = 0;
};
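
// A minimal retainer sketch (illustrative only). A real implementation
// typically decides, based on marking or liveness state, whether the weakly
// held object survives and at which (possibly relocated) address:
//
//   class KeepEverythingRetainer final : public WeakObjectRetainer {
//    public:
//     Object* RetainAs(Object* object) override {
//       return object;  // Retain everything at its current address.
//     }
//   };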


#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after first match.
  };

  // Tags 0, 1, and 3 are used. Use 2 for marking a visited HeapObject.
  static const int kMarkTag = 2;

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match.  If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target, WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_allocation() {}

  void VisitPointers(Object** start, Object** end) override;

  void Reset();
  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;

 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;

  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
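
// Usage sketch (illustrative; |root_slot| and |target| are placeholders, not
// part of this interface):
//
//   PathTracer tracer(target, PathTracer::FIND_FIRST, VISIT_ALL);
//   tracer.TracePathFrom(root_slot);
//   if (tracer.found()) {
//     // A path from |root_slot| to |target| was traced.
//   }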
#endif  // DEBUG

// -----------------------------------------------------------------------------
// Allows observation of allocations.
class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {
    DCHECK(step_size >= kPointerSize);
  }
  virtual ~AllocationObserver() {}

  // Called each time the observed space performs an allocation step. This may
  // happen more frequently than the step_size we are monitoring (e.g. when
  // there are multiple observers, or when a page or space boundary is
  // encountered).
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      step_size_ = GetNextStepSize();
      bytes_to_next_step_ = step_size_;
    }
  }

 protected:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }

  // Pure virtual method provided by the subclasses that gets called when at
  // least step_size bytes have been allocated. soon_object is the address just
  // allocated (but not yet initialized.) size is the size of the object as
  // requested (i.e. w/o the alignment fillers). Some complexities to be aware
  // of:
  // 1) soon_object will be nullptr in cases where we end up observing an
  //    allocation that happens to be a filler space (e.g. page boundaries.)
  // 2) size is the requested size at the time of allocation. Right-trimming
  //    may change the object size dynamically.
  // 3) soon_object may actually be the first object in an allocation-folding
  //    group. In such a case size is the size of the group rather than the
  //    first object.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

  // Subclasses can override this method to make step size dynamic.
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;

 private:
  friend class LargeObjectSpace;
  friend class NewSpace;
  friend class PagedSpace;
  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
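
// A minimal observer sketch (illustrative only): Step() is invoked once at
// least step_size bytes have been allocated since the previous step, per the
// contract documented above. The class name and step size are arbitrary.
//
//   class CountingObserver final : public AllocationObserver {
//    public:
//     CountingObserver() : AllocationObserver(64 * KB), steps_(0) {}
//
//    protected:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       steps_++;  // e.g. sample |soon_object| here; it may be nullptr.
//     }
//
//    private:
//     int steps_;
//   };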

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_H_