// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_

#include <cstddef>
#include <memory>
#include <queue>
#include <vector>

#include "include/v8-debug.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
#include "src/date.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/global-handles.h"
#include "src/handles.h"
#include "src/heap/heap.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
#include "src/zone/zone.h"

namespace v8 {

namespace base {
class RandomNumberGenerator;
}

namespace debug {
class ConsoleDelegate;
}

namespace internal {

namespace heap {
class HeapTester;
}  // namespace heap

class AccessCompilerData;
class AddressToIndexHashMap;
class AstStringConstants;
class BasicBlockProfiler;
class Bootstrapper;
class CallInterfaceDescriptorData;
class CancelableTaskManager;
class CodeEventDispatcher;
class CodeGenerator;
class CodeRange;
class CodeStubDescriptor;
class CodeTracer;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
class ContextSlotCache;
class Counters;
class CpuFeatures;
class CpuProfiler;
class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
class EmptyStatement;
class ExternalCallbackScope;
class ExternalReferenceTable;
class Factory;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
class Logger;
class MaterializedObjectStore;
class OptimizingCompileDispatcher;
class PromiseOnStack;
class Redirection;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
class SaveContext;
class SetupIsolateDelegate;
class Simulator;
class StartupDeserializer;
class StandardFrame;
class StatsTable;
class StringTracker;
class StubCache;
class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor;  // Defined in v8threads.h
class UnicodeCache;

template <StateTag Tag> class VMState;

// 'void function pointer', used to roundtrip the
// ExternalReference::ExternalReferenceRedirector since we cannot include
// assembler.h, where it is defined, here.
typedef void* ExternalReferenceRedirectorPointer();

namespace interpreter {
class Interpreter;
}

namespace wasm {
class CompilationManager;
}

#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    DCHECK(!__isolate__->has_pending_exception());     \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)
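
// A minimal usage sketch (hypothetical caller, not from this file): a runtime
// function that re-enters the API promotes any exception scheduled during the
// callout before producing a normal result. Runtime_Example is illustrative.
//
//   RUNTIME_FUNCTION(Runtime_Example) {
//     HandleScope scope(isolate);
//     // ... work that may leave a scheduled exception behind ...
//     RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
//     return isolate->heap()->undefined_value();
//   }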

// Macros for MaybeHandle.

#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    DCHECK(!__isolate__->has_pending_exception());          \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())

#define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    if (!(call).ToLocal(&dst)) {                                              \
      DCHECK(__isolate__->has_scheduled_exception());                         \
      __isolate__->PromoteScheduledException();                               \
      return value;                                                           \
    }                                                                         \
  } while (false)

#define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                            \
    Isolate* __isolate__ = (isolate);                             \
    if ((call).IsNothing()) {                                     \
      DCHECK(__isolate__->has_scheduled_exception());             \
      __isolate__->PromoteScheduledException();                   \
      return value;                                               \
    }                                                             \
  } while (false)

#define RETURN_RESULT_OR_FAILURE(isolate, call)     \
  do {                                              \
    Handle<Object> __result__;                      \
    Isolate* __isolate__ = (isolate);               \
    if (!(call).ToHandle(&__result__)) {            \
      DCHECK(__isolate__->has_pending_exception()); \
      return __isolate__->heap()->exception();      \
    }                                               \
    DCHECK(!__isolate__->has_pending_exception());  \
    return *__result__;                             \
  } while (false)
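
// A usage sketch (hypothetical): unwrapping a MaybeHandle at the boundary to
// generated code, where heap()->exception() is the failure sentinel. DoWork
// is an illustrative helper returning MaybeHandle<Object>.
//
//   RUNTIME_FUNCTION(Runtime_DoWork) {
//     HandleScope scope(isolate);
//     RETURN_RESULT_OR_FAILURE(isolate, DoWork(isolate));
//   }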

#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value)  \
  do {                                                               \
    if (!(call).ToHandle(&dst)) {                                    \
      DCHECK((isolate)->has_pending_exception());                    \
      return value;                                                  \
    }                                                                \
  } while (false)

#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)          \
  do {                                                                  \
    Isolate* __isolate__ = (isolate);                                   \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,            \
                                     __isolate__->heap()->exception()); \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T)  \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())

#define THROW_NEW_ERROR(isolate, call, T)                       \
  do {                                                          \
    Isolate* __isolate__ = (isolate);                           \
    return __isolate__->Throw<T>(__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    Isolate* __isolate__ = (isolate);                         \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)
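
// In both macros above, `call` is expanded against the isolate's Factory, so
// a typical (illustrative) use passes a factory call such as NewTypeError
// directly:
//
//   THROW_NEW_ERROR_RETURN_FAILURE(
//       isolate, NewTypeError(MessageTemplate::kNotConstructor));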

#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value)            \
  do {                                                             \
    if ((call).is_null()) {                                        \
      DCHECK((isolate)->has_pending_exception());                  \
      return value;                                                \
    }                                                              \
  } while (false)

#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)               \
  do {                                                           \
    Isolate* __isolate__ = (isolate);                            \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                 \
                              __isolate__->heap()->exception()); \
  } while (false)

#define RETURN_ON_EXCEPTION(isolate, call, T)  \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())


#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = isolate;                            \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)
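
// Illustrative use of the pattern above: iterate a large range, opening a
// fresh HandleScope every 1024 iterations so handles allocated in the body
// are freed in batches instead of accumulating for the whole loop. `n` is an
// assumed loop bound.
//
//   FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < n, i++, {
//     // The body may allocate handles; they die with each inner loop_scope.
//   });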

// Platform-independent, reliable thread identifier.
class ThreadId {
 public:
  // Creates an invalid ThreadId.
  ThreadId() { base::Relaxed_Store(&id_, kInvalidId); }

  ThreadId& operator=(const ThreadId& other) {
    base::Relaxed_Store(&id_, base::Relaxed_Load(&other.id_));
    return *this;
  }

  // Returns ThreadId for current thread.
  static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }

  // Returns invalid ThreadId (guaranteed not to be equal to any thread).
  static ThreadId Invalid() { return ThreadId(kInvalidId); }

  // Compares ThreadIds for equality.
  INLINE(bool Equals(const ThreadId& other) const) {
    return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
  }

  // Checks whether this ThreadId refers to any thread.
  INLINE(bool IsValid() const) {
    return base::Relaxed_Load(&id_) != kInvalidId;
  }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::GetCurrentThreadId).
  int ToInteger() const { return static_cast<int>(base::Relaxed_Load(&id_)); }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::TerminateExecution).
  static ThreadId FromInteger(int id) { return ThreadId(id); }

 private:
  static const int kInvalidId = -1;

  explicit ThreadId(int id) { base::Relaxed_Store(&id_, id); }

  static int AllocateThreadId();

  V8_EXPORT_PRIVATE static int GetCurrentThreadId();

  base::Atomic32 id_;

  static base::Atomic32 highest_thread_id_;

  friend class Isolate;
};


#define FIELD_ACCESSOR(type, name)                 \
  inline void set_##name(type v) { name##_ = v; }  \
  inline type name() const { return name##_; }
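
// For reference, FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler) below
// expands to an inline setter/getter pair over the try_catch_handler_ member:
//   inline void set_try_catch_handler(v8::TryCatch* v);
//   inline v8::TryCatch* try_catch_handler() const;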


class ThreadLocalTop BASE_EMBEDDED {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop();

  // Initialize the thread data.
  void Initialize();

  // Get the top C++ try catch handler or nullptr if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack.  If such an
  // address is needed, use try_catch_handler_address.
  FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)

  // Get the address of the top C++ try catch handler or nullptr if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack.  When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer.  When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  Address try_catch_handler_address() {
    return reinterpret_cast<Address>(
        v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
  }

  void Free();

  Isolate* isolate_;
  // The context where the current execution method is created and for variable
  // lookups.
  Context* context_;
  ThreadId thread_id_;
  Object* pending_exception_;
  // TODO(kschimpf): Change this to a stack of caught exceptions (rather than
  // just innermost catching try block).
  Object* wasm_caught_exception_;

  // Communication channel between Isolate::FindHandler and the CEntryStub.
  Context* pending_handler_context_;
  Address pending_handler_entrypoint_;
  Address pending_handler_constant_pool_;
  Address pending_handler_fp_;
  Address pending_handler_sp_;

  // Communication channel between Isolate::Throw and message consumers.
  bool rethrowing_message_;
  Object* pending_message_obj_;

  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception.  We may want to
  // unify them later.
  Object* scheduled_exception_;
  bool external_caught_exception_;
  SaveContext* save_context_;

  // Stack.
  Address c_entry_fp_;  // the frame pointer of the top c entry frame
  Address handler_;     // try-blocks are chained through the stack
  Address c_function_;  // C function that was called at c entry.

  // Throwing an exception may cause a Promise rejection.  For this purpose
  // we keep track of a stack of nested promises and the corresponding
  // try-catch handlers.
  PromiseOnStack* promise_on_stack_;

#ifdef USE_SIMULATOR
  Simulator* simulator_;
#endif

  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
  // the external callback we're currently in
  ExternalCallbackScope* external_callback_scope_;
  StateTag current_vm_state_;

  // Callback function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_;

 private:
  void InitializeInternal();

  v8::TryCatch* try_catch_handler_;
};


#if USE_SIMULATOR

#define ISOLATE_INIT_SIMULATOR_LIST(V)                       \
  V(bool, simulator_initialized, false)                      \
  V(base::CustomMatcherHashMap*, simulator_i_cache, nullptr) \
  V(Redirection*, simulator_redirection, nullptr)
#else

#define ISOLATE_INIT_SIMULATOR_LIST(V)

#endif


#ifdef DEBUG

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
  V(CommentStatistic, paged_space_comments_statistics, \
    CommentStatistic::kMaxComments + 1)                \
  V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

typedef std::vector<HeapObject*> DebugObjectCache;

#define ISOLATE_INIT_LIST(V)                                                  \
  /* Assembler state. */                                                      \
  V(FatalErrorCallback, exception_behavior, nullptr)                          \
  V(OOMErrorCallback, oom_behavior, nullptr)                                  \
  V(LogEventCallback, event_logger, nullptr)                                  \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
  V(ExtensionCallback, wasm_module_callback, &NoExtension)                    \
  V(ExtensionCallback, wasm_instance_callback, &NoExtension)                  \
  V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr)      \
  V(ExternalReferenceRedirectorPointer*, external_reference_redirector,       \
    nullptr)                                                                  \
  /* State for Relocatable. */                                                \
  V(Relocatable*, relocatable_top, nullptr)                                   \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
  V(Object*, string_stream_current_security_token, nullptr)                   \
  V(ExternalReferenceTable*, external_reference_table, nullptr)               \
  V(const intptr_t*, api_external_references, nullptr)                        \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
  V(int, pending_microtask_count, 0)                                          \
  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
  V(CodeTracer*, code_tracer, nullptr)                                        \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
  V(int, code_and_metadata_size, 0)                                           \
  V(int, bytecode_and_metadata_size, 0)                                       \
  /* true if being profiled. Causes collection of extra compile info. */      \
  V(bool, is_profiling, false)                                                \
  /* true if a trace is being formatted through Error.prepareStackTrace. */   \
  V(bool, formatting_stack_trace, false)                                      \
  /* Perform side effect checks on function call and API callbacks. */        \
  V(bool, needs_side_effect_check, false)                                     \
  /* Current code coverage mode */                                            \
  V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort)  \
  V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone)   \
  V(int, last_stack_frame_info_id, 0)                                         \
  V(int, last_console_context_id, 0)                                          \
  ISOLATE_INIT_SIMULATOR_LIST(V)
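
// Each V(type, name, initial_value) entry above is an X-macro consumed
// elsewhere in this file; for instance, ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
// inside Isolate turns V(bool, is_profiling, false) into an is_profiling() /
// set_is_profiling() accessor pair backed by an is_profiling_ field.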

#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
  inline void set_##name(type v) { thread_local_top_.name##_ = v; }  \
  inline type name() const { return thread_local_top_.name##_; }

#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top_.name##_; }
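
// Example expansions (both used below): THREAD_LOCAL_TOP_ACCESSOR(ThreadId,
// thread_id) defines set_thread_id()/thread_id() forwarding to
// thread_local_top_.thread_id_, and THREAD_LOCAL_TOP_ADDRESS(Object*,
// pending_exception) defines pending_exception_address(), returning
// &thread_local_top_.pending_exception_.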


class Isolate {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;
 public:
  ~Isolate();

  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(nullptr),
#if USE_SIMULATOR
          simulator_(nullptr),
#endif
          next_(nullptr),
          prev_(nullptr) {
    }
    ~PerIsolateThreadData();
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };

  static void InitializeOncePerProcess();

  // Returns the PerIsolateThreadData for the current thread (or nullptr if one
  // is not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running.
  INLINE(static Isolate* Current()) {
    DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
    Isolate* isolate = reinterpret_cast<Isolate*>(
        base::Thread::GetExistingThreadLocal(isolate_key_));
    DCHECK_NOT_NULL(isolate);
    return isolate;
  }

  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();
  bool InitializeCounters();  // Returns false if already initialized.

  bool Init(StartupDeserializer* des);

  // True if at least one thread Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != nullptr; }

  // Destroys the non-default isolates.
  // Sets default isolate into "has_been_disposed" state rather than destroying,
  // for legacy API reasons.
  void TearDown();

  void ReleaseManagedObjects();

  static void GlobalTearDown();

  void ClearSerializerData();

  // Find the PerThread for this particular (isolate, thread) combination
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

  // Find the PerThread for given (isolate, thread) combination
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);

  // Discard the PerThread for this particular (isolate, thread) combination
  // If one does not yet exist, no-op.
  void DiscardPerThreadDataForThisThread();

  // Returns the key used to store the pointer to the current isolate.
  // Used internally for V8 threads that do not execute JavaScript but still
  // are part of the domain of an isolate (like the context switcher).
  static base::Thread::LocalStorageKey isolate_key() {
    return isolate_key_;
  }

  // Returns the key used to store process-wide thread IDs.
  static base::Thread::LocalStorageKey thread_id_key() {
    return thread_id_key_;
  }

  static base::Thread::LocalStorageKey per_isolate_thread_data_key();

  // Mutex for serializing access to break control structures.
  base::RecursiveMutex* break_access() { return &break_access_; }

  Address get_address_from_id(IsolateAddressId id);

  // Access to top context (where the current function object was created).
  Context* context() { return thread_local_top_.context_; }
  inline void set_context(Context* context);
  Context** context_address() { return &thread_local_top_.context_; }

  THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)

  // Access to current thread id.
  THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)

  // Interface to pending exception.
  inline Object* pending_exception();
  inline void set_pending_exception(Object* exception_obj);
  inline void clear_pending_exception();

  // Interface to wasm caught exception.
  inline Object* get_wasm_caught_exception();
  inline void set_wasm_caught_exception(Object* exception);
  inline void clear_wasm_caught_exception();

  THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)

  inline bool has_pending_exception();

  THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)

  THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)

  v8::TryCatch* try_catch_handler() {
    return thread_local_top_.try_catch_handler();
  }
  bool* external_caught_exception_address() {
    return &thread_local_top_.external_caught_exception_;
  }

  THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)

  inline void clear_pending_message();
  Address pending_message_obj_address() {
    return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
  }

  inline Object* scheduled_exception();
  inline bool has_scheduled_exception();
  inline void clear_scheduled_exception();

  bool IsJavaScriptHandlerOnTop(Object* exception);
  bool IsExternalHandlerOnTop(Object* exception);

  inline bool is_catchable_by_javascript(Object* exception);
  bool is_catchable_by_wasm(Object* exception);

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
  Address c_function() { return thread_local_top_.c_function_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top_.c_entry_fp_;
  }
  inline Address* handler_address() { return &thread_local_top_.handler_; }
  inline Address* c_function_address() {
    return &thread_local_top_.c_function_;
  }

  // Bottom JS entry.
  Address js_entry_sp() {
    return thread_local_top_.js_entry_sp_;
  }
  inline Address* js_entry_sp_address() {
    return &thread_local_top_.js_entry_sp_;
  }

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  inline Handle<JSGlobalObject> global_object();

  // Returns the global proxy object of the current context.
  inline Handle<JSObject> global_proxy();

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top_.Free(); }

  // This method is called by the API after operations that may throw
  // exceptions.  If an exception was thrown and not handled by an external
  // handler, the exception is scheduled to be rethrown when we return to
  // running JavaScript code.  If an exception is scheduled, true is returned.
  V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool is_bottom_call);

  // Push and pop a promise and the current try-catch handler.
  void PushPromise(Handle<JSObject> promise);
  void PopPromise();

  // Return the relevant Promise that a throw/rejection pertains to, based
  // on the contents of the Promise stack.
  Handle<Object> GetPromiseOnStackOnThrow();

  // Heuristically guess whether a Promise is handled by a user catch handler.
  bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);

  class ExceptionScope {
   public:
    // Scope currently can only be used for regular exceptions,
    // not the termination exception.
    inline explicit ExceptionScope(Isolate* isolate);
    inline ~ExceptionScope();

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
  };
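
  // A usage sketch (hypothetical), assuming the inline constructor saves the
  // pending exception and the destructor restores it:
  //
  //   {
  //     ExceptionScope exception_scope(isolate);
  //     isolate->clear_pending_exception();
  //     // ... work that must not observe the original exception ...
  //   }  // The original pending exception is reinstated here.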

  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture,
      int frame_limit,
      StackTrace::StackTraceOptions options);

  void SetAbortOnUncaughtExceptionCallback(
      v8::Isolate::AbortOnUncaughtExceptionCallback callback);

  enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
  void PrintCurrentStackTrace(FILE* out);
  void PrintStack(StringStream* accumulator,
                  PrintStackMode mode = kPrintStackVerbose);
  V8_EXPORT_PRIVATE void PrintStack(FILE* out,
                                    PrintStackMode mode = kPrintStackVerbose);
  Handle<String> StackTraceString();
  // Stores a stack trace in a stack-allocated temporary buffer which will
  // end up in the minidump for debugging purposes.
  NO_INLINE(void PushStackTraceAndDie(unsigned int magic1, void* ptr1,
                                      void* ptr2, unsigned int magic2));
  NO_INLINE(void PushStackTraceAndDie(unsigned int magic1, void* ptr1,
                                      void* ptr2, void* ptr3, void* ptr4,
                                      void* ptr5, void* ptr6, void* ptr7,
                                      void* ptr8, unsigned int magic2));
  NO_INLINE(void PushCodeObjectsAndDie(unsigned int magic, void* ptr1,
                                       void* ptr2, void* ptr3, void* ptr4,
                                       void* ptr5, void* ptr6, void* ptr7,
                                       void* ptr8, unsigned int magic2));
  Handle<FixedArray> CaptureCurrentStackTrace(
      int frame_limit, StackTrace::StackTraceOptions options);
  Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                         FrameSkipMode mode,
                                         Handle<Object> caller);
  MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
      Handle<JSReceiver> error_object);
  MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
      Handle<JSReceiver> error_object, FrameSkipMode mode,
      Handle<Object> caller);
  Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);

  Address GetAbstractPC(int* line, int* column);

  // Returns true if the given context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be
  // set.
  bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(Handle<JSObject> receiver);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Object* Throw(Object* exception, MessageLocation* location = nullptr);
  Object* ThrowIllegalOperation();

  template <typename T>
  MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
                                       MessageLocation* location = nullptr) {
    Throw(*exception, location);
    return MaybeHandle<T>();
  }

  void set_console_delegate(debug::ConsoleDelegate* delegate) {
    console_delegate_ = delegate;
  }
  debug::ConsoleDelegate* console_delegate() { return console_delegate_; }

  // Re-throw an exception.  This involves no error reporting since error
  // reporting was handled when the exception was thrown originally.
  Object* ReThrow(Object* exception);

  // Find the correct handler for the current pending exception. This also
  // clears and returns the current pending exception.
  Object* UnwindAndFindHandler();

  // Tries to predict whether an exception will be caught. Note that this can
  // only produce an estimate, because it is undecidable whether a finally
  // clause will consume or re-throw an exception.
  enum CatchType {
    NOT_CAUGHT,
    CAUGHT_BY_JAVASCRIPT,
    CAUGHT_BY_EXTERNAL,
    CAUGHT_BY_DESUGARING,
    CAUGHT_BY_PROMISE,
    CAUGHT_BY_ASYNC_AWAIT
  };
  CatchType PredictExceptionCatcher();

  void ScheduleThrow(Object* exception);
  // Re-set pending message, script and positions reported to the TryCatch
  // back to the TLS for re-use when rethrowing.
  void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
  // Un-schedule an exception that was caught by a TryCatch handler.
  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
  void ReportPendingMessages();
  // Returns the pending location if any, or an unfilled structure otherwise.
  MessageLocation GetMessageLocation();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Object* PromoteScheduledException();

  // Attempts to compute the current source location, storing the
  // result in the target out parameter. The source location is attached to a
  // Message object as the location which should be shown to the user. It's
  // typically the top-most meaningful location on the stack.
  bool ComputeLocation(MessageLocation* target);
  bool ComputeLocationFromException(MessageLocation* target,
                                    Handle<Object> exception);
  bool ComputeLocationFromStackTrace(MessageLocation* target,
                                     Handle<Object> exception);

  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
                                        MessageLocation* location);

  // Out of resource exception helpers.
  Object* StackOverflow();
  Object* TerminateExecution();
  void CancelTerminateExecution();

  void RequestInterrupt(InterruptCallback callback, void* data);
  void InvokeApiInterruptCallbacks();

  // Administration
  void Iterate(RootVisitor* v);
  void Iterate(RootVisitor* v, ThreadLocalTop* t);
  char* Iterate(RootVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v, char* t);

  // Returns the current native context.
  inline Handle<Context> native_context();
  inline Context* raw_native_context();

  // Returns the native context of the calling JavaScript code.  That
  // is, the native context of the top-most JavaScript frame.
  Handle<Context> GetCallingNativeContext();

  Handle<Context> GetIncumbentContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                       \
  inline type name() const {                                            \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return name##_;                                                     \
  }                                                                     \
  inline void set_##name(type value) {                                  \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    name##_ = value;                                                    \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                       \
  inline type* name() {                                                 \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return &(name##_)[0];                                               \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type* value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  // Use for updating counters on a foreground thread.
  Counters* counters() { return async_counters().get(); }
  // Use for updating counters on a background thread.
  const std::shared_ptr<Counters>& async_counters() {
    // Make sure InitializeCounters() has been called.
    DCHECK_NOT_NULL(async_counters_.get());
    return async_counters_;
  }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK_NOT_NULL(logger_);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }
  StubCache* load_stub_cache() { return load_stub_cache_; }
  StubCache* store_stub_cache() { return store_stub_cache_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
  MaterializedObjectStore* materialized_object_store() {
    return materialized_object_store_;
  }

  ContextSlotCache* context_slot_cache() {
    return context_slot_cache_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() {
    return unicode_cache_;
  }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() { return global_handles_; }

  EternalHandles* eternal_handles() { return eternal_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  std::vector<int>* regexp_indices() { return &regexp_indices_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &regexp_macro_assembler_canonicalize_;
  }

  Debug* debug() { return debug_; }

  bool* is_profiling_address() { return &is_profiling_; }
  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  static size_t non_disposed_isolates() {
    return non_disposed_isolates_.Value();
  }

  HistogramInfo* heap_histograms() { return heap_histograms_; }

  JSObject::SpillInformation* js_spill_information() {
    return &js_spill_information_;
  }
#endif

  Factory* factory() { return reinterpret_cast<Factory*>(this); }

  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)

  void SetData(uint32_t slot, void* data) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    return embedder_data_[slot];
  }

  bool serializer_enabled() const { return serializer_enabled_; }
  void set_serializer_enabled_for_test(bool serializer_enabled) {
    serializer_enabled_ = serializer_enabled;
  }
  bool snapshot_available() const {
    return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_optimizer();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool is_best_effort_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBestEffort;
  }

  bool is_precise_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseCount;
  }

  bool is_precise_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseBinary;
  }

  bool is_block_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockCount;
  }

  bool is_block_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockBinary;
  }

  bool is_block_code_coverage() const {
    return is_block_count_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_collecting_type_profile() const {
    return type_profile_mode() == debug::TypeProfile::kCollect;
  }

  // Collect feedback vectors with data for code coverage or type profile.
  // Reset the list, when both code coverage and type profile are not
  // needed anymore. This keeps many feedback vectors alive, but code
  // coverage or type profile are used for debugging only and increase in
  // memory usage is expected.
  void SetFeedbackVectorsForProfilingTools(Object* value);

  void InitializeVectorListFromHeap();

  double time_millis_since_init() {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() {
    return date_cache_;
  }

  void set_date_cache(DateCache* date_cache) {
    if (date_cache != date_cache_) {
      delete date_cache_;
    }
    date_cache_ = date_cache;
  }

  static const int kProtectorValid = 1;
  static const int kProtectorInvalid = 0;

  inline bool IsArrayConstructorIntact();
  bool IsFastArrayConstructorPrototypeChainIntact();
  inline bool IsArraySpeciesLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
  inline bool IsStringLengthOverflowIntact();
  inline bool IsArrayIteratorLookupChainIntact();

  // Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
  inline bool IsFastArrayIterationIntact();

  // Make sure we do check for neutered array buffers.
  inline bool IsArrayBufferNeuteringIntact();

  // On intent to set an element in object, make sure that appropriate
  // notifications occur if the set is on the elements of the array or
  // object prototype. Also ensure that changes to prototype chain between
  // Array and Object fire notifications.
  void UpdateArrayProtectorOnSetElement(Handle<JSObject> object);
  void UpdateArrayProtectorOnSetLength(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void UpdateArrayProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void InvalidateArrayConstructorProtector();
  void InvalidateArraySpeciesProtector();
  void InvalidateIsConcatSpreadableProtector();
  void InvalidateStringLengthOverflowProtector();
  void InvalidateArrayIteratorProtector();
  void InvalidateArrayBufferNeuteringProtector();

  // Returns true if array is the initial array prototype in any native context.
  bool IsAnyInitialArrayPrototype(Handle<JSArray> array);

  V8_EXPORT_PRIVATE CallInterfaceDescriptorData* call_descriptor_data(
      int index);

  AccessCompilerData* access_compiler_data() { return access_compiler_data_; }

  void IterateDeferredHandles(RootVisitor* visitor);
  void LinkDeferredHandles(DeferredHandles* deferred_handles);
  void UnlinkDeferredHandles(DeferredHandles* deferred_handles);

#ifdef DEBUG
  bool IsDeferredHandle(Object** location);
#endif  // DEBUG

  bool concurrent_recompilation_enabled() {
    // Thread is only available with flag enabled.
    DCHECK(optimizing_compile_dispatcher_ == nullptr ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != nullptr;
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    return optimizing_compile_dispatcher_;
  }

  int id() const { return static_cast<int>(id_); }

  CompilationStatistics* GetTurboStatistics();
  CodeTracer* GetCodeTracer();

  void DumpAndResetStats();

  FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
  void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
    function_entry_hook_ = function_entry_hook;
  }

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Code* FindCodeObject(Address a);

  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback();

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  inline void FireMicrotasksCompletedCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
                          MaybeHandle<Object>* result,
                          MaybeHandle<Object>* maybe_exception);
  void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
                                 MaybeHandle<Object>* result,
                                 MaybeHandle<Object>* maybe_exception);
  void EnqueueMicrotask(Handle<Object> microtask);
  void RunMicrotasks();
  bool IsRunningMicrotasks() const { return is_running_microtasks_; }

  Handle<Symbol> SymbolFor(Heap::RootListIndex dictionary_index,
                           Handle<String> name, bool private_symbol);

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);

  BasicBlockProfiler* GetOrCreateBasicBlockProfiler();
  BasicBlockProfiler* basic_block_profiler() { return basic_block_profiler_; }

  std::string GetTurboCfgFileName();

#if V8_SFI_HAS_UNIQUE_ID
  int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif

  Address promise_hook_or_debug_is_active_address() {
    return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
  }

  void DebugStateUpdated();

  void SetPromiseHook(PromiseHook hook);
  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
                      Handle<Object> parent);

  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  std::vector<Object*>* partial_snapshot_cache() {
    return &partial_snapshot_cache_;
  }

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }

  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }

  CancelableTaskManager* cancelable_task_manager() {
    return cancelable_task_manager_;
  }

  wasm::CompilationManager* wasm_compilation_manager() {
    return wasm_compilation_manager_.get();
  }

  const AstStringConstants* ast_string_constants() const {
    return ast_string_constants_;
  }

  interpreter::Interpreter* interpreter() const { return interpreter_; }

  AccountingAllocator* allocator() { return allocator_; }

  CompilerDispatcher* compiler_dispatcher() const {
    return compiler_dispatcher_;
  }

  bool IsInAnyContext(Object* object, uint32_t index);

  void SetHostImportModuleDynamicallyCallback(
      HostImportModuleDynamicallyCallback callback);
  MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
      Handle<Script> referrer, Handle<Object> specifier);

  void SetHostInitializeImportMetaObjectCallback(
      HostInitializeImportMetaObjectCallback callback);
  Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
      Handle<Module> module);

  void SetRAILMode(RAILMode rail_mode);

  RAILMode rail_mode() { return rail_mode_.Value(); }

  double LoadStartTimeMs();

  void IsolateInForegroundNotification();

  void IsolateInBackgroundNotification();

  bool IsIsolateInBackground() { return is_isolate_in_background_; }

  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);

#ifdef USE_SIMULATOR
  base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
  base::Mutex* simulator_redirection_mutex() {
    return &simulator_redirection_mutex_;
  }
#endif

  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
  bool allow_atomics_wait() { return allow_atomics_wait_; }

  // List of native heap values allocated by the runtime as part of its
  // implementation that must be freed at isolate deinit.
  class ManagedObjectFinalizer {
   public:
    using Deleter = void (*)(ManagedObjectFinalizer*);

    ManagedObjectFinalizer(void* value, Deleter deleter)
        : value_(value), deleter_(deleter) {}

    void Dispose() { deleter_(this); }

    void* value() const { return value_; }

   private:
    friend class Isolate;

    ManagedObjectFinalizer() = default;

    void* value_ = nullptr;
    Deleter deleter_ = nullptr;
    ManagedObjectFinalizer* prev_ = nullptr;
    ManagedObjectFinalizer* next_ = nullptr;
  };

  static_assert(offsetof(ManagedObjectFinalizer, value_) == 0,
                "value_ must be the first member");

  // Register a finalizer to be called at isolate teardown.
  void RegisterForReleaseAtTeardown(ManagedObjectFinalizer*);

  // Unregister a previously registered value from release at
  // isolate teardown.
  // This transfers the responsibility of the previously managed value's
  // deletion to the caller.
  void UnregisterFromReleaseAtTeardown(ManagedObjectFinalizer*);
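
  // Registration sketch (hypothetical; Foo, foo and DeleteFoo are
  // illustrative):
  //
  //   static void DeleteFoo(Isolate::ManagedObjectFinalizer* f) {
  //     delete static_cast<Foo*>(f->value());
  //   }
  //   auto* finalizer = new Isolate::ManagedObjectFinalizer(foo, &DeleteFoo);
  //   isolate->RegisterForReleaseAtTeardown(finalizer);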

  size_t elements_deletion_counter() { return elements_deletion_counter_; }
  void set_elements_deletion_counter(size_t value) {
    elements_deletion_counter_ = value;
  }

  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
    return top_backup_incumbent_scope_;
  }
  void set_top_backup_incumbent_scope(
      const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
    top_backup_incumbent_scope_ = top_backup_incumbent_scope;
  }

 protected:
  explicit Isolate(bool enable_serializer);
  bool IsArrayOrObjectOrStringPrototype(Object* object);

 private:
  friend struct GlobalState;
  friend struct InitializeGlobalState;

  // These fields are accessed through the API; offsets must be kept in sync
  // with v8::internal::Internals (in include/v8.h) constants. This is also
  // verified in Isolate::Init() using runtime checks.
  void* embedder_data_[Internals::kNumIsolateDataSlots];
  Heap heap_;

  // The per-process lock should be acquired before the ThreadDataTable is
  // modified.
  class ThreadDataTable {
   public:
    ThreadDataTable();
    ~ThreadDataTable();

    PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads(Isolate* isolate);

   private:
    PerIsolateThreadData* list_;
  };

  // These items form a stack synchronously with threads Enter'ing and Exit'ing
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item pushed to the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate,
                   EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) { }

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };

  static base::LazyMutex thread_data_table_mutex_;

  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
  static base::Thread::LocalStorageKey isolate_key_;
  static base::Thread::LocalStorageKey thread_id_key_;
  static ThreadDataTable* thread_data_table_;

  // A global counter for all generated Isolates; it might overflow.
  static base::Atomic32 isolate_counter_;

#if DEBUG
  static base::Atomic32 isolate_key_created_;
#endif

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Exit();
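
  // Illustrative sketch (embedder-side, via the public v8::Isolate API, not
  // these private members): re-entering on the same thread only bumps
  // entry_count_ rather than pushing a new EntryStackItem.
  //
  //   v8::Isolate* isolate = v8::Isolate::New(create_params);  // params
  //                                                            // assumed
  //   isolate->Enter();
  //   isolate->Enter();  // same thread: entry_count_ becomes 2
  //   isolate->Exit();
  //   isolate->Exit();   // entry stack empty; Dispose() is now allowed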

  void InitializeThreadLocal();

  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  // Propagate the pending exception message to the v8::TryCatch.
  // If there is no external try-catch or the message was successfully
  // propagated, then return true.
  bool PropagatePendingExceptionToExternalTryCatch();

  void RunMicrotasksInternal();

  const char* RAILModeName(RAILMode rail_mode) const {
    switch (rail_mode) {
      case PERFORMANCE_RESPONSE:
        return "RESPONSE";
      case PERFORMANCE_ANIMATION:
        return "ANIMATION";
      case PERFORMANCE_IDLE:
        return "IDLE";
      case PERFORMANCE_LOAD:
        return "LOAD";
    }
    return "";
  }

  // TODO(alph): Remove along with the deprecated GetCpuProfiler().
  friend v8::CpuProfiler* v8::Isolate::GetCpuProfiler();
  CpuProfiler* cpu_profiler() const { return cpu_profiler_; }

  base::Atomic32 id_;
  EntryStackItem* entry_stack_;
  int stack_trace_nesting_level_;
  StringStream* incomplete_message_;
  Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
  Bootstrapper* bootstrapper_;
  RuntimeProfiler* runtime_profiler_;
  CompilationCache* compilation_cache_;
  std::shared_ptr<Counters> async_counters_;
  base::RecursiveMutex break_access_;
  Logger* logger_;
  StackGuard stack_guard_;
  StubCache* load_stub_cache_;
  StubCache* store_stub_cache_;
  DeoptimizerData* deoptimizer_data_;
  bool deoptimizer_lazy_throw_;
  MaterializedObjectStore* materialized_object_store_;
  ThreadLocalTop thread_local_top_;
  bool capture_stack_trace_for_uncaught_exceptions_;
  int stack_trace_for_uncaught_exceptions_frame_limit_;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
  ContextSlotCache* context_slot_cache_;
  DescriptorLookupCache* descriptor_lookup_cache_;
  HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_;
  UnicodeCache* unicode_cache_;
  AccountingAllocator* allocator_;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
  GlobalHandles* global_handles_;
  EternalHandles* eternal_handles_;
  ThreadManager* thread_manager_;
  RuntimeState runtime_state_;
  Builtins builtins_;
  SetupIsolateDelegate* setup_delegate_;
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
  RegExpStack* regexp_stack_;
  std::vector<int> regexp_indices_;
  DateCache* date_cache_;
  CallInterfaceDescriptorData* call_descriptor_data_;
  AccessCompilerData* access_compiler_data_;
  base::RandomNumberGenerator* random_number_generator_;
  base::AtomicValue<RAILMode> rail_mode_;
  bool promise_hook_or_debug_is_active_;
  PromiseHook promise_hook_;
  HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_;
  HostInitializeImportMetaObjectCallback
      host_initialize_import_meta_object_callback_;
  base::Mutex rail_mutex_;
  double load_start_time_ms_;

  // Whether the isolate has been created for snapshotting.
  bool serializer_enabled_;

  // True if a fatal error has been signaled for this isolate.
  bool has_fatal_error_;

  // True if this isolate was initialized from a snapshot.
  bool initialized_from_snapshot_;

  // True if the ES2015 tail call elimination feature is enabled.
  bool is_tail_call_elimination_enabled_;

  // True if the isolate is in the background. This flag is used
  // to prioritize between memory usage and latency.
  bool is_isolate_in_background_;

  // Time stamp at initialization.
  double time_millis_at_init_;

#ifdef DEBUG
  static base::AtomicNumber<size_t> non_disposed_isolates_;

  // A static array of histogram info for each type.
  HistogramInfo heap_histograms_[LAST_TYPE + 1];
  JSObject::SpillInformation js_spill_information_;
#endif

  Debug* debug_;
  CpuProfiler* cpu_profiler_;
  HeapProfiler* heap_profiler_;
  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
  FunctionEntryHook function_entry_hook_;

  const AstStringConstants* ast_string_constants_;

  interpreter::Interpreter* interpreter_;

  CompilerDispatcher* compiler_dispatcher_;

  typedef std::pair<InterruptCallback, void*> InterruptEntry;
  std::queue<InterruptEntry> api_interrupts_queue_;

#define GLOBAL_BACKING_STORE(type, name, initialvalue)                         \
  type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length)                         \
  type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored)                              \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  DeferredHandles* deferred_handles_head_;
  OptimizingCompileDispatcher* optimizing_compile_dispatcher_;

  // Counts deopt points if deopt_every_n_times is enabled.
  unsigned int stress_deopt_count_;

  int next_optimization_id_;

#if V8_SFI_HAS_UNIQUE_ID
  int next_unique_sfi_id_;
#endif

  // Vector of callbacks before a Call starts execution.
  std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;

  // Vector of callbacks when a Call completes.
  std::vector<CallCompletedCallback> call_completed_callbacks_;

  // Vector of callbacks after microtasks were run.
  std::vector<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
  bool is_running_microtasks_;

  v8::Isolate::UseCounterCallback use_counter_callback_;
  BasicBlockProfiler* basic_block_profiler_;

  std::vector<Object*> partial_snapshot_cache_;

  v8::ArrayBuffer::Allocator* array_buffer_allocator_;

  FutexWaitListNode futex_wait_list_node_;

  CancelableTaskManager* cancelable_task_manager_;

  std::unique_ptr<wasm::CompilationManager> wasm_compilation_manager_;

  debug::ConsoleDelegate* console_delegate_ = nullptr;

  v8::Isolate::AbortOnUncaughtExceptionCallback
      abort_on_uncaught_exception_callback_;

#ifdef USE_SIMULATOR
  base::Mutex simulator_i_cache_mutex_;
  base::Mutex simulator_redirection_mutex_;
#endif

  bool allow_atomics_wait_;

  ManagedObjectFinalizer managed_object_finalizers_list_;

  size_t total_regexp_code_generated_;

  size_t elements_deletion_counter_ = 0;

  // The top entry of the v8::Context::BackupIncumbentScope stack.
  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
      nullptr;

  friend class ExecutionAccess;
  friend class HandleScopeImplementer;
  friend class heap::HeapTester;
  friend class OptimizingCompileDispatcher;
  friend class SweeperThread;
  friend class ThreadManager;
  friend class Simulator;
  friend class StackGuard;
  friend class TestIsolate;
  friend class ThreadId;
  friend class v8::Isolate;
  friend class v8::Locker;
  friend class v8::Unlocker;
  friend class v8::SnapshotCreator;
  friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
  friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
                                                        const char*);

  DISALLOW_COPY_AND_ASSIGN(Isolate);
};


#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR


class PromiseOnStack {
 public:
  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
      : promise_(promise), prev_(prev) {}
  Handle<JSObject> promise() { return promise_; }
  PromiseOnStack* prev() { return prev_; }

 private:
  Handle<JSObject> promise_;
  PromiseOnStack* prev_;
};
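
// Illustrative sketch (not part of this header): walking the linked stack
// of promises the debugger tracks. `top` is a hypothetical PromiseOnStack*
// obtained from debugger state.
//
//   for (PromiseOnStack* it = top; it != nullptr; it = it->prev()) {
//     Handle<JSObject> promise = it->promise();
//     // ... inspect `promise` ...
//   }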


// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
 public:
  explicit SaveContext(Isolate* isolate);
  ~SaveContext();

  Handle<Context> context() { return context_; }
  SaveContext* prev() { return prev_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(StandardFrame* frame);

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  SaveContext* const prev_;
  Address c_entry_fp_;
};
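
// Illustrative sketch (internal code; `other_context` is a hypothetical
// Handle<Context>): SaveContext restores the isolate's current context when
// the scope ends.
//
//   {
//     SaveContext save(isolate);
//     isolate->set_context(*other_context);  // temporarily switch contexts
//     // ... run code against `other_context` ...
//   }  // ~SaveContext() restores the previous context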


class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
 public:
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() {
    DCHECK(isolate_->context() == *context_);
  }

 private:
  Isolate* isolate_;
  Handle<Context> context_;
#else
 public:
  explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};
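
// Illustrative sketch (internal code; `SomeRuntimeHelper` is hypothetical):
// place the scope in a region that must not switch contexts; in debug
// builds the destructor DCHECKs that the isolate's context is unchanged.
//
//   void SomeRuntimeHelper(Isolate* isolate) {
//     AssertNoContextChange no_context_change(isolate);
//     // ... code that must leave isolate->context() untouched ...
//   }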


class ExecutionAccess BASE_EMBEDDED {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access()->TryLock();
  }

 private:
  Isolate* isolate_;
};
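
// Illustrative sketch (internal code): ExecutionAccess is an RAII guard
// around the isolate's break_access() lock, serializing debugger and
// interrupt bookkeeping with execution.
//
//   {
//     ExecutionAccess access(isolate);  // locks break_access()
//     // ... mutate interrupt/debug state safely ...
//   }  // unlocked on scope exit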


// Support for checking for stack-overflows.
class StackLimitCheck BASE_EMBEDDED {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }

  // Use this to check for stack-overflows in C++ code.
  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->real_climit();
  }

  // Use this to check for interrupt requests in C++ code.
  bool InterruptRequested() {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->climit();
  }

  // Use this to check for stack-overflow when entering runtime from JS code.
  bool JsHasOverflowed(uintptr_t gap = 0) const;

 private:
  Isolate* isolate_;
};
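
// Illustrative sketch (internal code; `Walk` and `Node` are hypothetical):
// guard deep recursion in C++ with an explicit overflow check before
// recursing.
//
//   bool Walk(Isolate* isolate, Node* node) {
//     StackLimitCheck check(isolate);
//     if (check.HasOverflowed()) {
//       isolate->StackOverflow();  // throws a stack-overflow RangeError
//       return false;
//     }
//     // ... recurse into children ...
//     return true;
//   }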

#define STACK_CHECK(isolate, result_value) \
  do {                                     \
    StackLimitCheck stack_check(isolate);  \
    if (stack_check.HasOverflowed()) {     \
      isolate->StackOverflow();            \
      return result_value;                 \
    }                                      \
  } while (false)
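
// Illustrative usage (the function and return type here are hypothetical,
// but this mirrors how runtime code bails out on overflow):
//
//   MaybeHandle<Object> DoSomethingRecursive(Isolate* isolate) {
//     STACK_CHECK(isolate, MaybeHandle<Object>());
//     // ... proceed now that stack headroom is known to be available ...
//   }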

// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope BASE_EMBEDDED {
 public:
  PostponeInterruptsScope(Isolate* isolate,
                          int intercept_mask = StackGuard::ALL_INTERRUPTS)
      : stack_guard_(isolate->stack_guard()),
        intercept_mask_(intercept_mask),
        intercepted_flags_(0) {
    stack_guard_->PushPostponeInterruptsScope(this);
  }

  ~PostponeInterruptsScope() {
    stack_guard_->PopPostponeInterruptsScope();
  }

  // Find the bottom-most scope that intercepts this interrupt.
  // Return whether the interrupt has been intercepted.
  bool Intercept(StackGuard::InterruptFlag flag);

 private:
  StackGuard* stack_guard_;
  int intercept_mask_;
  int intercepted_flags_;
  PostponeInterruptsScope* prev_;

  friend class StackGuard;
};
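
// Illustrative sketch (internal code): postpone GC interrupts across a
// critical section; a GC_REQUEST arriving inside the scope is taken into
// account once the outermost scope exits.
//
//   {
//     PostponeInterruptsScope postpone(isolate, StackGuard::GC_REQUEST);
//     // ... code that must not be interrupted by a requested GC ...
//   }  // a pending GC_REQUEST, if any, becomes deliverable here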


class CodeTracer final : public Malloced {
 public:
  explicit CodeTracer(int isolate_id) : file_(nullptr), scope_depth_(0) {
    if (!ShouldRedirect()) {
      file_ = stdout;
      return;
    }

    if (FLAG_redirect_code_traces_to == nullptr) {
      SNPrintF(filename_,
               "code-%d-%d.asm",
               base::OS::GetCurrentProcessId(),
               isolate_id);
    } else {
      StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
    }

    WriteChars(filename_.start(), "", 0, false);
  }

  class Scope {
   public:
    explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
    ~Scope() { tracer_->CloseFile();  }

    FILE* file() const { return tracer_->file(); }

   private:
    CodeTracer* tracer_;
  };

  void OpenFile() {
    if (!ShouldRedirect()) {
      return;
    }


    if (file_ == nullptr) {
      file_ = base::OS::FOpen(filename_.start(), "ab");
    }

    scope_depth_++;
  }

  void CloseFile() {
    if (!ShouldRedirect()) {
      return;
    }

    if (--scope_depth_ == 0) {
      fclose(file_);
      file_ = nullptr;
    }
  }

  FILE* file() const { return file_; }

 private:
  static bool ShouldRedirect() {
    return FLAG_redirect_code_traces;
  }

  EmbeddedVector<char, 128> filename_;
  FILE* file_;
  int scope_depth_;
};
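
// Illustrative usage (internal code): open the trace file for the duration
// of a scope and write through its FILE* handle.
//
//   CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
//   PrintF(tracing_scope.file(), "--- disassembly ---\n");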

}  // namespace internal
}  // namespace v8

#endif  // V8_ISOLATE_H_