// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_

#include <cstddef>
#include <memory>
#include <queue>
#include <unordered_map>
#include <vector>

#include "include/v8-inspector.h"
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
#include "src/date.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
#include "src/unicode.h"

namespace v8 {

namespace base {
class RandomNumberGenerator;
}

namespace debug {
class ConsoleDelegate;
}

namespace internal {

namespace heap {
class HeapTester;
}  // namespace heap

class AccessCompilerData;
class AddressToIndexHashMap;
class AstStringConstants;
class BasicBlockProfiler;
class Bootstrapper;
class BuiltinsConstantsTableBuilder;
class CallInterfaceDescriptorData;
class CancelableTaskManager;
class CodeEventDispatcher;
class CodeGenerator;
class CodeRange;
class CodeStubDescriptor;
class CodeTracer;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
class ContextSlotCache;
class Counters;
class CpuFeatures;
class CpuProfiler;
class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
class EmptyStatement;
class EternalHandles;
class ExternalCallbackScope;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
class InstructionStream;
class Logger;
class MaterializedObjectStore;
class Microtask;
class OptimizingCompileDispatcher;
class PromiseOnStack;
class Redirection;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
class SaveContext;
class SetupIsolateDelegate;
class Simulator;
class StartupDeserializer;
class StandardFrame;
class StatsTable;
class StringTracker;
class StubCache;
class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor;  // Defined in v8threads.h
class TracingCpuProfilerImpl;
class UnicodeCache;
struct ManagedPtrDestructor;

template <StateTag Tag> class VMState;

namespace interpreter {
class Interpreter;
}

namespace wasm {
class WasmEngine;
}

#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    DCHECK(!__isolate__->has_pending_exception());     \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)

// Macros for MaybeHandle.

#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    DCHECK(!__isolate__->has_pending_exception());          \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
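
// Illustrative usage (an editor's sketch, not part of the original header):
// a MaybeHandle-returning helper can bail out early when a previous API call
// left a scheduled exception behind. `DoApiWork` is a hypothetical name.
//
//   MaybeHandle<Object> DoApiWork(Isolate* isolate) {
//     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
//     // ... no exception was scheduled; continue ...
//   }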

#define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    if (!(call).ToLocal(&dst)) {                                              \
      DCHECK(__isolate__->has_scheduled_exception());                         \
      __isolate__->PromoteScheduledException();                               \
      return value;                                                           \
    }                                                                         \
  } while (false)

#define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                            \
    Isolate* __isolate__ = (isolate);                             \
    if ((call).IsNothing()) {                                     \
      DCHECK(__isolate__->has_scheduled_exception());             \
      __isolate__->PromoteScheduledException();                   \
      return value;                                               \
    }                                                             \
  } while (false)

#define RETURN_RESULT_OR_FAILURE(isolate, call)     \
  do {                                              \
    Handle<Object> __result__;                      \
    Isolate* __isolate__ = (isolate);               \
    if (!(call).ToHandle(&__result__)) {            \
      DCHECK(__isolate__->has_pending_exception()); \
      return __isolate__->heap()->exception();      \
    }                                               \
    DCHECK(!__isolate__->has_pending_exception());  \
    return *__result__;                             \
  } while (false)
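
// Illustrative usage (an editor's sketch, not part of the original header):
// runtime entry points must return a raw Object*, so they typically funnel a
// MaybeHandle-returning call through this macro. `Runtime_Example` and
// `ComputeResult` are hypothetical names.
//
//   RUNTIME_FUNCTION(Runtime_Example) {
//     HandleScope scope(isolate);
//     RETURN_RESULT_OR_FAILURE(isolate, ComputeResult(isolate));
//   }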

#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value)  \
  do {                                                               \
    if (!(call).ToHandle(&dst)) {                                    \
      DCHECK((isolate)->has_pending_exception());                    \
      return value;                                                  \
    }                                                                \
  } while (false)

#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)          \
  do {                                                                  \
    Isolate* __isolate__ = (isolate);                                   \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,            \
                                     __isolate__->heap()->exception()); \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T)  \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
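
// Illustrative usage (an editor's sketch, not part of the original header):
// assign the result of a MaybeHandle-returning call, propagating failure as
// an empty MaybeHandle<String>. `ToName` is a hypothetical helper.
//
//   MaybeHandle<String> Example(Isolate* isolate, Handle<Object> input) {
//     Handle<Name> name;
//     ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, input), String);
//     // ... use name ...
//   }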

#define THROW_NEW_ERROR(isolate, call, T)                       \
  do {                                                          \
    Isolate* __isolate__ = (isolate);                           \
    return __isolate__->Throw<T>(__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    Isolate* __isolate__ = (isolate);                         \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)

#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value)            \
  do {                                                             \
    if ((call).is_null()) {                                        \
      DCHECK((isolate)->has_pending_exception());                  \
      return value;                                                \
    }                                                              \
  } while (false)

#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)               \
  do {                                                           \
    Isolate* __isolate__ = (isolate);                            \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                 \
                              __isolate__->heap()->exception()); \
  } while (false)

#define RETURN_ON_EXCEPTION(isolate, call, T)  \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())


#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = isolate;                            \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)
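
// Illustrative usage (an editor's sketch, not part of the original header):
// loop over many elements, opening a fresh HandleScope every 1024 iterations
// so that handles created in the body do not accumulate. `length` and `array`
// are hypothetical.
//
//   FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i++, {
//     Handle<Object> element(array->get(i), isolate);
//     // ... use element ...
//   });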

// Platform-independent, reliable thread identifier.
class ThreadId {
 public:
  // Creates an invalid ThreadId.
  ThreadId() { base::Relaxed_Store(&id_, kInvalidId); }

  ThreadId& operator=(const ThreadId& other) {
    base::Relaxed_Store(&id_, base::Relaxed_Load(&other.id_));
    return *this;
  }

  bool operator==(const ThreadId& other) const { return Equals(other); }

  // Returns ThreadId for current thread.
  static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }

  // Returns invalid ThreadId (guaranteed not to be equal to any thread).
  static ThreadId Invalid() { return ThreadId(kInvalidId); }

  // Compares ThreadIds for equality.
  INLINE(bool Equals(const ThreadId& other) const) {
    return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
  }

  // Checks whether this ThreadId refers to any thread.
  INLINE(bool IsValid() const) {
    return base::Relaxed_Load(&id_) != kInvalidId;
  }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::GetCurrentThreadId).
  int ToInteger() const { return static_cast<int>(base::Relaxed_Load(&id_)); }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::TerminateExecution).
  static ThreadId FromInteger(int id) { return ThreadId(id); }

 private:
  static const int kInvalidId = -1;

  explicit ThreadId(int id) { base::Relaxed_Store(&id_, id); }

  static int AllocateThreadId();

  V8_EXPORT_PRIVATE static int GetCurrentThreadId();

  base::Atomic32 id_;

  static base::Atomic32 highest_thread_id_;

  friend class Isolate;
};
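
// Illustrative usage (an editor's sketch, not part of the original header):
// ThreadId values are compared via Equals() or operator==, never by raw
// integer.
//
//   ThreadId current = ThreadId::Current();
//   if (current.Equals(isolate->thread_id())) {
//     // This thread is the isolate's current thread.
//   }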

#define FIELD_ACCESSOR(type, name)                 \
  inline void set_##name(type v) { name##_ = v; }  \
  inline type name() const { return name##_; }


class ThreadLocalTop BASE_EMBEDDED {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop();

  // Initialize the thread data.
  void Initialize();

  // Get the top C++ try catch handler or nullptr if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack.  If such an
  // address is needed, use try_catch_handler_address.
  FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)

  // Get the address of the top C++ try catch handler or nullptr if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack.  When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer.  When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  Address try_catch_handler_address() {
    return reinterpret_cast<Address>(
        v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
  }

  void Free();

  Isolate* isolate_;
  // The context where the current execution method is created and for variable
  // lookups.
  Context* context_;
  ThreadId thread_id_;
  Object* pending_exception_;
  // TODO(kschimpf): Change this to a stack of caught exceptions (rather than
  // just innermost catching try block).
  Object* wasm_caught_exception_ = nullptr;

  // Communication channel between Isolate::FindHandler and the CEntryStub.
  Context* pending_handler_context_;
  Address pending_handler_entrypoint_;
  Address pending_handler_constant_pool_;
  Address pending_handler_fp_;
  Address pending_handler_sp_;

  // Communication channel between Isolate::Throw and message consumers.
  bool rethrowing_message_;
  Object* pending_message_obj_;

  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception.  We may want to
  // unify them later.
  Object* scheduled_exception_;
  bool external_caught_exception_;
  SaveContext* save_context_;

  // Stack.
  Address c_entry_fp_;  // the frame pointer of the top c entry frame
  Address handler_;     // try-blocks are chained through the stack
  Address c_function_;  // C function that was called at c entry.

  // Throwing an exception may cause a Promise rejection.  For this purpose
  // we keep track of a stack of nested promises and the corresponding
  // try-catch handlers.
  PromiseOnStack* promise_on_stack_;

#ifdef USE_SIMULATOR
  Simulator* simulator_;
#endif

  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
  // the external callback we're currently in
  ExternalCallbackScope* external_callback_scope_;
  StateTag current_vm_state_;

  // Call back function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_;

 private:
  void InitializeInternal();

  v8::TryCatch* try_catch_handler_;
};


#ifdef DEBUG

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
  V(CommentStatistic, paged_space_comments_statistics, \
    CommentStatistic::kMaxComments + 1)                \
  V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

typedef std::vector<HeapObject*> DebugObjectCache;

#define ISOLATE_INIT_LIST(V)                                                  \
  /* Assembler state. */                                                      \
  V(FatalErrorCallback, exception_behavior, nullptr)                          \
  V(OOMErrorCallback, oom_behavior, nullptr)                                  \
  V(LogEventCallback, event_logger, nullptr)                                  \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
  V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr)   \
  V(ExtensionCallback, wasm_module_callback, &NoExtension)                    \
  V(ExtensionCallback, wasm_instance_callback, &NoExtension)                  \
  V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr)      \
  /* State for Relocatable. */                                                \
  V(Relocatable*, relocatable_top, nullptr)                                   \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
  V(Object*, string_stream_current_security_token, nullptr)                   \
  V(const intptr_t*, api_external_references, nullptr)                        \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
  V(int, pending_microtask_count, 0)                                          \
  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
  V(CodeTracer*, code_tracer, nullptr)                                        \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
  V(int, code_and_metadata_size, 0)                                           \
  V(int, bytecode_and_metadata_size, 0)                                       \
  /* true if being profiled. Causes collection of extra compile info. */      \
  V(bool, is_profiling, false)                                                \
  /* true if a trace is being formatted through Error.prepareStackTrace. */   \
  V(bool, formatting_stack_trace, false)                                      \
  /* Perform side effect checks on function call and API callbacks. */        \
  V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints)  \
  /* Current code coverage mode */                                            \
  V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort)  \
  V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone)   \
  V(int, last_stack_frame_info_id, 0)                                         \
  V(int, last_console_context_id, 0)                                          \
  V(v8_inspector::V8Inspector*, inspector, nullptr)                           \
  V(bool, next_v8_call_is_safe_for_termination, false)                        \
  V(bool, only_terminate_in_safe_scope, false)

#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
  inline void set_##name(type v) { thread_local_top_.name##_ = v; }  \
  inline type name() const { return thread_local_top_.name##_; }

#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top_.name##_; }
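
// For example, THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id) expands to
//
//   inline void set_thread_id(ThreadId v) { thread_local_top_.thread_id_ = v; }
//   inline ThreadId thread_id() const { return thread_local_top_.thread_id_; }
//
// and THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception) expands to
//
//   Object** pending_exception_address() {
//     return &thread_local_top_.pending_exception_;
//   }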

// HiddenFactory exists so Isolate can privately inherit from it without making
// Factory's members available to Isolate directly.
class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};

class Isolate : private HiddenFactory {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;
 public:
  ~Isolate();

  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(nullptr),
#if USE_SIMULATOR
          simulator_(nullptr),
#endif
          next_(nullptr),
          prev_(nullptr) {
    }
    ~PerIsolateThreadData();
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };

  static void InitializeOncePerProcess();

  // Returns the PerIsolateThreadData for the current thread (or nullptr if one
  // is not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running.
  INLINE(static Isolate* Current()) {
    DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
    Isolate* isolate = reinterpret_cast<Isolate*>(
        base::Thread::GetExistingThreadLocal(isolate_key_));
    DCHECK_NOT_NULL(isolate);
    return isolate;
  }

  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();
  bool InitializeCounters();  // Returns false if already initialized.

  bool Init(StartupDeserializer* des);

  // True if at least one thread Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != nullptr; }

  // Destroys the non-default isolates.
  // Sets default isolate into "has_been_disposed" state rather than destroying,
  // for legacy API reasons.
  void TearDown();

  void ReleaseSharedPtrs();

  void ClearSerializerData();

  // Find the PerThread for this particular (isolate, thread) combination
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

  // Find the PerThread for given (isolate, thread) combination
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);

  // Discard the PerThread for this particular (isolate, thread) combination
  // If one does not yet exist, no-op.
  void DiscardPerThreadDataForThisThread();

  // Returns the key used to store the pointer to the current isolate.
  // Used internally for V8 threads that do not execute JavaScript but still
  // are part of the domain of an isolate (like the context switcher).
  static base::Thread::LocalStorageKey isolate_key() {
    return isolate_key_;
  }

  // Returns the key used to store process-wide thread IDs.
  static base::Thread::LocalStorageKey thread_id_key() {
    return thread_id_key_;
  }

  static base::Thread::LocalStorageKey per_isolate_thread_data_key();

  // Mutex for serializing access to break control structures.
  base::RecursiveMutex* break_access() { return &break_access_; }

  Address get_address_from_id(IsolateAddressId id);

  // Access to top context (where the current function object was created).
  Context* context() { return thread_local_top_.context_; }
  inline void set_context(Context* context);
  Context** context_address() { return &thread_local_top_.context_; }

  THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)

  // Access to current thread id.
  THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)

  // Interface to pending exception.
  inline Object* pending_exception();
  inline void set_pending_exception(Object* exception_obj);
  inline void clear_pending_exception();

  // Interface to wasm caught exception.
  inline Object* get_wasm_caught_exception();
  inline void set_wasm_caught_exception(Object* exception);
  inline void clear_wasm_caught_exception();

  THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)

  inline bool has_pending_exception();

  THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)

  THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)

  v8::TryCatch* try_catch_handler() {
    return thread_local_top_.try_catch_handler();
  }
  bool* external_caught_exception_address() {
    return &thread_local_top_.external_caught_exception_;
  }

  THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)

  inline void clear_pending_message();
  Address pending_message_obj_address() {
    return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
  }

  inline Object* scheduled_exception();
  inline bool has_scheduled_exception();
  inline void clear_scheduled_exception();

  bool IsJavaScriptHandlerOnTop(Object* exception);
  bool IsExternalHandlerOnTop(Object* exception);

  inline bool is_catchable_by_javascript(Object* exception);
  bool is_catchable_by_wasm(Object* exception);

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
  Address c_function() { return thread_local_top_.c_function_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top_.c_entry_fp_;
  }
  inline Address* handler_address() { return &thread_local_top_.handler_; }
  inline Address* c_function_address() {
    return &thread_local_top_.c_function_;
  }

  // Bottom JS entry.
  Address js_entry_sp() {
    return thread_local_top_.js_entry_sp_;
  }
  inline Address* js_entry_sp_address() {
    return &thread_local_top_.js_entry_sp_;
  }

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  inline Handle<JSGlobalObject> global_object();

  // Returns the global proxy object of the current context.
  inline Handle<JSObject> global_proxy();

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top_.Free(); }

  // This method is called by the API after operations that may throw
  // exceptions.  If an exception was thrown and not handled by an external
  // handler, the exception is scheduled to be rethrown when we return to
  // running JavaScript code.  If an exception is scheduled, true is returned.
  V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool is_bottom_call);

  // Push and pop a promise and the current try-catch handler.
  void PushPromise(Handle<JSObject> promise);
  void PopPromise();

  // Return the relevant Promise that a throw/rejection pertains to, based
  // on the contents of the Promise stack.
  Handle<Object> GetPromiseOnStackOnThrow();

  // Heuristically guess whether a Promise is handled by a user catch handler.
  bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);

  class ExceptionScope {
   public:
    // Scope currently can only be used for regular exceptions,
    // not termination exception.
    inline explicit ExceptionScope(Isolate* isolate);
    inline ~ExceptionScope();

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
  };
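
  // Illustrative usage (an editor's sketch, not part of the original header):
  // preserve the current pending exception across code that may overwrite it.
  //
  //   {
  //     ExceptionScope scope(isolate);
  //     // ... work that may set and clear its own pending exceptions ...
  //   }  // the saved pending exception is restored when the scope exits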

  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture,
      int frame_limit,
      StackTrace::StackTraceOptions options);

  void SetAbortOnUncaughtExceptionCallback(
      v8::Isolate::AbortOnUncaughtExceptionCallback callback);

  enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
  void PrintCurrentStackTrace(FILE* out);
  void PrintStack(StringStream* accumulator,
                  PrintStackMode mode = kPrintStackVerbose);
  V8_EXPORT_PRIVATE void PrintStack(FILE* out,
                                    PrintStackMode mode = kPrintStackVerbose);
  Handle<String> StackTraceString();
  // Stores a stack trace in a stack-allocated temporary buffer which will
  // end up in the minidump for debugging purposes.
  NO_INLINE(void PushStackTraceAndDie(void* ptr1 = nullptr,
                                      void* ptr2 = nullptr,
                                      void* ptr3 = nullptr,
                                      void* ptr4 = nullptr));
  Handle<FixedArray> CaptureCurrentStackTrace(
      int frame_limit, StackTrace::StackTraceOptions options);
  Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                         FrameSkipMode mode,
                                         Handle<Object> caller);
  MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
      Handle<JSReceiver> error_object);
  MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
      Handle<JSReceiver> error_object, FrameSkipMode mode,
      Handle<Object> caller);
  Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);

  Address GetAbstractPC(int* line, int* column);

  // Returns whether the given context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be
  // set.
  bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(Handle<JSObject> receiver);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Object* Throw(Object* exception, MessageLocation* location = nullptr);
  Object* ThrowIllegalOperation();

  template <typename T>
  V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(
      Handle<Object> exception, MessageLocation* location = nullptr) {
    Throw(*exception, location);
    return MaybeHandle<T>();
  }

  void set_console_delegate(debug::ConsoleDelegate* delegate) {
    console_delegate_ = delegate;
  }
  debug::ConsoleDelegate* console_delegate() { return console_delegate_; }

  // Re-throw an exception.  This involves no error reporting since error
  // reporting was handled when the exception was thrown originally.
  Object* ReThrow(Object* exception);

  // Find the correct handler for the current pending exception. This also
  // clears and returns the current pending exception.
  Object* UnwindAndFindHandler();

  // Tries to predict whether an exception will be caught. Note that this can
  // only produce an estimate, because it is undecidable whether a finally
  // clause will consume or re-throw an exception.
  enum CatchType {
    NOT_CAUGHT,
    CAUGHT_BY_JAVASCRIPT,
    CAUGHT_BY_EXTERNAL,
    CAUGHT_BY_DESUGARING,
    CAUGHT_BY_PROMISE,
    CAUGHT_BY_ASYNC_AWAIT
  };
  CatchType PredictExceptionCatcher();

  void ScheduleThrow(Object* exception);
  // Re-set pending message, script and positions reported to the TryCatch
  // back to the TLS for re-use when rethrowing.
  void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
  // Un-schedule an exception that was caught by a TryCatch handler.
  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
  void ReportPendingMessages();
  void ReportPendingMessagesFromJavaScript();

  // Implements code shared between the two above methods
  void ReportPendingMessagesImpl(bool report_externally);

  // Return the pending message location if any, or an unfilled structure.
  MessageLocation GetMessageLocation();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Object* PromoteScheduledException();

  // Attempts to compute the current source location, storing the
  // result in the target out parameter. The source location is attached to a
  // Message object as the location which should be shown to the user. It's
  // typically the top-most meaningful location on the stack.
  bool ComputeLocation(MessageLocation* target);
  bool ComputeLocationFromException(MessageLocation* target,
                                    Handle<Object> exception);
  bool ComputeLocationFromStackTrace(MessageLocation* target,
                                     Handle<Object> exception);

  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
                                        MessageLocation* location);

  // Out of resource exception helpers.
  Object* StackOverflow();
  Object* TerminateExecution();
  void CancelTerminateExecution();

  void RequestInterrupt(InterruptCallback callback, void* data);
  void InvokeApiInterruptCallbacks();

  // Administration
  void Iterate(RootVisitor* v);
  void Iterate(RootVisitor* v, ThreadLocalTop* t);
  char* Iterate(RootVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v, char* t);

  // Returns the current native context.
  inline Handle<Context> native_context();
  inline Context* raw_native_context();

  // Returns the native context of the calling JavaScript code.  That
  // is, the native context of the top-most JavaScript frame.
  Handle<Context> GetCallingNativeContext();

  Handle<Context> GetIncumbentContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                       \
  inline type name() const {                                            \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return name##_;                                                     \
  }                                                                     \
  inline void set_##name(type value) {                                  \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    name##_ = value;                                                    \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                       \
  inline type* name() {                                                 \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return &(name##_)[0];                                               \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type* value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  // Use for updating counters on a foreground thread.
  Counters* counters() { return async_counters().get(); }
  // Use for updating counters on a background thread.
  const std::shared_ptr<Counters>& async_counters() {
    // Make sure InitializeCounters() has been called.
    DCHECK_NOT_NULL(async_counters_.get());
    return async_counters_;
  }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK_NOT_NULL(logger_);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }
  wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
  StubCache* load_stub_cache() { return load_stub_cache_; }
  StubCache* store_stub_cache() { return store_stub_cache_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
  MaterializedObjectStore* materialized_object_store() {
    return materialized_object_store_;
  }

  ContextSlotCache* context_slot_cache() {
    return context_slot_cache_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() {
    return unicode_cache_;
  }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() { return global_handles_; }

  EternalHandles* eternal_handles() { return eternal_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  std::vector<int>* regexp_indices() { return &regexp_indices_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &regexp_macro_assembler_canonicalize_;
  }

  Debug* debug() { return debug_; }

  bool* is_profiling_address() { return &is_profiling_; }
  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  static size_t non_disposed_isolates() {
    return non_disposed_isolates_.Value();
  }
#endif

  v8::internal::Factory* factory() {
    // Upcast to the privately inherited base-class using c-style casts to avoid
    // undefined behavior (as static_cast cannot cast across private bases).
    return (v8::internal::Factory*)this;  // NOLINT(readability/casting)
  }

  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)

  void SetData(uint32_t slot, void* data) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    return embedder_data_[slot];
  }

  bool serializer_enabled() const { return serializer_enabled_; }

  void enable_serializer() { serializer_enabled_ = true; }

  bool snapshot_available() const {
    return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_optimizer();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool is_best_effort_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBestEffort;
  }

  bool is_precise_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseCount;
  }

  bool is_precise_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseBinary;
  }

  bool is_block_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockCount;
  }

  bool is_block_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockBinary;
  }

  bool is_block_code_coverage() const {
    return is_block_count_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_collecting_type_profile() const {
    return type_profile_mode() == debug::TypeProfile::kCollect;
  }

  // Collect feedback vectors with data for code coverage or type profile.
  // Reset the list when neither code coverage nor type profile is needed
  // anymore. This keeps many feedback vectors alive, but code coverage and
  // type profile are used for debugging only, so the increase in memory
  // usage is expected.
  void SetFeedbackVectorsForProfilingTools(Object* value);

  void MaybeInitializeVectorListFromHeap();

  double time_millis_since_init() {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() {
    return date_cache_;
  }

  void set_date_cache(DateCache* date_cache) {
    if (date_cache != date_cache_) {
      delete date_cache_;
    }
    date_cache_ = date_cache;
  }

  static const int kProtectorValid = 1;
  static const int kProtectorInvalid = 0;

  inline bool IsArrayConstructorIntact();

  // The version with an explicit context parameter can be used when
  // Isolate::context is not set up, e.g. when calling directly into C++ from
  // CSA.
  bool IsNoElementsProtectorIntact(Context* context);
  bool IsNoElementsProtectorIntact();

  inline bool IsArraySpeciesLookupChainIntact();
  inline bool IsTypedArraySpeciesLookupChainIntact();
  inline bool IsPromiseSpeciesLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
  inline bool IsStringLengthOverflowIntact();
  inline bool IsArrayIteratorLookupChainIntact();

  // Make sure we do check for neutered array buffers.
  inline bool IsArrayBufferNeuteringIntact();

  // Disable promise optimizations if promise (debug) hooks have ever been
  // active.
  bool IsPromiseHookProtectorIntact();

  // Make sure a lookup of "resolve" on the %Promise% intrinsic object
  // yields the initial Promise.resolve method.
  bool IsPromiseResolveLookupChainIntact();

  // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
  // initial %PromisePrototype% yields the initial method. In addition this
  // protector also guards the negative lookup of "then" on the intrinsic
  // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
  // undefined without triggering any side-effects.
  bool IsPromiseThenLookupChainIntact();
  bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);

  // On intent to set an element in object, make sure that appropriate
  // notifications occur if the set is on the elements of the array or
  // object prototype. Also ensure that changes to prototype chain between
  // Array and Object fire notifications.
  void UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object);
  void UpdateNoElementsProtectorOnSetLength(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void InvalidateArrayConstructorProtector();
  void InvalidateArraySpeciesProtector();
  void InvalidateTypedArraySpeciesProtector();
  void InvalidatePromiseSpeciesProtector();
  void InvalidateIsConcatSpreadableProtector();
  void InvalidateStringLengthOverflowProtector();
  void InvalidateArrayIteratorProtector();
  void InvalidateArrayBufferNeuteringProtector();
  V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
  void InvalidatePromiseResolveProtector();
  void InvalidatePromiseThenProtector();

  // Returns true if array is the initial array prototype in any native context.
  bool IsAnyInitialArrayPrototype(Handle<JSArray> array);

  V8_EXPORT_PRIVATE CallInterfaceDescriptorData* call_descriptor_data(
      int index);

  void IterateDeferredHandles(RootVisitor* visitor);
  void LinkDeferredHandles(DeferredHandles* deferred_handles);
  void UnlinkDeferredHandles(DeferredHandles* deferred_handles);

#ifdef DEBUG
  bool IsDeferredHandle(Object** location);
#endif  // DEBUG

  bool concurrent_recompilation_enabled() {
    // Thread is only available with flag enabled.
    DCHECK(optimizing_compile_dispatcher_ == nullptr ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != nullptr;
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    return optimizing_compile_dispatcher_;
  }
  // Flushes all pending concurrent optimization jobs from the optimizing
  // compile dispatcher's queue.
  void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);

  int id() const { return static_cast<int>(id_); }

  CompilationStatistics* GetTurboStatistics();
  CodeTracer* GetCodeTracer();

  void DumpAndResetStats();

  FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
  void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
    function_entry_hook_ = function_entry_hook;
  }

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  void set_force_slow_path(bool v) { force_slow_path_ = v; }
  bool force_slow_path() const { return force_slow_path_; }
  bool* force_slow_path_address() { return &force_slow_path_; }

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* fuzzer_rng();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Code* FindCodeObject(Address a);

  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
  void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                   size_t heap_limit);
  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback();

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  inline void FireMicrotasksCompletedCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  void EnqueueMicrotask(Handle<Microtask> microtask);
  void RunMicrotasks();
  bool IsRunningMicrotasks() const { return is_running_microtasks_; }

  Handle<Symbol> SymbolFor(Heap::RootListIndex dictionary_index,
                           Handle<String> name, bool private_symbol);

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);

  BasicBlockProfiler* GetOrCreateBasicBlockProfiler();
  BasicBlockProfiler* basic_block_profiler() { return basic_block_profiler_; }

  std::string GetTurboCfgFileName();

#if V8_SFI_HAS_UNIQUE_ID
  int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif

  Address promise_hook_or_debug_is_active_address() {
    return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
  }

  Address pending_microtask_count_address() {
    return reinterpret_cast<Address>(&pending_microtask_count_);
  }

  Address handle_scope_implementer_address() {
    return reinterpret_cast<Address>(&handle_scope_implementer_);
  }

  Address debug_execution_mode_address() {
    return reinterpret_cast<Address>(&debug_execution_mode_);
  }

  void DebugStateUpdated();

  void SetPromiseHook(PromiseHook hook);
  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
                      Handle<Object> parent);

  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  std::vector<Object*>* partial_snapshot_cache() {
    return &partial_snapshot_cache_;
  }

  // Off-heap builtins cannot embed constants within the code object itself,
  // and thus need to load them from the root list.
  bool ShouldLoadConstantsFromRootList() const {
#ifdef V8_EMBEDDED_BUILTINS
    return (serializer_enabled() &&
            builtins_constants_table_builder() != nullptr);
#else
    return false;
#endif  // V8_EMBEDDED_BUILTINS
  }

#ifdef V8_EMBEDDED_BUILTINS
  // Called only prior to serialization.
  // This function copies off-heap-safe builtins off the heap, creates off-heap
  // trampolines, and sets up this isolate's embedded blob.
  void PrepareEmbeddedBlobForSerialization();

  BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
    return builtins_constants_table_builder_;
  }

  static const uint8_t* CurrentEmbeddedBlob();
  static uint32_t CurrentEmbeddedBlobSize();

  // TODO(jgruber): Remove these in favor of the static methods above.
  const uint8_t* embedded_blob() const;
  uint32_t embedded_blob_size() const;
#endif

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }

  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }

  CancelableTaskManager* cancelable_task_manager() {
    return cancelable_task_manager_;
  }

  const AstStringConstants* ast_string_constants() const {
    return ast_string_constants_;
  }

  interpreter::Interpreter* interpreter() const { return interpreter_; }

  AccountingAllocator* allocator() { return allocator_; }

  CompilerDispatcher* compiler_dispatcher() const {
    return compiler_dispatcher_;
  }

  bool IsInAnyContext(Object* object, uint32_t index);

  void SetHostImportModuleDynamicallyCallback(
      HostImportModuleDynamicallyCallback callback);
  MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
      Handle<Script> referrer, Handle<Object> specifier);

  void SetHostInitializeImportMetaObjectCallback(
      HostInitializeImportMetaObjectCallback callback);
  Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
      Handle<Module> module);

  void SetRAILMode(RAILMode rail_mode);

  RAILMode rail_mode() { return rail_mode_.Value(); }

  double LoadStartTimeMs();

  void IsolateInForegroundNotification();

  void IsolateInBackgroundNotification();

  bool IsIsolateInBackground() { return is_isolate_in_background_; }

  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);

  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
  bool allow_atomics_wait() { return allow_atomics_wait_; }

  // Register a finalizer to be called at isolate teardown.
  void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

1357 1358
  // Removes a previously-registered shared object finalizer.
  void UnregisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  size_t elements_deletion_counter() { return elements_deletion_counter_; }
  void set_elements_deletion_counter(size_t value) {
    elements_deletion_counter_ = value;
  }

  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
    return top_backup_incumbent_scope_;
  }
  void set_top_backup_incumbent_scope(
      const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
    top_backup_incumbent_scope_ = top_backup_incumbent_scope;
  }

  void SetIdle(bool is_idle);

 protected:
  Isolate();
  bool IsArrayOrObjectOrStringPrototype(Object* object);

 private:
  friend struct GlobalState;
  friend struct InitializeGlobalState;

  // These fields are accessed through the API; their offsets must be kept in
  // sync with the v8::internal::Internals constants in include/v8.h. This is
  // also verified in Isolate::Init() using runtime checks.
  void* embedder_data_[Internals::kNumIsolateDataSlots];
  Heap heap_;

  class ThreadDataTable {
   public:
    ThreadDataTable();
    ~ThreadDataTable();

    PerIsolateThreadData* Lookup(ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads();

   private:
    struct Hasher {
      std::size_t operator()(const ThreadId& t) const {
        return std::hash<int>()(t.ToInteger());
      }
    };

    std::unordered_map<ThreadId, PerIsolateThreadData*, Hasher> table_;
  };

  // These items form a stack synchronously with threads Enter'ing and Exit'ing
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item pushed to the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate,
                   EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) { }

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };
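
  // Illustrative sketch of the reentrancy rule above, in terms of the public
  // v8::Isolate API (Enter/Exit maintain this stack):
  //
  //   isolate->Enter();  // pushes an EntryStackItem, entry_count == 1
  //   isolate->Enter();  // same thread again: entry_count becomes 2
  //   isolate->Exit();   // entry_count back to 1
  //   isolate->Exit();   // pops the item; the isolate may now be disposed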

  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
  static base::Thread::LocalStorageKey isolate_key_;
  static base::Thread::LocalStorageKey thread_id_key_;

  // A global counter for all generated Isolates; it might overflow.
  static base::Atomic32 isolate_counter_;

#if DEBUG
  static base::Atomic32 isolate_key_created_;
#endif

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Exit();

  void InitializeThreadLocal();

  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  // Propagate the pending exception message to the v8::TryCatch.
  // If there is no external try-catch or the message was successfully
  // propagated, then return true.
  bool PropagatePendingExceptionToExternalTryCatch();

  void SetTerminationOnExternalTryCatch();

  const char* RAILModeName(RAILMode rail_mode) const {
    switch (rail_mode) {
      case PERFORMANCE_RESPONSE:
        return "RESPONSE";
      case PERFORMANCE_ANIMATION:
        return "ANIMATION";
      case PERFORMANCE_IDLE:
        return "IDLE";
      case PERFORMANCE_LOAD:
        return "LOAD";
    }
    return "";
  }

  // TODO(alph): Remove along with the deprecated GetCpuProfiler().
  friend v8::CpuProfiler* v8::Isolate::GetCpuProfiler();
  CpuProfiler* cpu_profiler() const { return cpu_profiler_; }

  base::Atomic32 id_;
  EntryStackItem* entry_stack_;
  int stack_trace_nesting_level_;
  StringStream* incomplete_message_;
  Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
  Bootstrapper* bootstrapper_;
  RuntimeProfiler* runtime_profiler_;
  CompilationCache* compilation_cache_;
  std::shared_ptr<Counters> async_counters_;
  base::RecursiveMutex break_access_;
  Logger* logger_;
  StackGuard stack_guard_;
  StubCache* load_stub_cache_;
  StubCache* store_stub_cache_;
  DeoptimizerData* deoptimizer_data_;
  bool deoptimizer_lazy_throw_;
  MaterializedObjectStore* materialized_object_store_;
  ThreadLocalTop thread_local_top_;
  bool capture_stack_trace_for_uncaught_exceptions_;
  int stack_trace_for_uncaught_exceptions_frame_limit_;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
  ContextSlotCache* context_slot_cache_;
  DescriptorLookupCache* descriptor_lookup_cache_;
  HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_;
  UnicodeCache* unicode_cache_;
  AccountingAllocator* allocator_;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
  GlobalHandles* global_handles_;
  EternalHandles* eternal_handles_;
  ThreadManager* thread_manager_;
  RuntimeState runtime_state_;
  Builtins builtins_;
  SetupIsolateDelegate* setup_delegate_;
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
  RegExpStack* regexp_stack_;
  std::vector<int> regexp_indices_;
  DateCache* date_cache_;
  CallInterfaceDescriptorData* call_descriptor_data_;
  base::RandomNumberGenerator* random_number_generator_;
  base::RandomNumberGenerator* fuzzer_rng_;
  base::AtomicValue<RAILMode> rail_mode_;
  bool promise_hook_or_debug_is_active_;
  PromiseHook promise_hook_;
  HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_;
  HostInitializeImportMetaObjectCallback
      host_initialize_import_meta_object_callback_;
  base::Mutex rail_mutex_;
  double load_start_time_ms_;

  // Whether the isolate has been created for snapshotting.
  bool serializer_enabled_;

  // True if a fatal error has been signaled for this isolate.
  bool has_fatal_error_;

  // True if this isolate was initialized from a snapshot.
  bool initialized_from_snapshot_;

  // True if the ES2015 tail call elimination feature is enabled.
  bool is_tail_call_elimination_enabled_;

  // True if the isolate is in the background. This flag is used
  // to prioritize between memory usage and latency.
  bool is_isolate_in_background_;

  // Time stamp at initialization.
  double time_millis_at_init_;

#ifdef DEBUG
  static base::AtomicNumber<size_t> non_disposed_isolates_;

  JSObject::SpillInformation js_spill_information_;
#endif

  Debug* debug_;
  CpuProfiler* cpu_profiler_;
  HeapProfiler* heap_profiler_;
  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
  FunctionEntryHook function_entry_hook_;

  const AstStringConstants* ast_string_constants_;

  interpreter::Interpreter* interpreter_;

  CompilerDispatcher* compiler_dispatcher_;

  typedef std::pair<InterruptCallback, void*> InterruptEntry;
  std::queue<InterruptEntry> api_interrupts_queue_;

#define GLOBAL_BACKING_STORE(type, name, initialvalue)                         \
  type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length)                         \
  type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE
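
  // For illustration only: a hypothetical ISOLATE_INIT_LIST entry such as
  //   V(bool, example_flag, false)
  // expands through GLOBAL_BACKING_STORE above into the member declaration
  //   bool example_flag_;
  // and a hypothetical ISOLATE_INIT_ARRAY_LIST entry
  //   V(int, example_array, 4)
  // expands through GLOBAL_ARRAY_BACKING_STORE into
  //   int example_array_[4];
  // The matching accessors are generated from the same lists elsewhere in
  // this class.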

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored)                              \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  DeferredHandles* deferred_handles_head_;
  OptimizingCompileDispatcher* optimizing_compile_dispatcher_;

  // Counts deopt points if deopt_every_n_times is enabled.
  unsigned int stress_deopt_count_;

  bool force_slow_path_;

  int next_optimization_id_;

#if V8_SFI_HAS_UNIQUE_ID
  int next_unique_sfi_id_;
#endif

  // Vector of callbacks before a Call starts execution.
  std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;

  // Vector of callbacks when a Call completes.
  std::vector<CallCompletedCallback> call_completed_callbacks_;

  // Vector of callbacks after microtasks were run.
  std::vector<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
  bool is_running_microtasks_;

  v8::Isolate::UseCounterCallback use_counter_callback_;
  BasicBlockProfiler* basic_block_profiler_;

  std::vector<Object*> partial_snapshot_cache_;

#ifdef V8_EMBEDDED_BUILTINS
  // Used during builtins compilation to build the builtins constants table,
  // which is stored on the root list prior to serialization.
  BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;

  void SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size);

  const uint8_t* embedded_blob_ = nullptr;
  uint32_t embedded_blob_size_ = 0;
#endif  // V8_EMBEDDED_BUILTINS

  v8::ArrayBuffer::Allocator* array_buffer_allocator_;

  FutexWaitListNode futex_wait_list_node_;

  CancelableTaskManager* cancelable_task_manager_;

  debug::ConsoleDelegate* console_delegate_ = nullptr;

  v8::Isolate::AbortOnUncaughtExceptionCallback
      abort_on_uncaught_exception_callback_;

  bool allow_atomics_wait_;

  ManagedPtrDestructor* managed_ptr_destructors_head_ = nullptr;

  size_t total_regexp_code_generated_;

  size_t elements_deletion_counter_ = 0;

  std::unique_ptr<wasm::WasmEngine> wasm_engine_;

  std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;

  // The top entry of the v8::Context::BackupIncumbentScope stack.
  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
      nullptr;

  // TODO(kenton@cloudflare.com): This mutex can be removed if
  // thread_data_table_ is always accessed under the isolate lock. I do not
  // know if this is the case, so I'm preserving it for now.
  base::Mutex thread_data_table_mutex_;
  ThreadDataTable thread_data_table_;

  friend class ExecutionAccess;
  friend class HandleScopeImplementer;
  friend class heap::HeapTester;
  friend class OptimizingCompileDispatcher;
  friend class Simulator;
  friend class StackGuard;
  friend class SweeperThread;
  friend class TestIsolate;
  friend class ThreadId;
  friend class ThreadManager;
  friend class v8::Isolate;
  friend class v8::Locker;
  friend class v8::SnapshotCreator;
  friend class v8::Unlocker;
  friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
  friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
                                                        const char*);

  DISALLOW_COPY_AND_ASSIGN(Isolate);
};


#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR


class PromiseOnStack {
 public:
  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
      : promise_(promise), prev_(prev) {}
  Handle<JSObject> promise() { return promise_; }
  PromiseOnStack* prev() { return prev_; }

 private:
  Handle<JSObject> promise_;
  PromiseOnStack* prev_;
};


// If the GCC version is 4.1.x or 4.2.x, an additional field is added to the
// class as a workaround for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
 public:
  explicit SaveContext(Isolate* isolate);
  ~SaveContext();

  Handle<Context> context() { return context_; }
  SaveContext* prev() { return prev_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(StandardFrame* frame);

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  SaveContext* const prev_;
  Address c_entry_fp_;
};


class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
 public:
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() {
    DCHECK(isolate_->context() == *context_);
  }

 private:
  Isolate* isolate_;
  Handle<Context> context_;
#else
 public:
  explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};


class ExecutionAccess BASE_EMBEDDED {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access()->TryLock();
  }

 private:
  Isolate* isolate_;
};
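
// A minimal usage sketch: ExecutionAccess is a scoped lock on the isolate's
// break_access mutex, e.g.
//
//   {
//     ExecutionAccess access(isolate);  // Lock(isolate) in the constructor
//     // ... inspect or mutate interrupt/debugger state ...
//   }                                   // Unlock(isolate_) in the destructor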


// Support for checking for stack-overflows.
class StackLimitCheck BASE_EMBEDDED {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }

  // Use this to check for stack-overflows in C++ code.
  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->real_climit();
  }

  // Use this to check for interrupt requests in C++ code.
  bool InterruptRequested() {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->climit();
  }

  // Use this to check for stack-overflow when entering runtime from JS code.
  bool JsHasOverflowed(uintptr_t gap = 0) const;

 private:
  Isolate* isolate_;
};

#define STACK_CHECK(isolate, result_value) \
  do {                                     \
    StackLimitCheck stack_check(isolate);  \
    if (stack_check.HasOverflowed()) {     \
      isolate->StackOverflow();            \
      return result_value;                 \
    }                                      \
  } while (false)
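
// A hedged usage sketch of STACK_CHECK; the runtime function below is
// hypothetical, but returning the exception sentinel on overflow is the
// intended pattern:
//
//   RUNTIME_FUNCTION(Runtime_ExampleOperation) {
//     HandleScope scope(isolate);
//     STACK_CHECK(isolate, isolate->heap()->exception());
//     // ... the actual work ...
//   }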

// A scope intercepts only those interrupts that are part of its
// intercept_mask and does not affect other interrupts.
class InterruptsScope {
 public:
  enum Mode { kPostponeInterrupts, kRunInterrupts };

  virtual ~InterruptsScope() { stack_guard_->PopInterruptsScope(); }

  // Find the scope that intercepts this interrupt; it may be the outermost
  // PostponeInterruptsScope or the innermost SafeForInterruptsScope, if any.
  // Return whether the interrupt has been intercepted.
  bool Intercept(StackGuard::InterruptFlag flag);

 protected:
  InterruptsScope(Isolate* isolate, int intercept_mask, Mode mode)
      : stack_guard_(isolate->stack_guard()),
        intercept_mask_(intercept_mask),
        intercepted_flags_(0),
        mode_(mode) {
    stack_guard_->PushInterruptsScope(this);
  }

 private:
  StackGuard* stack_guard_;
  int intercept_mask_;
  int intercepted_flags_;
  Mode mode_;
  InterruptsScope* prev_;

  friend class StackGuard;
};

// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope : public InterruptsScope {
 public:
  PostponeInterruptsScope(Isolate* isolate,
                          int intercept_mask = StackGuard::ALL_INTERRUPTS)
      : InterruptsScope(isolate, intercept_mask,
                        InterruptsScope::kPostponeInterrupts) {}
  virtual ~PostponeInterruptsScope() = default;
};

// Support for overriding PostponeInterruptsScope. An interrupt is not
// ignored if the innermost scope is a SafeForInterruptsScope, regardless of
// any outer PostponeInterruptsScopes.
class SafeForInterruptsScope : public InterruptsScope {
 public:
  SafeForInterruptsScope(Isolate* isolate, int intercept_mask)
      : InterruptsScope(isolate, intercept_mask,
                        InterruptsScope::kRunInterrupts) {}
  virtual ~SafeForInterruptsScope() = default;
};
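
// A minimal usage sketch: postpone termination interrupts around a
// non-reentrant section (the mask is one of StackGuard's interrupt flags):
//
//   {
//     PostponeInterruptsScope postpone(isolate,
//                                      StackGuard::TERMINATE_EXECUTION);
//     // ... work that must not observe a termination request ...
//   }  // A postponed termination is serviced once the scope is left.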

class CodeTracer final : public Malloced {
 public:
  explicit CodeTracer(int isolate_id) : file_(nullptr), scope_depth_(0) {
    if (!ShouldRedirect()) {
      file_ = stdout;
      return;
    }

    if (FLAG_redirect_code_traces_to == nullptr) {
      SNPrintF(filename_,
               "code-%d-%d.asm",
               base::OS::GetCurrentProcessId(),
               isolate_id);
    } else {
      StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
    }

    WriteChars(filename_.start(), "", 0, false);
  }

  class Scope {
   public:
    explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
    ~Scope() { tracer_->CloseFile(); }

    FILE* file() const { return tracer_->file(); }

   private:
    CodeTracer* tracer_;
  };

  void OpenFile() {
    if (!ShouldRedirect()) {
      return;
    }

    if (file_ == nullptr) {
      file_ = base::OS::FOpen(filename_.start(), "ab");
    }

    scope_depth_++;
  }

  void CloseFile() {
    if (!ShouldRedirect()) {
      return;
    }

    if (--scope_depth_ == 0) {
      fclose(file_);
      file_ = nullptr;
    }
  }

  FILE* file() const { return file_; }

 private:
  static bool ShouldRedirect() {
    return FLAG_redirect_code_traces;
  }

  EmbeddedVector<char, 128> filename_;
  FILE* file_;
  int scope_depth_;
};
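
// A minimal usage sketch, assuming the isolate's GetCodeTracer() accessor
// declared earlier in this file:
//
//   CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
//   PrintF(tracing_scope.file(), "--- code tracing output ---\n");
//
// The Scope opens the redirect file (or stdout) on entry and closes it when
// the last nested scope exits.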

class StackTraceFailureMessage {
 public:
  explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
                                    void* ptr2 = nullptr, void* ptr3 = nullptr,
                                    void* ptr4 = nullptr);

  V8_NOINLINE void Print() volatile;

  static const uintptr_t kStartMarker = 0xdecade30;
  static const uintptr_t kEndMarker = 0xdecade31;
  static const int kStacktraceBufferSize = 32 * KB;

  uintptr_t start_marker_ = kStartMarker;
  void* isolate_;
  void* ptr1_;
  void* ptr2_;
  void* ptr3_;
  void* ptr4_;
  void* code_objects_[4];
  char js_stack_trace_[kStacktraceBufferSize];
  uintptr_t end_marker_ = kEndMarker;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_ISOLATE_H_