// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_

#include <cstddef>
#include <functional>
#include <memory>
#include <queue>
#include <unordered_map>
#include <vector>

#include "include/v8-inspector.h"
#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/isolate-allocator.h"
#include "src/isolate-data.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
#include "src/thread-id.h"
#include "src/unicode.h"

#ifdef V8_INTL_SUPPORT
#include "unicode/uversion.h"  // Define U_ICU_NAMESPACE.
namespace U_ICU_NAMESPACE {
class UObject;
}  // namespace U_ICU_NAMESPACE
#endif  // V8_INTL_SUPPORT

namespace v8 {

namespace base {
class RandomNumberGenerator;
}

namespace debug {
class ConsoleDelegate;
}

namespace internal {

namespace heap {
class HeapTester;
}  // namespace heap

class AddressToIndexHashMap;
class AstStringConstants;
class Bootstrapper;
class BuiltinsConstantsTableBuilder;
class CancelableTaskManager;
class CodeEventDispatcher;
class CodeTracer;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
class ContextSlotCache;
class Counters;
class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
class EternalHandles;
class ExternalCallbackScope;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InnerPointerToCodeCache;
class Logger;
class MaterializedObjectStore;
class Microtask;
class MicrotaskQueue;
class OptimizingCompileDispatcher;
class PromiseOnStack;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
class SaveContext;
class SetupIsolateDelegate;
class Simulator;
class StartupDeserializer;
class StandardFrame;
class StubCache;
class ThreadManager;
class ThreadState;
class ThreadVisitor;  // Defined in v8threads.h
class TracingCpuProfilerImpl;
class UnicodeCache;
struct ManagedPtrDestructor;

template <StateTag Tag> class VMState;

namespace interpreter {
class Interpreter;
}

namespace compiler {
class PerIsolateCompilerCache;
}

namespace wasm {
class WasmEngine;
}

#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    DCHECK(!__isolate__->has_pending_exception());     \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)
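
// Example usage (a sketch; Runtime_DoSomething is an illustrative name, not a
// runtime function defined in this codebase):
//
// RUNTIME_FUNCTION(Runtime_DoSomething) {
//   ...
//   RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
//   // Reached only if no exception was scheduled while in external code.
//   ...
// }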

// Macros for MaybeHandle.

#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    DCHECK(!__isolate__->has_pending_exception());          \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())

#define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    if (!(call).ToLocal(&dst)) {                                              \
      DCHECK(__isolate__->has_scheduled_exception());                         \
      __isolate__->PromoteScheduledException();                               \
      return value;                                                           \
    }                                                                         \
  } while (false)

#define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                            \
    Isolate* __isolate__ = (isolate);                             \
    if ((call).IsNothing()) {                                     \
      DCHECK(__isolate__->has_scheduled_exception());             \
      __isolate__->PromoteScheduledException();                   \
      return value;                                               \
    }                                                             \
  } while (false)
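
// Example usage (a sketch; ApiCallReturningMaybe is an illustrative name):
//
// Maybe<bool> Func(Isolate* isolate) {
//   ...
//   RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
//       isolate, ApiCallReturningMaybe(...), Nothing<bool>());
//   return Just(true);
// }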

/**
 * RETURN_RESULT_OR_FAILURE is used in functions with return type Object (such
 * as "RUNTIME_FUNCTION(...) {...}" or "BUILTIN(...) {...}" ) to return either
 * the contents of a MaybeHandle<X>, or the "exception" sentinel value.
 * Example usage:
 *
 * RUNTIME_FUNCTION(Runtime_Func) {
 *   ...
 *   RETURN_RESULT_OR_FAILURE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...));
 * }
 *
 * If inside a function with return type MaybeHandle<X> use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Handle<X>, or Maybe<X> use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_RESULT_OR_FAILURE(isolate, call)      \
  do {                                               \
    Handle<Object> __result__;                       \
    Isolate* __isolate__ = (isolate);                \
    if (!(call).ToHandle(&__result__)) {             \
      DCHECK(__isolate__->has_pending_exception());  \
      return ReadOnlyRoots(__isolate__).exception(); \
    }                                                \
    DCHECK(!__isolate__->has_pending_exception());   \
    return *__result__;                              \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value)  \
  do {                                                               \
    if (!(call).ToHandle(&dst)) {                                    \
      DCHECK((isolate)->has_pending_exception());                    \
      return value;                                                  \
    }                                                                \
  } while (false)

#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)                \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,                  \
                                     ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T)  \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
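
// Example usage (a sketch, assuming Object::ToString with the usual
// MaybeHandle<String> signature):
//
// MaybeHandle<String> Stringify(Isolate* isolate, Handle<Object> obj) {
//   Handle<String> str;
//   ASSIGN_RETURN_ON_EXCEPTION(isolate, str,
//                              Object::ToString(isolate, obj), String);
//   // From here on, str is guaranteed to hold a String.
//   ...
// }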

#define THROW_NEW_ERROR(isolate, call, T)                       \
  do {                                                          \
    Isolate* __isolate__ = (isolate);                           \
    return __isolate__->Throw<T>(__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    Isolate* __isolate__ = (isolate);                         \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_VALUE(isolate, call, value) \
  do {                                                     \
    Isolate* __isolate__ = (isolate);                      \
    __isolate__->Throw(*__isolate__->factory()->call);     \
    return value;                                          \
  } while (false)
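
// Example usage (a sketch; NewTypeError(MessageTemplate::kSomething) stands in
// for any factory()->New*Error(...) expression):
//
// MaybeHandle<Object> Func(Isolate* isolate) {
//   ...
//   if (bad) {
//     THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSomething),
//                     Object);
//   }
//   ...
// }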

/**
 * RETURN_ON_EXCEPTION_VALUE conditionally returns the given value when the
 * given MaybeHandle is empty. It is typically used in functions with return
 * type Maybe<X> or Handle<X>. Example usage:
 *
 * Handle<X> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION_VALUE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...),
 *       Handle<X>());
 *   // code to handle non exception
 *   ...
 * }
 *
 * Maybe<bool> Func() {
 *   ..
 *   RETURN_ON_EXCEPTION_VALUE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...),
 *       Nothing<bool>());
 *   // code to handle non exception
 *   return Just(true);
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Object, use
 * RETURN_FAILURE_ON_EXCEPTION instead.
 */
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value)            \
  do {                                                             \
    if ((call).is_null()) {                                        \
      DCHECK((isolate)->has_pending_exception());                  \
      return value;                                                \
    }                                                              \
  } while (false)

/**
 * RETURN_FAILURE_ON_EXCEPTION conditionally returns the "exception" sentinel if
 * the given MaybeHandle is empty; so it can only be used in functions with
 * return type Object, such as RUNTIME_FUNCTION(...) {...} or BUILTIN(...)
 * {...}. Example usage:
 *
 * RUNTIME_FUNCTION(Runtime_Func) {
 *   ...
 *   RETURN_FAILURE_ON_EXCEPTION(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...));
 *   // code to handle non exception
 *   ...
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
 * instead.
 * If inside a function with return type Maybe<X> or Handle<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)                     \
  do {                                                                 \
    Isolate* __isolate__ = (isolate);                                  \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                       \
                              ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

/**
 * RETURN_ON_EXCEPTION conditionally returns an empty MaybeHandle<T> if the
 * given MaybeHandle is empty. Use it to return immediately from a function with
 * return type MaybeHandle when an exception was thrown. Example usage:
 *
 * MaybeHandle<X> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleY(...),
 *       X);
 *   // code to handle non exception
 *   ...
 * }
 *
 * If inside a function with return type Object, use
 * RETURN_FAILURE_ON_EXCEPTION instead.
 * If inside a function with return type
 * Maybe<X> or Handle<X>, use RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_ON_EXCEPTION(isolate, call, T)  \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())


#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = isolate;                            \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)
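
// Example usage (a sketch): process n elements, opening a fresh HandleScope
// every 1024 iterations so handles allocated in the body do not accumulate
// in the caller's scope:
//
// FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < n, i++, {
//   Handle<Object> element = ...;  // Dies with the current inner scope.
// });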

#define FIELD_ACCESSOR(type, name)                 \
  inline void set_##name(type v) { name##_ = v; }  \
  inline type name() const { return name##_; }
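
// For instance, FIELD_ACCESSOR(uintptr_t, stack_limit) expands to a
// set_stack_limit() setter and a stack_limit() getter over a stack_limit_
// member.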

// Controls for manual embedded blob lifecycle management, used by tests and
// mksnapshot.
V8_EXPORT_PRIVATE void DisableEmbeddedBlobRefcounting();
V8_EXPORT_PRIVATE void FreeCurrentEmbeddedBlob();

class ThreadLocalTop {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop() = default;

  // Initialize the thread data.
  void Initialize(Isolate*);

  // Get the top C++ try catch handler or nullptr if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack.  If such an
  // address is needed, use try_catch_handler_address.
  FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)

  // Get the address of the top C++ try catch handler or nullptr if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack.  When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer.  When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  Address try_catch_handler_address() {
    return reinterpret_cast<Address>(
        v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
  }

  void Free();

  Isolate* isolate_ = nullptr;
  // The context where the current execution method is created and for variable
  // lookups.
  // TODO(3770): This field is read/written from generated code, so it would
  // be cleaner to make it an "Address raw_context_", and construct a Context
  // object in the getter. Same for {pending_handler_context_} below. In the
  // meantime, assert that the memory layout is the same.
  STATIC_ASSERT(sizeof(Context) == kPointerSize);
  Context context_;
  ThreadId thread_id_ = ThreadId::Invalid();
  Object pending_exception_;

  // Communication channel between Isolate::FindHandler and the CEntry.
  Context pending_handler_context_;
  Address pending_handler_entrypoint_ = kNullAddress;
  Address pending_handler_constant_pool_ = kNullAddress;
  Address pending_handler_fp_ = kNullAddress;
  Address pending_handler_sp_ = kNullAddress;

  // Communication channel between Isolate::Throw and message consumers.
  bool rethrowing_message_ = false;
  Object pending_message_obj_;

  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception.  We may want to
  // unify them later.
  Object scheduled_exception_;
  bool external_caught_exception_ = false;
  SaveContext* save_context_ = nullptr;

  // Stack.
  // The frame pointer of the top c entry frame.
  Address c_entry_fp_ = kNullAddress;
  // Try-blocks are chained through the stack.
  Address handler_ = kNullAddress;
  // C function that was called at c entry.
  Address c_function_ = kNullAddress;

  // Throwing an exception may cause a Promise rejection.  For this purpose
  // we keep track of a stack of nested promises and the corresponding
  // try-catch handlers.
  PromiseOnStack* promise_on_stack_ = nullptr;

#ifdef USE_SIMULATOR
  Simulator* simulator_ = nullptr;
#endif

  // The stack pointer of the bottom JS entry frame.
  Address js_entry_sp_ = kNullAddress;
  // The external callback we're currently in.
  ExternalCallbackScope* external_callback_scope_ = nullptr;
  StateTag current_vm_state_ = EXTERNAL;

  // Call back function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_ = nullptr;

  // Address of the thread-local "thread in wasm" flag.
  Address thread_in_wasm_flag_address_ = kNullAddress;

 private:
  v8::TryCatch* try_catch_handler_ = nullptr;
};

#ifdef DEBUG

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
  V(CommentStatistic, paged_space_comments_statistics, \
    CommentStatistic::kMaxComments + 1)                \
  V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

typedef std::vector<HeapObject> DebugObjectCache;

#define ISOLATE_INIT_LIST(V)                                                  \
  /* Assembler state. */                                                      \
  V(FatalErrorCallback, exception_behavior, nullptr)                          \
  V(OOMErrorCallback, oom_behavior, nullptr)                                  \
  V(LogEventCallback, event_logger, nullptr)                                  \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
  V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr)   \
  V(ExtensionCallback, wasm_module_callback, &NoExtension)                    \
  V(ExtensionCallback, wasm_instance_callback, &NoExtension)                  \
  V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr)      \
  V(WasmStreamingCallback, wasm_streaming_callback, nullptr)                  \
  V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr)       \
  /* State for Relocatable. */                                                \
  V(Relocatable*, relocatable_top, nullptr)                                   \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
  V(Object, string_stream_current_security_token, Object())                   \
  V(const intptr_t*, api_external_references, nullptr)                        \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
  V(MicrotaskQueue*, default_microtask_queue, nullptr)                        \
  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
  V(CodeTracer*, code_tracer, nullptr)                                        \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
  V(int, code_and_metadata_size, 0)                                           \
  V(int, bytecode_and_metadata_size, 0)                                       \
  V(int, external_script_source_size, 0)                                      \
  /* true if being profiled. Causes collection of extra compile info. */      \
  V(bool, is_profiling, false)                                                \
  /* true if a trace is being formatted through Error.prepareStackTrace. */   \
  V(bool, formatting_stack_trace, false)                                      \
  /* Perform side effect checks on function call and API callbacks. */        \
  V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints)  \
  /* Current code coverage mode */                                            \
  V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort)  \
  V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone)   \
  V(int, last_stack_frame_info_id, 0)                                         \
  V(int, last_console_context_id, 0)                                          \
  V(v8_inspector::V8Inspector*, inspector, nullptr)                           \
  V(bool, next_v8_call_is_safe_for_termination, false)                        \
  V(bool, only_terminate_in_safe_scope, false)                                \
  V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info)

#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
  inline void set_##name(type v) { thread_local_top_.name##_ = v; }  \
  inline type name() const { return thread_local_top_.name##_; }

#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top_.name##_; }
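
// For instance, THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id) expands to
// set_thread_id()/thread_id() accessors over thread_local_top_.thread_id_,
// and THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception) to a
// pending_exception_address() helper returning the member's address.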

// HiddenFactory exists so Isolate can privately inherit from it without making
// Factory's members available to Isolate directly.
class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};

class Isolate final : private HiddenFactory {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;
 public:
  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(nullptr),
#if USE_SIMULATOR
          simulator_(nullptr),
#endif
          next_(nullptr),
          prev_(nullptr) {
    }
    ~PerIsolateThreadData();
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };

  static void InitializeOncePerProcess();

  // Creates Isolate object. Must be used instead of constructing Isolate with
  // new operator.
  static V8_EXPORT_PRIVATE Isolate* New(
      IsolateAllocationMode mode = IsolateAllocationMode::kDefault);

  // Deletes Isolate object. Must be used instead of delete operator.
  // Destroys the non-default isolates.
  // Sets default isolate into "has_been_disposed" state rather than destroying,
  // for legacy API reasons.
  static void Delete(Isolate* isolate);

  // Returns allocation mode of this isolate.
  V8_INLINE IsolateAllocationMode isolate_allocation_mode();

  // Page allocator that must be used for allocating V8 heap pages.
  v8::PageAllocator* page_allocator();

  // Returns the PerIsolateThreadData for the current thread (or nullptr if one
  // is not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running or nullptr.
  V8_INLINE static Isolate* TryGetCurrent() {
    DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
    return reinterpret_cast<Isolate*>(
        base::Thread::GetExistingThreadLocal(isolate_key_));
  }

  // Returns the isolate inside which the current thread is running.
  V8_INLINE static Isolate* Current() {
    Isolate* isolate = TryGetCurrent();
    DCHECK_NOT_NULL(isolate);
    return isolate;
  }
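
  // Example (a sketch): any thread that has entered this isolate can recover
  // it from thread-local storage:
  //
  //   Isolate* isolate = Isolate::Current();  // DCHECKs a non-null isolate.
  //   HandleScope scope(isolate);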

  // Get the isolate that the given HeapObject lives in, returning true on
  // success. If the object is not writable (i.e. lives in read-only space),
  // return false.
  inline static bool FromWritableHeapObject(HeapObject obj, Isolate** isolate);
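
  // Example (a sketch):
  //
  //   Isolate* isolate;
  //   if (Isolate::FromWritableHeapObject(obj, &isolate)) {
  //     // obj lives in a writable space of isolate's heap.
  //   }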

  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();
  bool InitializeCounters();  // Returns false if already initialized.

  bool Init(StartupDeserializer* des);

  // True if at least one thread Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != nullptr; }

  void ReleaseSharedPtrs();

  void ClearSerializerData();

  bool LogObjectRelocation();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time, this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time, this should be prevented using external locking.
  void Exit();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

  // Find the PerThread for the given (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);

  // Discard the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, this is a no-op.
  void DiscardPerThreadDataForThisThread();

  // Returns the key used to store the pointer to the current isolate.
  // Used internally for V8 threads that do not execute JavaScript but still
  // are part of the domain of an isolate (like the context switcher).
  static base::Thread::LocalStorageKey isolate_key() {
    return isolate_key_;
  }

  static base::Thread::LocalStorageKey per_isolate_thread_data_key();

  // Mutex for serializing access to break control structures.
  base::RecursiveMutex* break_access() { return &break_access_; }

  Address get_address_from_id(IsolateAddressId id);

  // Access to top context (where the current function object was created).
  Context context() { return thread_local_top_.context_; }
  inline void set_context(Context context);
  Context* context_address() { return &thread_local_top_.context_; }

  THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)

  // Access to current thread id.
  THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)

  // Interface to pending exception.
  inline Object pending_exception();
  inline void set_pending_exception(Object exception_obj);
  inline void clear_pending_exception();

  bool AreWasmThreadsEnabled(Handle<Context> context);

  THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)

  inline bool has_pending_exception();

  THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)

  THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)

  v8::TryCatch* try_catch_handler() {
    return thread_local_top_.try_catch_handler();
  }
  bool* external_caught_exception_address() {
    return &thread_local_top_.external_caught_exception_;
  }

  THREAD_LOCAL_TOP_ADDRESS(Object, scheduled_exception)

  inline void clear_pending_message();
  Address pending_message_obj_address() {
    return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
  }

  inline Object scheduled_exception();
  inline bool has_scheduled_exception();
  inline void clear_scheduled_exception();

  bool IsJavaScriptHandlerOnTop(Object exception);
  bool IsExternalHandlerOnTop(Object exception);

  inline bool is_catchable_by_javascript(Object exception);

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
  Address c_function() { return thread_local_top_.c_function_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top_.c_entry_fp_;
  }
  inline Address* handler_address() { return &thread_local_top_.handler_; }
  inline Address* c_function_address() {
    return &thread_local_top_.c_function_;
  }

  // Bottom JS entry.
  Address js_entry_sp() {
    return thread_local_top_.js_entry_sp_;
  }
  inline Address* js_entry_sp_address() {
    return &thread_local_top_.js_entry_sp_;
  }

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  inline Handle<JSGlobalObject> global_object();

  // Returns the global proxy object of the current context.
  inline Handle<JSObject> global_proxy();

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top_.Free(); }

  // This method is called by the api after operations that may throw
  // exceptions.  If an exception was thrown and not handled by an external
  // handler the exception is scheduled to be rethrown when we return to running
  // JavaScript code.  If an exception is scheduled true is returned.
  V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool clear_exception);

  // Push and pop a promise and the current try-catch handler.
  void PushPromise(Handle<JSObject> promise);
  void PopPromise();

  // Return the relevant Promise that a throw/rejection pertains to, based
  // on the contents of the Promise stack.
  Handle<Object> GetPromiseOnStackOnThrow();

  // Heuristically guess whether a Promise is handled by a user catch handler.
  bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);

  class ExceptionScope {
   public:
    // Scope currently can only be used for regular exceptions,
    // not termination exception.
    inline explicit ExceptionScope(Isolate* isolate);
    inline ~ExceptionScope();

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
  };
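
  // Example (a sketch of the intended pattern): stash the pending exception
  // around code that must not observe it; the destructor restores it:
  //
  //   {
  //     ExceptionScope exception_scope(isolate);
  //     ...  // May set and handle its own pending exceptions.
  //   }  // The original pending exception is reinstated here.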

  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture,
      int frame_limit,
      StackTrace::StackTraceOptions options);

  void SetAbortOnUncaughtExceptionCallback(
      v8::Isolate::AbortOnUncaughtExceptionCallback callback);

  enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
  void PrintCurrentStackTrace(FILE* out);
  void PrintStack(StringStream* accumulator,
                  PrintStackMode mode = kPrintStackVerbose);
  V8_EXPORT_PRIVATE void PrintStack(FILE* out,
                                    PrintStackMode mode = kPrintStackVerbose);
  Handle<String> StackTraceString();
  // Stores a stack trace in a stack-allocated temporary buffer which will
  // end up in the minidump for debugging purposes.
  V8_NOINLINE void PushStackTraceAndDie(void* ptr1 = nullptr,
                                        void* ptr2 = nullptr,
                                        void* ptr3 = nullptr,
                                        void* ptr4 = nullptr);
  Handle<FixedArray> CaptureCurrentStackTrace(
      int frame_limit, StackTrace::StackTraceOptions options);
  Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                         FrameSkipMode mode,
                                         Handle<Object> caller);
  MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
      Handle<JSReceiver> error_object);
  MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
      Handle<JSReceiver> error_object, FrameSkipMode mode,
      Handle<Object> caller);
  Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);

  Address GetAbstractPC(int* line, int* column);

  // Returns true if the given context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be
  // set.
  bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(Handle<JSObject> receiver);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Object Throw(Object exception, MessageLocation* location = nullptr);
  Object ThrowIllegalOperation();

  template <typename T>
  V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(
      Handle<Object> exception, MessageLocation* location = nullptr) {
    Throw(*exception, location);
    return MaybeHandle<T>();
  }

  void set_console_delegate(debug::ConsoleDelegate* delegate) {
    console_delegate_ = delegate;
  }
  debug::ConsoleDelegate* console_delegate() { return console_delegate_; }

  void set_async_event_delegate(debug::AsyncEventDelegate* delegate) {
    async_event_delegate_ = delegate;
    PromiseHookStateUpdated();
  }
  void OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
                                   debug::DebugAsyncActionType);

  // Re-throw an exception.  This involves no error reporting since error
  // reporting was handled when the exception was thrown originally.
  Object ReThrow(Object exception);

  // Find the correct handler for the current pending exception. This also
  // clears and returns the current pending exception.
  Object UnwindAndFindHandler();

  // Tries to predict whether an exception will be caught. Note that this can
  // only produce an estimate, because it is undecidable whether a finally
  // clause will consume or re-throw an exception.
  enum CatchType {
    NOT_CAUGHT,
    CAUGHT_BY_JAVASCRIPT,
    CAUGHT_BY_EXTERNAL,
    CAUGHT_BY_DESUGARING,
    CAUGHT_BY_PROMISE,
    CAUGHT_BY_ASYNC_AWAIT
  };
  CatchType PredictExceptionCatcher();

  V8_EXPORT_PRIVATE void ScheduleThrow(Object exception);
  // Re-set pending message, script and positions reported to the TryCatch
  // back to the TLS for re-use when rethrowing.
  void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
  // Un-schedule an exception that was caught by a TryCatch handler.
  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
  void ReportPendingMessages();
  void ReportPendingMessagesFromJavaScript();

  // Implements code shared between the two above methods.
  void ReportPendingMessagesImpl(bool report_externally);

  // Return pending location if any or unfilled structure.
  MessageLocation GetMessageLocation();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Object PromoteScheduledException();

  // Attempts to compute the current source location, storing the
  // result in the target out parameter. The source location is attached to a
  // Message object as the location which should be shown to the user. It's
  // typically the top-most meaningful location on the stack.
  bool ComputeLocation(MessageLocation* target);
  bool ComputeLocationFromException(MessageLocation* target,
                                    Handle<Object> exception);
  bool ComputeLocationFromStackTrace(MessageLocation* target,
                                     Handle<Object> exception);

  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
                                        MessageLocation* location);

  // Out of resource exception helpers.
  Object StackOverflow();
  Object TerminateExecution();
  void CancelTerminateExecution();

  void RequestInterrupt(InterruptCallback callback, void* data);
  void InvokeApiInterruptCallbacks();

  // Administration
  void Iterate(RootVisitor* v);
  void Iterate(RootVisitor* v, ThreadLocalTop* t);
  char* Iterate(RootVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v, char* t);

  // Returns the current native context.
  inline Handle<NativeContext> native_context();
  inline NativeContext raw_native_context();

  Handle<Context> GetIncumbentContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                       \
  inline type name() const {                                            \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return name##_;                                                     \
  }                                                                     \
  inline void set_##name(type value) {                                  \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    name##_ = value;                                                    \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                       \
  inline type* name() {                                                 \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return &(name##_)[0];                                               \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type##ArgType value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  // Use for updating counters on a foreground thread.
  Counters* counters() { return async_counters().get(); }
  // Use for updating counters on a background thread.
  const std::shared_ptr<Counters>& async_counters() {
    // Make sure InitializeCounters() has been called.
    DCHECK_NOT_NULL(async_counters_.get());
    return async_counters_;
  }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK_NOT_NULL(logger_);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }

  const IsolateData* isolate_data() const { return &isolate_data_; }
  IsolateData* isolate_data() { return &isolate_data_; }

  // Generated code can embed this address to get access to the isolate-specific
  // data (for example, roots, external references, builtins, etc.).
  // The kRootRegister is set to this value.
  Address isolate_root() const { return isolate_data()->isolate_root(); }
  static size_t isolate_root_bias() {
    return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
  }

  RootsTable& roots_table() { return isolate_data()->roots(); }

  // A sub-region of the Isolate object that has "predictable" layout which
  // depends only on the pointer size and therefore it's guaranteed that there
  // will be no compatibility issues because of different compilers used for
  // snapshot generator and actual V8 code.
  // Thus, kRootRegister may be used to address any location that falls into
  // this region.
  // See IsolateData::AssertPredictableLayout() for details.
  base::AddressRegion root_register_addressable_region() const {
    return base::AddressRegion(reinterpret_cast<Address>(&isolate_data_),
                               sizeof(IsolateData));
  }

  Object root(RootIndex index) { return Object(roots_table()[index]); }

  Handle<Object> root_handle(RootIndex index) {
    return Handle<Object>(&roots_table()[index]);
  }

  ExternalReferenceTable* external_reference_table() {
    DCHECK(isolate_data()->external_reference_table()->is_initialized());
    return isolate_data()->external_reference_table();
  }

  Address* builtin_entry_table() { return isolate_data_.builtin_entry_table(); }
  V8_INLINE Address* builtins_table() { return isolate_data_.builtins(); }

  StubCache* load_stub_cache() { return load_stub_cache_; }
  StubCache* store_stub_cache() { return store_stub_cache_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }

  static uint32_t thread_in_wasm_flag_address_offset() {
    // For WebAssembly trap handlers there is a flag in thread-local storage
    // which indicates that the executing thread executes WebAssembly code. To
    // access this flag directly from generated code, we store a pointer to the
    // flag in ThreadLocalTop in thread_in_wasm_flag_address_. This function
    // here returns the offset of that member from {isolate_root()}.
    return static_cast<uint32_t>(
        OFFSET_OF(Isolate, thread_local_top_.thread_in_wasm_flag_address_) -
        isolate_root_bias());
  }

  MaterializedObjectStore* materialized_object_store() {
    return materialized_object_store_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() {
    return unicode_cache_;
  }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() { return global_handles_; }

  EternalHandles* eternal_handles() { return eternal_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  std::vector<int>* regexp_indices() { return &regexp_indices_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &regexp_macro_assembler_canonicalize_;
  }

  Debug* debug() { return debug_; }

  bool* is_profiling_address() { return &is_profiling_; }
  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  static size_t non_disposed_isolates() { return non_disposed_isolates_; }
#endif

  v8::internal::Factory* factory() {
    // Upcast to the privately inherited base-class using c-style casts to
    // avoid undefined behavior (as static_cast cannot cast across private
    // bases).
    // NOLINTNEXTLINE (google-readability-casting)
    return (v8::internal::Factory*)this;  // NOLINT(readability/casting)
  }

  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)

  void SetData(uint32_t slot, void* data) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    isolate_data_.embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    return isolate_data_.embedder_data_[slot];
  }

  bool serializer_enabled() const { return serializer_enabled_; }

  void enable_serializer() { serializer_enabled_ = true; }

  bool snapshot_available() const {
    return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_optimizer();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool NeedsDetailedOptimizedCodeLineInfo() const;

  bool is_best_effort_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBestEffort;
  }

  bool is_precise_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseCount;
  }

  bool is_precise_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseBinary;
  }

  bool is_block_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockCount;
  }

  bool is_block_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockBinary;
  }

  bool is_block_code_coverage() const {
    return is_block_count_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_collecting_type_profile() const {
    return type_profile_mode() == debug::TypeProfile::kCollect;
  }

  // Collect feedback vectors with data for code coverage or type profile.
  // Reset the list when code coverage and type profile are no longer needed.
  // This keeps many feedback vectors alive, but code coverage and type
  // profile are used for debugging only, so an increase in memory usage is
  // expected.
  void SetFeedbackVectorsForProfilingTools(Object value);

  void MaybeInitializeVectorListFromHeap();

  double time_millis_since_init() {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() {
    return date_cache_;
  }

  void set_date_cache(DateCache* date_cache);

#ifdef V8_INTL_SUPPORT

  const std::string& default_locale() { return default_locale_; }

  void set_default_locale(const std::string& locale) {
    DCHECK_EQ(default_locale_.length(), 0);
    default_locale_ = locale;
  }

  // Enum used to access the ICU object cache.
  enum class ICUObjectCacheType{
      kDefaultCollator, kDefaultNumberFormat, kDefaultSimpleDateFormat,
      kDefaultSimpleDateFormatForTime, kDefaultSimpleDateFormatForDate};

  icu::UObject* get_cached_icu_object(ICUObjectCacheType cache_type);
  void set_icu_object_in_cache(ICUObjectCacheType cache_type,
                               std::shared_ptr<icu::UObject> obj);
  void clear_cached_icu_object(ICUObjectCacheType cache_type);
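
  // Example (a sketch; the null-check convention is an assumption):
  //
  //   icu::UObject* cached = isolate->get_cached_icu_object(
  //       ICUObjectCacheType::kDefaultCollator);
  //   if (cached == nullptr) {
  //     // Create the object, then publish it for reuse:
  //     // isolate->set_icu_object_in_cache(
  //     //     ICUObjectCacheType::kDefaultCollator, std::move(obj));
  //   }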

1225 1226
#endif  // V8_INTL_SUPPORT

1227 1228
  static const int kProtectorValid = 1;
  static const int kProtectorInvalid = 0;
1229

1230
  inline bool IsArrayConstructorIntact();
1231 1232 1233 1234

  // The version with an explicit context parameter can be used when
  // Isolate::context is not set up, e.g. when calling directly into C++ from
  // CSA.
1235
  bool IsNoElementsProtectorIntact(Context context);
1236
  bool IsNoElementsProtectorIntact();
1237

1238
  bool IsArrayOrObjectOrStringPrototype(Object object);
1239

1240 1241
  inline bool IsArraySpeciesLookupChainIntact();
  inline bool IsTypedArraySpeciesLookupChainIntact();
1242
  inline bool IsRegExpSpeciesLookupChainIntact();
1243
  inline bool IsPromiseSpeciesLookupChainIntact();
1244
  bool IsIsConcatSpreadableLookupChainIntact();
1245
  bool IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver);
1246
  inline bool IsStringLengthOverflowIntact();
1247
  inline bool IsArrayIteratorLookupChainIntact();
1248

1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273
  // The MapIterator protector protects the original iteration behaviors of
  // Map.prototype.keys(), Map.prototype.values(), and Set.prototype.entries().
  // It does not protect the original iteration behavior of
  // Map.prototype[Symbol.iterator](). The protector is invalidated when:
  // * The 'next' property is set on an object where the property holder is the
  //   %MapIteratorPrototype% (e.g. because the object is that very prototype).
  // * The 'Symbol.iterator' property is set on an object where the property
  //   holder is the %IteratorPrototype%. Note that this also invalidates the
  //   SetIterator protector (see below).
  inline bool IsMapIteratorLookupChainIntact();

  // The SetIterator protector protects the original iteration behavior of
  // Set.prototype.keys(), Set.prototype.values(), Set.prototype.entries(),
  // and Set.prototype[Symbol.iterator](). The protector is invalidated when:
  // * The 'next' property is set on an object where the property holder is the
  //   %SetIteratorPrototype% (e.g. because the object is that very prototype).
  // * The 'Symbol.iterator' property is set on an object where the property
  //   holder is the %SetPrototype% OR %IteratorPrototype%. This means that
  //   setting Symbol.iterator on a MapIterator object can also invalidate the
  //   SetIterator protector, and vice versa, setting Symbol.iterator on a
  //   SetIterator object can also invalidate the MapIterator. This is an over-
  //   approximation for the sake of simplicity.
  inline bool IsSetIteratorLookupChainIntact();

  // The StringIteratorProtector protects the original string iteration behavior
1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284
  // for primitive strings. As long as the StringIteratorProtector is valid,
  // iterating over a primitive string is guaranteed to be unobservable from
  // user code and can thus be cut short. More specifically, the protector gets
  // invalidated as soon as either String.prototype[Symbol.iterator] or
  // String.prototype[Symbol.iterator]().next is modified. This guarantee does
  // not apply to string objects (as opposed to primitives), since they could
  // define their own Symbol.iterator.
  // String.prototype itself does not need to be protected, since it is
  // non-configurable and non-writable.
  inline bool IsStringIteratorLookupChainIntact();

1285 1286
  // Make sure we do check for detached array buffers.
  inline bool IsArrayBufferDetachingIntact();
1287

1288 1289 1290 1291
  // Disable promise optimizations if promise (debug) hooks have ever been
  // active.
  bool IsPromiseHookProtectorIntact();

1292 1293 1294 1295
  // Make sure a lookup of "resolve" on the %Promise% intrinsic object
  // yeidls the initial Promise.resolve method.
  bool IsPromiseResolveLookupChainIntact();

1296
  // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
1297 1298 1299 1300
  // initial %PromisePrototype% yields the initial method. In addition this
  // protector also guards the negative lookup of "then" on the intrinsic
  // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
  // undefined without triggering any side-effects.
  bool IsPromiseThenLookupChainIntact();
  bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);

  // On intent to set an element in an object, make sure that appropriate
  // notifications occur if the set is on the elements of the array or
  // object prototype. Also ensure that changes to the prototype chain
  // between Array and Object fire notifications.
  void UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object);
  void UpdateNoElementsProtectorOnSetLength(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void InvalidateArrayConstructorProtector();
  void InvalidateArraySpeciesProtector();
  void InvalidateTypedArraySpeciesProtector();
  void InvalidateRegExpSpeciesProtector();
  void InvalidatePromiseSpeciesProtector();
  void InvalidateIsConcatSpreadableProtector();
  void InvalidateStringLengthOverflowProtector();
  void InvalidateArrayIteratorProtector();
  void InvalidateMapIteratorProtector();
  void InvalidateSetIteratorProtector();
  void InvalidateStringIteratorProtector();
  void InvalidateArrayBufferDetachingProtector();
  V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
  void InvalidatePromiseResolveProtector();
  void InvalidatePromiseThenProtector();

  // Returns true if array is the initial array prototype in any native context.
  bool IsAnyInitialArrayPrototype(Handle<JSArray> array);

  void IterateDeferredHandles(RootVisitor* visitor);
  void LinkDeferredHandles(DeferredHandles* deferred_handles);
  void UnlinkDeferredHandles(DeferredHandles* deferred_handles);

#ifdef DEBUG
  bool IsDeferredHandle(Address* location);
#endif  // DEBUG

  bool concurrent_recompilation_enabled() {
    // Thread is only available with flag enabled.
    DCHECK(optimizing_compile_dispatcher_ == nullptr ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != nullptr;
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    return optimizing_compile_dispatcher_;
  }
  // Flushes all pending concurrent optimization jobs from the optimizing
  // compile dispatcher's queue.
  void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);

  int id() const { return static_cast<int>(id_); }

  CompilationStatistics* GetTurboStatistics();
  CodeTracer* GetCodeTracer();

  void DumpAndResetStats();

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  void set_force_slow_path(bool v) { force_slow_path_ = v; }
  bool force_slow_path() const { return force_slow_path_; }
  bool* force_slow_path_address() { return &force_slow_path_; }

  DebugInfo::ExecutionMode* debug_execution_mode_address() {
    return &debug_execution_mode_;
  }

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* fuzzer_rng();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
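  // Conceptually (an illustrative sketch, not the actual implementation):
  //
  //   int hash;
  //   do {
  //     hash = random_number_generator()->NextInt() & mask;
  //   } while (hash == 0);
  //   return hash;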
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Code FindCodeObject(Address a);

  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
  void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                   size_t heap_limit);
  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback();

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  void RunMicrotasks();

  Handle<Symbol> SymbolFor(RootIndex dictionary_index, Handle<String> name,
                           bool private_symbol);

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);

  static std::string GetTurboCfgFileName(Isolate* isolate);

#if V8_SFI_HAS_UNIQUE_ID
  int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif

  Address promise_hook_address() {
    return reinterpret_cast<Address>(&promise_hook_);
  }

  Address async_event_delegate_address() {
    return reinterpret_cast<Address>(&async_event_delegate_);
  }

  Address promise_hook_or_async_event_delegate_address() {
    return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
  }

  Address default_microtask_queue_address() {
    return reinterpret_cast<Address>(&default_microtask_queue_);
  }

  Address promise_hook_or_debug_is_active_or_async_event_delegate_address() {
    return reinterpret_cast<Address>(
        &promise_hook_or_debug_is_active_or_async_event_delegate_);
  }

  Address handle_scope_implementer_address() {
    return reinterpret_cast<Address>(&handle_scope_implementer_);
  }

  void SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
                              void* data);
  void RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
                              Handle<JSArrayBuffer> array_buffer,
                              size_t offset_in_bytes, int64_t value,
                              double timeout_in_ms,
                              AtomicsWaitWakeHandle* stop_handle);

  void SetPromiseHook(PromiseHook hook);
  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
                      Handle<Object> parent);
  void PromiseHookStateUpdated();

  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  std::vector<Object>* read_only_object_cache() {
    return &read_only_object_cache_;
  }

  std::vector<Object>* partial_snapshot_cache() {
    return &partial_snapshot_cache_;
  }

  // Off-heap builtins cannot embed constants within the code object itself,
  // and thus need to load them from the root list.
  // TODO(jgruber): Rename to IsGeneratingEmbeddedBuiltins().
  bool ShouldLoadConstantsFromRootList() const {
    return FLAG_embedded_builtins &&
           builtins_constants_table_builder() != nullptr;
  }

  BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
    return builtins_constants_table_builder_;
  }

  static const uint8_t* CurrentEmbeddedBlob();
  static uint32_t CurrentEmbeddedBlobSize();

  // These always return the same result as the static methods above, but
  // don't access the global atomic variable (and thus might be slightly
  // faster).
  const uint8_t* embedded_blob() const;
  uint32_t embedded_blob_size() const;

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }

  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }

  CancelableTaskManager* cancelable_task_manager() {
    return cancelable_task_manager_;
  }

  const AstStringConstants* ast_string_constants() const {
    return ast_string_constants_;
  }

  interpreter::Interpreter* interpreter() const { return interpreter_; }

  compiler::PerIsolateCompilerCache* compiler_cache() const {
    return compiler_cache_;
  }
  void set_compiler_utils(compiler::PerIsolateCompilerCache* cache,
                          Zone* zone) {
    compiler_cache_ = cache;
    compiler_zone_ = zone;
  }

  AccountingAllocator* allocator() { return allocator_; }

  CompilerDispatcher* compiler_dispatcher() const {
    return compiler_dispatcher_;
  }

  bool IsInAnyContext(Object object, uint32_t index);

  void SetHostImportModuleDynamicallyCallback(
      HostImportModuleDynamicallyCallback callback);
  MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
      Handle<Script> referrer, Handle<Object> specifier);

  void SetHostInitializeImportMetaObjectCallback(
      HostInitializeImportMetaObjectCallback callback);
  Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
      Handle<Module> module);

  void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
  MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
                                                   Handle<JSObject> Error,
                                                   Handle<JSArray> sites);
  bool HasPrepareStackTraceCallback() const;

  void SetRAILMode(RAILMode rail_mode);

  RAILMode rail_mode() { return rail_mode_.Value(); }

  double LoadStartTimeMs();

  void IsolateInForegroundNotification();

  void IsolateInBackgroundNotification();

  bool IsIsolateInBackground() { return is_isolate_in_background_; }

  void EnableMemorySavingsMode() { memory_savings_mode_active_ = true; }

  void DisableMemorySavingsMode() { memory_savings_mode_active_ = false; }

  bool IsMemorySavingsModeActive() { return memory_savings_mode_active_; }

  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);

  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
  bool allow_atomics_wait() { return allow_atomics_wait_; }

  // Register a finalizer to be called at isolate teardown.
  void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  // Removes a previously-registered shared object finalizer.
  void UnregisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  size_t elements_deletion_counter() { return elements_deletion_counter_; }
  void set_elements_deletion_counter(size_t value) {
    elements_deletion_counter_ = value;
  }

  wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
  void SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine);


  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
    return top_backup_incumbent_scope_;
  }
  void set_top_backup_incumbent_scope(
      const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
    top_backup_incumbent_scope_ = top_backup_incumbent_scope;
  }

  void SetIdle(bool is_idle);

 private:
  explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
  ~Isolate();

  void CheckIsolateLayout();

  class ThreadDataTable {
   public:
    ThreadDataTable() = default;

    PerIsolateThreadData* Lookup(ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads();

   private:
    struct Hasher {
      std::size_t operator()(const ThreadId& t) const {
        return std::hash<int>()(t.ToInteger());
      }
    };

    std::unordered_map<ThreadId, PerIsolateThreadData*, Hasher> table_;
  };

  // These items form a stack synchronously with threads Enter'ing and Exit'ing
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item pushed to the stack.
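  //
  // For example (illustrative):
  //
  //   isolate->Enter();
  //   isolate->Enter();  // same thread: entry_count becomes 2
  //   isolate->Exit();
  //   isolate->Exit();   // stack is empty again; the Isolate may be Disposed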
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate,
                   EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) { }

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };

  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
  static base::Thread::LocalStorageKey isolate_key_;

  // A global counter for all generated Isolates; it might overflow.
  static base::Atomic32 isolate_counter_;

#if DEBUG
  static base::Atomic32 isolate_key_created_;
#endif

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  void InitializeThreadLocal();

  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  // Propagates the pending exception message to the v8::TryCatch.
  // Returns true if there is no external try-catch or if the message was
  // successfully propagated.
  bool PropagatePendingExceptionToExternalTryCatch();

  void SetTerminationOnExternalTryCatch();

  void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
                                           Handle<JSPromise> promise);

  const char* RAILModeName(RAILMode rail_mode) const {
    switch (rail_mode) {
      case PERFORMANCE_RESPONSE:
        return "RESPONSE";
      case PERFORMANCE_ANIMATION:
        return "ANIMATION";
      case PERFORMANCE_IDLE:
        return "IDLE";
      case PERFORMANCE_LOAD:
        return "LOAD";
    }
    return "";
  }

  // This class contains a collection of data accessible from both C++ runtime
  // and compiled code (including assembly stubs, builtins, interpreter bytecode
  // handlers and optimized code).
  IsolateData isolate_data_;

  std::unique_ptr<IsolateAllocator> isolate_allocator_;
  Heap heap_;

  base::Atomic32 id_;
  EntryStackItem* entry_stack_ = nullptr;
  int stack_trace_nesting_level_ = 0;
  StringStream* incomplete_message_ = nullptr;
  Address isolate_addresses_[kIsolateAddressCount + 1] = {};
  Bootstrapper* bootstrapper_ = nullptr;
  RuntimeProfiler* runtime_profiler_ = nullptr;
  CompilationCache* compilation_cache_ = nullptr;
  std::shared_ptr<Counters> async_counters_;
  base::RecursiveMutex break_access_;
  Logger* logger_ = nullptr;
  StackGuard stack_guard_;
  StubCache* load_stub_cache_ = nullptr;
  StubCache* store_stub_cache_ = nullptr;
  DeoptimizerData* deoptimizer_data_ = nullptr;
  bool deoptimizer_lazy_throw_ = false;
  MaterializedObjectStore* materialized_object_store_ = nullptr;
  ThreadLocalTop thread_local_top_;
  bool capture_stack_trace_for_uncaught_exceptions_ = false;
  int stack_trace_for_uncaught_exceptions_frame_limit_ = 0;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_ =
      StackTrace::kOverview;
  DescriptorLookupCache* descriptor_lookup_cache_ = nullptr;
  HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_ = nullptr;
  UnicodeCache* unicode_cache_ = nullptr;
  AccountingAllocator* allocator_ = nullptr;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_ = nullptr;
  GlobalHandles* global_handles_ = nullptr;
  EternalHandles* eternal_handles_ = nullptr;
  ThreadManager* thread_manager_ = nullptr;
  RuntimeState runtime_state_;
  Builtins builtins_;
  SetupIsolateDelegate* setup_delegate_ = nullptr;
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
  RegExpStack* regexp_stack_ = nullptr;
  std::vector<int> regexp_indices_;
  DateCache* date_cache_ = nullptr;
  base::RandomNumberGenerator* random_number_generator_ = nullptr;
  base::RandomNumberGenerator* fuzzer_rng_ = nullptr;
  base::AtomicValue<RAILMode> rail_mode_;
  v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
  void* atomics_wait_callback_data_ = nullptr;
  PromiseHook promise_hook_ = nullptr;
  HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
      nullptr;
  HostInitializeImportMetaObjectCallback
      host_initialize_import_meta_object_callback_ = nullptr;
  base::Mutex rail_mutex_;
  double load_start_time_ms_ = 0;

#ifdef V8_INTL_SUPPORT
  std::string default_locale_;

  struct ICUObjectCacheTypeHash {
    std::size_t operator()(ICUObjectCacheType a) const {
      return static_cast<std::size_t>(a);
    }
  };
  std::unordered_map<ICUObjectCacheType, std::shared_ptr<icu::UObject>,
                     ICUObjectCacheTypeHash>
      icu_object_cache_;

#endif  // V8_INTL_SUPPORT

  // Whether the isolate has been created for snapshotting.
  bool serializer_enabled_ = false;

  // True if fatal error has been signaled for this isolate.
  bool has_fatal_error_ = false;

  // True if this isolate was initialized from a snapshot.
  bool initialized_from_snapshot_ = false;

  // TODO(ishell): remove
  // True if ES2015 tail call elimination feature is enabled.
  bool is_tail_call_elimination_enabled_ = true;

  // True if the isolate is in background. This flag is used
  // to prioritize between memory usage and latency.
  bool is_isolate_in_background_ = false;

  // True if the isolate is in memory savings mode. This flag is used to
  // favor memory over runtime performance.
  bool memory_savings_mode_active_ = false;

  // Time stamp at initialization.
  double time_millis_at_init_ = 0;

#ifdef DEBUG
  static std::atomic<size_t> non_disposed_isolates_;

  JSObject::SpillInformation js_spill_information_;
#endif

  Debug* debug_ = nullptr;
  HeapProfiler* heap_profiler_ = nullptr;
  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;

  const AstStringConstants* ast_string_constants_ = nullptr;

  interpreter::Interpreter* interpreter_ = nullptr;

  compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
  Zone* compiler_zone_ = nullptr;

  CompilerDispatcher* compiler_dispatcher_ = nullptr;

  typedef std::pair<InterruptCallback, void*> InterruptEntry;
  std::queue<InterruptEntry> api_interrupts_queue_;

#define GLOBAL_BACKING_STORE(type, name, initialvalue)                         \
  type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length)                         \
  type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
  V8_EXPORT_PRIVATE static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  DeferredHandles* deferred_handles_head_ = nullptr;
  OptimizingCompileDispatcher* optimizing_compile_dispatcher_ = nullptr;

  // Counts deopt points if deopt_every_n_times is enabled.
  unsigned int stress_deopt_count_ = 0;

  bool force_slow_path_ = false;

  int next_optimization_id_ = 0;

#if V8_SFI_HAS_UNIQUE_ID
  int next_unique_sfi_id_ = 0;
#endif

  // Vector of callbacks before a Call starts execution.
  std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;

  // Vector of callbacks when a Call completes.
  std::vector<CallCompletedCallback> call_completed_callbacks_;

  v8::Isolate::UseCounterCallback use_counter_callback_ = nullptr;

  std::vector<Object> read_only_object_cache_;
  std::vector<Object> partial_snapshot_cache_;

  // Used during builtins compilation to build the builtins constants table,
  // which is stored on the root list prior to serialization.
  BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;

  void InitializeDefaultEmbeddedBlob();
  void CreateAndSetEmbeddedBlob();
  void TearDownEmbeddedBlob();

  void SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size);
  void ClearEmbeddedBlob();

  const uint8_t* embedded_blob_ = nullptr;
  uint32_t embedded_blob_size_ = 0;

  v8::ArrayBuffer::Allocator* array_buffer_allocator_ = nullptr;

  FutexWaitListNode futex_wait_list_node_;

  CancelableTaskManager* cancelable_task_manager_ = nullptr;

  debug::ConsoleDelegate* console_delegate_ = nullptr;

  debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
  bool promise_hook_or_async_event_delegate_ = false;
  bool promise_hook_or_debug_is_active_or_async_event_delegate_ = false;
  int async_task_count_ = 0;

  v8::Isolate::AbortOnUncaughtExceptionCallback
      abort_on_uncaught_exception_callback_ = nullptr;

  bool allow_atomics_wait_ = true;

  base::Mutex managed_ptr_destructors_mutex_;
  ManagedPtrDestructor* managed_ptr_destructors_head_ = nullptr;

  size_t total_regexp_code_generated_ = 0;

  size_t elements_deletion_counter_ = 0;

  std::shared_ptr<wasm::WasmEngine> wasm_engine_;

  std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;

  // The top entry of the v8::Context::BackupIncumbentScope stack.
  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
      nullptr;

  PrepareStackTraceCallback prepare_stack_trace_callback_ = nullptr;

  // TODO(kenton@cloudflare.com): This mutex can be removed if
  // thread_data_table_ is always accessed under the isolate lock. I do not
  // know if this is the case, so I'm preserving it for now.
  base::Mutex thread_data_table_mutex_;
  ThreadDataTable thread_data_table_;

  // Delete new/delete operators to ensure that Isolate::New() and
  // Isolate::Delete() are used for Isolate creation and deletion.
  void* operator new(size_t, void* ptr) { return ptr; }
  void* operator new(size_t) = delete;
  void operator delete(void*) = delete;

  friend class heap::HeapTester;
  friend class TestSerializer;

  DISALLOW_COPY_AND_ASSIGN(Isolate);
};


#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR


class PromiseOnStack {
 public:
  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
      : promise_(promise), prev_(prev) {}
  Handle<JSObject> promise() { return promise_; }
  PromiseOnStack* prev() { return prev_; }

 private:
  Handle<JSObject> promise_;
  PromiseOnStack* prev_;
};


// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a workaround for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
class V8_EXPORT_PRIVATE SaveContext {
 public:
  explicit SaveContext(Isolate* isolate);
  ~SaveContext();

  Handle<Context> context() { return context_; }
  SaveContext* prev() { return prev_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(StandardFrame* frame);

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  SaveContext* const prev_;
  Address c_entry_fp_;
};
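
// Illustrative usage (a sketch; other_context is hypothetical): preserve the
// current context across code that switches contexts:
//
//   {
//     SaveContext save(isolate);
//     isolate->set_context(*other_context);
//     // ... runs with other_context as the current context ...
//   }  // the destructor restores the previous context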

class AssertNoContextChange {
#ifdef DEBUG
 public:
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() {
    DCHECK(isolate_->context() == *context_);
  }

 private:
  Isolate* isolate_;
  Handle<Context> context_;
#else
 public:
  explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};

class ExecutionAccess {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access()->TryLock();
  }

 private:
  Isolate* isolate_;
};
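
// Illustrative usage (a sketch): hold the isolate's break access for the
// duration of a scope:
//
//   {
//     ExecutionAccess access(isolate);
//     // break_access() is locked here
//   }  // unlocked when the scope exits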


// Support for checking for stack-overflows.
class StackLimitCheck {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }

  // Use this to check for stack-overflows in C++ code.
  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->real_climit();
  }

  // Use this to check for interrupt request in C++ code.
  bool InterruptRequested() {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->climit();
  }
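
  // Illustrative usage (a sketch; NextItem() and Process() are hypothetical):
  // long-running C++ loops can poll for interrupts and service them through
  // the stack guard:
  //
  //   StackLimitCheck check(isolate);
  //   while (Item* item = NextItem()) {
  //     if (check.InterruptRequested()) {
  //       isolate->stack_guard()->HandleInterrupts();
  //     }
  //     Process(item);
  //   }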

  // Use this to check for stack-overflow when entering runtime from JS code.
  bool JsHasOverflowed(uintptr_t gap = 0) const;

 private:
  Isolate* isolate_;
};

#define STACK_CHECK(isolate, result_value) \
  do {                                     \
    StackLimitCheck stack_check(isolate);  \
    if (stack_check.HasOverflowed()) {     \
      isolate->StackOverflow();            \
      return result_value;                 \
    }                                      \
  } while (false)
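
// Illustrative usage (a sketch; the runtime function name is hypothetical):
//
//   RUNTIME_FUNCTION(Runtime_HypotheticalRecurse) {
//     HandleScope scope(isolate);
//     STACK_CHECK(isolate, ReadOnlyRoots(isolate).exception());
//     // ... work that may recurse ...
//   }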

// A scope that intercepts only interrupts matching its interrupt_mask and
// does not affect other interrupts.
class InterruptsScope {
 public:
  enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };

  virtual ~InterruptsScope() {
    if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
  }

  // Find the scope that intercepts this interrupt; it is either the outermost
  // PostponeInterruptsScope or the innermost SafeForInterruptsScope, if any.
  // Returns whether the interrupt has been intercepted.
  bool Intercept(StackGuard::InterruptFlag flag);

  InterruptsScope(Isolate* isolate, int intercept_mask, Mode mode)
      : stack_guard_(isolate->stack_guard()),
        intercept_mask_(intercept_mask),
        intercepted_flags_(0),
        mode_(mode) {
    if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
  }

 private:
  StackGuard* stack_guard_;
  int intercept_mask_;
  int intercepted_flags_;
  Mode mode_;
  InterruptsScope* prev_;

  friend class StackGuard;
};

// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope : public InterruptsScope {
 public:
  PostponeInterruptsScope(Isolate* isolate,
                          int intercept_mask = StackGuard::ALL_INTERRUPTS)
      : InterruptsScope(isolate, intercept_mask,
                        InterruptsScope::kPostponeInterrupts) {}
  ~PostponeInterruptsScope() override = default;
};

// Support for overriding PostponeInterruptsScope. An interrupt is not ignored
// if the innermost scope is a SafeForInterruptsScope, regardless of any outer
// PostponeInterruptsScopes.
class SafeForInterruptsScope : public InterruptsScope {
 public:
  SafeForInterruptsScope(Isolate* isolate,
                         int intercept_mask = StackGuard::ALL_INTERRUPTS)
      : InterruptsScope(isolate, intercept_mask,
                        InterruptsScope::kRunInterrupts) {}
  ~SafeForInterruptsScope() override = default;
};
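
// Illustrative nesting (a sketch of the intended semantics): the innermost
// matching scope intercepts an interrupt, so interrupts raised inside the
// inner scope below are serviced even though an outer PostponeInterruptsScope
// is active:
//
//   PostponeInterruptsScope postpone(isolate);
//   {
//     SafeForInterruptsScope safe(isolate);
//     // interrupts covered by safe's mask are run here, not postponed
//   }
//   // matching interrupts are postponed again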
class StackTraceFailureMessage {
 public:
  explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
                                    void* ptr2 = nullptr, void* ptr3 = nullptr,
                                    void* ptr4 = nullptr);

  V8_NOINLINE void Print() volatile;

  static const uintptr_t kStartMarker = 0xdecade30;
  static const uintptr_t kEndMarker = 0xdecade31;
  static const int kStacktraceBufferSize = 32 * KB;

  uintptr_t start_marker_ = kStartMarker;
  void* isolate_;
  void* ptr1_;
  void* ptr2_;
  void* ptr3_;
  void* ptr4_;
  void* code_objects_[4];
  char js_stack_trace_[kStacktraceBufferSize];
  uintptr_t end_marker_ = kEndMarker;
};
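
// Illustrative usage (a sketch): constructed on the stack at a fatal-error
// site so that the start/end markers and the captured pointers are easy to
// locate in crash dumps:
//
//   StackTraceFailureMessage message(isolate, ptr1, ptr2);
//   message.Print();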

}  // namespace internal
}  // namespace v8

#endif  // V8_ISOLATE_H_