Commit 8039e271 authored by Jakob Gruber, committed by V8 LUCI CQ

[tiering] Move feedback allocation and SP tiering into TieringManager

- Move feedback vector allocation and Sparkplug tiering decisions into
  TieringManager.
- Rename OnInterruptTickFromBytecode to OnInterruptTick (it's called
  from both Code and Bytecode).
- Remove the SealHandleScope / HandleScope dance.

Bug: v8:7700
Change-Id: I800542deb3805097a589b7766907efb6f40c7dae
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3467875
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79220}
parent c832c6b1
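
To make the diff below easier to follow, here is a self-contained toy model of the tier-up rule this CL centralizes in TieringManager::OnInterruptTick (plain C++, not V8 source; all names are illustrative): the first interrupt tick only allocates the feedback vector, Sparkplug compilation is requested eagerly, and Turbofan is considered only on a tick where the function already had a feedback vector.

// Toy model of the new tier-up decision; not V8 source.
#include <cstdio>

enum class Tier { kIgnition, kSparkplug, kTurbofan };

struct Function {
  Tier tier = Tier::kIgnition;
  bool has_feedback_vector = false;  // In V8 this is lazily allocated.
};

void OnInterruptTick(Function& f) {
  const bool had_feedback_vector = f.has_feedback_vector;
  if (!had_feedback_vector) {
    // First tick: allocate feedback. 'Ignition without a vector' is
    // effectively a tier of its own.
    f.has_feedback_vector = true;
  }

  // Sparkplug is requested even on the allocation tick.
  if (f.tier < Tier::kSparkplug) f.tier = Tier::kSparkplug;

  // Tiers above Sparkplug require arriving here *with* a vector.
  if (!had_feedback_vector) return;
  f.tier = Tier::kTurbofan;  // Stand-in for MaybeOptimizeFrame().
}

int main() {
  Function f;
  OnInterruptTick(f);  // Tick 1: feedback allocation + Sparkplug only.
  std::printf("tier after tick 1: %d\n", static_cast<int>(f.tier));
  OnInterruptTick(f);  // Tick 2: now eligible for Turbofan.
  std::printf("tier after tick 2: %d\n", static_cast<int>(f.tier));
  return 0;
}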
@@ -474,7 +474,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ LoadFunction(kJSFunctionRegister);
   __ Push(kJSFunctionRegister);
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ Pop(kInterpreterAccumulatorRegister, params_size);
   __ masm()->SmiUntag(params_size);
......
@@ -557,7 +557,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ LoadFunction(kJSFunctionRegister);
   __ masm()->PushArgument(kJSFunctionRegister);
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
   __ masm()->SmiUntag(params_size);
......
@@ -586,7 +586,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
   if (weight < 0) {
     SaveAccumulatorScope accumulator_scope(&basm_);
-    CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode,
+    CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck,
                 __ FunctionOperand());
   }
 }
......
@@ -428,7 +428,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ Push(MemOperand(ebp, InterpreterFrameConstants::kFunctionOffset));
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ Pop(kInterpreterAccumulatorRegister, params_size);
   __ masm()->SmiUntag(params_size);
......
@@ -449,7 +449,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ LoadFunction(kJSFunctionRegister);
   __ masm()->Push(kJSFunctionRegister);
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
   __ masm()->SmiUntag(params_size);
......
@@ -461,7 +461,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ LoadFunction(kJSFunctionRegister);
   __ masm()->Push(kJSFunctionRegister);
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
   __ masm()->SmiUntag(params_size);
......
@@ -459,7 +459,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ LoadFunction(kJSFunctionRegister);
   __ masm()->Push(kJSFunctionRegister);
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
   __ masm()->SmiUntag(params_size);
......
@@ -478,7 +478,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ LoadFunction(kJSFunctionRegister);
   __ masm()->Push(kJSFunctionRegister);
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
   __ masm()->SmiUntag(params_size);
......
@@ -568,7 +568,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ LoadFunction(kJSFunctionRegister);
   __ Push(kJSFunctionRegister);
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ Pop(kInterpreterAccumulatorRegister, params_size);
   __ masm()->SmiUntag(params_size);
......
@@ -440,7 +440,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ LoadContext(kContextRegister);
   __ Push(MemOperand(rbp, InterpreterFrameConstants::kFunctionOffset));
-  __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+  __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);
   __ Pop(kInterpreterAccumulatorRegister, params_size);
   __ masm()->SmiUntag(params_size);
......
@@ -5,6 +5,8 @@
 #include "src/execution/tiering-manager.h"
 
 #include "src/base/platform/platform.h"
+#include "src/baseline/baseline-batch-compiler.h"
+#include "src/baseline/baseline.h"
 #include "src/codegen/assembler.h"
 #include "src/codegen/compilation-cache.h"
 #include "src/codegen/compiler.h"
@@ -236,7 +238,7 @@ OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
 TieringManager::OnInterruptTickScope::OnInterruptTickScope(
     TieringManager* profiler)
-    : handle_scope_(profiler->isolate_), profiler_(profiler) {
+    : profiler_(profiler) {
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                "V8.MarkCandidatesForOptimization");
 }
@@ -245,28 +247,72 @@ TieringManager::OnInterruptTickScope::~OnInterruptTickScope() {
   profiler_->any_ic_changed_ = false;
 }
 
-void TieringManager::OnInterruptTick(JavaScriptFrame* frame) {
-  if (!isolate_->use_optimizer()) return;
-  OnInterruptTickScope scope(this);
-
-  JSFunction function = frame->function();
-  CodeKind code_kind = function.GetActiveTier().value();
-
-  DCHECK(function.shared().is_compiled());
-  DCHECK(function.shared().HasBytecodeArray());
-  DCHECK_IMPLIES(CodeKindIsOptimizedJSFunction(code_kind),
-                 function.has_feedback_vector());
-  if (!function.has_feedback_vector()) return;
-
-  function.feedback_vector().SaturatingIncrementProfilerTicks();
-  MaybeOptimizeFrame(function, frame, code_kind);
-}
-
-void TieringManager::OnInterruptTickFromBytecode() {
+void TieringManager::OnInterruptTick(Handle<JSFunction> function) {
+  IsCompiledScope is_compiled_scope(
+      function->shared().is_compiled_scope(isolate_));
+
+  // Remember whether the function had a vector at this point. This is
+  // relevant later since the configuration 'Ignition without a vector' can
+  // be considered a tier on its own. We begin tiering up to tiers higher
+  // than Sparkplug only when reaching this point *with* a feedback vector.
+  const bool had_feedback_vector = function->has_feedback_vector();
+
+  // Ensure that the feedback vector has been allocated, and reset the
+  // interrupt budget in preparation for the next tick.
+  if (had_feedback_vector) {
+    function->SetInterruptBudget();
+  } else {
+    JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+    DCHECK(is_compiled_scope.is_compiled());
+    // Also initialize the invocation count here. This is only really needed
+    // for OSR. When we OSR functions with lazy feedback allocation we want
+    // to have a non zero invocation count so we can inline functions.
+    function->feedback_vector().set_invocation_count(1, kRelaxedStore);
+  }
+
+  DCHECK(function->has_feedback_vector());
+  DCHECK(function->shared().is_compiled());
+  DCHECK(function->shared().HasBytecodeArray());
+
+  // TODO(jgruber): Consider integrating this into a linear tiering system
+  // controlled by OptimizationMarker in which the order is always
+  // Ignition-Sparkplug-Turbofan, and only a single tierup is requested at
+  // once.
+  // It's unclear whether this is possible and/or makes sense - for example,
+  // batching compilation can introduce arbitrary latency between the SP
+  // compile request and fulfillment, which doesn't work with strictly linear
+  // tiering.
+  if (CanCompileWithBaseline(isolate_, function->shared()) &&
+      !function->ActiveTierIsBaseline()) {
+    if (FLAG_baseline_batch_compilation) {
+      isolate_->baseline_batch_compiler()->EnqueueFunction(function);
+    } else {
+      IsCompiledScope is_compiled_scope(
+          function->shared().is_compiled_scope(isolate_));
+      Compiler::CompileBaseline(isolate_, function, Compiler::CLEAR_EXCEPTION,
+                                &is_compiled_scope);
+    }
+  }
+
+  // We only tier up beyond sparkplug if we already had a feedback vector.
+  if (!had_feedback_vector) return;
+
+  // Don't tier up if Turbofan is disabled.
+  // TODO(jgruber): Update this for a multi-tier world.
+  if (V8_UNLIKELY(!isolate_->use_optimizer())) return;
+
+  // --- We've decided to proceed for now. ---
+
+  DisallowGarbageCollection no_gc;
+  OnInterruptTickScope scope(this);
+  JSFunction function_obj = *function;
+
+  function_obj.feedback_vector().SaturatingIncrementProfilerTicks();
+
   JavaScriptFrameIterator it(isolate_);
   DCHECK(it.frame()->is_unoptimized());
-  OnInterruptTick(it.frame());
+  const CodeKind code_kind = function_obj.GetActiveTier().value();
+  MaybeOptimizeFrame(function_obj, it.frame(), code_kind);
 }
 
 }  // namespace internal
......
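The removal of the SealHandleScope / HandleScope dance is visible above: the new OnInterruptTick takes a Handle<JSFunction>, performs all potentially allocating work first (feedback vector allocation, baseline compile requests), and only then opens a DisallowGarbageCollection scope and operates on the raw JSFunction. A minimal toy illustration of that two-phase pattern (plain C++ with illustrative names, not the V8 API):

// Toy illustration of "allocate with handles first, then pin and use raw
// references"; the Heap/NoGcScope/Object types here are stand-ins.
#include <cassert>
#include <cstdio>

struct Heap {
  bool gc_allowed = true;
};

struct NoGcScope {  // Stand-in for v8::DisallowGarbageCollection.
  explicit NoGcScope(Heap& heap) : heap_(heap) { heap_.gc_allowed = false; }
  ~NoGcScope() { heap_.gc_allowed = true; }
  Heap& heap_;
};

struct Object {
  int profiler_ticks = 0;
};

void OnTick(Heap& heap, Object* obj) {
  // Phase 1: may allocate, so GC must remain legal here.
  assert(heap.gc_allowed);
  // ... allocate feedback vector, enqueue baseline compile, etc. ...

  // Phase 2: pure decision-making on the raw object, with GC ruled out.
  NoGcScope no_gc(heap);
  obj->profiler_ticks++;  // Safe: the object cannot move while pinned.
}

int main() {
  Heap heap;
  Object fn;
  OnTick(heap, &fn);
  std::printf("ticks=%d\n", fn.profiler_ticks);
  return 0;
}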
@@ -24,7 +24,7 @@ class TieringManager {
  public:
   explicit TieringManager(Isolate* isolate) : isolate_(isolate) {}
 
-  void OnInterruptTickFromBytecode();
+  void OnInterruptTick(Handle<JSFunction> function);
 
   void NotifyICChanged() { any_ic_changed_ = true; }
 
@@ -32,9 +32,6 @@ class TieringManager {
                            int nesting_levels = 1);
 
  private:
-  // Helper function called from OnInterruptTick*
-  void OnInterruptTick(JavaScriptFrame* frame);
-
   // Make the decision whether to optimize the given function, and mark it for
   // optimization if the decision was 'yes'.
   void MaybeOptimizeFrame(JSFunction function, JavaScriptFrame* frame,
@@ -56,7 +53,6 @@ class TieringManager {
   ~OnInterruptTickScope();
 
  private:
-  HandleScope handle_scope_;
   TieringManager* const profiler_;
   DisallowGarbageCollection no_gc;
 };
......
@@ -1028,11 +1028,10 @@ void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
     BIND(&interrupt_check);
     // JumpLoop should do a stack check as part of the interrupt.
-    CallRuntime(
-        bytecode() == Bytecode::kJumpLoop
-            ? Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode
-            : Runtime::kBytecodeBudgetInterruptFromBytecode,
-        GetContext(), function);
+    CallRuntime(bytecode() == Bytecode::kJumpLoop
+                    ? Runtime::kBytecodeBudgetInterruptWithStackCheck
+                    : Runtime::kBytecodeBudgetInterrupt,
+                GetContext(), function);
     Goto(&done);
 
     BIND(&ok);
......
@@ -7,10 +7,7 @@
 #include "src/api/api.h"
 #include "src/ast/ast-traversal-visitor.h"
 #include "src/ast/prettyprinter.h"
-#include "src/baseline/baseline-batch-compiler.h"
-#include "src/baseline/baseline.h"
 #include "src/builtins/builtins.h"
-#include "src/codegen/compiler.h"
 #include "src/common/message-template.h"
 #include "src/debug/debug.h"
 #include "src/execution/arguments-inl.h"
@@ -342,42 +339,7 @@ RUNTIME_FUNCTION(Runtime_StackGuardWithGap) {
   return isolate->stack_guard()->HandleInterrupts();
 }
 
-namespace {
-
-void BytecodeBudgetInterruptFromBytecode(Isolate* isolate,
-                                         Handle<JSFunction> function) {
-  bool should_mark_for_optimization = function->has_feedback_vector();
-  if (!function->has_feedback_vector()) {
-    IsCompiledScope is_compiled_scope(
-        function->shared().is_compiled_scope(isolate));
-    JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
-    DCHECK(is_compiled_scope.is_compiled());
-    // Also initialize the invocation count here. This is only really needed
-    // for OSR. When we OSR functions with lazy feedback allocation we want
-    // to have a non zero invocation count so we can inline functions.
-    function->feedback_vector().set_invocation_count(1, kRelaxedStore);
-  } else {
-    function->SetInterruptBudget();
-  }
-
-  if (CanCompileWithBaseline(isolate, function->shared()) &&
-      !function->ActiveTierIsBaseline()) {
-    if (FLAG_baseline_batch_compilation) {
-      isolate->baseline_batch_compiler()->EnqueueFunction(function);
-    } else {
-      IsCompiledScope is_compiled_scope(
-          function->shared().is_compiled_scope(isolate));
-      Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
-                                &is_compiled_scope);
-    }
-  }
-
-  if (should_mark_for_optimization) {
-    SealHandleScope shs(isolate);
-    isolate->tiering_manager()->OnInterruptTickFromBytecode();
-  }
-}
-
-}  // namespace
-
-RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheckFromBytecode) {
+RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheck) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -399,17 +361,17 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheckFromBytecode) {
     }
   }
 
-  BytecodeBudgetInterruptFromBytecode(isolate, function);
+  isolate->tiering_manager()->OnInterruptTick(function);
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
-RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
+RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   TRACE_EVENT0("v8.execute", "V8.BytecodeBudgetInterrupt");
 
-  BytecodeBudgetInterruptFromBytecode(isolate, function);
+  isolate->tiering_manager()->OnInterruptTick(function);
   return ReadOnlyRoots(isolate).undefined_value();
 }
......
@@ -206,60 +206,60 @@ namespace internal {
 #define FOR_EACH_INTRINSIC_INTL(F, I)
 #endif  // V8_INTL_SUPPORT
 
 #define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
   F(AccessCheck, 1, 1) \
   F(AllocateByteArray, 1, 1) \
   F(AllocateInYoungGeneration, 2, 1) \
   F(AllocateInOldGeneration, 2, 1) \
   F(AllocateSeqOneByteString, 1, 1) \
   F(AllocateSeqTwoByteString, 1, 1) \
   F(AllowDynamicFunction, 1, 1) \
   I(CreateAsyncFromSyncIterator, 1, 1) \
   F(CreateListFromArrayLike, 1, 1) \
   F(DoubleToStringWithRadix, 2, 1) \
   F(FatalProcessOutOfMemoryInAllocateRaw, 0, 1) \
   F(FatalProcessOutOfMemoryInvalidArrayLength, 0, 1) \
   F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
   F(GetTemplateObject, 3, 1) \
   F(IncrementUseCounter, 1, 1) \
-  F(BytecodeBudgetInterruptFromBytecode, 1, 1) \
-  F(BytecodeBudgetInterruptWithStackCheckFromBytecode, 1, 1) \
+  F(BytecodeBudgetInterrupt, 1, 1) \
+  F(BytecodeBudgetInterruptWithStackCheck, 1, 1) \
   F(NewError, 2, 1) \
   F(NewReferenceError, 2, 1) \
   F(NewSyntaxError, 2, 1) \
   F(NewTypeError, -1 /* [1, 4] */, 1) \
   F(OrdinaryHasInstance, 2, 1) \
   F(PromoteScheduledException, 0, 1) \
   F(ReportMessageFromMicrotask, 1, 1) \
   F(ReThrow, 1, 1) \
   F(ReThrowWithMessage, 2, 1) \
   F(RunMicrotaskCallback, 2, 1) \
   F(PerformMicrotaskCheckpoint, 0, 1) \
   F(SharedValueBarrierSlow, 1, 1) \
   F(StackGuard, 0, 1) \
   F(StackGuardWithGap, 1, 1) \
   F(Throw, 1, 1) \
   F(ThrowApplyNonFunction, 1, 1) \
   F(ThrowCalledNonCallable, 1, 1) \
   F(ThrowConstructedNonConstructable, 1, 1) \
   F(ThrowConstructorReturnedNonObject, 0, 1) \
   F(ThrowInvalidStringLength, 0, 1) \
   F(ThrowInvalidTypedArrayAlignment, 2, 1) \
   F(ThrowIteratorError, 1, 1) \
   F(ThrowSpreadArgError, 2, 1) \
   F(ThrowIteratorResultNotAnObject, 1, 1) \
   F(ThrowNotConstructor, 1, 1) \
   F(ThrowPatternAssignmentNonCoercible, 1, 1) \
   F(ThrowRangeError, -1 /* >= 1 */, 1) \
   F(ThrowReferenceError, 1, 1) \
   F(ThrowAccessedUninitializedVariable, 1, 1) \
   F(ThrowStackOverflow, 0, 1) \
   F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
   F(ThrowSymbolIteratorInvalid, 0, 1) \
   F(ThrowThrowMethodMissing, 0, 1) \
   F(ThrowTypeError, -1 /* >= 1 */, 1) \
   F(ThrowTypeErrorIfStrict, -1 /* >= 1 */, 1) \
   F(Typeof, 1, 1) \
   F(UnwindAndFindExceptionHandler, 0, 1)
 
 #define FOR_EACH_INTRINSIC_LITERALS(F, I) \
......
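For readers unfamiliar with the runtime.h list above: each entry has the shape F(Name, number of arguments (-1 for a variable count), number of return values), and the F/I macros are expanded into the Runtime::k<Name> enumerators and function tables, which is why the rename only touches the identifiers and not the 1-arg/1-result shape. A minimal, self-contained sketch of that expansion pattern (illustrative names only; the real machinery lives in src/runtime/runtime.h):

// Simplified sketch of how an intrinsic list macro yields enumerators.
#define FOR_EACH_DEMO_INTRINSIC(F)              \
  F(BytecodeBudgetInterrupt, 1, 1)              \
  F(BytecodeBudgetInterruptWithStackCheck, 1, 1)

enum class DemoRuntimeId {
#define DECLARE_ID(Name, nargs, nresults) k##Name,
  FOR_EACH_DEMO_INTRINSIC(DECLARE_ID)
#undef DECLARE_ID
  kNumIds
};

static_assert(static_cast<int>(DemoRuntimeId::kNumIds) == 2,
              "two demo intrinsics declared");

int main() { return 0; }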