Commit 732f394c authored by Leszek Swirski, committed by V8 LUCI CQ

[sparkplug] Clobber accumulator in StaGlobal

StaGlobal didn't write the accumulator, but the baseline implementation
assumed that it could preserve the accumulator by taking the return
value of the StoreGlobalIC. This almost always worked, except for
setters on the global object.

Fix this by marking StaGlobal as clobbering the accumulator, same as
StaNamedProperty (StaNamedProperty needs to do this anyway to avoid
inlined setters from needing to create accumulator-preserving frames;
StaGlobal would have needed the same thing if we'd ever inlined setters
for it).

Also, add a new debug scope, EnsureAccumulatorPreservedScope, to the
baseline compiler, which checks if the accumulator value is preserved
across non-accumulator-writing bytecodes. This found a (benign) bug with
ForInPrepare, so fix that too.

Fixed: chromium:1242306
Change-Id: I220b5b1c41010c16ac9f944cbd55d2705c299434
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3122325
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76525}
parent a91a6e1a
...@@ -508,6 +508,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { ...@@ -508,6 +508,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __ #undef __
// Debug helper: aborts with kUnexpectedValue unless |reg| holds the same
// value as the interpreter accumulator register.
inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  MacroAssembler* masm = assembler_->masm();
  masm->cmp(reg, kInterpreterAccumulatorRegister);
  masm->Assert(eq, AbortReason::kUnexpectedValue);
}
} // namespace baseline } // namespace baseline
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -589,6 +589,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { ...@@ -589,6 +589,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __ #undef __
// Debug helper: aborts with kUnexpectedValue unless |reg| holds the same
// tagged value as the interpreter accumulator register. Uses CmpTagged so
// the comparison is on the (possibly compressed) tagged representation.
inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  MacroAssembler* masm = assembler_->masm();
  masm->CmpTagged(reg, kInterpreterAccumulatorRegister);
  masm->Assert(eq, AbortReason::kUnexpectedValue);
}
} // namespace baseline } // namespace baseline
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -137,6 +137,24 @@ SaveAccumulatorScope::~SaveAccumulatorScope() { ...@@ -137,6 +137,24 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
assembler_->Pop(kInterpreterAccumulatorRegister); assembler_->Pop(kInterpreterAccumulatorRegister);
} }
// Debug-only scope that checks the accumulator register is preserved across
// its lifetime: the constructor pushes the current accumulator value and the
// destructor pops and compares it (see ~EnsureAccumulatorPreservedScope).
EnsureAccumulatorPreservedScope::EnsureAccumulatorPreservedScope(
    BaselineAssembler* assembler)
    : assembler_(assembler)
#ifdef V8_CODE_COMMENTS
      ,
      // Emits a code comment so the scope is visible in disassembly.
      comment_(assembler->masm(), "EnsureAccumulatorPreservedScope")
#endif
{
  // Save the accumulator on the stack; matched by the Pop in the destructor.
  assembler_->Push(kInterpreterAccumulatorRegister);
}
// Pops the accumulator value saved by the constructor into a scratch
// register and asserts it still matches the live accumulator.
EnsureAccumulatorPreservedScope::~EnsureAccumulatorPreservedScope() {
  BaselineAssembler::ScratchRegisterScope scratch_scope(assembler_);
  Register saved_accumulator = scratch_scope.AcquireScratch();
  assembler_->Pop(saved_accumulator);
  AssertEqualToAccumulator(saved_accumulator);
}
#undef __ #undef __
} // namespace baseline } // namespace baseline
......
...@@ -202,6 +202,21 @@ class SaveAccumulatorScope final { ...@@ -202,6 +202,21 @@ class SaveAccumulatorScope final {
BaselineAssembler* assembler_; BaselineAssembler* assembler_;
}; };
// Debug scope that verifies the interpreter accumulator register holds the
// same value at scope exit as at scope entry. The constructor saves the
// accumulator and the destructor compares the saved value against the live
// accumulator, aborting on mismatch (platform-specific check in
// AssertEqualToAccumulator).
class EnsureAccumulatorPreservedScope final {
 public:
  inline explicit EnsureAccumulatorPreservedScope(BaselineAssembler* assembler);
  inline ~EnsureAccumulatorPreservedScope();

 private:
  // Platform-specific: asserts |reg| equals the accumulator register.
  inline void AssertEqualToAccumulator(Register reg);

  BaselineAssembler* assembler_;
#ifdef V8_CODE_COMMENTS
  // Marks the scope in disassembly when code comments are enabled.
  Assembler::CodeComment comment_;
#endif
};
} // namespace baseline } // namespace baseline
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -490,13 +490,31 @@ void BaselineCompiler::VisitSingleBytecode() { ...@@ -490,13 +490,31 @@ void BaselineCompiler::VisitSingleBytecode() {
TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry); TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif #endif
switch (iterator().current_bytecode()) { {
interpreter::Bytecode bytecode = iterator().current_bytecode();
#ifdef DEBUG
base::Optional<EnsureAccumulatorPreservedScope> accumulator_preserved_scope;
// We should make sure to preserve the accumulator whenever the bytecode
// isn't registered as writing to it. We can't do this for jumps or switches
// though, since the control flow would not match the control flow of this
// scope.
if (FLAG_debug_code &&
!interpreter::Bytecodes::WritesAccumulator(bytecode) &&
!interpreter::Bytecodes::IsJump(bytecode) &&
!interpreter::Bytecodes::IsSwitch(bytecode)) {
accumulator_preserved_scope.emplace(&basm_);
}
#endif // DEBUG
switch (bytecode) {
#define BYTECODE_CASE(name, ...) \ #define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \ case interpreter::Bytecode::k##name: \
Visit##name(); \ Visit##name(); \
break; break;
BYTECODE_LIST(BYTECODE_CASE) BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE #undef BYTECODE_CASE
}
} }
#ifdef V8_TRACE_UNOPTIMIZED #ifdef V8_TRACE_UNOPTIMIZED
......
...@@ -465,6 +465,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { ...@@ -465,6 +465,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __ #undef __
// Debug helper: aborts with kUnexpectedValue unless |reg| holds the same
// value as the interpreter accumulator register.
inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  MacroAssembler* masm = assembler_->masm();
  masm->cmp(reg, kInterpreterAccumulatorRegister);
  masm->Assert(equal, AbortReason::kUnexpectedValue);
}
} // namespace baseline } // namespace baseline
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -476,6 +476,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { ...@@ -476,6 +476,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __ #undef __
// Debug helper: aborts with kUnexpectedValue unless |reg| holds the same
// tagged value as the interpreter accumulator register. cmp_tagged compares
// the (possibly compressed) tagged representation.
inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  MacroAssembler* masm = assembler_->masm();
  masm->cmp_tagged(reg, kInterpreterAccumulatorRegister);
  masm->Assert(equal, AbortReason::kUnexpectedValue);
}
} // namespace baseline } // namespace baseline
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -3648,8 +3648,7 @@ void BytecodeGenerator::BuildVariableAssignment( ...@@ -3648,8 +3648,7 @@ void BytecodeGenerator::BuildVariableAssignment(
break; break;
} }
case VariableLocation::UNALLOCATED: { case VariableLocation::UNALLOCATED: {
FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable); BuildStoreGlobal(variable);
builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
break; break;
} }
case VariableLocation::CONTEXT: { case VariableLocation::CONTEXT: {
...@@ -3738,9 +3737,7 @@ void BytecodeGenerator::BuildVariableAssignment( ...@@ -3738,9 +3737,7 @@ void BytecodeGenerator::BuildVariableAssignment(
if (mode == VariableMode::kConst) { if (mode == VariableMode::kConst) {
builder()->CallRuntime(Runtime::kThrowConstAssignError); builder()->CallRuntime(Runtime::kThrowConstAssignError);
} else { } else {
FeedbackSlot slot = BuildStoreGlobal(variable);
GetCachedStoreGlobalICSlot(language_mode(), variable);
builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
} }
} }
break; break;
...@@ -3773,6 +3770,21 @@ void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr, ...@@ -3773,6 +3770,21 @@ void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
} }
} }
// Emits a StaGlobal for |variable|. StaGlobal clobbers the accumulator, so
// when the surrounding expression needs the stored value afterwards (i.e. the
// result is not consumed purely for effect), stash the accumulator in a fresh
// register before the store and reload it afterwards.
void BytecodeGenerator::BuildStoreGlobal(Variable* variable) {
  const bool preserve_value = !execution_result()->IsEffect();
  Register saved_value;
  if (preserve_value) {
    saved_value = register_allocator()->NewRegister();
    builder()->StoreAccumulatorInRegister(saved_value);
  }

  FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
  builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));

  if (preserve_value) {
    builder()->LoadAccumulatorWithRegister(saved_value);
  }
}
// static // static
BytecodeGenerator::AssignmentLhsData BytecodeGenerator::AssignmentLhsData
BytecodeGenerator::AssignmentLhsData::NonProperty(Expression* expr) { BytecodeGenerator::AssignmentLhsData::NonProperty(Expression* expr) {
......
...@@ -241,6 +241,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { ...@@ -241,6 +241,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
const AstRawString* name); const AstRawString* name);
void BuildStoreNamedProperty(const Expression* object_expr, Register object, void BuildStoreNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name); const AstRawString* name);
void BuildStoreGlobal(Variable* variable);
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode, void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = TypeofMode::kNotInside); TypeofMode typeof_mode = TypeofMode::kNotInside);
......
...@@ -106,7 +106,7 @@ namespace interpreter { ...@@ -106,7 +106,7 @@ namespace interpreter {
OperandType::kIdx) \ OperandType::kIdx) \
V(LdaGlobalInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \ V(LdaGlobalInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kIdx, OperandType::kIdx) \ OperandType::kIdx, OperandType::kIdx) \
V(StaGlobal, ImplicitRegisterUse::kReadAccumulator, OperandType::kIdx, \ V(StaGlobal, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx, \
OperandType::kIdx) \ OperandType::kIdx) \
\ \
/* Context operations */ \ /* Context operations */ \
...@@ -393,7 +393,7 @@ namespace interpreter { ...@@ -393,7 +393,7 @@ namespace interpreter {
\ \
/* Complex flow control For..in */ \ /* Complex flow control For..in */ \
V(ForInEnumerate, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \ V(ForInEnumerate, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
V(ForInPrepare, ImplicitRegisterUse::kReadAccumulator, \ V(ForInPrepare, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kRegOutTriple, OperandType::kIdx) \ OperandType::kRegOutTriple, OperandType::kIdx) \
V(ForInContinue, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \ V(ForInContinue, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg) \ OperandType::kReg) \
......
...@@ -236,8 +236,14 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) { ...@@ -236,8 +236,14 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1); TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> maybe_vector = LoadFeedbackVector(); TNode<HeapObject> maybe_vector = LoadFeedbackVector();
CallBuiltin(Builtin::kStoreGlobalIC, context, name, value, slot, TNode<Object> result = CallBuiltin(Builtin::kStoreGlobalIC, context, name,
maybe_vector); value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
SetAccumulator(result);
Dispatch(); Dispatch();
} }
...@@ -598,14 +604,14 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler { ...@@ -598,14 +604,14 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_vector = LoadFeedbackVector(); TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext(); TNode<Context> context = GetContext();
TVARIABLE(Object, var_result); TNode<Object> result =
var_result = CallStub(ic, context, object, name, value, slot, maybe_vector); CallStub(ic, context, object, name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in // To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It // the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we // doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we // restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub. // don't need to keep unnecessary state alive across the callstub.
SetAccumulator(var_result.value()); SetAccumulator(result);
Dispatch(); Dispatch();
} }
}; };
...@@ -642,15 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) { ...@@ -642,15 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<HeapObject> maybe_vector = LoadFeedbackVector(); TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext(); TNode<Context> context = GetContext();
TVARIABLE(Object, var_result); TNode<Object> result = CallBuiltin(Builtin::kKeyedStoreIC, context, object,
var_result = CallBuiltin(Builtin::kKeyedStoreIC, context, object, name, value, name, value, slot, maybe_vector);
slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in // To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It // the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we // doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we // restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub. // don't need to keep unnecessary state alive across the callstub.
SetAccumulator(var_result.value()); SetAccumulator(result);
Dispatch(); Dispatch();
} }
...@@ -666,15 +671,15 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) { ...@@ -666,15 +671,15 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector(); TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext(); TNode<Context> context = GetContext();
TVARIABLE(Object, var_result); TNode<Object> result =
var_result = CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array, CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array, index, value,
index, value, slot, feedback_vector); slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in // To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It // the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we // doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we // restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub. // don't need to keep unnecessary state alive across the callstub.
SetAccumulator(var_result.value()); SetAccumulator(result);
Dispatch(); Dispatch();
} }
...@@ -2834,6 +2839,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) { ...@@ -2834,6 +2839,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array, ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
&cache_length, UpdateFeedbackMode::kOptionalFeedback); &cache_length, UpdateFeedbackMode::kOptionalFeedback);
// The accumulator is clobbered soon after ForInPrepare, so avoid keeping it
// alive too long and instead set it to cache_array to match the first return
// value of Builtin::kForInPrepare.
SetAccumulator(cache_array);
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0); StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch(); Dispatch();
} }
......
...@@ -58,7 +58,7 @@ snippet: " ...@@ -58,7 +58,7 @@ snippet: "
" "
frame size: 3 frame size: 3
parameter count: 1 parameter count: 1
bytecode array length: 23 bytecode array length: 28
bytecodes: [ bytecodes: [
B(LdaConstant), U8(0), B(LdaConstant), U8(0),
B(Star1), B(Star1),
...@@ -67,8 +67,10 @@ bytecodes: [ ...@@ -67,8 +67,10 @@ bytecodes: [
/* 8 S> */ B(LdaSmi), I8(1), /* 8 S> */ B(LdaSmi), I8(1),
/* 8 E> */ B(StaGlobal), U8(1), U8(0), /* 8 E> */ B(StaGlobal), U8(1), U8(0),
/* 11 S> */ B(LdaSmi), I8(2), /* 11 S> */ B(LdaSmi), I8(2),
B(Star1),
/* 12 E> */ B(StaGlobal), U8(1), U8(0), /* 12 E> */ B(StaGlobal), U8(1), U8(0),
B(Star0), B(Mov), R(1), R(0),
B(Ldar), R(0),
/* 16 S> */ B(Return), /* 16 S> */ B(Return),
] ]
constant pool: [ constant pool: [
......
...@@ -12,13 +12,15 @@ snippet: " ...@@ -12,13 +12,15 @@ snippet: "
function f() { return global &= 1; } function f() { return global &= 1; }
f(); f();
" "
frame size: 0 frame size: 1
parameter count: 1 parameter count: 1
bytecode array length: 10 bytecode array length: 13
bytecodes: [ bytecodes: [
/* 31 S> */ B(LdaGlobal), U8(0), U8(0), /* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(BitwiseAndSmi), I8(1), U8(2), B(BitwiseAndSmi), I8(1), U8(2),
B(Star0),
/* 45 E> */ B(StaGlobal), U8(0), U8(3), /* 45 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
/* 50 S> */ B(Return), /* 50 S> */ B(Return),
] ]
constant pool: [ constant pool: [
...@@ -33,13 +35,15 @@ snippet: " ...@@ -33,13 +35,15 @@ snippet: "
function f() { return unallocated += 1; } function f() { return unallocated += 1; }
f(); f();
" "
frame size: 0 frame size: 1
parameter count: 1 parameter count: 1
bytecode array length: 10 bytecode array length: 13
bytecodes: [ bytecodes: [
/* 32 S> */ B(LdaGlobal), U8(0), U8(0), /* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(AddSmi), I8(1), U8(2), B(AddSmi), I8(1), U8(2),
B(Star0),
/* 51 E> */ B(StaGlobal), U8(0), U8(3), /* 51 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
/* 56 S> */ B(Return), /* 56 S> */ B(Return),
] ]
constant pool: [ constant pool: [
......
...@@ -12,13 +12,15 @@ snippet: " ...@@ -12,13 +12,15 @@ snippet: "
function f() { return ++global; } function f() { return ++global; }
f(); f();
" "
frame size: 0 frame size: 1
parameter count: 1 parameter count: 1
bytecode array length: 9 bytecode array length: 12
bytecodes: [ bytecodes: [
/* 31 S> */ B(LdaGlobal), U8(0), U8(0), /* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(Inc), U8(2), B(Inc), U8(2),
B(Star0),
/* 40 E> */ B(StaGlobal), U8(0), U8(3), /* 40 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
/* 47 S> */ B(Return), /* 47 S> */ B(Return),
] ]
constant pool: [ constant pool: [
...@@ -33,14 +35,15 @@ snippet: " ...@@ -33,14 +35,15 @@ snippet: "
function f() { return global--; } function f() { return global--; }
f(); f();
" "
frame size: 1 frame size: 2
parameter count: 1 parameter count: 1
bytecode array length: 14 bytecode array length: 15
bytecodes: [ bytecodes: [
/* 31 S> */ B(LdaGlobal), U8(0), U8(0), /* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(ToNumeric), U8(2), B(ToNumeric), U8(2),
B(Star0), B(Star0),
B(Dec), U8(2), B(Dec), U8(2),
B(Star1),
/* 44 E> */ B(StaGlobal), U8(0), U8(3), /* 44 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0), B(Ldar), R(0),
/* 47 S> */ B(Return), /* 47 S> */ B(Return),
...@@ -57,13 +60,15 @@ snippet: " ...@@ -57,13 +60,15 @@ snippet: "
function f() { 'use strict'; return --unallocated; } function f() { 'use strict'; return --unallocated; }
f(); f();
" "
frame size: 0 frame size: 1
parameter count: 1 parameter count: 1
bytecode array length: 9 bytecode array length: 12
bytecodes: [ bytecodes: [
/* 46 S> */ B(LdaGlobal), U8(0), U8(0), /* 46 S> */ B(LdaGlobal), U8(0), U8(0),
B(Dec), U8(2), B(Dec), U8(2),
B(Star0),
/* 55 E> */ B(StaGlobal), U8(0), U8(3), /* 55 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
/* 67 S> */ B(Return), /* 67 S> */ B(Return),
] ]
constant pool: [ constant pool: [
...@@ -78,14 +83,15 @@ snippet: " ...@@ -78,14 +83,15 @@ snippet: "
function f() { return unallocated++; } function f() { return unallocated++; }
f(); f();
" "
frame size: 1 frame size: 2
parameter count: 1 parameter count: 1
bytecode array length: 14 bytecode array length: 15
bytecodes: [ bytecodes: [
/* 32 S> */ B(LdaGlobal), U8(0), U8(0), /* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(ToNumeric), U8(2), B(ToNumeric), U8(2),
B(Star0), B(Star0),
B(Inc), U8(2), B(Inc), U8(2),
B(Star1),
/* 50 E> */ B(StaGlobal), U8(0), U8(3), /* 50 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0), B(Ldar), R(0),
/* 53 S> */ B(Return), /* 53 S> */ B(Return),
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --sparkplug

// Regression test for chromium:1242306: StaGlobal must not assume the IC call
// preserves the accumulator when the store hits a setter on the global object.
// The assignment expression must still evaluate to the assigned value (5).
function foo(){
  // __proto__ is a setter that is defined to return undefined.
  return __proto__ = 5;
}
// Run in the interpreter first.
assertEquals(foo(), 5);
assertEquals(foo(), 5);
// Set up feedback so baseline compilation has a feedback vector to use.
%EnsureFeedbackVectorForFunction(foo);
assertEquals(foo(), 5);
assertEquals(foo(), 5);
// Compile with Sparkplug and re-check: the baseline code must reload the
// assigned value rather than reuse the IC's return value.
%CompileBaseline(foo);
assertEquals(foo(), 5);
assertEquals(foo(), 5);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment