Commit 732f394c authored by Leszek Swirski, committed by V8 LUCI CQ

[sparkplug] Clobber accumulator in StaGlobal

StaGlobal didn't write the accumulator, but the baseline implementation
assumed that it could preserve the accumulator by taking the return
value of the StoreGlobalIC. This almost always worked, except for
setters on the global object.
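
For example, with a setter installed on the global object, the IC's return
value need not be the value being stored (a minimal sketch; the `g`/`g_`
names are made up for illustration, and the regression test below uses the
global __proto__ setter instead):

  // Stores to `g` dispatch to a setter, whose own result is undefined.
  Object.defineProperty(globalThis, 'g', {
    set(v) { this.g_ = v; }
  });

  function f() {
    // By spec this assignment expression evaluates to 5. Baseline code that
    // "restored" the accumulator from the StoreGlobalIC return value could
    // observe the setter's result instead of 5.
    return g = 5;
  }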

Fix this by marking StaGlobal as clobbering the accumulator, same as
StaNamedProperty (StaNamedProperty needs to do this anyway to avoid
inlined setters from needing to create accumulator-preserving frames;
StaGlobal would have needed the same thing if we'd ever inlined setters
for it).

Also, add a new debug scope, EnsureAccumulatorPreservedScope, to the
baseline compiler, which checks if the accumulator value is preserved
across non-accumulator-writing bytecodes. This found a (benign) bug with
ForInPrepare, so fix that too.
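
As a sketch of what the scope catches: any for-in loop emits ForInPrepare, so
code like the following (reusing the natives-syntax helpers the regression
test below also relies on; run with something like --allow-natives-syntax
--sparkplug --debug-code) exercised the check:

  function g(o) {
    // A plain for-in loop emits ForInPrepare; it was registered as not
    // writing the accumulator even though its handler didn't preserve it,
    // which is the benign mismatch the new debug scope flagged.
    for (var k in o) { }
  }
  %EnsureFeedbackVectorForFunction(g);
  g({a: 1, b: 2});
  %CompileBaseline(g);
  g({a: 1, b: 2});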

Fixed: chromium:1242306
Change-Id: I220b5b1c41010c16ac9f944cbd55d2705c299434
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3122325
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76525}
parent a91a6e1a
@@ -508,6 +508,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 #undef __
 
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+    Register reg) {
+  assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
+  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+}
+
 }  // namespace baseline
 }  // namespace internal
 }  // namespace v8

@@ -589,6 +589,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 #undef __
 
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+    Register reg) {
+  assembler_->masm()->CmpTagged(reg, kInterpreterAccumulatorRegister);
+  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+}
+
 }  // namespace baseline
 }  // namespace internal
 }  // namespace v8
@@ -137,6 +137,24 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
   assembler_->Pop(kInterpreterAccumulatorRegister);
 }
+
+EnsureAccumulatorPreservedScope::EnsureAccumulatorPreservedScope(
+    BaselineAssembler* assembler)
+    : assembler_(assembler)
+#ifdef V8_CODE_COMMENTS
+      ,
+      comment_(assembler->masm(), "EnsureAccumulatorPreservedScope")
+#endif
+{
+  assembler_->Push(kInterpreterAccumulatorRegister);
+}
+
+EnsureAccumulatorPreservedScope::~EnsureAccumulatorPreservedScope() {
+  BaselineAssembler::ScratchRegisterScope scratch(assembler_);
+  Register reg = scratch.AcquireScratch();
+  assembler_->Pop(reg);
+  AssertEqualToAccumulator(reg);
+}
 
 #undef __
 }  // namespace baseline
@@ -202,6 +202,21 @@ class SaveAccumulatorScope final {
   BaselineAssembler* assembler_;
 };
+
+class EnsureAccumulatorPreservedScope final {
+ public:
+  inline explicit EnsureAccumulatorPreservedScope(BaselineAssembler* assembler);
+  inline ~EnsureAccumulatorPreservedScope();
+
+ private:
+  inline void AssertEqualToAccumulator(Register reg);
+
+  BaselineAssembler* assembler_;
+#ifdef V8_CODE_COMMENTS
+  Assembler::CodeComment comment_;
+#endif
+};
+
 }  // namespace baseline
 }  // namespace internal
 }  // namespace v8
@@ -490,13 +490,31 @@ void BaselineCompiler::VisitSingleBytecode() {
   TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
 #endif
-  switch (iterator().current_bytecode()) {
+  {
+    interpreter::Bytecode bytecode = iterator().current_bytecode();
+
+#ifdef DEBUG
+    base::Optional<EnsureAccumulatorPreservedScope> accumulator_preserved_scope;
+    // We should make sure to preserve the accumulator whenever the bytecode
+    // isn't registered as writing to it. We can't do this for jumps or switches
+    // though, since the control flow would not match the control flow of this
+    // scope.
+    if (FLAG_debug_code &&
+        !interpreter::Bytecodes::WritesAccumulator(bytecode) &&
+        !interpreter::Bytecodes::IsJump(bytecode) &&
+        !interpreter::Bytecodes::IsSwitch(bytecode)) {
+      accumulator_preserved_scope.emplace(&basm_);
+    }
+#endif  // DEBUG
+
+    switch (bytecode) {
 #define BYTECODE_CASE(name, ...)       \
   case interpreter::Bytecode::k##name: \
     Visit##name();                     \
     break;
-    BYTECODE_LIST(BYTECODE_CASE)
+      BYTECODE_LIST(BYTECODE_CASE)
 #undef BYTECODE_CASE
+    }
   }
 #ifdef V8_TRACE_UNOPTIMIZED
@@ -465,6 +465,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 #undef __
 
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+    Register reg) {
+  assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
+  assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+}
+
 }  // namespace baseline
 }  // namespace internal
 }  // namespace v8

@@ -476,6 +476,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 #undef __
 
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+    Register reg) {
+  assembler_->masm()->cmp_tagged(reg, kInterpreterAccumulatorRegister);
+  assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+}
+
 }  // namespace baseline
 }  // namespace internal
 }  // namespace v8
@@ -3648,8 +3648,7 @@ void BytecodeGenerator::BuildVariableAssignment(
       break;
     }
     case VariableLocation::UNALLOCATED: {
-      FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
-      builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+      BuildStoreGlobal(variable);
       break;
     }
     case VariableLocation::CONTEXT: {
@@ -3738,9 +3737,7 @@
       if (mode == VariableMode::kConst) {
         builder()->CallRuntime(Runtime::kThrowConstAssignError);
       } else {
-        FeedbackSlot slot =
-            GetCachedStoreGlobalICSlot(language_mode(), variable);
-        builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+        BuildStoreGlobal(variable);
       }
     }
     break;
@@ -3773,6 +3770,21 @@ void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
   }
 }
+
+void BytecodeGenerator::BuildStoreGlobal(Variable* variable) {
+  Register value;
+  if (!execution_result()->IsEffect()) {
+    value = register_allocator()->NewRegister();
+    builder()->StoreAccumulatorInRegister(value);
+  }
+
+  FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
+  builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+
+  if (!execution_result()->IsEffect()) {
+    builder()->LoadAccumulatorWithRegister(value);
+  }
+}
+
 // static
 BytecodeGenerator::AssignmentLhsData
 BytecodeGenerator::AssignmentLhsData::NonProperty(Expression* expr) {
@@ -241,6 +241,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
                                 const AstRawString* name);
   void BuildStoreNamedProperty(const Expression* object_expr, Register object,
                                const AstRawString* name);
+  void BuildStoreGlobal(Variable* variable);
   void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
                          TypeofMode typeof_mode = TypeofMode::kNotInside);
@@ -106,7 +106,7 @@ namespace interpreter {
     OperandType::kIdx)                                                        \
   V(LdaGlobalInsideTypeof, ImplicitRegisterUse::kWriteAccumulator,            \
     OperandType::kIdx, OperandType::kIdx)                                     \
-  V(StaGlobal, ImplicitRegisterUse::kReadAccumulator, OperandType::kIdx,      \
+  V(StaGlobal, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx, \
     OperandType::kIdx)                                                        \
                                                                               \
   /* Context operations */                                                    \

@@ -393,7 +393,7 @@
                                                                               \
   /* Complex flow control For..in */                                          \
   V(ForInEnumerate, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg)\
-  V(ForInPrepare, ImplicitRegisterUse::kReadAccumulator,                      \
+  V(ForInPrepare, ImplicitRegisterUse::kReadWriteAccumulator,                 \
    OperandType::kRegOutTriple, OperandType::kIdx)                             \
   V(ForInContinue, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
     OperandType::kReg)                                                        \
@@ -236,8 +236,14 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
   TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
   TNode<HeapObject> maybe_vector = LoadFeedbackVector();
-  CallBuiltin(Builtin::kStoreGlobalIC, context, name, value, slot,
-              maybe_vector);
+  TNode<Object> result = CallBuiltin(Builtin::kStoreGlobalIC, context, name,
+                                     value, slot, maybe_vector);
+  // To avoid special logic in the deoptimizer to re-materialize the value in
+  // the accumulator, we overwrite the accumulator after the IC call. It
+  // doesn't really matter what we write to the accumulator here, since we
+  // restore to the correct value on the outside. Storing the result means we
+  // don't need to keep unnecessary state alive across the callstub.
+  SetAccumulator(result);
   Dispatch();
 }

@@ -598,14 +604,14 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
     TNode<HeapObject> maybe_vector = LoadFeedbackVector();
     TNode<Context> context = GetContext();
-    TVARIABLE(Object, var_result);
-    var_result = CallStub(ic, context, object, name, value, slot, maybe_vector);
+    TNode<Object> result =
+        CallStub(ic, context, object, name, value, slot, maybe_vector);
     // To avoid special logic in the deoptimizer to re-materialize the value in
     // the accumulator, we overwrite the accumulator after the IC call. It
     // doesn't really matter what we write to the accumulator here, since we
     // restore to the correct value on the outside. Storing the result means we
     // don't need to keep unnecessary state alive across the callstub.
-    SetAccumulator(var_result.value());
+    SetAccumulator(result);
     Dispatch();
   }
 };

@@ -642,15 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
   TNode<HeapObject> maybe_vector = LoadFeedbackVector();
   TNode<Context> context = GetContext();
-  TVARIABLE(Object, var_result);
-  var_result = CallBuiltin(Builtin::kKeyedStoreIC, context, object, name, value,
-                           slot, maybe_vector);
+  TNode<Object> result = CallBuiltin(Builtin::kKeyedStoreIC, context, object,
+                                     name, value, slot, maybe_vector);
   // To avoid special logic in the deoptimizer to re-materialize the value in
   // the accumulator, we overwrite the accumulator after the IC call. It
   // doesn't really matter what we write to the accumulator here, since we
   // restore to the correct value on the outside. Storing the result means we
   // don't need to keep unnecessary state alive across the callstub.
-  SetAccumulator(var_result.value());
+  SetAccumulator(result);
   Dispatch();
 }

@@ -666,15 +671,15 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
   TNode<HeapObject> feedback_vector = LoadFeedbackVector();
   TNode<Context> context = GetContext();
-  TVARIABLE(Object, var_result);
-  var_result = CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array,
-                           index, value, slot, feedback_vector);
+  TNode<Object> result =
+      CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array, index, value,
+                  slot, feedback_vector);
   // To avoid special logic in the deoptimizer to re-materialize the value in
   // the accumulator, we overwrite the accumulator after the IC call. It
   // doesn't really matter what we write to the accumulator here, since we
   // restore to the correct value on the outside. Storing the result means we
   // don't need to keep unnecessary state alive across the callstub.
-  SetAccumulator(var_result.value());
+  SetAccumulator(result);
   Dispatch();
 }

@@ -2834,6 +2839,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
   ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
                &cache_length, UpdateFeedbackMode::kOptionalFeedback);
+
+  // The accumulator is clobbered soon after ForInPrepare, so avoid keeping it
+  // alive too long and instead set it to cache_array to match the first return
+  // value of Builtin::kForInPrepare.
+  SetAccumulator(cache_array);
   StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
   Dispatch();
 }
@@ -58,7 +58,7 @@ snippet: "
 "
 frame size: 3
 parameter count: 1
-bytecode array length: 23
+bytecode array length: 28
 bytecodes: [
              B(LdaConstant), U8(0),
              B(Star1),

@@ -67,8 +67,10 @@ bytecodes: [
  /* 8 S> */  B(LdaSmi), I8(1),
  /* 8 E> */  B(StaGlobal), U8(1), U8(0),
  /* 11 S> */ B(LdaSmi), I8(2),
+            B(Star1),
  /* 12 E> */ B(StaGlobal), U8(1), U8(0),
-            B(Star0),
+            B(Mov), R(1), R(0),
+            B(Ldar), R(0),
  /* 16 S> */ B(Return),
 ]
 constant pool: [
@@ -12,13 +12,15 @@
   function f() { return global &= 1; }
   f();
 "
-frame size: 0
+frame size: 1
 parameter count: 1
-bytecode array length: 10
+bytecode array length: 13
 bytecodes: [
  /* 31 S> */ B(LdaGlobal), U8(0), U8(0),
             B(BitwiseAndSmi), I8(1), U8(2),
+            B(Star0),
  /* 45 E> */ B(StaGlobal), U8(0), U8(3),
+            B(Ldar), R(0),
  /* 50 S> */ B(Return),
 ]
 constant pool: [

@@ -33,13 +35,15 @@
   function f() { return unallocated += 1; }
   f();
 "
-frame size: 0
+frame size: 1
 parameter count: 1
-bytecode array length: 10
+bytecode array length: 13
 bytecodes: [
  /* 32 S> */ B(LdaGlobal), U8(0), U8(0),
             B(AddSmi), I8(1), U8(2),
+            B(Star0),
  /* 51 E> */ B(StaGlobal), U8(0), U8(3),
+            B(Ldar), R(0),
  /* 56 S> */ B(Return),
 ]
 constant pool: [
@@ -12,13 +12,15 @@
   function f() { return ++global; }
   f();
 "
-frame size: 0
+frame size: 1
 parameter count: 1
-bytecode array length: 9
+bytecode array length: 12
 bytecodes: [
  /* 31 S> */ B(LdaGlobal), U8(0), U8(0),
             B(Inc), U8(2),
+            B(Star0),
  /* 40 E> */ B(StaGlobal), U8(0), U8(3),
+            B(Ldar), R(0),
  /* 47 S> */ B(Return),
 ]
 constant pool: [

@@ -33,14 +35,15 @@
   function f() { return global--; }
   f();
 "
-frame size: 1
+frame size: 2
 parameter count: 1
-bytecode array length: 14
+bytecode array length: 15
 bytecodes: [
  /* 31 S> */ B(LdaGlobal), U8(0), U8(0),
             B(ToNumeric), U8(2),
             B(Star0),
             B(Dec), U8(2),
+            B(Star1),
  /* 44 E> */ B(StaGlobal), U8(0), U8(3),
             B(Ldar), R(0),
  /* 47 S> */ B(Return),

@@ -57,13 +60,15 @@
   function f() { 'use strict'; return --unallocated; }
   f();
 "
-frame size: 0
+frame size: 1
 parameter count: 1
-bytecode array length: 9
+bytecode array length: 12
 bytecodes: [
  /* 46 S> */ B(LdaGlobal), U8(0), U8(0),
             B(Dec), U8(2),
+            B(Star0),
  /* 55 E> */ B(StaGlobal), U8(0), U8(3),
+            B(Ldar), R(0),
  /* 67 S> */ B(Return),
 ]
 constant pool: [

@@ -78,14 +83,15 @@
   function f() { return unallocated++; }
   f();
 "
-frame size: 1
+frame size: 2
 parameter count: 1
-bytecode array length: 14
+bytecode array length: 15
 bytecodes: [
  /* 32 S> */ B(LdaGlobal), U8(0), U8(0),
             B(ToNumeric), U8(2),
             B(Star0),
             B(Inc), U8(2),
+            B(Star1),
  /* 50 E> */ B(StaGlobal), U8(0), U8(3),
             B(Ldar), R(0),
  /* 53 S> */ B(Return),
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --sparkplug
function foo(){
// __proto__ is a setter that is defined to return undefined.
return __proto__ = 5;
}
assertEquals(foo(), 5);
assertEquals(foo(), 5);
%EnsureFeedbackVectorForFunction(foo);
assertEquals(foo(), 5);
assertEquals(foo(), 5);
%CompileBaseline(foo);
assertEquals(foo(), 5);
assertEquals(foo(), 5);