Commit 8103fe57 authored by Leszek Swirski; committed by V8 LUCI CQ

[maglev] Add safepoints for deferred calls

Add a concept of "register snapshots" which snapshot the end-state
of the register allocation for a node (i.e. the state of the register
allocation when the node's code completes). These can be requested by
nodes, so that they know which registers need to be kept alive by the
node, and which of those are tagged.

Nodes can then use this information to temporarily spill registers
across a deferred call, without requiring the register allocator to
spill them unconditionally on the non-deferred path. The maglev
safepoint table has support for these additional spilled registers.

Bug: v8:7700
Change-Id: Id0052b5da86dd263f9019b1433fe5994a472a5b1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3751203
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81614}
parent 0ed101e0
......@@ -9,6 +9,7 @@
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/maglev-safepoint-table.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/codegen/x64/assembler-x64.h"
......@@ -121,6 +122,38 @@ void PushInput(MaglevCodeGenState* code_gen_state, const Input& input) {
}
}
// RAII helper for emitting a deferred call: on construction it pushes every
// register recorded as live in the node's RegisterSnapshot (general registers
// first, then double registers), and on destruction it pops them back in
// reverse order. This lets a node spill live registers only around the
// deferred call itself, instead of forcing the register allocator to spill
// them unconditionally on the non-deferred path.
class SaveRegisterStateForCall {
 public:
  SaveRegisterStateForCall(MaglevCodeGenState* code_gen_state,
                           RegisterSnapshot snapshot)
      : code_gen_state_(code_gen_state), snapshot_(snapshot) {
    __ PushAll(snapshot_.live_registers);
    __ PushAll(snapshot_.live_double_registers);
  }

  ~SaveRegisterStateForCall() {
    // Pop in reverse of the push order above.
    __ PopAll(snapshot_.live_double_registers);
    __ PopAll(snapshot_.live_registers);
  }

  // Defines a safepoint for the call made while this state is saved, and
  // marks which of the pushed general registers hold tagged values so the GC
  // can visit them. NOTE(review): the tagged-register indices follow the
  // iteration order of live_registers here, which is assumed to match the
  // order PushAll pushed them in — confirm against PushAll's implementation.
  MaglevSafepointTableBuilder::Safepoint DefineSafepoint() {
    auto safepoint =
        code_gen_state_->safepoint_table_builder()->DefineSafepoint(
            code_gen_state_->masm());
    int pushed_reg_index = 0;
    for (Register reg : snapshot_.live_registers) {
      if (snapshot_.live_tagged_registers.has(reg)) {
        safepoint.DefineTaggedRegister(pushed_reg_index);
      }
      pushed_reg_index++;
    }
    return safepoint;
  }

 private:
  // Fix: trailing underscore on the member name, matching `snapshot_` and the
  // project's private-member naming convention.
  MaglevCodeGenState* code_gen_state_;  // Not owned.
  RegisterSnapshot snapshot_;
};
// ---
// Deferred code handling.
// ---
......@@ -680,18 +713,35 @@ void CheckMapsWithMigration::GenerateCode(MaglevCodeGenState* code_gen_state,
Immediate(Map::Bits3::IsDeprecatedBit::kMask));
__ j(zero, &deopt_info->deopt_entry_label);
// Otherwise, try migrating the object. If the migration returns Smi
// zero, then it failed and we should deopt.
__ Push(object);
__ Move(kContextRegister,
code_gen_state->broker()->target_native_context().object());
// TODO(verwaest): We're calling so we need to spill around it.
__ CallRuntime(Runtime::kTryMigrateInstance);
__ cmpl(kReturnRegister0, Immediate(0));
// Otherwise, try migrating the object. If the migration
// returns Smi zero, then it failed and we should deopt.
Register return_val = Register::no_reg();
{
SaveRegisterStateForCall save_register_state(
code_gen_state, node->register_snapshot());
__ Push(object);
__ Move(kContextRegister,
code_gen_state->broker()->target_native_context().object());
__ CallRuntime(Runtime::kTryMigrateInstance);
save_register_state.DefineSafepoint();
// Make sure the return value is preserved across the live register
// restoring pop all.
return_val = kReturnRegister0;
if (node->register_snapshot().live_registers.has(return_val)) {
DCHECK(!node->register_snapshot().live_registers.has(map_tmp));
__ Move(map_tmp, return_val);
return_val = map_tmp;
}
}
// On failure, the returned value is zero
__ cmpl(return_val, Immediate(0));
__ j(equal, &deopt_info->deopt_entry_label);
// The migrated object is returned on success, retry the map check.
__ Move(object, kReturnRegister0);
__ Move(object, return_val);
__ LoadMap(map_tmp, object);
__ Cmp(map_tmp, node->map().object());
__ j(equal, return_label);
......@@ -1575,16 +1625,19 @@ void ReduceInterruptBudget::GenerateCode(MaglevCodeGenState* code_gen_state,
Immediate(amount()));
JumpToDeferredIf(
less, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label) {
// TODO(leszeks): Only save registers if they're not free (requires
// fixing the regalloc, same as for scratch).
__ PushCallerSaved(SaveFPRegsMode::kSave);
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck, 1);
__ PopCallerSaved(SaveFPRegsMode::kSave);
[](MaglevCodeGenState* code_gen_state, Label* return_label,
ReduceInterruptBudget* node) {
{
SaveRegisterStateForCall save_register_state(
code_gen_state, node->register_snapshot());
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck, 1);
save_register_state.DefineSafepoint();
}
__ jmp(return_label);
});
},
this);
}
void ReduceInterruptBudget::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
......
This diff is collapsed.
......@@ -485,6 +485,8 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
UpdateUse(*node->lazy_deopt_info());
}
if (node->properties().needs_register_snapshot()) SaveRegisterSnapshot(node);
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(node,
ProcessingState(compilation_info_, block_it_));
......@@ -716,6 +718,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
DCHECK_EQ(node->input_count(), 0);
DCHECK(!node->properties().can_eager_deopt());
DCHECK(!node->properties().can_lazy_deopt());
DCHECK(!node->properties().needs_register_snapshot());
// Initialize phis before assigning inputs, in case one of the inputs
// conflicts with a fixed phi.
......@@ -748,6 +751,8 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
if (node->properties().is_call()) SpillAndClearRegisters();
DCHECK(!node->properties().needs_register_snapshot());
DCHECK_EQ(general_registers_.free() | node->temporaries(),
general_registers_.free());
......@@ -1076,6 +1081,19 @@ void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
SpillAndClearRegisters(double_registers_);
}
// Records the end-state of the register allocation for `node`: which general
// and double registers are live, and which of the live general registers hold
// tagged values (so a safepoint taken while they are spilled can report them
// to the GC).
void StraightForwardRegisterAllocator::SaveRegisterSnapshot(NodeBase* node) {
  RegisterSnapshot snapshot;
  // Fix: the lambda parameter was named `node`, shadowing the function's own
  // `node` parameter — renamed to `owner` (the ValueNode currently occupying
  // the register) to avoid accidental misuse.
  general_registers_.ForEachUsedRegister([&](Register reg, ValueNode* owner) {
    if (owner->properties().value_representation() ==
        ValueRepresentation::kTagged) {
      snapshot.live_tagged_registers.set(reg);
    }
  });
  snapshot.live_registers = general_registers_.used();
  snapshot.live_double_registers = double_registers_.used();
  node->set_register_snapshot(snapshot);
}
void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
DCHECK(!node->is_loadable());
uint32_t free_slot;
......
......@@ -163,6 +163,8 @@ class StraightForwardRegisterAllocator {
void SpillAndClearRegisters(RegisterFrameState<RegisterT>& registers);
void SpillAndClearRegisters();
void SaveRegisterSnapshot(NodeBase* node);
void FreeRegistersUsedBy(ValueNode* node);
template <typename RegisterT>
RegisterT FreeUnblockedRegister();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment