Commit 58a7761b authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Skip to the end of bytecode blocks on eager deopt

Unconditional eager deopts from lack of feedback (née soft deopts) mean
that the remainder of the basic block is dead. Avoid emitting this code
by fast forwarding the iterator until the next merge.

The EagerDeopt node becomes a Deopt control node which terminates its
own block (this is to avoid spurious control flow after the EagerDeopt,
or weirdness with liveness). A concept of "merging dead blocks" has to
be introduced so that the successors of the killed block still have the
right number of predecessors.

Bug: v8:7700
Change-Id: Id9c442c3b18d3f394dc2411604d0c8503d6aaae2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3578647
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79952}
parent ecae28fe
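
A minimal standalone sketch of the idea described in the commit message, assuming a toy bytecode format (ToyBytecode, ToyInstr, MergeState and SkipDeadCode are hypothetical names, not V8 code): once an unconditional eager deopt terminates the current block, skip the remaining bytecodes up to the next merge point and record a dead predecessor at each merge target the skipped terminator would have reached.

#include <cstdio>
#include <map>
#include <vector>

// One entry per jump target: how many of its expected predecessors are dead.
struct MergeState {
  int predecessor_count = 0;
  int dead_predecessors = 0;
  void MergeDead() { ++dead_predecessors; }
};

enum class ToyBytecode { kAdd, kJump, kReturn };

struct ToyInstr {
  ToyBytecode bytecode;
  int jump_target = -1;  // only meaningful for kJump
};

// An offset is a merge point if some jump targets it (mirrors the idea of
// merge_states_[offset] != nullptr in the real builder).
bool IsMergePoint(const std::map<int, MergeState>& merges, int offset) {
  return merges.count(offset) > 0;
}

// Called at the offset where the unconditional deopt was emitted; returns the
// offset at which graph building should resume.
int SkipDeadCode(const std::vector<ToyInstr>& code, int deopt_offset,
                 std::map<int, MergeState>& merges) {
  int offset = deopt_offset;
  // Skip bytecodes until the *next* offset is a merge point (or we run out).
  while (offset + 1 < static_cast<int>(code.size()) &&
         !IsMergePoint(merges, offset + 1)) {
    ++offset;
  }
  // The skipped block's terminator still counts as a predecessor of its
  // targets; mark those merges as having one dead predecessor.
  const ToyInstr& last = code[offset];
  if (last.bytecode == ToyBytecode::kJump) {
    merges[last.jump_target].MergeDead();
  } else if (last.bytecode == ToyBytecode::kAdd) {
    merges[offset + 1].MergeDead();  // plain fallthrough into the merge
  }  // kReturn merges into nothing.
  return offset + 1;
}

int main() {
  // 0: Add (deopt emitted here), 1: Add (dead), 2: Jump->3 (dead), 3: Return.
  std::vector<ToyInstr> code = {{ToyBytecode::kAdd},
                                {ToyBytecode::kAdd},
                                {ToyBytecode::kJump, 3},
                                {ToyBytecode::kReturn}};
  std::map<int, MergeState> merges;
  merges[3].predecessor_count = 2;  // reached by the Jump and by another path
  int resume = SkipDeadCode(code, /*deopt_offset=*/0, merges);
  std::printf("resume at offset %d; dead predecessors at 3: %d\n", resume,
              merges[3].dead_predecessors);  // resume at 3, 1 dead
}

The real EmitUnconditionalDeopt below does the same bookkeeping per bytecode kind (forward jumps, JumpLoop, switches, plain fallthrough).
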
@@ -316,7 +316,7 @@ void MaglevGraphBuilder::BuildPropertyCellAccess(
   compiler::ObjectRef property_cell_value = property_cell.value();
   if (property_cell_value.IsTheHole()) {
     // The property cell is no longer valid.
-    AddNewNode<EagerDeopt>({});
+    EmitUnconditionalDeopt();
     return;
   }
@@ -366,7 +366,7 @@ void MaglevGraphBuilder::VisitLdaGlobal() {
       feedback(), GetSlotOperand(kSlotOperandIndex)));
   if (access_feedback.IsInsufficient()) {
-    AddNewNode<EagerDeopt>({});
+    EmitUnconditionalDeopt();
     return;
   }
@@ -406,7 +406,7 @@ void MaglevGraphBuilder::VisitGetNamedProperty() {
   switch (processed_feedback.kind()) {
     case compiler::ProcessedFeedback::kInsufficient:
-      AddNewNode<EagerDeopt>({});
+      EmitUnconditionalDeopt();
      return;
    case compiler::ProcessedFeedback::kNamedAccess: {
@@ -460,7 +460,7 @@ void MaglevGraphBuilder::VisitSetNamedProperty() {
   switch (processed_feedback.kind()) {
     case compiler::ProcessedFeedback::kInsufficient:
-      AddNewNode<EagerDeopt>({});
+      EmitUnconditionalDeopt();
      return;
    case compiler::ProcessedFeedback::kNamedAccess: {
......
@@ -72,6 +72,11 @@ class MaglevGraphBuilder {
     BasicBlockRef* old_jump_targets = jump_targets_[offset].Reset();
     while (old_jump_targets != nullptr) {
       BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
+      if (predecessor == nullptr) {
+        // We can have null predecessors if the predecessor is dead.
+        predecessor_index--;
+        continue;
+      }
       ControlNode* control = predecessor->control_node();
       if (control->Is<ConditionalControlNode>()) {
         // CreateEmptyBlock automatically registers itself with the offset.
@@ -105,6 +110,53 @@ class MaglevGraphBuilder {
     }
   }
 
+  // Return true if the given offset is a merge point, i.e. there are jumps
+  // targetting it.
+  bool IsOffsetAMergePoint(int offset) {
+    return merge_states_[offset] != nullptr;
+  }
+
+  // Called when a block is killed by an unconditional eager deopt.
+  void EmitUnconditionalDeopt() {
+    // Create a block rather than calling finish, since we don't yet know the
+    // next block's offset before the loop skipping the rest of the bytecodes.
+    BasicBlock* block = CreateBlock<Deopt>({});
+    ResolveJumpsToBlockAtOffset(block, block_offset_);
+
+    // Skip any bytecodes remaining in the block, up to the next merge point.
+    while (!IsOffsetAMergePoint(iterator_.next_offset())) {
+      iterator_.Advance();
+      if (iterator_.done()) break;
+    }
+
+    // If there is control flow out of this block, we need to kill the merges
+    // into the control flow targets.
+    interpreter::Bytecode bytecode = iterator_.current_bytecode();
+    if (interpreter::Bytecodes::IsForwardJump(bytecode)) {
+      // Jumps merge into their target, and conditional jumps also merge into
+      // the fallthrough.
+      merge_states_[iterator_.GetJumpTargetOffset()]->MergeDead();
+      if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+        merge_states_[iterator_.next_offset()]->MergeDead();
+      }
+    } else if (bytecode == interpreter::Bytecode::kJumpLoop) {
+      // JumpLoop merges into its loop header, which has to be treated specially
+      // by the merge..
+      merge_states_[iterator_.GetJumpTargetOffset()]->MergeDeadLoop();
+    } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
+      // Switches merge into their targets, and into the fallthrough.
+      for (auto offset : iterator_.GetJumpTableTargetOffsets()) {
+        merge_states_[offset.target_offset]->MergeDead();
+      }
+      merge_states_[iterator_.next_offset()]->MergeDead();
+    } else if (!interpreter::Bytecodes::Returns(bytecode) &&
+               !interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
+      // Any other bytecode that doesn't return or throw will merge into the
+      // fallthrough.
+      merge_states_[iterator_.next_offset()]->MergeDead();
+    }
+  }
+
   void VisitSingleBytecode() {
     int offset = iterator_.current_offset();
     if (V8_UNLIKELY(merge_states_[offset] != nullptr)) {
@@ -167,15 +219,15 @@ class MaglevGraphBuilder {
   template <typename NodeT, typename... Args>
   NodeT* CreateNewNode(Args&&... args) {
     if constexpr (NodeT::kProperties.can_eager_deopt()) {
-      return Node::New<NodeT>(zone(), *compilation_unit_,
-                              GetLatestCheckpointedState(),
-                              std::forward<Args>(args)...);
+      return NodeBase::New<NodeT>(zone(), *compilation_unit_,
+                                  GetLatestCheckpointedState(),
+                                  std::forward<Args>(args)...);
     } else if constexpr (NodeT::kProperties.can_lazy_deopt()) {
-      return Node::New<NodeT>(zone(), *compilation_unit_,
-                              GetCheckpointedStateForLazyDeopt(),
-                              std::forward<Args>(args)...);
+      return NodeBase::New<NodeT>(zone(), *compilation_unit_,
+                                  GetCheckpointedStateForLazyDeopt(),
+                                  std::forward<Args>(args)...);
     } else {
-      return Node::New<NodeT>(zone(), std::forward<Args>(args)...);
+      return NodeBase::New<NodeT>(zone(), std::forward<Args>(args)...);
     }
   }
@@ -336,8 +388,8 @@ class MaglevGraphBuilder {
   template <typename ControlNodeT, typename... Args>
   BasicBlock* CreateBlock(std::initializer_list<ValueNode*> control_inputs,
                           Args&&... args) {
-    current_block_->set_control_node(NodeBase::New<ControlNodeT>(
-        zone(), control_inputs, std::forward<Args>(args)...));
+    current_block_->set_control_node(CreateNewNode<ControlNodeT>(
+        control_inputs, std::forward<Args>(args)...));
     BasicBlock* block = current_block_;
     current_block_ = nullptr;
@@ -349,21 +401,25 @@ class MaglevGraphBuilder {
     return block;
   }
 
+  // Update all jumps which were targetting the not-yet-created block at the
+  // given `block_offset`, to now point to the given `block`.
+  void ResolveJumpsToBlockAtOffset(BasicBlock* block, int block_offset) const {
+    BasicBlockRef* jump_target_refs_head =
+        jump_targets_[block_offset].SetToBlockAndReturnNext(block);
+    while (jump_target_refs_head != nullptr) {
+      jump_target_refs_head =
+          jump_target_refs_head->SetToBlockAndReturnNext(block);
+    }
+    DCHECK_EQ(jump_targets_[block_offset].block_ptr(), block);
+  }
+
   template <typename ControlNodeT, typename... Args>
   BasicBlock* FinishBlock(int next_block_offset,
                           std::initializer_list<ValueNode*> control_inputs,
                           Args&&... args) {
     BasicBlock* block =
         CreateBlock<ControlNodeT>(control_inputs, std::forward<Args>(args)...);
-
-    // Resolve pointers to this basic block.
-    BasicBlockRef* jump_target_refs_head =
-        jump_targets_[block_offset_].SetToBlockAndReturnNext(block);
-    while (jump_target_refs_head != nullptr) {
-      jump_target_refs_head =
-          jump_target_refs_head->SetToBlockAndReturnNext(block);
-    }
-    DCHECK_EQ(jump_targets_[block_offset_].block_ptr(), block);
+    ResolveJumpsToBlockAtOffset(block, block_offset_);
 
     // If the next block has merge states, then it's not a simple fallthrough,
     // and we should reset the checkpoint validity.
@@ -374,11 +430,11 @@ class MaglevGraphBuilder {
     // which case we merge our state into it. That merge-point could also be a
     // loop header, in which case the merge state might not exist yet (if the
     // only predecessors are this path and the JumpLoop).
+    DCHECK_NULL(current_block_);
     if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
       if (NumPredecessors(next_block_offset) == 1) {
         StartNewBlock(next_block_offset);
       } else {
-        DCHECK_NULL(current_block_);
         MergeIntoFrameState(block, next_block_offset);
       }
     }
......
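
The new ResolveJumpsToBlockAtOffset helper above factors out the forward-reference pattern FinishBlock already used: jumps to a block that has not been created yet are chained through placeholder refs, and once the block exists every ref in the chain is patched to point at it. A simplified, hypothetical stand-in (BlockRef below is not V8's BasicBlockRef):

#include <cassert>

struct BasicBlock { int id; };

// Before the target block exists, refs form a singly linked chain; resolving
// walks the chain and points every ref at the real block.
struct BlockRef {
  BasicBlock* block = nullptr;  // set once resolved
  BlockRef* next = nullptr;     // chain of refs waiting on the same offset
};

// Register `ref` as waiting on the per-offset chain headed by `head`.
void AddForwardRef(BlockRef*& head, BlockRef* ref) {
  ref->next = head;
  head = ref;
}

// Patch every ref in the chain to `block`, clearing the chain as we go.
void Resolve(BlockRef* head, BasicBlock* block) {
  for (BlockRef* ref = head; ref != nullptr;) {
    BlockRef* next = ref->next;
    ref->block = block;
    ref->next = nullptr;
    ref = next;
  }
}

int main() {
  BlockRef r1, r2;
  BlockRef* waiting_on_offset_42 = nullptr;
  AddForwardRef(waiting_on_offset_42, &r1);
  AddForwardRef(waiting_on_offset_42, &r2);
  BasicBlock target{42};
  Resolve(waiting_on_offset_42, &target);
  assert(r1.block == &target && r2.block == &target);
  return 0;
}
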
@@ -401,8 +401,12 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
   // moves).
   for (int i = 0; i < phi->input_count(); ++i) {
     if (i > 0) os_ << ", ";
-    os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
+    if (state.block()->predecessor_at(i) == nullptr) {
+      os_ << "<dead>";
+    } else {
+      os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
+    }
   }
   os_ << ") → " << phi->result().operand() << "\n";
   MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
@@ -410,9 +414,10 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
 }
 
 void MaglevPrintingVisitor::Process(Node* node, const ProcessingState& state) {
-  MaglevGraphLabeller* graph_labeller = state.graph_labeller();
   MaybePrintEagerDeopt(os_, targets_, node, state);
+
+  MaglevGraphLabeller* graph_labeller = state.graph_labeller();
   PrintVerticalArrows(os_, targets_);
   PrintPaddedId(os_, graph_labeller, node);
   os_ << PrintNode(graph_labeller, node) << "\n";
@@ -427,6 +432,8 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
                                     const ProcessingState& state) {
   MaglevGraphLabeller* graph_labeller = state.graph_labeller();
 
+  MaybePrintEagerDeopt(os_, targets_, control_node, state);
+
   bool has_fallthrough = false;
   if (control_node->Is<JumpLoop>()) {
......
@@ -62,7 +62,7 @@ class MaglevGraphVerifier {
       case Opcode::kInitialValue:
       case Opcode::kRegisterInput:
       case Opcode::kGapMove:
-      case Opcode::kEagerDeopt:
+      case Opcode::kDeopt:
       case Opcode::kJump:
       case Opcode::kJumpLoop:
        // No input.
......
@@ -224,6 +224,8 @@ class MergePointRegisterState {
 class MergePointInterpreterFrameState {
  public:
+  static constexpr BasicBlock* kDeadPredecessor = nullptr;
+
   void CheckIsLoopPhiIfNeeded(const MaglevCompilationUnit& compilation_unit,
                               int merge_offset, interpreter::Register reg,
                               ValueNode* value) {
@@ -321,6 +323,24 @@ class MergePointInterpreterFrameState {
     });
   }
 
+  // Merges a dead framestate (e.g. one which has been early terminated with a
+  // deopt).
+  void MergeDead() {
+    DCHECK_GT(predecessor_count_, 1);
+    DCHECK_LT(predecessors_so_far_, predecessor_count_);
+    predecessors_[predecessors_so_far_] = kDeadPredecessor;
+    predecessors_so_far_++;
+    DCHECK_LE(predecessors_so_far_, predecessor_count_);
+  }
+
+  // Merges a dead loop framestate (e.g. one where the block containing the
+  // JumpLoop has been early terminated with a deopt).
+  void MergeDeadLoop() {
+    DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+    DCHECK_NULL(predecessors_[0]);
+    predecessors_[0] = kDeadPredecessor;
+  }
+
   const CompactInterpreterFrameState& frame_state() const {
     return frame_state_;
   }
......
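
MergeDead and MergeDeadLoop above keep predecessor indices stable: a dead predecessor still consumes its slot (as nullptr) so that per-predecessor data such as phi inputs stays aligned, which is what lets the printer show <dead> for that input. A simplified, hypothetical model of that invariant (ToyMergeState and ToyBlock are not V8 code):

#include <cassert>
#include <cstdio>
#include <vector>

struct ToyBlock { int id; };

struct ToyMergeState {
  explicit ToyMergeState(int predecessor_count)
      : predecessors(predecessor_count, nullptr), seen(0) {}

  // A live predecessor fills the next slot with its block.
  void Merge(ToyBlock* block) { predecessors[seen++] = block; }

  // A dead predecessor still consumes a slot, but leaves it null.
  void MergeDead() { predecessors[seen++] = nullptr; }

  bool done() const { return seen == static_cast<int>(predecessors.size()); }

  std::vector<ToyBlock*> predecessors;
  int seen;
};

int main() {
  ToyBlock b1{1};
  ToyMergeState merge(/*predecessor_count=*/2);
  merge.Merge(&b1);   // one live predecessor
  merge.MergeDead();  // one predecessor killed by an unconditional deopt
  assert(merge.done());
  for (size_t i = 0; i < merge.predecessors.size(); ++i) {
    // Mirrors the printer change above: null slots are shown as <dead>.
    std::printf("input %zu: %s\n", i,
                merge.predecessors[i] ? "live" : "<dead>");
  }
}
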
@@ -377,13 +377,6 @@ void SmiConstant::PrintParams(std::ostream& os,
   os << "(" << value() << ")";
 }
 
-void EagerDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
-                              const ProcessingState& state) {}
-void EagerDeopt::GenerateCode(MaglevCodeGenState* code_gen_state,
-                              const ProcessingState& state) {
-  EmitEagerDeoptIf(always, code_gen_state, this);
-}
-
 void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                             const ProcessingState& state) {
   DefineAsRegister(vreg_state, this);
@@ -890,6 +883,13 @@ void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
   __ Ret();
 }
 
+void Deopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
+                         const ProcessingState& state) {}
+void Deopt::GenerateCode(MaglevCodeGenState* code_gen_state,
+                         const ProcessingState& state) {
+  EmitEagerDeoptIf(always, code_gen_state, this);
+}
+
 void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state,
                         const ProcessingState& state) {}
 void Jump::GenerateCode(MaglevCodeGenState* code_gen_state,
......
@@ -85,7 +85,6 @@ class CompactInterpreterFrameState;
 #define NODE_LIST(V) \
   V(CheckMaps)       \
   V(GapMove)         \
-  V(EagerDeopt)      \
   V(StoreField)      \
   VALUE_NODE_LIST(V)
@@ -99,6 +98,7 @@ class CompactInterpreterFrameState;
 #define CONTROL_NODE_LIST(V)       \
   V(Return)                        \
+  V(Deopt)                         \
   CONDITIONAL_CONTROL_NODE_LIST(V) \
   UNCONDITIONAL_CONTROL_NODE_LIST(V)
@@ -1106,19 +1106,6 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
   const RootIndex index_;
 };
 
-class EagerDeopt : public FixedInputNodeT<0, EagerDeopt> {
-  using Base = FixedInputNodeT<0, EagerDeopt>;
-
- public:
-  explicit EagerDeopt(uint32_t bitfield) : Base(bitfield) {}
-
-  static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
-
-  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
-  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
-  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
-};
-
 class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
   using Base = FixedInputNodeT<1, CheckMaps>;
@@ -1437,6 +1424,7 @@ class ControlNode : public NodeBase {
   }
   void set_next_post_dominating_hole(ControlNode* node) {
     DCHECK_IMPLIES(node != nullptr, node->Is<Jump>() || node->Is<Return>() ||
+                                        node->Is<Deopt>() ||
                                         node->Is<JumpLoop>());
     next_post_dominating_hole_ = node;
   }
@@ -1574,6 +1562,19 @@ class Return : public ControlNode {
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
 };
 
+class Deopt : public ControlNode {
+ public:
+  explicit Deopt(uint32_t bitfield) : ControlNode(bitfield) {
+    DCHECK_EQ(NodeBase::opcode(), opcode_of<Deopt>);
+  }
+
+  static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+
+  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
 class BranchIfTrue : public ConditionalControlNodeT<1, BranchIfTrue> {
   using Base = ConditionalControlNodeT<1, BranchIfTrue>;
......
@@ -175,7 +175,8 @@ void StraightForwardRegisterAllocator::ComputePostDominatingHoles(
         // If the first branch returns or jumps back, we've found highest
         // reachable control-node of the longest branch (the second control
         // node).
-        if (first->Is<Return>() || first->Is<JumpLoop>()) {
+        if (first->Is<Return>() || first->Is<Deopt>() ||
+            first->Is<JumpLoop>()) {
          control->set_next_post_dominating_hole(second);
          break;
        }
@@ -240,6 +241,9 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
         } else if (control->Is<Return>()) {
           printing_visitor_->os() << " " << control->id() << ".";
           break;
+        } else if (control->Is<Deopt>()) {
+          printing_visitor_->os() << " " << control->id() << "✖️";
+          break;
         } else if (control->Is<JumpLoop>()) {
           printing_visitor_->os() << " " << control->id() << "↰";
           break;
@@ -496,6 +500,9 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
                                                            BasicBlock* block) {
   for (Input& input : *node) AssignInput(input);
   AssignTemporaries(node);
+  if (node->properties().can_eager_deopt()) {
+    UpdateUse(*node->eager_deopt_info());
+  }
   for (Input& input : *node) UpdateUse(&input);
   if (node->properties().is_call()) SpillAndClearRegisters();
......