Commit 6e45bf89 authored by Clemens Backes, committed by Commit Bot

[liftoff] Do not clear the cached instance on loops

Loops will always execute a stack check in the header, so having the
instance cached in a register is handy. Instead of clearing it before
entering a loop, ensure that backward jumps to the loop header move the
instance into the right register.

R=thibaudm@chromium.org

Bug: v8:11336
Change-Id: I16cb13457438b7a1603182d56a3d2ea99d670911
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2743892
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73317}
parent 3e421b11
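
Conceptually, the stack-merge code now distinguishes forward from backward jumps whenever the cached instance register of the source and target states disagree. The following standalone sketch illustrates that decision only; the names MergeInstanceCache, CacheState, Register, and Action are hypothetical stand-ins, not the real Liftoff API, whose actual change is in the diff below.

// A minimal sketch, assuming simplified stand-in types (not the real Liftoff
// API): registers are plain ints and kNoReg (-1) means "no register cached".
#include <cassert>

using Register = int;
constexpr Register kNoReg = -1;

struct CacheState {
  Register cached_instance = kNoReg;
};

enum JumpDirection { kForwardJump, kBackwardJump };

// What the merge has to do about the cached instance register.
enum class Action { kNone, kClearTargetCache, kMoveToTargetReg, kReloadFromFrame };

Action MergeInstanceCache(const CacheState& source, const CacheState& target,
                          JumpDirection direction) {
  // Nothing to do if both states agree, or if the target caches nothing.
  if (source.cached_instance == target.cached_instance ||
      target.cached_instance == kNoReg) {
    return Action::kNone;
  }
  // Forward jump: the target state can simply drop its cached instance.
  if (direction == kForwardJump) return Action::kClearTargetCache;
  // Backward jump (to a loop header): the code at the target was generated
  // assuming the instance lives in target.cached_instance, so it must be
  // materialized there, either by moving it or by reloading it from the frame.
  return source.cached_instance == kNoReg ? Action::kReloadFromFrame
                                          : Action::kMoveToTargetReg;
}

int main() {
  CacheState loop_header{/*cached_instance=*/7};
  assert(MergeInstanceCache({kNoReg}, loop_header, kBackwardJump) ==
         Action::kReloadFromFrame);
  assert(MergeInstanceCache({3}, loop_header, kBackwardJump) ==
         Action::kMoveToTargetReg);
  assert(MergeInstanceCache({3}, loop_header, kForwardJump) ==
         Action::kClearTargetCache);
  return 0;
}

In the real patch, MergeStackWith performs these actions directly (ClearCachedInstanceRegister, Move, or LoadInstanceFromFrame), as shown in the diff below.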
@@ -696,15 +696,15 @@ void LiftoffAssembler::MergeFullStackWith(CacheState& target,
     transfers.TransferStackSlot(target.stack_state[i], source.stack_state[i]);
   }
+  // Full stack merging is only done for forward jumps, so we can just clear the
+  // instance cache register at the target in case of mismatch.
   if (source.cached_instance != target.cached_instance) {
-    // Backward jumps (to loop headers) do not have a cached instance anyway, so
-    // ignore this. On forward jumps, just reset the cached instance in the
-    // target state.
     target.ClearCachedInstanceRegister();
   }
 }
 
-void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
+void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
+                                      JumpDirection jump_direction) {
   // Before: ----------------|----- (discarded) ----|--- arity ---|
   //         ^target_stack_height                   ^stack_base   ^stack_height
   // After:  ----|-- arity --|
@@ -726,11 +726,21 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
                                 cache_state_.stack_state[stack_base + i]);
   }
-  if (cache_state_.cached_instance != target.cached_instance) {
-    // Backward jumps (to loop headers) do not have a cached instance anyway, so
-    // ignore this. On forward jumps, just reset the cached instance in the
-    // target state.
-    target.ClearCachedInstanceRegister();
+  if (cache_state_.cached_instance != target.cached_instance &&
+      target.cached_instance != no_reg) {
+    if (jump_direction == kForwardJump) {
+      // On forward jumps, just reset the cached instance in the target state.
+      target.ClearCachedInstanceRegister();
+    } else {
+      // On backward jumps, we already generated code assuming that the instance
+      // is available in that register. Thus move it there.
+      if (cache_state_.cached_instance == no_reg) {
+        LoadInstanceFromFrame(target.cached_instance);
+      } else {
+        Move(target.cached_instance, cache_state_.cached_instance,
+             kPointerKind);
+      }
+    }
   }
 }
@@ -519,8 +519,9 @@ class LiftoffAssembler : public TurboAssembler {
   void MaterializeMergedConstants(uint32_t arity);
 
+  enum JumpDirection { kForwardJump, kBackwardJump };
   void MergeFullStackWith(CacheState& target, const CacheState& source);
-  void MergeStackWith(CacheState& target, uint32_t arity);
+  void MergeStackWith(CacheState& target, uint32_t arity, JumpDirection);
   void Spill(VarState* slot);
   void SpillLocals();
@@ -1042,8 +1042,6 @@ class LiftoffCompiler {
     // TODO(clemensb): Come up with a better strategy here, involving
     // pre-analysis of the function.
     __ SpillLocals();
-    // Same for the cached instance register.
-    __ cache_state()->ClearCachedInstanceRegister();
 
     __ PrepareLoopArgs(loop->start_merge.arity);
@@ -1186,7 +1184,8 @@ class LiftoffCompiler {
     if (!c->end_merge.reached) {
       __ DropValue(c->stack_depth + c->num_exceptions);
     } else {
-      __ MergeStackWith(c->label_state, c->br_merge()->arity);
+      __ MergeStackWith(c->label_state, c->br_merge()->arity,
+                        LiftoffAssembler::kForwardJump);
       __ cache_state()->Steal(c->label_state);
     }
   }
@@ -2287,7 +2286,9 @@ class LiftoffCompiler {
           *__ cache_state(), __ num_locals(), target->br_merge()->arity,
           target->stack_depth + target->num_exceptions);
     }
-    __ MergeStackWith(target->label_state, target->br_merge()->arity);
+    __ MergeStackWith(target->label_state, target->br_merge()->arity,
+                      target->is_loop() ? LiftoffAssembler::kBackwardJump
+                                        : LiftoffAssembler::kForwardJump);
     __ jmp(target->label.get());
   }
@@ -3758,7 +3759,8 @@ class LiftoffCompiler {
           current_try->stack_depth + current_try->num_exceptions);
       current_try->try_info->catch_reached = true;
     }
-    __ MergeStackWith(current_try->try_info->catch_state, 1);
+    __ MergeStackWith(current_try->try_info->catch_state, 1,
+                      LiftoffAssembler::kForwardJump);
     __ emit_jump(&current_try->try_info->catch_label);
     __ bind(&skip_handler);