Commit 6b2dc157 authored by Clemens Backes's avatar Clemens Backes Committed by V8 LUCI CQ

[wasm] Assume positive stack offsets

The stack offsets of cache slots are always positive, but the compiler
does not know that. The lack of this knowledge makes division by the
system pointer size significantly more expensive.

One solution would be to rewrite the division to be an actual right
shift. Another solution is to teach the compiler that offsets are
positive. This CL does the latter.

This reduces the overall Liftoff compile time of the reproducer in the
linked issue by nearly 25%.

R=jkummerow@chromium.org, cbruni@chromium.org

Bug: v8:13063
Change-Id: Ib55b35d407e9909c792ae095a6767aaa03faebdc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3760453
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81778}
parent 2d1c3ed6
......@@ -344,6 +344,7 @@ path. Add it with -I<path> to the command line
# define V8_HAS_CPP_ATTRIBUTE_NO_UNIQUE_ADDRESS \
(V8_HAS_CPP_ATTRIBUTE(no_unique_address))
# define V8_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume))
# define V8_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned))
# define V8_HAS_BUILTIN_BSWAP16 (__has_builtin(__builtin_bswap16))
# define V8_HAS_BUILTIN_BSWAP32 (__has_builtin(__builtin_bswap32))
......@@ -425,6 +426,15 @@ path. Add it with -I<path> to the command line
# define V8_INLINE inline
#endif
// V8_ASSUME(cond): assert `cond` to the compiler as an optimization hint.
#ifdef DEBUG
// In debug mode, check assumptions instead of actually adding annotations.
# define V8_ASSUME(condition) DCHECK(condition)
#elif V8_HAS_BUILTIN_ASSUME
// Release build with Clang's builtin: hand the invariant to the optimizer.
// NOTE: if `condition` is ever false at runtime, behavior is undefined.
# define V8_ASSUME(condition) __builtin_assume(condition)
#else
// No compiler support: the annotation expands to nothing.
# define V8_ASSUME(condition)
#endif
#if V8_HAS_BUILTIN_ASSUME_ALIGNED
# define V8_ASSUME_ALIGNED(ptr, alignment) \
__builtin_assume_aligned((ptr), (alignment))
......
......@@ -127,10 +127,13 @@ class LiftoffAssembler : public TurboAssembler {
// Where a VarState value currently lives: spilled to a stack slot, held in
// a register, or known to be a compile-time integer constant.
enum Location : uint8_t { kStack, kRegister, kIntConst };
// A value spilled to the stack slot at `offset`. Stack offsets are always
// non-negative; the DCHECK enforces that invariant in debug builds.
explicit VarState(ValueKind kind, int offset)
    : loc_(kStack), kind_(kind), spill_offset_(offset) {
  DCHECK_LE(0, offset);
}
// A value held in register `r`; `offset` is presumably the stack slot used
// if it gets spilled — TODO confirm against the cache-state spilling code.
// The register class must match the value kind, and offsets are always
// non-negative.
explicit VarState(ValueKind kind, LiftoffRegister r, int offset)
: loc_(kRegister), kind_(kind), reg_(r), spill_offset_(offset) {
DCHECK_EQ(r.reg_class(), reg_class_for(kind));
DCHECK_LE(0, offset);
}
// A value known to be an integer constant. The payload is stored as i32;
// i64 constants are materialized by sign-extending it (see constant()).
// Only i32/i64 kinds are allowed, and offsets are always non-negative.
explicit VarState(ValueKind kind, int32_t i32_const, int offset)
    : loc_(kIntConst),
      kind_(kind),
      i32_const_(i32_const),
      spill_offset_(offset) {
  DCHECK(kind_ == kI32 || kind_ == kI64);
  DCHECK_LE(0, offset);
}
// True iff the value is currently spilled to the stack.
bool is_stack() const { return loc_ == kStack; }
......@@ -161,8 +165,14 @@ class LiftoffAssembler : public TurboAssembler {
: WasmValue(int64_t{i32_const_});
}
// Returns the spill offset. Offsets are always non-negative; V8_ASSUME
// teaches the optimizer this, so divisions by the system pointer size
// compile down to plain right shifts (see v8:13063).
int offset() const {
  V8_ASSUME(spill_offset_ >= 0);
  return spill_offset_;
}
// Updates the spill offset. Validate the incoming `offset` — not the stale
// spill_offset_ member, which is about to be overwritten and proves nothing
// about the value being stored.
void set_offset(int offset) {
  DCHECK_LE(0, offset);
  spill_offset_ = offset;
}
// Convenience accessors for the general-purpose / floating-point register
// holding this value. They forward through reg(), which is defined outside
// this excerpt — presumably only valid in the kRegister state; TODO confirm.
Register gp_reg() const { return reg().gp(); }
DoubleRegister fp_reg() const { return reg().fp(); }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.