Commit 96d869fd authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Remove obsolete bailout in memory store

This seems to be a merge error: the if moved down by a few lines and
now actually implements the bounds check instead of bailing out.
Removing it revealed a bug where we tried to access the lowest 8 bits
of a register for which this is not allowed on ia32, so a few more
changes were needed in this CL.

R=titzer@chromium.org

Bug: v8:6600
Change-Id: Ib1ef131a12df050302ae50115493a1fcd8323fe5
Reviewed-on: https://chromium-review.googlesource.com/852734
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50422}
parent f13540e9
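For readers unfamiliar with the ia32 quirk the message refers to: of the eight general-purpose registers, only eax, ecx, edx and ebx have 8-bit low-half aliases (al, cl, dl, bl), so an i32 store8 whose value sits in esi or edi must first be copied into one of those four. The following standalone sketch (plain C++, not V8 code; names such as RegList, EmitStore8 and GetUnusedRegister are invented for illustration) models the pattern this CL adds to LiftoffAssembler::Store for StoreType::kI32Store8.

// Standalone sketch of the ia32 constraint behind this fix.
#include <cassert>
#include <cstdint>
#include <cstdio>

enum Reg : int { eax, ecx, edx, ebx, esp, ebp, esi, edi };
const char* kRegNames[] = {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"};

using RegList = uint32_t;  // bit i set <=> register i is in the list
constexpr RegList Bit(Reg r) { return RegList{1} << r; }

// Mirrors the idea of liftoff::kByteRegs in this CL: only these four
// registers can be addressed as 8-bit registers in 32-bit mode.
constexpr RegList kByteRegs = Bit(eax) | Bit(ecx) | Bit(edx) | Bit(ebx);
constexpr bool IsByteRegister(Reg r) { return (Bit(r) & kByteRegs) != 0; }

// Pick any candidate register that is not pinned (hypothetical stand-in for
// GetUnusedRegister(liftoff::kByteRegs, pinned)).
Reg GetUnusedRegister(RegList candidates, RegList pinned) {
  RegList available = candidates & ~pinned;
  assert(available != 0 && "need at least one free byte register");
  for (int i = 0; i < 8; ++i) {
    if (available & (RegList{1} << i)) return static_cast<Reg>(i);
  }
  return eax;  // unreachable: available was checked to be non-empty
}

// Emit an 8-bit store of `src`. If `src` has no 8-bit alias, the value is
// first moved into a byte-addressable scratch register, as the CL does for
// StoreType::kI32Store8.
void EmitStore8(Reg src, RegList pinned) {
  if (IsByteRegister(src)) {
    std::printf("mov_b [mem], %s\n", kRegNames[src]);
  } else {
    Reg scratch = GetUnusedRegister(kByteRegs, pinned);
    std::printf("mov %s, %s\n", kRegNames[scratch], kRegNames[src]);
    std::printf("mov_b [mem], %s\n", kRegNames[scratch]);
  }
}

int main() {
  EmitStore8(ecx, /*pinned=*/0);         // cl exists: direct byte store
  EmitStore8(esi, /*pinned=*/Bit(eax));  // esi has no byte alias: go via a scratch
}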
@@ -27,6 +27,12 @@ inline Operand GetStackSlot(uint32_t index) {
 // TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
 inline Operand GetContextOperand() { return Operand(ebp, -16); }

+static constexpr LiftoffRegList kByteRegs =
+    LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx, ebx>()>();
+static_assert(kByteRegs.GetNumRegsSet() == 4, "should have four byte regs");
+static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
+              "kByteRegs only contains gp cache registers");
+
 }  // namespace liftoff

 static constexpr DoubleRegister kScratchDoubleReg = xmm7;
@@ -110,7 +116,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   if (offset_imm > kMaxInt) {
     // The immediate can not be encoded in the operand. Load it to a register
     // first.
-    Register dst = GetUnusedRegister(kGpReg, pinned).gp();
+    Register dst = pinned.set(GetUnusedRegister(kGpReg, pinned).gp());
     mov(dst, Immediate(offset_imm));
     if (offset_reg != no_reg) {
       emit_ptrsize_add(dst, dst, offset_reg);
@@ -119,7 +125,14 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   }
   switch (type.value()) {
     case StoreType::kI32Store8:
-      mov_b(dst_op, src.gp());
+      // Only the lower 4 registers can be addressed as 8-bit registers.
+      if (src.gp().is_byte_register()) {
+        mov_b(dst_op, src.gp());
+      } else {
+        Register byte_src = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+        mov(byte_src, src.gp());
+        mov_b(dst_op, byte_src);
+      }
       break;
     case StoreType::kI32Store16:
       mov_w(dst_op, src.gp());
......
@@ -244,8 +244,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
       dst = VarState(src.type());
     }
   }
-  last_spilled_gp_reg = source.last_spilled_gp_reg;
-  last_spilled_fp_reg = source.last_spilled_fp_reg;
+  last_spilled_regs = source.last_spilled_regs;
 }

 void LiftoffAssembler::CacheState::Steal(CacheState& source) {
@@ -359,10 +358,10 @@ void LiftoffAssembler::SpillLocals() {
   }
 }

-LiftoffRegister LiftoffAssembler::SpillOneRegister(RegClass rc,
+LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
                                                    LiftoffRegList pinned) {
   // Spill one cached value to free a register.
-  LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(rc, pinned);
+  LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
   int remaining_uses = cache_state_.get_use_count(spill_reg);
   DCHECK_LT(0, remaining_uses);
   for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
......
@@ -112,26 +112,32 @@ class LiftoffAssembler : public TurboAssembler {
     std::vector<VarState> stack_state;
     LiftoffRegList used_registers;
     uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
-    LiftoffRegister last_spilled_gp_reg = kGpCacheRegList.GetFirstRegSet();
-    LiftoffRegister last_spilled_fp_reg = kFpCacheRegList.GetFirstRegSet();
+    LiftoffRegList last_spilled_regs;
     // TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
     uint32_t stack_base = 0;

-    bool has_unused_register(RegClass rc,
-                             LiftoffRegList pinned_scope = {}) const {
+    bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
       DCHECK(rc == kGpReg || rc == kFpReg);
-      LiftoffRegList cache_regs = GetCacheRegList(rc);
-      LiftoffRegList available_regs =
-          cache_regs & ~used_registers & ~pinned_scope;
+      LiftoffRegList candidates = GetCacheRegList(rc);
+      return has_unused_register(candidates, pinned);
+    }
+
+    bool has_unused_register(LiftoffRegList candidates,
+                             LiftoffRegList pinned = {}) const {
+      LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
       return !available_regs.is_empty();
     }

     LiftoffRegister unused_register(RegClass rc,
-                                    LiftoffRegList pinned_scope = {}) const {
+                                    LiftoffRegList pinned = {}) const {
       DCHECK(rc == kGpReg || rc == kFpReg);
-      LiftoffRegList cache_regs = GetCacheRegList(rc);
-      LiftoffRegList available_regs =
-          cache_regs & ~used_registers & ~pinned_scope;
+      LiftoffRegList candidates = GetCacheRegList(rc);
+      return unused_register(candidates);
+    }
+
+    LiftoffRegister unused_register(LiftoffRegList candidates,
+                                    LiftoffRegList pinned = {}) const {
+      LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
       return available_regs.GetFirstRegSet();
     }
@@ -169,17 +175,19 @@ class LiftoffAssembler : public TurboAssembler {
     bool is_free(LiftoffRegister reg) const { return !is_used(reg); }

-    LiftoffRegister GetNextSpillReg(RegClass rc, LiftoffRegList pinned = {}) {
-      LiftoffRegister* last_spilled_p =
-          rc == kGpReg ? &last_spilled_gp_reg : &last_spilled_fp_reg;
-      LiftoffRegList cache_regs = GetCacheRegList(rc);
-      LiftoffRegList unpinned = cache_regs & ~pinned;
+    LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
+                                    LiftoffRegList pinned = {}) {
+      LiftoffRegList unpinned = candidates.MaskOut(pinned);
       DCHECK(!unpinned.is_empty());
-      LiftoffRegList remaining_regs =
-          unpinned.MaskOut((1u << (last_spilled_p->liftoff_code() + 1)) - 1);
-      if (remaining_regs.is_empty()) remaining_regs = unpinned;
-      LiftoffRegister reg = remaining_regs.GetFirstRegSet();
-      *last_spilled_p = reg;
+      // This method should only be called if none of the candidates is free.
+      DCHECK(unpinned.MaskOut(used_registers).is_empty());
+      LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
+      if (unspilled.is_empty()) {
+        unspilled = unpinned;
+        last_spilled_regs = {};
+      }
+      LiftoffRegister reg = unspilled.GetFirstRegSet();
+      last_spilled_regs.set(reg);
       return reg;
     }
@@ -221,10 +229,17 @@ class LiftoffAssembler : public TurboAssembler {
   }

   LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
-    if (cache_state_.has_unused_register(rc, pinned)) {
-      return cache_state_.unused_register(rc, pinned);
+    DCHECK(rc == kGpReg || rc == kFpReg);
+    LiftoffRegList candidates = GetCacheRegList(rc);
+    return GetUnusedRegister(candidates, pinned);
+  }
+
+  LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
+                                    LiftoffRegList pinned = {}) {
+    if (cache_state_.has_unused_register(candidates, pinned)) {
+      return cache_state_.unused_register(candidates, pinned);
     }
-    return SpillOneRegister(rc, pinned);
+    return SpillOneRegister(candidates, pinned);
   }

   void DropStackSlot(VarState* slot) {
@@ -336,7 +351,8 @@ class LiftoffAssembler : public TurboAssembler {
                 "Reconsider this inlining if ValueType gets bigger");
   CacheState cache_state_;

-  LiftoffRegister SpillOneRegister(RegClass rc, LiftoffRegList pinned);
+  LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
+                                   LiftoffRegList pinned);
 };

 std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
......
@@ -684,9 +684,6 @@ class LiftoffCompiler {
                 const Value& index_val, const Value& value_val) {
     ValueType value_type = type.value_type();
     if (value_type != kWasmI32) return unsupported(decoder, "non-i32 store");
-    if (!env_->use_trap_handler) {
-      return unsupported(decoder, "non-traphandler");
-    }
     RegClass rc = reg_class_for(value_type);
     LiftoffRegList pinned;
     LiftoffRegister value = pinned.set(__ PopToRegister(rc));
@@ -699,7 +696,6 @@ class LiftoffCompiler {
     Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
     __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
     __ Store(addr, index, operand.offset, value, type, pinned);
-    __ PushRegister(value_type, value);
   }

   void CurrentMemoryPages(Decoder* decoder, Value* result) {
......
@@ -146,16 +146,25 @@ class LiftoffRegList {
     return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
   }

-  bool is_empty() const { return regs_ == 0; }
+  constexpr bool is_empty() const { return regs_ == 0; }

-  unsigned GetNumRegsSet() const { return base::bits::CountPopulation(regs_); }
+  constexpr unsigned GetNumRegsSet() const {
+    return base::bits::CountPopulation(regs_);
+  }
+
+  constexpr LiftoffRegList operator&(const LiftoffRegList other) const {
+    return LiftoffRegList(regs_ & other.regs_);
+  }

-  LiftoffRegList operator&(LiftoffRegList other) const {
-    return FromBits(regs_ & other.regs_);
+  constexpr LiftoffRegList operator~() const {
+    return LiftoffRegList(~regs_ & (kGpMask | kFpMask));
   }

-  LiftoffRegList operator~() const {
-    return FromBits(~regs_ & (kGpMask | kFpMask));
+  constexpr bool operator==(const LiftoffRegList other) const {
+    return regs_ == other.regs_;
   }
+
+  constexpr bool operator!=(const LiftoffRegList other) const {
+    return regs_ != other.regs_;
+  }

   LiftoffRegister GetFirstRegSet() const {
@@ -171,10 +180,10 @@ class LiftoffRegList {
     return LiftoffRegister::from_liftoff_code(last_code);
   }

-  LiftoffRegList MaskOut(storage_t mask) const {
+  LiftoffRegList MaskOut(const LiftoffRegList mask) const {
     // Masking out is guaranteed to return a correct reg list, hence no checks
     // needed.
-    return FromBits(regs_ & ~mask);
+    return FromBits(regs_ & ~mask.regs_);
   }

   static LiftoffRegList FromBits(storage_t bits) {
......
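A note on why several LiftoffRegList methods become constexpr in this CL: the new liftoff::kByteRegs constant is checked at compile time with static_assert, which only works if GetNumRegsSet, operator& and operator== can be evaluated in constant expressions. A minimal sketch of that idea (a hypothetical RegList class, not V8's actual LiftoffRegList; requires C++14 for the loop in a constexpr function):

#include <cstdint>

class RegList {
 public:
  constexpr RegList() = default;
  static constexpr RegList FromBits(uint32_t bits) { return RegList(bits); }

  constexpr RegList operator&(const RegList other) const {
    return RegList(bits_ & other.bits_);
  }
  constexpr bool operator==(const RegList other) const {
    return bits_ == other.bits_;
  }
  // Constexpr popcount (std::popcount would need C++20).
  constexpr unsigned Count() const {
    unsigned n = 0;
    for (uint32_t b = bits_; b != 0; b &= b - 1) ++n;
    return n;
  }

 private:
  constexpr explicit RegList(uint32_t bits) : bits_(bits) {}
  uint32_t bits_ = 0;
};

// eax..ebx as bits 0..3, plus esi and edi as bits 6..7 (esp/ebp excluded).
constexpr RegList kGpCacheRegs = RegList::FromBits(0b11001111);
constexpr RegList kByteRegs = RegList::FromBits(0b00001111);

// These only compile because Count, operator& and operator== are constexpr,
// mirroring the static_asserts added next to liftoff::kByteRegs above.
static_assert(kByteRegs.Count() == 4, "should have four byte regs");
static_assert((kByteRegs & kGpCacheRegs) == kByteRegs,
              "kByteRegs only contains gp cache registers");

int main() { return 0; }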