Commit 96d869fd authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Remove obsolete bailout in memory store

This seems to be a merge error. The if moved down by a few lines and
now actually implements the bounds check instead of bailing out.
Taking it out revealed a bug where we were trying to access the lowest
8 bits on a register where this is not allowed on ia32, thus a few
more changes were needed in this CL.

R=titzer@chromium.org

Bug: v8:6600
Change-Id: Ib1ef131a12df050302ae50115493a1fcd8323fe5
Reviewed-on: https://chromium-review.googlesource.com/852734
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50422}
parent f13540e9
...@@ -27,6 +27,12 @@ inline Operand GetStackSlot(uint32_t index) { ...@@ -27,6 +27,12 @@ inline Operand GetStackSlot(uint32_t index) {
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr. // TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
inline Operand GetContextOperand() { return Operand(ebp, -16); } inline Operand GetContextOperand() { return Operand(ebp, -16); }
static constexpr LiftoffRegList kByteRegs =
LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx, ebx>()>();
static_assert(kByteRegs.GetNumRegsSet() == 4, "should have four byte regs");
static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
"kByteRegs only contains gp cache registers");
} // namespace liftoff } // namespace liftoff
static constexpr DoubleRegister kScratchDoubleReg = xmm7; static constexpr DoubleRegister kScratchDoubleReg = xmm7;
...@@ -110,7 +116,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, ...@@ -110,7 +116,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
if (offset_imm > kMaxInt) { if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register // The immediate can not be encoded in the operand. Load it to a register
// first. // first.
Register dst = GetUnusedRegister(kGpReg, pinned).gp(); Register dst = pinned.set(GetUnusedRegister(kGpReg, pinned).gp());
mov(dst, Immediate(offset_imm)); mov(dst, Immediate(offset_imm));
if (offset_reg != no_reg) { if (offset_reg != no_reg) {
emit_ptrsize_add(dst, dst, offset_reg); emit_ptrsize_add(dst, dst, offset_reg);
...@@ -119,7 +125,14 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, ...@@ -119,7 +125,14 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
} }
switch (type.value()) { switch (type.value()) {
case StoreType::kI32Store8: case StoreType::kI32Store8:
mov_b(dst_op, src.gp()); // Only the lower 4 registers can be addressed as 8-bit registers.
if (src.gp().is_byte_register()) {
mov_b(dst_op, src.gp());
} else {
Register byte_src = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
mov(byte_src, src.gp());
mov_b(dst_op, byte_src);
}
break; break;
case StoreType::kI32Store16: case StoreType::kI32Store16:
mov_w(dst_op, src.gp()); mov_w(dst_op, src.gp());
......
...@@ -244,8 +244,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source, ...@@ -244,8 +244,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
dst = VarState(src.type()); dst = VarState(src.type());
} }
} }
last_spilled_gp_reg = source.last_spilled_gp_reg; last_spilled_regs = source.last_spilled_regs;
last_spilled_fp_reg = source.last_spilled_fp_reg;
} }
void LiftoffAssembler::CacheState::Steal(CacheState& source) { void LiftoffAssembler::CacheState::Steal(CacheState& source) {
...@@ -359,10 +358,10 @@ void LiftoffAssembler::SpillLocals() { ...@@ -359,10 +358,10 @@ void LiftoffAssembler::SpillLocals() {
} }
} }
LiftoffRegister LiftoffAssembler::SpillOneRegister(RegClass rc, LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned) { LiftoffRegList pinned) {
// Spill one cached value to free a register. // Spill one cached value to free a register.
LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(rc, pinned); LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
int remaining_uses = cache_state_.get_use_count(spill_reg); int remaining_uses = cache_state_.get_use_count(spill_reg);
DCHECK_LT(0, remaining_uses); DCHECK_LT(0, remaining_uses);
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) { for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
......
...@@ -112,26 +112,32 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -112,26 +112,32 @@ class LiftoffAssembler : public TurboAssembler {
std::vector<VarState> stack_state; std::vector<VarState> stack_state;
LiftoffRegList used_registers; LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0}; uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegister last_spilled_gp_reg = kGpCacheRegList.GetFirstRegSet(); LiftoffRegList last_spilled_regs;
LiftoffRegister last_spilled_fp_reg = kFpCacheRegList.GetFirstRegSet();
// TODO(clemensh): Remove stack_base; use ControlBase::stack_depth. // TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
uint32_t stack_base = 0; uint32_t stack_base = 0;
bool has_unused_register(RegClass rc, bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
LiftoffRegList pinned_scope = {}) const {
DCHECK(rc == kGpReg || rc == kFpReg); DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList cache_regs = GetCacheRegList(rc); LiftoffRegList candidates = GetCacheRegList(rc);
LiftoffRegList available_regs = return has_unused_register(candidates, pinned);
cache_regs & ~used_registers & ~pinned_scope; }
bool has_unused_register(LiftoffRegList candidates,
LiftoffRegList pinned = {}) const {
LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
return !available_regs.is_empty(); return !available_regs.is_empty();
} }
LiftoffRegister unused_register(RegClass rc, LiftoffRegister unused_register(RegClass rc,
LiftoffRegList pinned_scope = {}) const { LiftoffRegList pinned = {}) const {
DCHECK(rc == kGpReg || rc == kFpReg); DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList cache_regs = GetCacheRegList(rc); LiftoffRegList candidates = GetCacheRegList(rc);
LiftoffRegList available_regs = return unused_register(candidates);
cache_regs & ~used_registers & ~pinned_scope; }
LiftoffRegister unused_register(LiftoffRegList candidates,
LiftoffRegList pinned = {}) const {
LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
return available_regs.GetFirstRegSet(); return available_regs.GetFirstRegSet();
} }
...@@ -169,17 +175,19 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -169,17 +175,19 @@ class LiftoffAssembler : public TurboAssembler {
bool is_free(LiftoffRegister reg) const { return !is_used(reg); } bool is_free(LiftoffRegister reg) const { return !is_used(reg); }
LiftoffRegister GetNextSpillReg(RegClass rc, LiftoffRegList pinned = {}) { LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
LiftoffRegister* last_spilled_p = LiftoffRegList pinned = {}) {
rc == kGpReg ? &last_spilled_gp_reg : &last_spilled_fp_reg; LiftoffRegList unpinned = candidates.MaskOut(pinned);
LiftoffRegList cache_regs = GetCacheRegList(rc);
LiftoffRegList unpinned = cache_regs & ~pinned;
DCHECK(!unpinned.is_empty()); DCHECK(!unpinned.is_empty());
LiftoffRegList remaining_regs = // This method should only be called if none of the candidates is free.
unpinned.MaskOut((1u << (last_spilled_p->liftoff_code() + 1)) - 1); DCHECK(unpinned.MaskOut(used_registers).is_empty());
if (remaining_regs.is_empty()) remaining_regs = unpinned; LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
LiftoffRegister reg = remaining_regs.GetFirstRegSet(); if (unspilled.is_empty()) {
*last_spilled_p = reg; unspilled = unpinned;
last_spilled_regs = {};
}
LiftoffRegister reg = unspilled.GetFirstRegSet();
last_spilled_regs.set(reg);
return reg; return reg;
} }
...@@ -221,10 +229,17 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -221,10 +229,17 @@ class LiftoffAssembler : public TurboAssembler {
} }
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) { LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
if (cache_state_.has_unused_register(rc, pinned)) { DCHECK(rc == kGpReg || rc == kFpReg);
return cache_state_.unused_register(rc, pinned); LiftoffRegList candidates = GetCacheRegList(rc);
return GetUnusedRegister(candidates, pinned);
}
LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
LiftoffRegList pinned = {}) {
if (cache_state_.has_unused_register(candidates, pinned)) {
return cache_state_.unused_register(candidates, pinned);
} }
return SpillOneRegister(rc, pinned); return SpillOneRegister(candidates, pinned);
} }
void DropStackSlot(VarState* slot) { void DropStackSlot(VarState* slot) {
...@@ -336,7 +351,8 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -336,7 +351,8 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger"); "Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_; CacheState cache_state_;
LiftoffRegister SpillOneRegister(RegClass rc, LiftoffRegList pinned); LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
}; };
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState); std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
......
...@@ -684,9 +684,6 @@ class LiftoffCompiler { ...@@ -684,9 +684,6 @@ class LiftoffCompiler {
const Value& index_val, const Value& value_val) { const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type(); ValueType value_type = type.value_type();
if (value_type != kWasmI32) return unsupported(decoder, "non-i32 store"); if (value_type != kWasmI32) return unsupported(decoder, "non-i32 store");
if (!env_->use_trap_handler) {
return unsupported(decoder, "non-traphandler");
}
RegClass rc = reg_class_for(value_type); RegClass rc = reg_class_for(value_type);
LiftoffRegList pinned; LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(rc)); LiftoffRegister value = pinned.set(__ PopToRegister(rc));
...@@ -699,7 +696,6 @@ class LiftoffCompiler { ...@@ -699,7 +696,6 @@ class LiftoffCompiler {
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize); __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
__ Store(addr, index, operand.offset, value, type, pinned); __ Store(addr, index, operand.offset, value, type, pinned);
__ PushRegister(value_type, value);
} }
void CurrentMemoryPages(Decoder* decoder, Value* result) { void CurrentMemoryPages(Decoder* decoder, Value* result) {
......
...@@ -146,16 +146,25 @@ class LiftoffRegList { ...@@ -146,16 +146,25 @@ class LiftoffRegList {
return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0; return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
} }
bool is_empty() const { return regs_ == 0; } constexpr bool is_empty() const { return regs_ == 0; }
unsigned GetNumRegsSet() const { return base::bits::CountPopulation(regs_); } constexpr unsigned GetNumRegsSet() const {
return base::bits::CountPopulation(regs_);
}
constexpr LiftoffRegList operator&(const LiftoffRegList other) const {
return LiftoffRegList(regs_ & other.regs_);
}
LiftoffRegList operator&(LiftoffRegList other) const { constexpr LiftoffRegList operator~() const {
return FromBits(regs_ & other.regs_); return LiftoffRegList(~regs_ & (kGpMask | kFpMask));
} }
LiftoffRegList operator~() const { constexpr bool operator==(const LiftoffRegList other) const {
return FromBits(~regs_ & (kGpMask | kFpMask)); return regs_ == other.regs_;
}
constexpr bool operator!=(const LiftoffRegList other) const {
return regs_ != other.regs_;
} }
LiftoffRegister GetFirstRegSet() const { LiftoffRegister GetFirstRegSet() const {
...@@ -171,10 +180,10 @@ class LiftoffRegList { ...@@ -171,10 +180,10 @@ class LiftoffRegList {
return LiftoffRegister::from_liftoff_code(last_code); return LiftoffRegister::from_liftoff_code(last_code);
} }
LiftoffRegList MaskOut(storage_t mask) const { LiftoffRegList MaskOut(const LiftoffRegList mask) const {
// Masking out is guaranteed to return a correct reg list, hence no checks // Masking out is guaranteed to return a correct reg list, hence no checks
// needed. // needed.
return FromBits(regs_ & ~mask); return FromBits(regs_ & ~mask.regs_);
} }
static LiftoffRegList FromBits(storage_t bits) { static LiftoffRegList FromBits(storage_t bits) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment