Commit 44c5262c authored by Ng Zhi An, committed by Commit Bot

[liftoff] Removes more uses of index

Convert more uses of index into offsets. Spills are now recorded in
terms of byte offsets rather than slot indices, so RecordUsedSpillSlot
becomes RecordUsedSpillOffset, and GetTotalFrameSlotCount derives the
number of slots used from the number of bytes spilled.
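
A minimal sketch of the new bookkeeping, using the names from the diff
below (simplified to free-standing code; in reality these live on
LiftoffAssembler, and the 8-byte slot size is only an assumption for this
sketch):

  #include <cstdint>

  constexpr uint32_t kStackSlotSize = 8;  // assumed slot size for this sketch
  uint32_t num_used_spill_bytes_ = 0;     // replaces num_used_spill_slots_

  // Record the byte offset of each spill so the frame is allocated big enough.
  void RecordUsedSpillOffset(uint32_t offset) {
    if (offset >= num_used_spill_bytes_) num_used_spill_bytes_ = offset;
  }

  // Frame sizes are still reported in slots, so round the byte count up.
  uint32_t GetTotalFrameSlotCount(uint32_t num_locals) {
    return num_locals +
           (num_used_spill_bytes_ + kStackSlotSize - 1) / kStackSlotSize;
  }

  // Example: after spills at offsets 8 and 16, num_used_spill_bytes_ is 16,
  // and GetTotalFrameSlotCount adds ceil(16 / 8) == 2 spill slots.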

Bug: v8:9909
Change-Id: I26484c1b040cd4711cc7998cb29d68955bf8ddb6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1934528
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65284}
parent 17613fab
@@ -562,7 +562,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
 void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
                              ValueType type) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
@@ -584,7 +584,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
 }
 
 void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   UseScratchRegisterScope temps(this);
   Register src = no_reg;
@@ -644,7 +644,7 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
 void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
   DCHECK_LT(0, count);
   uint32_t last_stack_slot = index + count - 1;
-  RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
+  RecordUsedSpillOffset(GetStackOffsetFromIndex(last_stack_slot));
   // We need a zero reg. Always use r0 for that, and push it before to restore
   // its value afterwards.
......
@@ -355,13 +355,13 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
 void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
                              ValueType type) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   Str(liftoff::GetRegFromType(reg, type), dst);
 }
 
 void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   UseScratchRegisterScope temps(this);
   CPURegister src = CPURegister::no_reg();
@@ -402,7 +402,7 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
 void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
   DCHECK_LT(0, count);
   uint32_t last_stack_slot = index + count - 1;
-  RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
+  RecordUsedSpillOffset(GetStackOffsetFromIndex(last_stack_slot));
   int max_stp_offset =
       -liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index + count - 1));
......
@@ -436,7 +436,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
 void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
                              ValueType type) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
@@ -458,7 +458,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
 }
 
 void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
   switch (value.type()) {
     case kWasmI32:
@@ -507,7 +507,7 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
 void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
   DCHECK_LT(0, count);
   uint32_t last_stack_slot = index + count - 1;
-  RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
+  RecordUsedSpillOffset(GetStackOffsetFromIndex(last_stack_slot));
   if (count <= 2) {
     // Special straight-line code for up to two slots (6-9 bytes per word:
......
@@ -79,12 +79,7 @@ class StackTransferRecipe {
     DCHECK(load_dst_regs_.is_empty());
   }
 
-  void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
-                         uint32_t dst_index,
-                         const LiftoffAssembler::CacheState& src_state,
-                         uint32_t src_index) {
-    const VarState& dst = dst_state.stack_state[dst_index];
-    const VarState& src = src_state.stack_state[src_index];
+  void TransferStackSlot(const VarState& dst, const VarState& src) {
     DCHECK_EQ(dst.type(), src.type());
     switch (dst.loc()) {
       case VarState::kStack:
@@ -524,7 +519,7 @@ void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
   // allocations.
   StackTransferRecipe transfers(this);
   for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) {
-    transfers.TransferStackSlot(target, i, source, i);
+    transfers.TransferStackSlot(target.stack_state[i], source.stack_state[i]);
   }
 }
@@ -543,33 +538,33 @@ void LiftoffAssembler::MergeStackWith(const CacheState& target,
   uint32_t target_stack_base = target_stack_height - arity;
   StackTransferRecipe transfers(this);
   for (uint32_t i = 0; i < target_stack_base; ++i) {
-    transfers.TransferStackSlot(target, i, cache_state_, i);
+    transfers.TransferStackSlot(target.stack_state[i],
+                                cache_state_.stack_state[i]);
   }
   for (uint32_t i = 0; i < arity; ++i) {
-    transfers.TransferStackSlot(target, target_stack_base + i, cache_state_,
-                                stack_base + i);
+    transfers.TransferStackSlot(target.stack_state[target_stack_base + i],
+                                cache_state_.stack_state[stack_base + i]);
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t index) {
-  auto& slot = cache_state_.stack_state[index];
-  switch (slot.loc()) {
+void LiftoffAssembler::Spill(VarState* slot) {
+  switch (slot->loc()) {
     case VarState::kStack:
       return;
     case VarState::kRegister:
-      Spill(slot.offset(), slot.reg(), slot.type());
-      cache_state_.dec_used(slot.reg());
+      Spill(slot->offset(), slot->reg(), slot->type());
+      cache_state_.dec_used(slot->reg());
       break;
     case VarState::kIntConst:
-      Spill(slot.offset(), slot.constant());
+      Spill(slot->offset(), slot->constant());
       break;
   }
-  slot.MakeStack();
+  slot->MakeStack();
 }
 
 void LiftoffAssembler::SpillLocals() {
   for (uint32_t i = 0; i < num_locals_; ++i) {
-    Spill(i);
+    Spill(&cache_state_.stack_state[i]);
   }
 }
......
@@ -353,16 +353,14 @@ class LiftoffAssembler : public TurboAssembler {
   void MergeFullStackWith(const CacheState& target, const CacheState& source);
   void MergeStackWith(const CacheState& target, uint32_t arity);
 
-  void Spill(uint32_t index);
+  void Spill(VarState* slot);
   void SpillLocals();
   void SpillAllRegisters();
 
   // Call this method whenever spilling something, such that the number of used
   // spill slot can be tracked and the stack frame will be allocated big enough.
-  void RecordUsedSpillSlot(uint32_t offset) {
-    // TODO(zhin): Temporary for migration from index to offset.
-    uint32_t index = offset / kStackSlotSize;
-    if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
+  void RecordUsedSpillOffset(uint32_t offset) {
+    if (offset >= num_used_spill_bytes_) num_used_spill_bytes_ = offset;
   }
 
   // Load parameters into the right registers / stack slots for the call.
@@ -680,7 +678,9 @@ class LiftoffAssembler : public TurboAssembler {
   void set_num_locals(uint32_t num_locals);
 
   uint32_t GetTotalFrameSlotCount() const {
-    return num_locals_ + num_used_spill_slots_;
+    // TODO(zhin): Temporary for migration from index to offset.
+    return num_locals_ +
+           ((num_used_spill_bytes_ + kStackSlotSize - 1) / kStackSlotSize);
   }
 
   ValueType local_type(uint32_t index) {
@@ -721,7 +721,7 @@ class LiftoffAssembler : public TurboAssembler {
   static_assert(sizeof(ValueType) == 1,
                 "Reconsider this inlining if ValueType gets bigger");
   CacheState cache_state_;
-  uint32_t num_used_spill_slots_ = 0;
+  uint32_t num_used_spill_bytes_ = 0;
   LiftoffBailoutReason bailout_reason_ = kSuccess;
   const char* bailout_detail_ = nullptr;
......
@@ -540,7 +540,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
 void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
                              ValueType type) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
@@ -562,7 +562,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
 }
 
 void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (value.type()) {
     case kWasmI32: {
@@ -620,7 +620,7 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
 void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
   DCHECK_LT(0, count);
   uint32_t last_stack_slot = index + count - 1;
-  RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
+  RecordUsedSpillOffset(GetStackOffsetFromIndex(last_stack_slot));
   if (count <= 12) {
     // Special straight-line code for up to 12 slots. Generates one
......
@@ -457,7 +457,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
 void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
                              ValueType type) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
@@ -478,7 +478,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
 }
 
 void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (value.type()) {
     case kWasmI32: {
@@ -528,7 +528,7 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
 void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
   DCHECK_LT(0, count);
   uint32_t last_stack_slot = index + count - 1;
-  RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
+  RecordUsedSpillOffset(GetStackOffsetFromIndex(last_stack_slot));
   if (count <= 12) {
     // Special straight-line code for up to 12 slots. Generates one
......
@@ -150,7 +150,7 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
 void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
   DCHECK_LT(0, count);
   uint32_t last_stack_slot = index + count - 1;
-  RecordUsedSpillSlot(last_stack_slot);
+  RecordUsedSpillOffset(last_stack_slot);
   // We need a zero reg. Always use r0 for that, and push it before to restore
   // its value afterwards.
......
@@ -149,7 +149,7 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
 void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
   DCHECK_LT(0, count);
   uint32_t last_stack_slot = index + count - 1;
-  RecordUsedSpillSlot(last_stack_slot);
+  RecordUsedSpillOffset(last_stack_slot);
   // We need a zero reg. Always use r0 for that, and push it before to restore
   // its value afterwards.
......
@@ -374,7 +374,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
 void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
                              ValueType type) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
@@ -395,7 +395,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
 }
 
 void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
-  RecordUsedSpillSlot(offset);
+  RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
   switch (value.type()) {
     case kWasmI32:
@@ -449,7 +449,7 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
 void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
   DCHECK_LT(0, count);
   uint32_t last_stack_slot = index + count - 1;
-  RecordUsedSpillSlot(
+  RecordUsedSpillOffset(
       LiftoffAssembler::GetStackOffsetFromIndex(last_stack_slot));
   if (count <= 3) {
......