Commit 2c2a6bb0 authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Use SmallVector for storing cache state

This replaces another use of std::vector. Stack states (consisting of
locals plus the operand stack) are also typically small, so we optimize
for those cases.
Using SmallVector as part of CacheState requires the definition of move
constructors and copy constructors, plus a few other methods.
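
For illustration: the defaulted move operations on CacheState perform
member-wise moves, so every member type must itself be movable. A minimal
standalone sketch, with Vec and State as hypothetical stand-ins for
base::SmallVector and LiftoffAssembler::CacheState:

#include <utility>

template <typename T>
struct Vec {  // stand-in for base::SmallVector<T, 8>
  Vec() = default;
  Vec(Vec&&) noexcept {}                            // move constructor
  Vec& operator=(Vec&&) noexcept { return *this; }  // move assignment
};

struct State {  // stand-in for LiftoffAssembler::CacheState
  Vec<int> stack_state;
  State() = default;
  State(State&&) = default;             // member-wise: needs Vec(Vec&&)
  State& operator=(State&&) = default;  // member-wise: needs Vec move assignment
};

int main() {
  State a;
  State b = std::move(a);  // compiles only because Vec is movable
  b = State{};
  return 0;
}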

R=tebbi@chromium.org

Bug: v8:8423
Change-Id: I5d39c1ebc4d6d65e4849dd06c556114cd2cd36ff
Reviewed-on: https://chromium-review.googlesource.com/c/1380053
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58429}
parent c6ffff9d
@@ -24,21 +24,66 @@ class SmallVector {
 public:
  SmallVector() = default;
  SmallVector(const SmallVector& other) V8_NOEXCEPT { *this = other; }
  SmallVector(SmallVector&& other) V8_NOEXCEPT { *this = std::move(other); }
  ~SmallVector() {
    if (is_big()) free(begin_);
  }

  SmallVector& operator=(const SmallVector& other) V8_NOEXCEPT {
    if (this == &other) return *this;
    size_t other_size = other.size();
    if (capacity() < other_size) {
      // Create large-enough heap-allocated storage.
      if (is_big()) free(begin_);
      begin_ = reinterpret_cast<T*>(malloc(sizeof(T) * other_size));
      end_of_storage_ = begin_ + other_size;
    }
    memcpy(begin_, other.begin_, sizeof(T) * other_size);
    end_ = begin_ + other_size;
    return *this;
  }

  SmallVector& operator=(SmallVector&& other) V8_NOEXCEPT {
    if (this == &other) return *this;
    if (other.is_big()) {
      if (is_big()) free(begin_);
      begin_ = other.begin_;
      end_ = other.end_;
      end_of_storage_ = other.end_of_storage_;
      other.reset();
    } else {
      DCHECK_GE(capacity(), other.size());  // Sanity check.
      size_t other_size = other.size();
      memcpy(begin_, other.begin_, sizeof(T) * other_size);
      end_ = begin_ + other_size;
    }
    return *this;
  }

  T* data() const { return begin_; }
  T* begin() const { return begin_; }
  T* end() const { return end_; }

  size_t size() const { return end_ - begin_; }
  bool empty() const { return end_ == begin_; }
  size_t capacity() const { return end_of_storage_ - begin_; }

  T& back() {
    DCHECK_NE(0, size());
    return end_[-1];
  }

  T& operator[](size_t index) {
    DCHECK_GT(size(), index);
    return begin_[index];
  }

  const T& operator[](size_t index) const {
    DCHECK_GT(size(), index);
    return begin_[index];
  }

  template <typename... Args>
  void emplace_back(Args&&... args) {
    if (V8_UNLIKELY(end_ == end_of_storage_)) Grow();
@@ -46,13 +91,28 @@ class SmallVector {
    ++end_;
  }

-  void pop(size_t count) {
+  void pop_back(size_t count = 1) {
    DCHECK_GE(size(), count);
    end_ -= count;
  }

  void resize_no_init(size_t new_size) {
    // Resizing without initialization is safe if T is trivially copyable.
    ASSERT_TRIVIALLY_COPYABLE(T);
    if (new_size > capacity()) Grow(new_size);
    end_ = begin_ + new_size;
  }

  // Clear without freeing any storage.
  void clear() { end_ = begin_; }

  // Clear and go back to inline storage.
  void reset() {
    begin_ = inline_storage_begin();
    end_ = begin_;
    end_of_storage_ = begin_ + kInlineSize;
  }
 private:
  T* begin_ = inline_storage_begin();
  T* end_ = begin_;
@@ -60,9 +120,10 @@ class SmallVector {
  typename std::aligned_storage<sizeof(T) * kInlineSize, alignof(T)>::type
      inline_storage_;
-  void Grow() {
+  void Grow(size_t min_capacity = 0) {
    size_t in_use = end_ - begin_;
-    size_t new_capacity = base::bits::RoundUpToPowerOfTwo(2 * in_use);
+    size_t new_capacity =
+        base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
    T* new_storage = reinterpret_cast<T*>(malloc(sizeof(T) * new_capacity));
    memcpy(new_storage, begin_, sizeof(T) * in_use);
    if (is_big()) free(begin_);
@@ -77,8 +138,6 @@ class SmallVector {
  const T* inline_storage_begin() const {
    return reinterpret_cast<const T*>(&inline_storage_);
  }
-
-  DISALLOW_COPY_AND_ASSIGN(SmallVector);
};
} // namespace base
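A note on the policy in Grow() above: growth is now based on the current
capacity rather than the number of elements in use, and the new min_capacity
parameter lets resize_no_init() jump directly to a sufficient power-of-two
capacity instead of doubling repeatedly. A standalone sketch of the
arithmetic, where RoundUpToPowerOfTwoLocal is a stand-in for
base::bits::RoundUpToPowerOfTwo:

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t RoundUpToPowerOfTwoLocal(size_t value) {
  size_t power = 1;
  while (power < value) power <<= 1;
  return power;
}

int main() {
  size_t capacity = 8;  // inline capacity, e.g. SmallVector<VarState, 8>
  // emplace_back() past the inline storage calls Grow() with min_capacity=0:
  capacity = RoundUpToPowerOfTwoLocal(std::max<size_t>(0, 2 * capacity));
  std::printf("after Grow():    %zu\n", capacity);  // 16
  // resize_no_init(100) calls Grow(100), jumping straight to 128:
  capacity = RoundUpToPowerOfTwoLocal(std::max<size_t>(100, 2 * capacity));
  std::printf("after Grow(100): %zu\n", capacity);  // 128
  return 0;
}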
@@ -128,7 +128,7 @@ class StackTransferRecipe {
        ++next_spill_slot;
        executed_moves = 1;
      }
-      register_moves_.pop(executed_moves);
+      register_moves_.pop_back(executed_moves);
    }
  }
@@ -276,7 +276,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
  uint32_t stack_base = stack_depth + num_locals;
  DCHECK(stack_state.empty());
  DCHECK_GE(source.stack_height(), stack_base);
-  stack_state.resize(stack_base + arity, VarState(kWasmStmt));
+  stack_state.resize_no_init(stack_base + arity);

  // |------locals------|---(in between)----|--(discarded)--|----merge----|
  // <-- num_locals -->  <-- stack_depth -->^stack_base      <-- arity -->
@@ -565,8 +565,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
  stack_transfers.Execute();

  // Pop parameters from the value stack.
-  auto stack_end = cache_state_.stack_state.end();
-  cache_state_.stack_state.erase(stack_end - num_params, stack_end);
+  cache_state_.stack_state.pop_back(num_params);

  // Reset register use counters.
  cache_state_.reset_used_registers();
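Per the header comment above, resize_no_init() is only safe for a trivially
copyable T; the same property justifies the memcpy-based copy and move
operations in SmallVector, and VarState must satisfy it for the InitMerge()
change to be valid. A standalone sketch with a hypothetical VarStateLike:

#include <cstring>
#include <type_traits>

struct VarStateLike {  // stand-in for LiftoffAssembler's VarState
  int kind;
  int reg;
};
static_assert(std::is_trivially_copyable<VarStateLike>::value,
              "memcpy-based copies and resize_no_init rely on this");

int main() {
  VarStateLike src[2] = {{1, 2}, {3, 4}};
  VarStateLike dst[2];
  std::memcpy(dst, src, sizeof(src));  // legal: trivially copyable
  return dst[1].reg == 4 ? 0 : 1;
}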
@@ -117,8 +117,7 @@ class LiftoffAssembler : public TurboAssembler {
    CacheState(CacheState&&) = default;
    CacheState& operator=(CacheState&&) = default;

-    // TODO(clemensh): Improve memory management here; avoid std::vector.
-    std::vector<VarState> stack_state;
+    base::SmallVector<VarState, 8> stack_state;
    LiftoffRegList used_registers;
    uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
    LiftoffRegList last_spilled_regs;