Commit d520e5f5 authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Introduce SmallVector and use in some places

This CL introduces our own minimal SmallVector implementation and uses
it in several places (more might follow).
I measured that in the majority of cases, these vectors are quite small
(<= 8 elements), so we will avoid any heap allocation in those cases.

R=mstarzinger@chromium.org
CC=titzer@chromium.org

Bug: v8:8423
Change-Id: I93a26b3303a10fe1dc93186430e20333ea4970a8
Reviewed-on: https://chromium-review.googlesource.com/c/1378178
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58323}
parent 435af6a5
@@ -3236,6 +3236,7 @@ v8_component("v8_libbase") {
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
"src/base/safe_math_impl.h",
"src/base/small-vector.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/template-utils.h",
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASE_SMALL_VECTOR_H_
#define V8_BASE_SMALL_VECTOR_H_

#include <type_traits>

#include "src/base/bits.h"
#include "src/base/macros.h"

namespace v8 {
namespace base {

// Minimal SmallVector implementation. Uses inline storage first, switches to
// malloc when it overflows.
template <typename T, size_t kInlineSize>
class SmallVector {
  // Currently only supports trivially copyable and trivially destructible
  // data types, as it uses memcpy to copy elements and never calls
  // destructors.
ASSERT_TRIVIALLY_COPYABLE(T);
  STATIC_ASSERT(std::is_trivially_destructible<T>::value);

 public:
SmallVector() = default;
~SmallVector() {
if (is_big()) free(begin_);
}
T* data() const { return begin_; }
T* begin() const { return begin_; }
T* end() const { return end_; }
size_t size() const { return end_ - begin_; }
bool empty() const { return end_ == begin_; }
T& back() {
DCHECK_NE(0, size());
return end_[-1];
}
template <typename... Args>
void emplace_back(Args&&... args) {
if (V8_UNLIKELY(end_ == end_of_storage_)) Grow();
new (end_) T(std::forward<Args>(args)...);
++end_;
}
void pop(size_t count) {
DCHECK_GE(size(), count);
end_ -= count;
}
  void clear() { end_ = begin_; }

 private:
T* begin_ = inline_storage_begin();
T* end_ = begin_;
T* end_of_storage_ = begin_ + kInlineSize;
typename std::aligned_storage<sizeof(T) * kInlineSize, alignof(T)>::type
inline_storage_;

  void Grow() {
    size_t in_use = end_ - begin_;
    // Grow by at least a factor of two, rounded up to a power of two, so
    // repeated appends stay amortized O(1).
    size_t new_capacity = base::bits::RoundUpToPowerOfTwo(2 * in_use);
    T* new_storage = reinterpret_cast<T*>(malloc(sizeof(T) * new_capacity));
    // Copying with memcpy is safe because T is trivially copyable (see the
    // assertions above).
    memcpy(new_storage, begin_, sizeof(T) * in_use);
    // Free the old storage only if it was heap-allocated; the inline buffer
    // lives inside the object itself and must never be freed.
    if (is_big()) free(begin_);
    begin_ = new_storage;
    end_ = new_storage + in_use;
    end_of_storage_ = new_storage + new_capacity;
  }
bool is_big() const { return begin_ != inline_storage_begin(); }
T* inline_storage_begin() { return reinterpret_cast<T*>(&inline_storage_); }
const T* inline_storage_begin() const {
return reinterpret_cast<const T*>(&inline_storage_);
}
DISALLOW_COPY_AND_ASSIGN(SmallVector);
};

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_SMALL_VECTOR_H_
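Editorial note: a minimal usage sketch of the class above, not part of the CL (the function name is hypothetical, and the include assumes a build inside the V8 tree). With an inline capacity of 8, no heap allocation happens until a ninth element is appended:

#include "src/base/small-vector.h"

void SmallVectorUsageSketch() {
  v8::base::SmallVector<int, 8> vec;
  for (int i = 0; i < 8; ++i) {
    vec.emplace_back(i);  // Still fits in the inline storage; no malloc.
  }
  vec.emplace_back(8);    // Overflows inline capacity; Grow() mallocs 16 slots.
  int last = vec.back();  // last == 8.
  vec.pop(4);             // Drops the last four elements; size() is now 5.
  vec.clear();            // size() == 0; heap storage is freed only on destruction.
  static_cast<void>(last);  // Silence the unused-variable warning.
}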
@@ -846,7 +846,9 @@ inline void Emit64BitShiftOperation(
// Temporary registers cannot overlap with {dst}.
pinned.set(dst);
-  std::vector<LiftoffAssembler::ParallelRegisterMoveTuple> reg_moves;
+  constexpr size_t kMaxRegMoves = 3;
+  base::SmallVector<LiftoffAssembler::ParallelRegisterMoveTuple, kMaxRegMoves>
+      reg_moves;
// If {dst} contains {ecx}, replace it by an unused register, which is then
// moved to {ecx} in the end.
@@ -866,7 +868,7 @@ inline void Emit64BitShiftOperation(
reg_moves.emplace_back(dst, src, kWasmI64);
reg_moves.emplace_back(ecx, amount, kWasmI32);
-  assm->ParallelRegisterMove({reg_moves.data(), reg_moves.size()});
+  assm->ParallelRegisterMove(VectorOf(reg_moves));
// Do the actual shift.
(assm->*emit_shift)(dst.high_gp(), dst.low_gp());
......
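Editorial note on the VectorOf change above: as the old call site suggests, V8's Vector is a non-owning (pointer, length) view, and VectorOf builds one from any container exposing data() and size(); that is why the call no longer cares whether reg_moves is a std::vector or a base::SmallVector. Below is a self-contained sketch of the shape of such an adapter; VectorView and VectorOfSketch are simplified stand-ins, not V8's actual declarations.

#include <cstddef>
#include <type_traits>

// Simplified stand-in for V8's Vector<T>: a non-owning (pointer, length) view.
template <typename T>
struct VectorView {
  T* start;
  size_t length;
};

// Simplified stand-in for VectorOf: wraps any container with data()/size().
template <typename Container>
auto VectorOfSketch(Container& container)
    -> VectorView<
        typename std::remove_reference<decltype(*container.data())>::type> {
  return {container.data(), container.size()};
}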
@@ -128,8 +128,7 @@ class StackTransferRecipe {
++next_spill_slot;
executed_moves = 1;
}
-      register_moves_.erase(register_moves_.end() - executed_moves,
-                            register_moves_.end());
+      register_moves_.pop(executed_moves);
}
}
@@ -244,25 +243,27 @@ class StackTransferRecipe {
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
-    register_loads_.push_back(RegisterLoad::Const(dst, value));
+    register_loads_.emplace_back(RegisterLoad::Const(dst, value));
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
ValueType type) {
-    register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
+    register_loads_.emplace_back(RegisterLoad::Stack(dst, stack_index, type));
}
void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
-    register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
+    register_loads_.emplace_back(
+        RegisterLoad::HalfStack(dst, half_stack_index));
}
private:
-  // TODO(clemensh): Avoid unconditionally allocating on the heap.
-  std::vector<RegisterMove> register_moves_;
-  std::vector<RegisterLoad> register_loads_;
+  base::SmallVector<RegisterMove, 8> register_moves_;
+  base::SmallVector<RegisterLoad, 8> register_loads_;
LiftoffRegList move_dst_regs_;
LiftoffRegList move_src_regs_;
LiftoffAssembler* const asm_;
+  DISALLOW_COPY_AND_ASSIGN(StackTransferRecipe);
};
} // namespace
......
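Editorial note on the liftoff-assembler.cc changes above: SmallVector only provides emplace_back, which explains the push_back to emplace_back switches, and its pop(count) replaces the std::vector erase-at-the-end idiom. For trivially destructible elements the two are equivalent, except that pop only moves the end pointer and runs no destructors. A small sketch under those assumptions (the function name and values are hypothetical):

#include <vector>

#include "src/base/small-vector.h"

void PopVersusEraseSketch() {
  std::vector<int> vec = {1, 2, 3, 4, 5};
  vec.erase(vec.end() - 2, vec.end());  // Old idiom: vec == {1, 2, 3}.

  v8::base::SmallVector<int, 8> small;
  for (int i = 1; i <= 5; ++i) small.emplace_back(i);
  small.pop(2);  // New idiom: size() == 3, just a pointer decrement.
}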
@@ -9,6 +9,7 @@
#include <memory>
#include "src/base/bits.h"
+#include "src/base/small-vector.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
@@ -725,8 +726,10 @@ class LiftoffStackSlots {
RegPairHalf half_;
};
-  std::vector<Slot> slots_;
+  base::SmallVector<Slot, 8> slots_;
LiftoffAssembler* const asm_;
+  DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
};
} // namespace wasm
......