// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASE_BITS_H_
#define V8_BASE_BITS_H_

#include <stdint.h>
#include <type_traits>

#include "src/base/base-export.h"
#include "src/base/macros.h"

#if V8_CC_MSVC
#include <intrin.h>
#endif

#if V8_OS_WIN32
#include "src/base/win32-headers.h"
#endif

namespace v8 {
namespace base {
namespace bits {

// CountPopulation(value) returns the number of bits set in |value|.
template <typename T>
constexpr inline
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountPopulation(T value) {
  STATIC_ASSERT(sizeof(T) <= 8);
#if V8_HAS_BUILTIN_POPCOUNT
  return sizeof(T) == 8 ? __builtin_popcountll(static_cast<uint64_t>(value))
                        : __builtin_popcount(static_cast<uint32_t>(value));
#else
  // Fall back to divide-and-conquer popcount (see "Hacker's Delight" by Henry
  // S. Warren, Jr.), chapter 5-1.
  constexpr uint64_t mask[] = {0x5555555555555555, 0x3333333333333333,
                               0x0f0f0f0f0f0f0f0f};
  // Start with 64 buckets of 1 bits, holding values from [0,1].
  value = ((value >> 1) & mask[0]) + (value & mask[0]);
  // Having 32 buckets of 2 bits, holding values from [0,2] now.
  value = ((value >> 2) & mask[1]) + (value & mask[1]);
  // Having 16 buckets of 4 bits, holding values from [0,4] now.
  value = ((value >> 4) & mask[2]) + (value & mask[2]);
  // Having 8 buckets of 8 bits, holding values from [0,8] now.
  // From this point on, the buckets are bigger than the number of bits
  // required to hold the values, and the buckets are bigger than the maximum
  // result, so there's no need to mask value anymore, since there's no
  // more risk of overflow between buckets.
  if (sizeof(T) > 1) value = (value >> (sizeof(T) > 1 ? 8 : 0)) + value;
  // Having 4 buckets of 16 bits, holding values from [0,16] now.
  if (sizeof(T) > 2) value = (value >> (sizeof(T) > 2 ? 16 : 0)) + value;
  // Having 2 buckets of 32 bits, holding values from [0,32] now.
  if (sizeof(T) > 4) value = (value >> (sizeof(T) > 4 ? 32 : 0)) + value;
  // Having 1 bucket of 64 bits, holding values from [0,64] now.
  return static_cast<unsigned>(value & 0xff);
#endif
}
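// A few illustrative examples; since CountPopulation is constexpr, they can
// be checked at compile time:
static_assert(CountPopulation(uint8_t{0}) == 0, "CountPopulation example");
static_assert(CountPopulation(uint32_t{0xF0F0F0F0u}) == 16,
              "CountPopulation example");
static_assert(CountPopulation(uint64_t{0xFFFFFFFFFFFFFFFFull}) == 64,
              "CountPopulation example");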

// ReverseBits(value) returns |value| in reverse bit order.
template <typename T>
T ReverseBits(T value) {
  STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
                (sizeof(value) == 4) || (sizeof(value) == 8));
  T result = 0;
  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
    result = (result << 1) | (value & 1);
    value >>= 1;
  }
  return result;
}
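// For example, ReverseBits(uint8_t{0x0D}) == 0xB0, while
// ReverseBits(uint32_t{0x0D}) == 0xB0000000: the reversal always spans the
// full width of T.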

// CountLeadingZeros(value) returns the number of zero bits above the most
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
// {sizeof(T) * 8}.
template <typename T, unsigned bits = sizeof(T) * 8>
inline constexpr
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountLeadingZeros(T value) {
  static_assert(bits > 0, "invalid instantiation");
#if V8_HAS_BUILTIN_CLZ
  return value == 0
             ? bits
             : bits == 64
                   ? __builtin_clzll(static_cast<uint64_t>(value))
                   : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits);
#else
  // Binary search algorithm taken from "Hacker's Delight" (by Henry S. Warren,
  // Jr.), figures 5-11 and 5-12.
  if (bits == 1) return static_cast<unsigned>(value) ^ 1;
  T upper_half = value >> (bits / 2);
  T next_value = upper_half != 0 ? upper_half : value;
  unsigned add = upper_half != 0 ? 0 : bits / 2;
  constexpr unsigned next_bits = bits == 1 ? 1 : bits / 2;
  return CountLeadingZeros<T, next_bits>(next_value) + add;
#endif
}

inline constexpr unsigned CountLeadingZeros32(uint32_t value) {
  return CountLeadingZeros(value);
}
inline constexpr unsigned CountLeadingZeros64(uint64_t value) {
  return CountLeadingZeros(value);
}
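// A few illustrative compile-time examples:
static_assert(CountLeadingZeros32(uint32_t{1}) == 31, "CLZ example");
static_assert(CountLeadingZeros32(uint32_t{0}) == 32, "CLZ example");
static_assert(CountLeadingZeros64(uint64_t{1} << 63) == 0, "CLZ example");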

// CountTrailingZeros(value) returns the number of zero bits below the least
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
// {sizeof(T) * 8}.
template <typename T, unsigned bits = sizeof(T) * 8>
inline constexpr
    typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountTrailingZeros(T value) {
#if V8_HAS_BUILTIN_CTZ
  return value == 0 ? bits
                    : bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
                                 : __builtin_ctz(static_cast<uint32_t>(value));
#else
  // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
  // chapter 5-4. On x64, this is faster than counting in a loop and faster
  // than doing binary search.
  using U = typename std::make_unsigned<T>::type;
  U u = value;
  return CountPopulation(static_cast<U>(~u & (u - 1u)));
#endif
}

inline constexpr unsigned CountTrailingZeros32(uint32_t value) {
  return CountTrailingZeros(value);
}
inline constexpr unsigned CountTrailingZeros64(uint64_t value) {
  return CountTrailingZeros(value);
}
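// A few illustrative compile-time examples:
static_assert(CountTrailingZeros32(uint32_t{1} << 31) == 31, "CTZ example");
static_assert(CountTrailingZeros64(uint64_t{0}) == 64, "CTZ example");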

// Returns true iff |value| is a power of 2.
template <typename T,
          typename = typename std::enable_if<std::is_integral<T>::value ||
                                             std::is_enum<T>::value>::type>
constexpr inline bool IsPowerOfTwo(T value) {
  return value > 0 && (value & (value - 1)) == 0;
}
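// A few illustrative compile-time examples:
static_assert(IsPowerOfTwo(1u), "IsPowerOfTwo example");
static_assert(IsPowerOfTwo(uint64_t{1} << 40), "IsPowerOfTwo example");
static_assert(!IsPowerOfTwo(0), "IsPowerOfTwo example");
static_assert(!IsPowerOfTwo(-8), "IsPowerOfTwo example");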

// Identical to {CountTrailingZeros}, but only works for powers of 2.
template <typename T,
          typename = typename std::enable_if<std::is_integral<T>::value>::type>
inline constexpr int WhichPowerOfTwo(T value) {
  CONSTEXPR_DCHECK(IsPowerOfTwo(value));
#if V8_HAS_BUILTIN_CTZ
  STATIC_ASSERT(sizeof(T) <= 8);
  return sizeof(T) == 8 ? __builtin_ctzll(static_cast<uint64_t>(value))
                        : __builtin_ctz(static_cast<uint32_t>(value));
#else
  // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
  // chapter 5-4. On x64, this is faster than counting in a loop and faster
  // than doing binary search.
  using U = typename std::make_unsigned<T>::type;
  U u = value;
  return CountPopulation(static_cast<U>(u - 1));
#endif
}
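// For example, WhichPowerOfTwo(uint32_t{8}) == 3 and
// WhichPowerOfTwo(uint64_t{1} << 42) == 42.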

// RoundUpToPowerOfTwo32(value) returns the smallest power of two which is
// greater than or equal to |value|. If you pass in a |value| that is already a
// power of two, it is returned as is. |value| must be less than or equal to
// 0x80000000u. Uses computation based on leading zeros if we have compiler
// support for that. Falls back to the implementation from "Hacker's Delight" by
// Henry S. Warren, Jr., figure 3-3, page 48, where the function is called clp2.
V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
// Same for 64-bit integers. |value| must be <= 2^63.
V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value);
// Same for size_t integers.
inline size_t RoundUpToPowerOfTwo(size_t value) {
  if (sizeof(size_t) == sizeof(uint64_t)) {
    return RoundUpToPowerOfTwo64(value);
  } else {
    return RoundUpToPowerOfTwo32(value);
  }
}
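// For example, RoundUpToPowerOfTwo32(37) == 64 and RoundUpToPowerOfTwo32(64)
// stays 64. As a rough sketch of the leading-zeros based variant mentioned
// above (the actual definitions are out of line), for 1 < value <= 0x80000000u
// the result is equivalent to
//   uint32_t{1} << (32 - CountLeadingZeros32(value - 1))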

// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
// less than or equal to |value|. If you pass in a |value| that is already a
// power of two, it is returned as is.
inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
  if (value > 0x80000000u) return 0x80000000u;
  uint32_t result = RoundUpToPowerOfTwo32(value);
  if (result > value) result >>= 1;
  return result;
}
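// For example, RoundDownToPowerOfTwo32(37) == 32, RoundDownToPowerOfTwo32(64)
// == 64, and anything above 0x80000000u is clamped to 0x80000000u.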


// Precondition: 0 <= shift < 32
inline constexpr uint32_t RotateRight32(uint32_t value, uint32_t shift) {
  return (value >> shift) | (value << ((32 - shift) & 31));
}

// Precondition: 0 <= shift < 32
inline constexpr uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
  return (value << shift) | (value >> ((32 - shift) & 31));
}

// Precondition: 0 <= shift < 64
inline constexpr uint64_t RotateRight64(uint64_t value, uint64_t shift) {
  return (value >> shift) | (value << ((64 - shift) & 63));
}

// Precondition: 0 <= shift < 64
inline constexpr uint64_t RotateLeft64(uint64_t value, uint64_t shift) {
  return (value << shift) | (value >> ((64 - shift) & 63));
}
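// A few illustrative compile-time examples:
static_assert(RotateRight32(uint32_t{1}, 1) == 0x80000000u, "rotate example");
static_assert(RotateLeft32(0x80000000u, 1) == 1u, "rotate example");
static_assert(RotateRight64(uint64_t{1}, 63) == 2, "rotate example");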

// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed summation resulted in an overflow.
inline bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
#if V8_HAS_BUILTIN_SADD_OVERFLOW
  return __builtin_sadd_overflow(lhs, rhs, val);
#else
  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  *val = bit_cast<int32_t>(res);
  return ((res ^ lhs) & (res ^ rhs) & (1U << 31)) != 0;
#endif
}


// SignedSubOverflow32(lhs,rhs,val) performs a signed subtraction of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed subtraction resulted in an overflow.
inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
#if V8_HAS_BUILTIN_SSUB_OVERFLOW
  return __builtin_ssub_overflow(lhs, rhs, val);
#else
  uint32_t res = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
  *val = bit_cast<int32_t>(res);
  return ((res ^ lhs) & (res ^ ~rhs) & (1U << 31)) != 0;
#endif
}
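// For example, given int32_t result, SignedAddOverflow32(INT32_MAX, 1, &result)
// returns true and stores the wrapped sum, whereas
// SignedAddOverflow32(1, 2, &result) returns false and stores 3.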

// SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed multiplication resulted in an overflow.
V8_BASE_EXPORT bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);

// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed summation resulted in an overflow.
inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
  uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
  *val = bit_cast<int64_t>(res);
  return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0;
}


// SignedSubOverflow64(lhs,rhs,val) performs a signed subtraction of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed subtraction resulted in an overflow.
inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
  uint64_t res = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
  *val = bit_cast<int64_t>(res);
  return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
}

// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
// |rhs|, extracts the most significant 32 bits of the result, and returns
// those.
V8_BASE_EXPORT int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
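// For example, SignedMulHigh32(0x40000000, 4) == 1 (the full 64-bit product is
// 0x100000000) and SignedMulHigh32(-1, 1) == -1.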

// SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values
// |lhs| and |rhs|, extracts the most significant 32 bits of the result, and
// adds the accumulator value |acc|.
V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs,
                                             int32_t acc);

// SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to int32. If |rhs| is zero, then zero is returned. If |lhs|
// is minint and |rhs| is -1, it returns minint.
V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs);

// SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
// is -1, it returns zero.
V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs);
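// For example, SignedDiv32(7, 2) == 3, SignedDiv32(7, 0) == 0,
// SignedDiv32(INT32_MIN, -1) == INT32_MIN, and SignedMod32(INT32_MIN, -1) == 0.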

// UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the unsigned summation resulted in an overflow.
inline bool UnsignedAddOverflow32(uint32_t lhs, uint32_t rhs, uint32_t* val) {
#if V8_HAS_BUILTIN_SADD_OVERFLOW
  return __builtin_uadd_overflow(lhs, rhs, val);
#else
  *val = lhs + rhs;
  return *val < (lhs | rhs);
#endif
}
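// For example, given uint32_t result, UnsignedAddOverflow32(0xFFFFFFFFu, 1u,
// &result) returns true and stores 0, whereas UnsignedAddOverflow32(1u, 2u,
// &result) returns false and stores 3.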


// UnsignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to uint32. If |rhs| is zero, then zero is returned.
inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) {
  return rhs ? lhs / rhs : 0u;
}


// UnsignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to uint32. If |rhs| is zero, then zero is returned.
inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
  return rhs ? lhs % rhs : 0u;
}


// SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs| and returns the result,
// saturated to the int64_t range on overflow.
V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);

// SignedSaturatedSub64(lhs, rhs) subtracts |rhs| from |lhs| and returns the
// result, saturated to the int64_t range on overflow.
V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
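// With saturation, e.g. SignedSaturatedAdd64(INT64_MAX, 1) yields INT64_MAX
// and SignedSaturatedSub64(INT64_MIN, 1) yields INT64_MIN, instead of wrapping.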

}  // namespace bits
}  // namespace base
}  // namespace v8

#endif  // V8_BASE_BITS_H_