// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/time.h"

#if V8_OS_POSIX
#include <fcntl.h>  // for O_RDONLY
#include <sys/time.h>
#include <unistd.h>
#endif

#if V8_OS_DARWIN
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <pthread.h>
#endif

#if V8_OS_FUCHSIA
#include <threads.h>
#include <zircon/syscalls.h>
#include <zircon/threads.h>
#endif

#include <cstring>
#include <ostream>

#if V8_OS_WIN
#include <windows.h>

// This has to come after windows.h.
#include <mmsystem.h>  // For timeGetTime().

#include <atomic>

#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"

#if V8_OS_STARBOARD
#include "starboard/time.h"
#endif

namespace {

#if V8_OS_DARWIN
int64_t ComputeThreadTicks() {
  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t thread_info_data;
  kern_return_t kr = thread_info(
      pthread_mach_thread_np(pthread_self()),
      THREAD_BASIC_INFO,
      reinterpret_cast<thread_info_t>(&thread_info_data),
      &thread_info_count);
  CHECK_EQ(kr, KERN_SUCCESS);

  // We can add the seconds into a {int64_t} without overflow.
  CHECK_LE(thread_info_data.user_time.seconds,
           std::numeric_limits<int64_t>::max() -
               thread_info_data.system_time.seconds);
  int64_t seconds =
      thread_info_data.user_time.seconds + thread_info_data.system_time.seconds;
  // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
  // in [0, 2 * kMicrosecondsPerSecond) must result in a valid {int64_t}.
  static constexpr int64_t kSecondsLimit =
      (std::numeric_limits<int64_t>::max() /
       v8::base::Time::kMicrosecondsPerSecond) -
      2;
  CHECK_GT(kSecondsLimit, seconds);
  int64_t micros = seconds * v8::base::Time::kMicrosecondsPerSecond;
  micros += (thread_info_data.user_time.microseconds +
             thread_info_data.system_time.microseconds);
  return micros;
}
#elif V8_OS_FUCHSIA
V8_INLINE int64_t GetFuchsiaThreadTicks() {
  zx_info_thread_stats_t info;
  zx_status_t status = zx_object_get_info(thrd_get_zx_handle(thrd_current()),
                                          ZX_INFO_THREAD_STATS, &info,
                                          sizeof(info), nullptr, nullptr);
  CHECK_EQ(status, ZX_OK);
  return info.total_runtime / v8::base::Time::kNanosecondsPerMicrosecond;
}
#elif V8_OS_POSIX
// Helper function to get results from clock_gettime() and convert to a
// microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC is
// supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
// _POSIX_MONOTONIC_CLOCK to -1.
V8_INLINE int64_t ClockNow(clockid_t clk_id) {
#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
  defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
#if defined(V8_OS_AIX)
  // On AIX, clock_gettime() for CLOCK_THREAD_CPUTIME_ID outputs time with a
  // resolution of 10ms. The thread_cputime API provides the time in ns.
  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
#if defined(__PASE__)  // CLOCK_THREAD_CPUTIME_ID clock not supported on IBMi
    return 0;
#else
    thread_cputime_t tc;
    if (thread_cputime(-1, &tc) != 0) {
      UNREACHABLE();
    }
    return (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond)
           + (tc.utime / v8::base::Time::kNanosecondsPerMicrosecond);
#endif  // defined(__PASE__)
  }
#endif  // defined(V8_OS_AIX)
  struct timespec ts;
  if (clock_gettime(clk_id, &ts) != 0) {
    UNREACHABLE();
  }
  // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
  // in [0, kMicrosecondsPerSecond) must result in a valid {int64_t}.
  static constexpr int64_t kSecondsLimit =
      (std::numeric_limits<int64_t>::max() /
       v8::base::Time::kMicrosecondsPerSecond) -
      1;
  CHECK_GT(kSecondsLimit, ts.tv_sec);
  int64_t result = int64_t{ts.tv_sec} * v8::base::Time::kMicrosecondsPerSecond;
  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
  return result;
#else  // Monotonic clock not supported.
  return 0;
#endif
}

V8_INLINE int64_t NanosecondsNow() {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return int64_t{ts.tv_sec} * v8::base::Time::kNanosecondsPerSecond +
         ts.tv_nsec;
}

inline bool IsHighResolutionTimer(clockid_t clk_id) {
  // Currently this is only needed for CLOCK_MONOTONIC. If other clocks need
  // to be checked, care must be taken to support all platforms correctly;
  // see ClockNow() above for precedent.
  DCHECK_EQ(clk_id, CLOCK_MONOTONIC);
  int64_t previous = NanosecondsNow();
  // There should be enough attempts to make the loop run for more than one
  // microsecond if the early return is not taken -- the elapsed time can't
  // be measured in that situation, so we have to estimate it offline.
  constexpr int kAttempts = 100;
  for (int i = 0; i < kAttempts; i++) {
    int64_t next = NanosecondsNow();
    int64_t delta = next - previous;
    if (delta == 0) continue;
    // We expect most systems to take this branch on the first iteration.
    if (delta <= v8::base::Time::kNanosecondsPerMicrosecond) {
      return true;
    }
    previous = next;
  }
  // As of 2022, we expect that the loop above has taken at least 2 μs (on
  // a fast desktop). If we still haven't seen a non-zero clock increment
  // in sub-microsecond range, assume a low resolution timer.
  return false;
}

#elif V8_OS_WIN
// Returns the current value of the performance counter.
V8_INLINE uint64_t QPCNowRaw() {
  LARGE_INTEGER perf_counter_now = {};
  // According to the MSDN documentation for QueryPerformanceCounter(), this
  // will never fail on systems that run XP or later.
  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
  BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
  DCHECK(result);
  USE(result);
  return perf_counter_now.QuadPart;
}
#endif  // V8_OS_DARWIN

}  // namespace

namespace v8 {
namespace base {

int TimeDelta::InDays() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}

int TimeDelta::InHours() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}

int TimeDelta::InMinutes() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}

double TimeDelta::InSecondsF() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<double>::infinity();
  }
  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}

int64_t TimeDelta::InSeconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ / Time::kMicrosecondsPerSecond;
}

double TimeDelta::InMillisecondsF() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<double>::infinity();
  }
  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMilliseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ / Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMillisecondsRoundedUp() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
         Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMicroseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_;
}

int64_t TimeDelta::InNanoseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ * Time::kNanosecondsPerMicrosecond;
}
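// Editor's note (illustrative, not part of the original source): all of the
// conversion getters above saturate instead of overflowing. Assuming the
// TimeDelta::Max() factory declared in the accompanying time.h, a maximal
// delta reports std::numeric_limits<int64_t>::max() from InMicroseconds(),
// InMilliseconds() and InSeconds() alike, rather than wrapping when scaled.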

#if V8_OS_DARWIN

TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct mach_timespec TimeDelta::ToMachTimespec() const {
  struct mach_timespec ts;
  DCHECK_GE(delta_, 0);
  ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_DARWIN

#if V8_OS_POSIX

TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct timespec TimeDelta::ToTimespec() const {
  struct timespec ts;
  ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_POSIX


#if V8_OS_WIN

// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock final {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  Time Now() {
    // Time between resampling the un-granular clock for this API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    MutexGuard lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  Time NowFromSystemTime() {
    MutexGuard lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};

namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock)
}  // namespace

Time Time::Now() { return GetClock()->Now(); }

Time Time::NowFromSystemTime() { return GetClock()->NowFromSystemTime(); }

// Time between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01).
static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};

Time Time::FromFiletime(FILETIME ft) {
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
    return Time();
  }
  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
    return Max();
  }
  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
  return Time(us - kTimeToEpochInMicroseconds);
}
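// Editor's illustrative check (an added sketch, not part of the original
// source): a FILETIME of 116444736000000000 hundred-nanosecond intervals since
// 1601-01-01 is exactly the Unix epoch, so the conversion above maps it to 0.
static_assert(int64_t{116444736000000000} / 10 - kTimeToEpochInMicroseconds ==
                  0,
              "the Unix epoch FILETIME converts to Time() == 0");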


FILETIME Time::ToFiletime() const {
  DCHECK_GE(us_, 0);
  FILETIME ft;
  if (IsNull()) {
    ft.dwLowDateTime = 0;
    ft.dwHighDateTime = 0;
    return ft;
  }
  if (IsMax()) {
    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
    return ft;
  }
  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
  ft.dwLowDateTime = static_cast<DWORD>(us);
  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
  return ft;
}

#elif V8_OS_POSIX

Time Time::Now() {
  struct timeval tv;
  int result = gettimeofday(&tv, nullptr);
  DCHECK_EQ(0, result);
  USE(result);
  return FromTimeval(tv);
}


Time Time::NowFromSystemTime() {
  return Now();
}


Time Time::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
    return Time();
  }
  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
      ts.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(ts.tv_sec * kMicrosecondsPerSecond +
              ts.tv_nsec / kNanosecondsPerMicrosecond);
}


struct timespec Time::ToTimespec() const {
  struct timespec ts;
  if (IsNull()) {
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return ts;
  }
  if (IsMax()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
    return ts;
  }
  ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
  return ts;
}


Time Time::FromTimeval(struct timeval tv) {
  DCHECK_GE(tv.tv_usec, 0);
  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
    return Time();
  }
  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
      tv.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
}


struct timeval Time::ToTimeval() const {
  struct timeval tv;
  if (IsNull()) {
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    return tv;
  }
  if (IsMax()) {
    tv.tv_sec = std::numeric_limits<time_t>::max();
    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
    return tv;
  }
  tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  tv.tv_usec = us_ % kMicrosecondsPerSecond;
  return tv;
}

#elif V8_OS_STARBOARD

Time Time::Now() { return Time(SbTimeToPosix(SbTimeGetNow())); }

Time Time::NowFromSystemTime() { return Now(); }

#endif  // V8_OS_STARBOARD

Time Time::FromJsTime(double ms_since_epoch) {
  // The epoch is a valid time, so this constructor doesn't interpret
  // 0 as the null time.
  if (ms_since_epoch == std::numeric_limits<double>::max()) {
    return Max();
  }
  return Time(
      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
}


double Time::ToJsTime() const {
  if (IsNull()) {
    // Preserve 0 so the invalid result doesn't depend on the platform.
    return 0;
  }
  if (IsMax()) {
    // Preserve max without offset to prevent overflow.
    return std::numeric_limits<double>::max();
  }
  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
}
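// Editor's note (illustrative, not part of the original source): JS time is a
// double counting milliseconds since the epoch, so Time::FromJsTime(1.5) is
// stored internally as 1500 microseconds and ToJsTime() returns 1.5 again;
// only the null (0) and max values get the special treatment documented above.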


std::ostream& operator<<(std::ostream& os, const Time& time) {
  return os << time.ToJsTime();
}


#if V8_OS_WIN

namespace {

// We define a wrapper to adapt between the __stdcall and __cdecl call of the
// mock function, and to avoid a static constructor.  Assigning an import to a
// function pointer directly would require setup code to fetch from the IAT.
DWORD timeGetTimeWrapper() { return timeGetTime(); }

DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;

// A structure holding the most significant bits of "last seen" and a
// "rollover" counter.
union LastTimeAndRolloversState {
  // The state as a single 32-bit opaque value.
  int32_t as_opaque_32;

  // The state as usable values.
  struct {
    // The top 8-bits of the "last" time. This is enough to check for rollovers
    // and the small bit-size means fewer CompareAndSwap operations to store
    // changes in state, which in turn makes for fewer retries.
    uint8_t last_8;
    // A count of the number of detected rollovers. Using this as bits 47-32
    // of the upper half of a 64-bit value results in a 48-bit tick counter.
    // This extends the total rollover period from about 49 days to about 8800
    // years while still allowing it to be stored with last_8 in a single
    // 32-bit value.
    uint16_t rollovers;
  } as_values;
};
std::atomic<int32_t> g_last_time_and_rollovers{0};
static_assert(sizeof(LastTimeAndRolloversState) <=
                  sizeof(g_last_time_and_rollovers),
              "LastTimeAndRolloversState does not fit in a single atomic word");

// We use timeGetTime() to implement TimeTicks::Now().  This can be problematic
// because it returns the number of milliseconds since Windows has started,
// which will roll over the 32-bit value every ~49 days.  We try to track
// rollover ourselves, which works if TimeTicks::Now() is called at least every
// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
TimeTicks RolloverProtectedNow() {
  LastTimeAndRolloversState state;
  DWORD now;  // DWORD is always unsigned 32 bits.

  // Fetch the "now" and "last" tick values, updating "last" with "now" and
  // incrementing the "rollovers" counter if the tick-value has wrapped back
  // around. Atomic operations ensure that both "last" and "rollovers" are
  // always updated together.
  int32_t original = g_last_time_and_rollovers.load(std::memory_order_acquire);
  while (true) {
    state.as_opaque_32 = original;
    now = g_tick_function();
    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
    if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
    state.as_values.last_8 = now_8;

    // If the state hasn't changed, exit the loop.
    if (state.as_opaque_32 == original) break;

    // Save the changed state. If the existing value is unchanged from the
    // original, exit the loop.
    if (g_last_time_and_rollovers.compare_exchange_weak(
            original, state.as_opaque_32, std::memory_order_acq_rel)) {
      break;
    }

    // Another thread has done something in between so retry from the top.
    // {original} has been updated by the {compare_exchange_weak}.
  }

  return TimeTicks() +
         TimeDelta::FromMilliseconds(
             now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
}
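// Editor's illustrative check (an added sketch, not part of the original
// source): a timeGetTime() value of 5 ms observed after one 32-bit wrap yields
// 5 + (1 << 32) ms, so the 48-bit extended counter keeps increasing
// monotonically across the rollover.
static_assert(uint64_t{5} + (uint64_t{1} << 32) == uint64_t{4294967301},
              "one rollover extends the 32-bit tick count by 2^32 ms");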

// Discussion of tick counter options on Windows:
//
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
// expensive to retrieve. However, on older CPUs, two issues can affect its
// reliability: First it is maintained per processor and not synchronized
// between processors. Also, the counters will change frequency due to thermal
// and power changes, and stop in some states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
// resolution (<1 microsecond) time stamp. On most hardware running today, it
// auto-detects and uses the constant-rate RDTSC counter to provide extremely
// efficient and reliable time stamps.
//
// On older CPUs where RDTSC is unreliable, it falls back to using more
// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
// PM timer, and can involve system calls; and all this is up to the HAL (with
// some help from ACPI). According to
// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
// worst case, it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In best cases, the HAL may conclude that the
// RDTSC counter runs at a constant frequency, then it uses that instead. On
// multiprocessor machines, it will try to verify the values returned from
// RDTSC on each processor are consistent with each other, and apply a handful
// of workarounds for known buggy hardware. In other words, QPC is supposed to
// give consistent results on a multiprocessor computer, but for older CPUs it
// can be unreliable due to bugs in the BIOS or HAL.
//
// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
// milliseconds) time stamp but is comparatively less expensive to retrieve and
// more reliable. Time::EnableHighResolutionTimer() and
// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
// this timer; and also other Windows applications can alter it, affecting this
// one.

TimeTicks InitialTimeTicksNowFunction();

// See "threading notes" in InitializeNowFunctionPointer() for details on how
// concurrent reads/writes to these globals has been made safe.
using TimeTicksNowFunction = decltype(&TimeTicks::Now);
TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
int64_t g_qpc_ticks_per_second = 0;

TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
  // InitializeTimeTicksNowFunctionPointer(), has happened by this point.
  std::atomic_thread_fence(std::memory_order_acquire);

  DCHECK_GT(g_qpc_ticks_per_second, 0);

  // If the QPC Value is below the overflow threshold, we proceed with
  // simple multiply and divide.
  if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
    return TimeDelta::FromMicroseconds(
        qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
  }
  // Otherwise, calculate microseconds in a roundabout manner to avoid
  // overflow and precision issues.
  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
  return TimeDelta::FromMicroseconds(
      (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
      ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
       g_qpc_ticks_per_second));
}
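// Editor's note (illustrative, not part of the original source): with a
// hypothetical 10 MHz QPC frequency, a raw value of 25'000'000 ticks splits
// into whole_seconds = 2 and leftover_ticks = 5'000'000, giving
// 2'000'000 us + 500'000 us = 2'500'000 us, without ever forming the
// potentially overflowing product qpc_value * kMicrosecondsPerSecond.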

TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }

void InitializeTimeTicksNowFunctionPointer() {
  LARGE_INTEGER ticks_per_sec = {};
  if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;

  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
  // the low-resolution clock.
  //
  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
  // will still use the low-resolution clock. A CPU lacking a non-stop time
  // counter will cause Windows to provide an alternate QPC implementation that
  // works, but is expensive to use. Certain Athlon CPUs are known to make the
  // QPC implementation unreliable.
  //
  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
  // ~72% of users fall within this category.
  TimeTicksNowFunction now_function;
  CPU cpu;
  if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter()) {
    now_function = &RolloverProtectedNow;
  } else {
    now_function = &QPCNow;
  }

  // Threading note 1: In an unlikely race condition, it's possible for two or
  // more threads to enter InitializeTimeTicksNowFunctionPointer() in parallel.
  // This is not a problem since all threads should end up writing out the same
  // values to the global variables.
  //
  // Threading note 2: A release fence is placed here to ensure, from the
  // perspective of other threads using the function pointers, that the
  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
  // are changed.
  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
  std::atomic_thread_fence(std::memory_order_release);
  g_time_ticks_now_function = now_function;
}

TimeTicks InitialTimeTicksNowFunction() {
  InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function();
}

}  // namespace

// static
TimeTicks TimeTicks::Now() {
  // Make sure we never return 0 here.
  TimeTicks ticks(g_time_ticks_now_function());
  DCHECK(!ticks.IsNull());
  return ticks;
}

// static
bool TimeTicks::IsHighResolution() {
  if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
    InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function == &QPCNow;
}

#else  // V8_OS_WIN

TimeTicks TimeTicks::Now() {
  int64_t ticks;
#if V8_OS_DARWIN
  static struct mach_timebase_info info;
  if (info.denom == 0) {
    kern_return_t result = mach_timebase_info(&info);
    DCHECK_EQ(KERN_SUCCESS, result);
    USE(result);
  }
  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
           info.numer / info.denom);
#elif V8_OS_SOLARIS
  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_FUCHSIA
  ticks = zx_clock_get_monotonic() / Time::kNanosecondsPerMicrosecond;
#elif V8_OS_POSIX
  ticks = ClockNow(CLOCK_MONOTONIC);
#elif V8_OS_STARBOARD
  ticks = SbTimeGetMonotonicNow();
#else
#error platform does not implement TimeTicks::Now.
#endif  // V8_OS_DARWIN
  // Make sure we never return 0 here.
  return TimeTicks(ticks + 1);
}

// static
bool TimeTicks::IsHighResolution() {
#if V8_OS_DARWIN
  return true;
#elif V8_OS_FUCHSIA
  return true;
#elif V8_OS_POSIX
  static const bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
  return is_high_resolution;
#else
  return true;
#endif
}

#endif  // V8_OS_WIN


bool ThreadTicks::IsSupported() {
#if V8_OS_STARBOARD
#if SB_API_VERSION >= 12
  return SbTimeIsTimeThreadNowSupported();
#elif SB_HAS(TIME_THREAD_NOW)
  return true;
#else
  return false;
#endif
#elif defined(__PASE__)
  // Thread CPU time accounting is unavailable in PASE
  return false;
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
    defined(V8_OS_DARWIN) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
  return true;
#elif defined(V8_OS_WIN)
  return IsSupportedWin();
#else
  return false;
#endif
}


ThreadTicks ThreadTicks::Now() {
#if V8_OS_STARBOARD
#if SB_API_VERSION >= 12
  if (SbTimeIsTimeThreadNowSupported())
    return ThreadTicks(SbTimeGetMonotonicThreadNow());
  UNREACHABLE();
#elif SB_HAS(TIME_THREAD_NOW)
  return ThreadTicks(SbTimeGetMonotonicThreadNow());
#else
  UNREACHABLE();
#endif
#elif V8_OS_DARWIN
  return ThreadTicks(ComputeThreadTicks());
#elif V8_OS_FUCHSIA
  return ThreadTicks(GetFuchsiaThreadTicks());
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_ANDROID)
  return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
#elif V8_OS_SOLARIS
  return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_WIN
  return ThreadTicks::GetForThread(::GetCurrentThread());
#else
  UNREACHABLE();
#endif
}


#if V8_OS_WIN
ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
  DCHECK(IsSupported());

  // Get the number of TSC ticks used by the current thread.
  ULONG64 thread_cycle_time = 0;
  ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);

  // Get the frequency of the TSC.
  double tsc_ticks_per_second = TSCTicksPerSecond();
  if (tsc_ticks_per_second == 0)
    return ThreadTicks();

  // Return the CPU time of the current thread.
  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
  return ThreadTicks(
      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
}

// static
bool ThreadTicks::IsSupportedWin() {
  static bool is_supported = base::CPU().has_non_stop_time_stamp_counter();
  return is_supported;
}

// static
void ThreadTicks::WaitUntilInitializedWin() {
  while (TSCTicksPerSecond() == 0)
    ::Sleep(10);
}

#ifdef V8_HOST_ARCH_ARM64
#define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
#else
#define ReadCycleCounter() __rdtsc()
#endif

double ThreadTicks::TSCTicksPerSecond() {
  DCHECK(IsSupported());

  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
  // frequency, because there is no guarantee that the TSC frequency is equal to
  // the performance counter frequency.

  // The TSC frequency is cached in a static variable because it takes some time
  // to compute it.
  static double tsc_ticks_per_second = 0;
  if (tsc_ticks_per_second != 0)
    return tsc_ticks_per_second;

  // Increase the thread priority to reduce the chances of having a context
  // switch during a reading of the TSC and the performance counter.
  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

  // The first time that this function is called, make an initial reading of the
  // TSC and the performance counter.
  static const uint64_t tsc_initial = ReadCycleCounter();
  static const uint64_t perf_counter_initial = QPCNowRaw();

  // Make another reading of the TSC and the performance counter every time
  // that this function is called.
  uint64_t tsc_now = ReadCycleCounter();
  uint64_t perf_counter_now = QPCNowRaw();

  // Reset the thread priority.
  ::SetThreadPriority(::GetCurrentThread(), previous_priority);

  // Make sure that at least 50 ms elapsed between the 2 readings. The first
  // time that this function is called, we don't expect this to be the case.
  // Note: The longer the elapsed time between the 2 readings is, the more
  //   accurate the computed TSC frequency will be. The 50 ms value was
  //   chosen because local benchmarks show that it allows us to get a
  //   stddev of less than 1 tick/us between multiple runs.
  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
  //   this will never fail on systems that run XP or later.
  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
  LARGE_INTEGER perf_counter_frequency = {};
  ::QueryPerformanceFrequency(&perf_counter_frequency);
  DCHECK_GE(perf_counter_now, perf_counter_initial);
  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
  double elapsed_time_seconds =
      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);

  const double kMinimumEvaluationPeriodSeconds = 0.05;
  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
    return 0;

  // Compute the frequency of the TSC.
  DCHECK_GE(tsc_now, tsc_initial);
  uint64_t tsc_ticks = tsc_now - tsc_initial;
  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;

  return tsc_ticks_per_second;
}
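// Editor's note (illustrative, not part of the original source): if the two
// readings above are 0.1 s apart according to QPC and the cycle counter
// advanced by 300'000'000 ticks in that window, the estimated frequency is
// 300'000'000 / 0.1 = 3e9 ticks/s, i.e. a 3 GHz TSC.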
#undef ReadCycleCounter
#endif  // V8_OS_WIN

}  // namespace base
}  // namespace v8