Commit 2201516f authored by Leszek Swirski, committed by Commit Bot

[cleanup] Clean-up CAS loops

Clean up a couple of CAS loops to avoid loading after a compare_exchange
(which updates the old value), and to loosen the memory ordering to
acquire-release to avoid unnecessary fences.

Change-Id: Ifb8e5e5136f687ca5a71417a5d131a7023add054
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2050390
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66235}
parent 9094a41e
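The pattern this clean-up moves to, sketched standalone against std::atomic rather than V8's AsAtomicImpl helpers (the function and variable names below are illustrative, not taken from the change): compare_exchange_weak writes the value it actually observed back into its expected argument when it fails, so the retry loop can reuse that value instead of issuing another load, and the ordering can be stated explicitly as acquire-release instead of the default sequentially consistent one.

#include <atomic>
#include <cstdio>

// Illustrative stand-in for AsAtomicImpl::SetBits: set `bits` inside `mask`,
// returning false if they were already set.
bool SetBits(std::atomic<unsigned>* addr, unsigned bits, unsigned mask) {
  // One relaxed load up front; afterwards the failed CAS refreshes old_value.
  unsigned old_value = addr->load(std::memory_order_relaxed);
  unsigned new_value;
  do {
    if ((old_value & mask) == bits) return false;
    new_value = (old_value & ~mask) | bits;
    // On failure, compare_exchange_weak stores the current value into
    // old_value, so the next iteration needs no separate load.
  } while (!addr->compare_exchange_weak(old_value, new_value,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed));
  return true;
}

int main() {
  std::atomic<unsigned> flags{0};
  std::printf("%d\n", SetBits(&flags, 0x2, 0x6));  // 1: bits were not yet set
  std::printf("%d\n", SetBits(&flags, 0x2, 0x6));  // 0: already set
}

Seeded with a single relaxed load, the loop performs exactly one atomic read-modify-write per retry, which is the shape the diff below gives to SetBits, UpdateHighWaterMark, UpdateAllocatedSpaceLimits, and the two ReserveAddressSpace functions.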
@@ -130,13 +130,14 @@ class AsAtomicImpl {
   static bool SetBits(T* addr, T bits, T mask) {
     STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
     DCHECK_EQ(bits & ~mask, static_cast<T>(0));
-    T old_value;
-    T new_value;
+    T old_value = Relaxed_Load(addr);
+    T new_value, old_value_before_cas;
     do {
-      old_value = Relaxed_Load(addr);
       if ((old_value & mask) == bits) return false;
       new_value = (old_value & ~mask) | bits;
-    } while (Release_CompareAndSwap(addr, old_value, new_value) != old_value);
+      old_value_before_cas = old_value;
+      old_value = Release_CompareAndSwap(addr, old_value, new_value);
+    } while (old_value != old_value_before_cas);
     return true;
   }
@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_
+#include <atomic>
 #include <list>
 #include <map>
 #include <memory>
@@ -660,12 +661,11 @@ class MemoryChunk : public BasicMemoryChunk {
     // to another chunk. See the comment to Page::FromAllocationAreaAddress.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
-    intptr_t old_mark = 0;
-    do {
-      old_mark = chunk->high_water_mark_;
-    } while (
-        (new_mark > old_mark) &&
-        !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
+    intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
+    while ((new_mark > old_mark) &&
+           !chunk->high_water_mark_.compare_exchange_weak(
+               old_mark, new_mark, std::memory_order_acq_rel)) {
+    }
   }
   static inline void MoveExternalBackingStoreBytes(
@@ -1453,15 +1453,14 @@ class MemoryAllocator {
     // The use of atomic primitives does not guarantee correctness (wrt.
     // desired semantics) by default. The loop here ensures that we update the
     // values only if they did not change in between.
-    Address ptr = kNullAddress;
-    do {
-      ptr = lowest_ever_allocated_;
-    } while ((low < ptr) &&
-             !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
-    do {
-      ptr = highest_ever_allocated_;
-    } while ((high > ptr) &&
-             !highest_ever_allocated_.compare_exchange_weak(ptr, high));
+    Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
+    while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
+                              ptr, low, std::memory_order_acq_rel)) {
+    }
+    ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
+    while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
+                               ptr, high, std::memory_order_acq_rel)) {
+    }
   }
   void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
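The comment in the MemoryAllocator hunk above ("we update the values only if they did not change in between") describes the same watermark idiom that UpdateHighWaterMark uses. A minimal standalone sketch, assuming a plain std::atomic field in place of V8's heap types and an illustrative function name: the initial load may be relaxed because a stale value only makes the first CAS fail, and a failed compare_exchange_weak hands back the competing value, so the loop ends either by installing the new bound or by observing that another thread already published one at least as good.

#include <atomic>
#include <cstdint>

// Raise *mark to candidate only if candidate is larger; otherwise leave the
// already-published (larger) value in place. Same loop shape as the
// cleaned-up MemoryChunk::UpdateHighWaterMark above.
void UpdateMax(std::atomic<intptr_t>* mark, intptr_t candidate) {
  intptr_t observed = mark->load(std::memory_order_relaxed);
  while (candidate > observed &&
         !mark->compare_exchange_weak(observed, candidate,
                                      std::memory_order_acq_rel,
                                      std::memory_order_relaxed)) {
    // A failed CAS leaves the value another thread installed in `observed`;
    // the loop condition then decides whether a retry is still needed.
  }
}

UpdateAllocatedSpaceLimits above is the same idiom applied twice, once with < for the lower bound and once with > for the upper bound.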
@@ -126,12 +126,12 @@ inline void DebugCheckZero(void* start, size_t byte_length) {
 bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
   uint64_t reservation_limit = kAddressSpaceLimit;
+  uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
   while (true) {
-    uint64_t old_count = reserved_address_space_.load();
     if (old_count > reservation_limit) return false;
     if (reservation_limit - old_count < num_bytes) return false;
-    if (reserved_address_space_.compare_exchange_weak(old_count,
-                                                      old_count + num_bytes)) {
+    if (reserved_address_space_.compare_exchange_weak(
+            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
       return true;
     }
   }
@@ -457,10 +457,9 @@ bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
   // permissions for the entire range (to be RW), so the operating system
   // should deal with that raciness. We know we succeeded when we can
   // compare/swap the old length with the new length.
-  size_t old_length = 0;
+  size_t old_length = byte_length_.load(std::memory_order_relaxed);
   size_t new_length = 0;
   while (true) {
-    old_length = byte_length_.load(std::memory_order_acquire);
     size_t current_pages = old_length / wasm::kWasmPageSize;
     // Check if we have exceeded the supplied maximum.
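The GrowWasmMemoryInPlace hunk above relies on the same property: success is defined by the compare/swap of the old length against the new one, so the single relaxed load before the loop suffices and the acquire load inside the loop can go. A standalone sketch under assumed names (kPageSize and max_pages are placeholders for wasm::kWasmPageSize and the supplied maximum, not V8's code):

#include <atomic>
#include <cstddef>

constexpr size_t kPageSize = 64 * 1024;  // stand-in for wasm::kWasmPageSize

// Grow an atomically published byte length by delta_pages, up to max_pages.
bool GrowInPlace(std::atomic<size_t>* byte_length, size_t delta_pages,
                 size_t max_pages) {
  size_t old_length = byte_length->load(std::memory_order_relaxed);
  while (true) {
    size_t current_pages = old_length / kPageSize;
    // Check that the grown size stays within the supplied maximum.
    if (current_pages > max_pages || max_pages - current_pages < delta_pages) {
      return false;
    }
    size_t new_length = (current_pages + delta_pages) * kPageSize;
    if (byte_length->compare_exchange_weak(old_length, new_length,
                                           std::memory_order_acq_rel)) {
      return true;  // we published the new length
    }
    // Another thread won the race; old_length now holds its value, so
    // re-derive current_pages and try again.
  }
}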
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+#include <atomic>
 #include <limits>
 #include "src/heap/heap-inl.h"
@@ -174,12 +175,12 @@ void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
 bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
   size_t reservation_limit = kAddressSpaceLimit;
+  size_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
   while (true) {
-    size_t old_count = reserved_address_space_.load();
     if (old_count > reservation_limit) return false;
     if (reservation_limit - old_count < num_bytes) return false;
-    if (reserved_address_space_.compare_exchange_weak(old_count,
-                                                      old_count + num_bytes)) {
+    if (reserved_address_space_.compare_exchange_weak(
+            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
       return true;
     }
   }