Commit 2201516f authored by Leszek Swirski, committed by Commit Bot

[cleanup] Clean-up CAS loops

Clean up a couple of CAS loops to avoid re-loading after a
compare_exchange (which already updates the old value), and loosen the
memory ordering to acquire-release to avoid unnecessary fences.
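
A minimal sketch of the resulting pattern (illustrative only, not code
from this change; the function and names are hypothetical):
compare_exchange_weak writes the value it actually observed back into
its expected argument on failure, so a separate re-load at the top of
the loop is redundant, and acquire-release ordering suffices for these
read-modify-write updates.

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical monotonic-maximum update with the same shape as the
// cleaned-up high-water-mark loops: load once (relaxed), then let each
// failed CAS refresh `observed` instead of re-loading the atomic.
void UpdateMax(std::atomic<intptr_t>& value, intptr_t candidate) {
  intptr_t observed = value.load(std::memory_order_relaxed);
  while ((candidate > observed) &&
         !value.compare_exchange_weak(observed, candidate,
                                      std::memory_order_acq_rel)) {
    // On failure `observed` now holds the current value; just retry.
  }
}
```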

Change-Id: Ifb8e5e5136f687ca5a71417a5d131a7023add054
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2050390
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66235}
parent 9094a41e
--- a/src/base/atomic-utils.h
+++ b/src/base/atomic-utils.h
@@ -130,13 +130,14 @@ class AsAtomicImpl {
   static bool SetBits(T* addr, T bits, T mask) {
     STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
     DCHECK_EQ(bits & ~mask, static_cast<T>(0));
-    T old_value;
-    T new_value;
+    T old_value = Relaxed_Load(addr);
+    T new_value, old_value_before_cas;
     do {
-      old_value = Relaxed_Load(addr);
       if ((old_value & mask) == bits) return false;
       new_value = (old_value & ~mask) | bits;
-    } while (Release_CompareAndSwap(addr, old_value, new_value) != old_value);
+      old_value_before_cas = old_value;
+      old_value = Release_CompareAndSwap(addr, old_value, new_value);
+    } while (old_value != old_value_before_cas);
     return true;
   }
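Note that V8's Release_CompareAndSwap, unlike
std::atomic::compare_exchange_*, returns the value it found at the
address instead of updating an expected argument in place; that is why
the loop above must stash old_value_before_cas. A sketch of that API
shape, built on a hypothetical std::atomic-based stand-in rather than
V8's actual helper:

```cpp
#include <atomic>

// Hypothetical stand-in for a value-returning release CAS: returns the
// value observed at *addr; the store happens only if it equals expected.
template <typename T>
T ReleaseCompareAndSwap(std::atomic<T>* addr, T expected, T desired) {
  addr->compare_exchange_strong(expected, desired, std::memory_order_release,
                                std::memory_order_relaxed);
  return expected;  // On failure this is the value actually seen.
}

// Sketch of the cleaned-up SetBits loop against that stand-in.
template <typename T>
bool SetBitsSketch(std::atomic<T>* addr, T bits, T mask) {
  T old_value = addr->load(std::memory_order_relaxed);
  T new_value, old_value_before_cas;
  do {
    if ((old_value & mask) == bits) return false;  // Bits already set.
    new_value = (old_value & ~mask) | bits;
    old_value_before_cas = old_value;
    // The CAS result feeds the next iteration; no extra load needed.
    old_value = ReleaseCompareAndSwap(addr, old_value, new_value);
  } while (old_value != old_value_before_cas);
  return true;
}
```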
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_
 
+#include <atomic>
 #include <list>
 #include <map>
 #include <memory>
@@ -660,12 +661,11 @@ class MemoryChunk : public BasicMemoryChunk {
     // to another chunk. See the comment to Page::FromAllocationAreaAddress.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
-    intptr_t old_mark = 0;
-    do {
-      old_mark = chunk->high_water_mark_;
-    } while (
-        (new_mark > old_mark) &&
-        !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
+    intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
+    while ((new_mark > old_mark) &&
+           !chunk->high_water_mark_.compare_exchange_weak(
+               old_mark, new_mark, std::memory_order_acq_rel)) {
+    }
   }
 
   static inline void MoveExternalBackingStoreBytes(
@@ -1453,15 +1453,14 @@ class MemoryAllocator {
     // The use of atomic primitives does not guarantee correctness (wrt.
     // desired semantics) by default. The loop here ensures that we update the
     // values only if they did not change in between.
-    Address ptr = kNullAddress;
-    do {
-      ptr = lowest_ever_allocated_;
-    } while ((low < ptr) &&
-             !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
-    do {
-      ptr = highest_ever_allocated_;
-    } while ((high > ptr) &&
-             !highest_ever_allocated_.compare_exchange_weak(ptr, high));
+    Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
+    while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
+                              ptr, low, std::memory_order_acq_rel)) {
+    }
+    ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
+    while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
+                               ptr, high, std::memory_order_acq_rel)) {
+    }
   }
 
   void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
--- a/src/objects/backing-store.cc
+++ b/src/objects/backing-store.cc
@@ -126,12 +126,12 @@ inline void DebugCheckZero(void* start, size_t byte_length) {
 bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
   uint64_t reservation_limit = kAddressSpaceLimit;
+  uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
   while (true) {
-    uint64_t old_count = reserved_address_space_.load();
     if (old_count > reservation_limit) return false;
     if (reservation_limit - old_count < num_bytes) return false;
-    if (reserved_address_space_.compare_exchange_weak(old_count,
-                                                      old_count + num_bytes)) {
+    if (reserved_address_space_.compare_exchange_weak(
+            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
       return true;
     }
   }
@@ -457,10 +457,9 @@ bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
   // permissions for the entire range (to be RW), so the operating system
   // should deal with that raciness. We know we succeeded when we can
   // compare/swap the old length with the new length.
-  size_t old_length = 0;
+  size_t old_length = byte_length_.load(std::memory_order_relaxed);
   size_t new_length = 0;
   while (true) {
-    old_length = byte_length_.load(std::memory_order_acquire);
     size_t current_pages = old_length / wasm::kWasmPageSize;
     // Check if we have exceed the supplied maximum.
--- a/src/wasm/wasm-memory.cc
+++ b/src/wasm/wasm-memory.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <atomic>
 #include <limits>
 
 #include "src/heap/heap-inl.h"
@@ -174,12 +175,12 @@ void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
 bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
   size_t reservation_limit = kAddressSpaceLimit;
+  size_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
   while (true) {
-    size_t old_count = reserved_address_space_.load();
     if (old_count > reservation_limit) return false;
     if (reservation_limit - old_count < num_bytes) return false;
-    if (reserved_address_space_.compare_exchange_weak(old_count,
-                                                      old_count + num_bytes)) {
+    if (reserved_address_space_.compare_exchange_weak(
+            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
       return true;
     }
   }
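The two ReserveAddressSpace loops (BackingStore and WasmMemoryTracker)
now share the same shape. As a self-contained sketch, with a placeholder
limit and hypothetical names:

```cpp
#include <atomic>
#include <cstdint>

constexpr uint64_t kLimit = uint64_t{1} << 40;  // Placeholder limit.
std::atomic<uint64_t> reserved{0};

bool ReserveSketch(uint64_t num_bytes) {
  uint64_t old_count = reserved.load(std::memory_order_relaxed);
  while (true) {
    // Both checks re-run against the freshest observed count.
    if (old_count > kLimit) return false;
    if (kLimit - old_count < num_bytes) return false;
    if (reserved.compare_exchange_weak(old_count, old_count + num_bytes,
                                       std::memory_order_acq_rel)) {
      return true;
    }
    // A failed CAS updated old_count in place; loop to re-validate.
  }
}
```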