Commit 658609c9 authored by Michael Lippautz, committed by Commit Bot

[heap] Avoid fences when inserting into slot sets within the GC

This avoids emitting the costly barriers on arm.

Bug: chromium:651354
Change-Id: Ibb29e58f7c41aab37ed5c4971b2a754b4ecd7155
Reviewed-on: https://chromium-review.googlesource.com/533337
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46011}
parent 02a80f0d
......@@ -6,6 +6,7 @@
#define V8_ATOMIC_UTILS_H_
#include <limits.h>
#include <type_traits>
#include "src/base/atomicops.h"
#include "src/base/macros.h"
......@@ -265,19 +266,23 @@ class AsAtomic32 {
}
template <typename T>
static void Release_Store(T* addr, T new_value) {
static void Release_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static void Relaxed_Store(T* addr, T new_value) {
static void Relaxed_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static T Release_CompareAndSwap(T* addr, T old_value, T new_value) {
static T Release_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
return to_return_type<T>(base::Release_CompareAndSwap(
to_storage_addr(addr), to_storage_type(old_value),
......@@ -330,19 +335,23 @@ class AsAtomicWord {
}
template <typename T>
static void Release_Store(T* addr, T new_value) {
static void Release_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static void Relaxed_Store(T* addr, T new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
static void Relaxed_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static T Release_CompareAndSwap(T* addr, T old_value, T new_value) {
static T Release_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
return to_return_type<T>(base::Release_CompareAndSwap(
to_storage_addr(addr), to_storage_type(old_value),
......
......@@ -1535,9 +1535,11 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
Page::FromAddress(slot), slot);
} else if (p->IsEvacuationCandidate()) {
RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
Page::FromAddress(slot), slot);
}
}
}
......@@ -1635,9 +1637,11 @@ class YoungGenerationRecordMigratedSlotVisitor final
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
Page::FromAddress(slot), slot);
} else if (p->IsEvacuationCandidate() && IsLive(host)) {
RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
Page::FromAddress(slot), slot);
}
}
}
......
......@@ -21,14 +21,16 @@ class RememberedSet : public AllStatic {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
template <AccessMode access_mode = AccessMode::ATOMIC>
static void Insert(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->slot_set<type>();
SlotSet* slot_set = chunk->slot_set<type, access_mode>();
if (slot_set == nullptr) {
slot_set = chunk->AllocateSlotSet<type>();
}
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
slot_set[offset / Page::kPageSize].Insert<access_mode>(offset %
Page::kPageSize);
}
// Given a page and a slot in that page, this function returns true if
......
......@@ -52,17 +52,21 @@ class SlotSet : public Malloced {
// The slot offset specifies a slot at address page_start_ + slot_offset.
// This method should only be called on the main thread because concurrent
// allocation of the bucket is not thread-safe.
//
// AccessMode defines whether there can be concurrent access on the buckets
// or not.
template <AccessMode access_mode = AccessMode::ATOMIC>
void Insert(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
if (bucket == nullptr) {
bucket = AllocateBucket();
StoreBucket(&buckets_[bucket_index], bucket);
StoreBucket<access_mode>(&buckets_[bucket_index], bucket);
}
uint32_t mask = 1u << bit_index;
if ((LoadCell(&bucket[cell_index]) & mask) == 0) {
SetCellBits(&bucket[cell_index], mask);
if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
SetCellBits<access_mode>(&bucket[cell_index], mask);
}
}
......@@ -267,16 +271,27 @@ class SlotSet : public Malloced {
DeleteArray<uint32_t>(bucket);
}
template <AccessMode access_mode = AccessMode::ATOMIC>
Bucket LoadBucket(Bucket* bucket) {
return base::AsAtomicWord::Acquire_Load(bucket);
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicWord::Acquire_Load(bucket);
return *bucket;
}
void StoreBucket(Bucket* bucket, uint32_t* value) {
base::AsAtomicWord::Release_Store(bucket, value);
template <AccessMode access_mode = AccessMode::ATOMIC>
void StoreBucket(Bucket* bucket, Bucket value) {
if (access_mode == AccessMode::ATOMIC) {
base::AsAtomicWord::Release_Store(bucket, value);
} else {
*bucket = value;
}
}
template <AccessMode access_mode = AccessMode::ATOMIC>
uint32_t LoadCell(uint32_t* cell) {
return base::AsAtomic32::Acquire_Load(cell);
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomic32::Acquire_Load(cell);
return *cell;
}
void StoreCell(uint32_t* cell, uint32_t value) {
......@@ -287,8 +302,13 @@ class SlotSet : public Malloced {
base::AsAtomic32::SetBits(cell, 0u, mask);
}
template <AccessMode access_mode = AccessMode::ATOMIC>
void SetCellBits(uint32_t* cell, uint32_t mask) {
base::AsAtomic32::SetBits(cell, mask, mask);
if (access_mode == AccessMode::ATOMIC) {
base::AsAtomic32::SetBits(cell, mask, mask);
} else {
*cell = (*cell & ~mask) | mask;
}
}
// Converts the slot offset into bucket/cell/bit index.
......
......@@ -536,10 +536,12 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
chunk->slot_set_[OLD_TO_NEW].SetValue(nullptr);
chunk->slot_set_[OLD_TO_OLD].SetValue(nullptr);
chunk->typed_slot_set_[OLD_TO_NEW].SetValue(nullptr);
chunk->typed_slot_set_[OLD_TO_OLD].SetValue(nullptr);
base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
......@@ -1232,12 +1234,13 @@ template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
// Lazily allocates the slot set of the given remembered-set type. Uses a
// release CAS to publish the new array; if another thread won the race, the
// freshly allocated array is discarded and the winner's is returned.
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
  SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
  SlotSet* old_slot_set = base::AsAtomicWord::Release_CompareAndSwap(
      &slot_set_[type], nullptr, slot_set);
  if (old_slot_set != nullptr) {
    // Lost the race: another thread already installed a slot set.
    delete[] slot_set;
    slot_set = old_slot_set;
  }
  DCHECK(slot_set);
  return slot_set;
}
......@@ -1246,10 +1249,10 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
// Frees the slot set of the given type, if any, and clears the published
// pointer.
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
  SlotSet* slot_set = base::AsAtomicWord::Acquire_Load(&slot_set_[type]);
  if (slot_set) {
    delete[] slot_set;
    base::AsAtomicWord::Release_Store(&slot_set_[type], nullptr);
  }
}
......@@ -1258,14 +1261,15 @@ template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
// Lazily allocates the typed slot set of the given type. Publishes via a
// release CAS; on a lost race the local allocation is deleted and the
// installed set is returned. (The stray `return slot_set;` left in the diff
// referenced a deleted variable and is dropped.)
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
  TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
  TypedSlotSet* old_value = base::AsAtomicWord::Release_CompareAndSwap(
      &typed_slot_set_[type], nullptr, typed_slot_set);
  if (old_value != nullptr) {
    // Lost the race: keep the winner's set.
    delete typed_slot_set;
    typed_slot_set = old_value;
  }
  DCHECK(typed_slot_set);
  return typed_slot_set;
}
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
......@@ -1273,10 +1277,11 @@ template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
// Frees the typed slot set of the given type, if any, and clears the
// published pointer.
template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
  TypedSlotSet* typed_slot_set =
      base::AsAtomicWord::Acquire_Load(&typed_slot_set_[type]);
  if (typed_slot_set) {
    delete typed_slot_set;
    base::AsAtomicWord::Release_Store(&typed_slot_set_[type], nullptr);
  }
}
......
......@@ -447,14 +447,18 @@ class MemoryChunk {
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
template <RememberedSetType type>
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* slot_set() {
return slot_set_[type].Value();
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicWord::Acquire_Load(&slot_set_[type]);
return slot_set_[type];
}
template <RememberedSetType type>
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
return typed_slot_set_[type].Value();
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicWord::Acquire_Load(&typed_slot_set_[type]);
return typed_slot_set_[type];
}
inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
......@@ -618,9 +622,8 @@ class MemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
base::AtomicValue<SlotSet*> slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
base::AtomicValue<TypedSlotSet*>
typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
SkipList* skip_list_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment