Commit d843cda7 authored by Samuel Groß, committed by V8 LUCI CQ

[sandbox] Move ExternalPointerTable entry logic into new Entry class

This CL introduces a new ExternalPointerTable::Entry class and moves all
low-level logic related to entry management into this class.

Bug: v8:10391
Change-Id: Ib7eb05da1d277cb665503e98b3f074520e572bad
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3829485
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82825}
parent 75391be2
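The diff below drives the new Entry class through calls such as Entry::MakeRegularEntry, Untag, SetMarkBit, and Entry::Decode. For orientation, here is a minimal sketch of what that surface could look like, inferred purely from the call sites in the diff; the tag mask and bit positions are assumed stand-ins, not V8's actual layout:

// Illustrative sketch only; the constants are assumptions, not V8's values.
#include <cstdint>

using Address = uintptr_t;

constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;  // assumed
constexpr uint64_t kExternalPointerMarkBit = uint64_t{1} << 62;   // assumed

class Entry {
 public:
  Entry() = default;

  // Reinterpret a raw word as an entry (cf. Entry::Decode in the diff).
  static Entry Decode(uint64_t value) { return Entry(value); }

  // A regular entry is the pointer OR'ed with its type tag and mark bit.
  static Entry MakeRegularEntry(Address value, uint64_t tag) {
    return Entry(value | tag);
  }

  // Removing the expected tag yields the pointer only if the tags match;
  // otherwise stray top bits remain and the result is unusable.
  Address Untag(uint64_t tag) const { return value_ & ~tag; }

  bool IsMarked() const { return (value_ & kExternalPointerMarkBit) != 0; }
  void SetMarkBit() { value_ |= kExternalPointerMarkBit; }
  void ClearMarkBit() { value_ &= ~kExternalPointerMarkBit; }

  bool operator==(const Entry& other) const { return value_ == other.value_; }

  // Predicates like IsRegularEntry()/IsFreelistEntry()/IsEvacuationEntry()
  // would compare the tag bits against the special tags (omitted here).

 private:
  explicit Entry(uint64_t value) : value_(value) {}
  uint64_t value_ = 0;
};

Centralizing the encoding this way means the table code in the diff never manipulates raw Address words directly.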
@@ -422,18 +422,18 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = {
    (HasMarkBit ? kExternalPointerMarkBit : 0))

 enum ExternalPointerTag : uint64_t {
   // Empty tag value. Mostly used as placeholder.
-  kExternalPointerNullTag = MAKE_TAG(0, 0b00000000),
+  kExternalPointerNullTag = MAKE_TAG(0, 0b00000000),
   // Tag to use for unsandboxed external pointers, which are still stored as
   // raw pointers on the heap.
-  kUnsandboxedExternalPointerTag = MAKE_TAG(0, 0b00000000),
+  kUnsandboxedExternalPointerTag = MAKE_TAG(0, 0b00000000),
   // External pointer tag that will match any external pointer. Use with care!
-  kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111),
+  kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111),
   // The free entry tag has all type bits set so every type check with a
   // different type fails. It also doesn't have the mark bit set as free
   // entries are (by definition) not alive.
-  kExternalPointerFreeEntryTag = MAKE_TAG(0, 0b11111111),
+  kExternalPointerFreeEntryTag = MAKE_TAG(0, 0b11111111),
   // Evacuation entries are used during external pointer table compaction.
-  kEvacuationEntryTag = MAKE_TAG(1, 0b11100111),
+  kExternalPointerEvacuationEntryTag = MAKE_TAG(1, 0b11100111),
   ALL_EXTERNAL_POINTER_TAGS(EXTERNAL_POINTER_TAG_ENUM)
 };
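MAKE_TAG above packs a per-type bit pattern plus an optional mark bit into the unused top bits of a pointer-sized entry. Here is a self-contained check, under an assumed shift and mark-bit position, of the property the free-entry comment relies on: untagging with the matching tag restores the original pointer, while any mismatched tag leaves stray high bits behind, so a type-confused access faults instead of dereferencing a valid pointer:

// Sketch with assumed constants (not V8's real values).
#include <cassert>
#include <cstdint>

constexpr int kTagShift = 48;                       // assumed tag position
constexpr uint64_t kMarkBit = uint64_t{1} << 62;    // assumed mark bit

constexpr uint64_t MakeTag(bool mark_bit, uint64_t type_bits) {
  return (type_bits << kTagShift) | (mark_bit ? kMarkBit : 0);
}

int main() {
  constexpr uint64_t kFooTag = MakeTag(true, 0b10000011);
  constexpr uint64_t kBarTag = MakeTag(true, 0b10000101);
  const uint64_t pointer = 0x0000123456789000;

  const uint64_t entry = pointer | kFooTag;
  assert((entry & ~kFooTag) == pointer);  // matching tag: clean pointer
  assert((entry & ~kBarTag) != pointer);  // wrong tag: stray top bits remain
  return 0;
}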
@@ -17,33 +17,34 @@ namespace internal {

 Address ExternalPointerTable::Get(ExternalPointerHandle handle,
                                   ExternalPointerTag tag) const {
-  uint32_t index = handle_to_index(handle);
-  Address entry = load_atomic(index);
-  DCHECK(!is_free(entry));
-  return entry & ~tag;
+  uint32_t index = HandleToIndex(handle);
+  Entry entry = RelaxedLoad(index);
+  DCHECK(entry.IsRegularEntry());
+  return entry.Untag(tag);
 }

 void ExternalPointerTable::Set(ExternalPointerHandle handle, Address value,
                                ExternalPointerTag tag) {
   DCHECK_NE(kNullExternalPointerHandle, handle);
   DCHECK_EQ(0, value & kExternalPointerTagMask);
-  DCHECK(is_marked(tag));
-  uint32_t index = handle_to_index(handle);
-  store_atomic(index, value | tag);
+  DCHECK(tag & kExternalPointerMarkBit);
+  uint32_t index = HandleToIndex(handle);
+  Entry entry = Entry::MakeRegularEntry(value, tag);
+  RelaxedStore(index, entry);
 }

 Address ExternalPointerTable::Exchange(ExternalPointerHandle handle,
                                        Address value, ExternalPointerTag tag) {
   DCHECK_NE(kNullExternalPointerHandle, handle);
   DCHECK_EQ(0, value & kExternalPointerTagMask);
-  DCHECK(is_marked(tag));
-  uint32_t index = handle_to_index(handle);
-  Address entry = exchange_atomic(index, value | tag);
-  DCHECK(!is_free(entry));
-  return entry & ~tag;
+  DCHECK(tag & kExternalPointerMarkBit);
+  uint32_t index = HandleToIndex(handle);
+  Entry new_entry = Entry::MakeRegularEntry(value, tag);
+  Entry old_entry = RelaxedExchange(index, new_entry);
+  DCHECK(old_entry.IsRegularEntry());
+  return old_entry.Untag(tag);
 }

 ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry(
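Get, Set, and Exchange above now go through RelaxedLoad/RelaxedStore/RelaxedExchange helpers. Presumably these wrap relaxed atomic accesses to the table's backing store, roughly like the following sketch (the std::atomic buffer is an assumption; V8 itself layers this over its base::Relaxed_* primitives):

// Sketch of what the Relaxed* helpers plausibly wrap; not V8's actual code.
#include <atomic>
#include <cstdint>

class Table {
 public:
  explicit Table(std::atomic<uint64_t>* buffer) : buffer_(buffer) {}

  uint64_t RelaxedLoad(uint32_t index) const {
    return buffer_[index].load(std::memory_order_relaxed);
  }
  void RelaxedStore(uint32_t index, uint64_t entry) {
    buffer_[index].store(entry, std::memory_order_relaxed);
  }
  uint64_t RelaxedExchange(uint32_t index, uint64_t entry) {
    return buffer_[index].exchange(entry, std::memory_order_relaxed);
  }

 private:
  std::atomic<uint64_t>* buffer_;  // assumed backing store, one word per entry
};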
@@ -78,17 +79,19 @@ ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry(
     DCHECK_LT(freelist_head, capacity());
     index = freelist_head;

-    Address entry = load_atomic(index);
-    uint32_t new_freelist_head = extract_next_entry_from_freelist_entry(entry);
+    Entry entry = RelaxedLoad(index);
+    DCHECK(entry.IsFreelistEntry());
+    uint32_t new_freelist_head = entry.ExtractNextFreelistEntry();

     uint32_t old_val = base::Relaxed_CompareAndSwap(
         &freelist_head_, freelist_head, new_freelist_head);
     success = old_val == freelist_head;
   }

-  store_atomic(index, initial_value | tag);
+  Entry entry = Entry::MakeRegularEntry(initial_value, tag);
+  RelaxedStore(index, entry);

-  return index_to_handle(index);
+  return IndexToHandle(index);
 }

 ExternalPointerHandle ExternalPointerTable::AllocateEvacuationEntry(
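AllocateAndInitializeEntry above (and AllocateEvacuationEntry below) pop the freelist head with a compare-and-swap so that two racing allocations can never hand out the same entry. A simplified, runnable sketch of that pop, with a plain next_ array standing in for the packed freelist entries and no growth path:

// Sketch of the lock-free freelist pop used by the allocation paths.
#include <atomic>
#include <cstdint>
#include <vector>

class FreelistTable {
 public:
  // next_[i] holds the next free index; 0 terminates the list (entry 0 is
  // the reserved null entry and is never allocated).
  std::vector<uint32_t> next_;
  std::atomic<uint32_t> freelist_head_{0};

  // Pop the freelist head, retrying if another thread raced us.
  // Returns 0 when the freelist is empty.
  uint32_t Allocate() {
    uint32_t head = freelist_head_.load(std::memory_order_relaxed);
    while (head != 0) {
      uint32_t new_head = next_[head];
      // On failure, compare_exchange_weak reloads `head` and we retry.
      if (freelist_head_.compare_exchange_weak(head, new_head,
                                               std::memory_order_relaxed)) {
        return head;
      }
    }
    return 0;
  }
};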
@@ -111,26 +114,26 @@ ExternalPointerHandle ExternalPointerTable::AllocateEvacuationEntry(
     if (index >= start_of_evacuation_area) return kNullExternalPointerHandle;

-    Address entry = load_atomic(index);
-    uint32_t new_freelist_head = extract_next_entry_from_freelist_entry(entry);
+    Entry entry = RelaxedLoad(index);
+    DCHECK(entry.IsFreelistEntry());
+    uint32_t new_freelist_head = entry.ExtractNextFreelistEntry();

     uint32_t old_val = base::Relaxed_CompareAndSwap(
         &freelist_head_, freelist_head, new_freelist_head);
     success = old_val == freelist_head;
   }

-  return index_to_handle(index);
+  return IndexToHandle(index);
 }

 uint32_t ExternalPointerTable::FreelistSize() {
-  Address entry = 0;
-  while (!is_free(entry)) {
+  Entry entry;
+  do {
     uint32_t freelist_head = base::Relaxed_Load(&freelist_head_);
-    if (!freelist_head) {
-      return 0;
-    }
-    entry = load_atomic(freelist_head);
-  }
-  uint32_t freelist_size = extract_freelist_size_from_freelist_entry(entry);
+    if (!freelist_head) return 0;
+    entry = RelaxedLoad(freelist_head);
+  } while (!entry.IsFreelistEntry());
+  uint32_t freelist_size = entry.ExtractFreelistSize();
   DCHECK_LE(freelist_size, capacity());
   return freelist_size;
 }
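FreelistSize above reads both the next free index (ExtractNextFreelistEntry) and the total list length (ExtractFreelistSize) out of a single entry, so a freelist entry evidently packs both values alongside the free-entry tag. One plausible packing, offered only as an assumption for illustration:

// Assumed layout: low 32 bits = next free index, bits 32..47 = list size,
// top 16 bits = the free-entry tag so type checks on freed entries fail.
#include <cstdint>

constexpr uint64_t kFreeEntryTag = uint64_t{0xff} << 48;  // assumed placement

constexpr uint64_t MakeFreelistEntry(uint32_t next, uint32_t size) {
  return kFreeEntryTag | (uint64_t{size} << 32) | next;
}
constexpr uint32_t ExtractNextFreelistEntry(uint64_t entry) {
  return static_cast<uint32_t>(entry);
}
constexpr uint32_t ExtractFreelistSize(uint64_t entry) {
  return static_cast<uint32_t>((entry >> 32) & 0xffff);
}

int main() {
  constexpr uint64_t entry = MakeFreelistEntry(/*next=*/42, /*size=*/7);
  static_assert(ExtractNextFreelistEntry(entry) == 42, "round-trips next");
  static_assert(ExtractFreelistSize(entry) == 7, "round-trips size");
  return 0;
}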
@@ -140,7 +143,7 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
   static_assert(sizeof(base::Atomic64) == sizeof(Address));
   DCHECK_EQ(handle, *reinterpret_cast<ExternalPointerHandle*>(handle_location));

-  uint32_t index = handle_to_index(handle);
+  uint32_t index = HandleToIndex(handle);

   // Check if the entry should be evacuated for table compaction.
   // The current value of the start of the evacuation area is cached in a local
@@ -153,11 +156,11 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
     ExternalPointerHandle new_handle =
         AllocateEvacuationEntry(current_start_of_evacuation_area);
     if (new_handle) {
-      DCHECK_LT(handle_to_index(new_handle), current_start_of_evacuation_area);
-      uint32_t index = handle_to_index(new_handle);
+      DCHECK_LT(HandleToIndex(new_handle), current_start_of_evacuation_area);
+      uint32_t index = HandleToIndex(new_handle);
       // No need for an atomic store as the entry will only be accessed during
       // sweeping.
-      store(index, make_evacuation_entry(handle_location));
+      Store(index, Entry::MakeEvacuationEntry(handle_location));
 #ifdef DEBUG
       // Mark the handle as visited in debug builds to detect double
       // initialization of external pointer fields.
@@ -181,18 +184,19 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,

   // Even if the entry is marked for evacuation, it still needs to be marked as
   // alive as it may be visited during sweeping before being evacuated.
-  base::Atomic64 old_val = load_atomic(index);
-  base::Atomic64 new_val = set_mark_bit(old_val);
-  DCHECK(!is_free(old_val));
+  Entry old_entry = RelaxedLoad(index);
+  DCHECK(old_entry.IsRegularEntry());
+  Entry new_entry = old_entry;
+  new_entry.SetMarkBit();

   // We don't need to perform the CAS in a loop: if the new value is not equal
   // to the old value, then the mutator must've just written a new value into
   // the entry. This in turn must've set the marking bit already (see
   // ExternalPointerTable::Set), so we don't need to do it again.
-  base::Atomic64* ptr = reinterpret_cast<base::Atomic64*>(entry_address(index));
-  base::Atomic64 val = base::Relaxed_CompareAndSwap(ptr, old_val, new_val);
-  DCHECK((val == old_val) || is_marked(val));
-  USE(val);
+  Entry entry = RelaxedCompareAndSwap(index, old_entry, new_entry);
+  DCHECK((entry == old_entry) || entry.IsMarked());
+  USE(entry);
 }

 bool ExternalPointerTable::IsCompacting() {
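The Mark path above gets away with a single compare-and-swap and no retry loop: if the CAS loses, a mutator's Set must have raced in, and Set only ever writes entries whose mark bit is already set. A sketch of that invariant with an assumed mark-bit position:

// Mark one table slot as alive. A single CAS suffices: losing the race means
// the mutator just stored a fresh entry, and Set() always stores entries
// with the mark bit set, so the slot ends up marked either way.
#include <atomic>
#include <cstdint>

constexpr uint64_t kMarkBit = uint64_t{1} << 62;  // assumed mark-bit position

void MarkSlot(std::atomic<uint64_t>& slot) {
  uint64_t old_entry = slot.load(std::memory_order_relaxed);
  uint64_t new_entry = old_entry | kMarkBit;
  // No loop: on failure a concurrent Set() already left the bit set.
  slot.compare_exchange_strong(old_entry, new_entry,
                               std::memory_order_relaxed);
}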
@@ -66,7 +66,7 @@ void ExternalPointerTable::Init(Isolate* isolate) {
   // Set up the special null entry. This entry must contain nullptr so that
   // empty EmbedderDataSlots represent nullptr.
   static_assert(kNullExternalPointerHandle == 0);
-  store(kNullExternalPointerHandle, kNullAddress);
+  Store(0, Entry::MakeNullEntry());
 }

 void ExternalPointerTable::TearDown() {
@@ -145,24 +145,25 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
   // Skip the special null entry. This also guarantees that the first block
   // will never be decommitted.
   // The null entry may have been marked as alive (if any live object was
   // referencing it), which is fine; the entry will simply keep the bit set.
   DCHECK_GE(capacity(), 1);
   uint32_t table_end = last_in_use_block + kEntriesPerBlock;
   DCHECK(IsAligned(table_end, kEntriesPerBlock));
   for (uint32_t i = table_end - 1; i > 0; i--) {
     // No other threads are active during sweep, so there is no need to use
     // atomic operations here.
-    Address entry = load(i);
-    if (is_evacuation_entry(entry)) {
+    Entry entry = Load(i);
+    if (entry.IsEvacuationEntry()) {
       // Resolve the evacuation entry: take the pointer to the handle from the
       // evacuation entry, copy the entry to its new location, and finally
       // update the handle to point to the new entry.
-      Address evacuation_entry = load(i);
       ExternalPointerHandle* handle_location =
           reinterpret_cast<ExternalPointerHandle*>(
-              extract_handle_location_from_evacuation_entry(evacuation_entry));
+              entry.ExtractHandleLocation());
       ExternalPointerHandle old_handle = *handle_location;
-      ExternalPointerHandle new_handle = index_to_handle(i);
+      ExternalPointerHandle new_handle = IndexToHandle(i);

       // For the compaction algorithm to work optimally, double initialization
       // of entries is forbidden; see below. This DCHECK can detect double
@@ -178,11 +179,12 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
       // external pointer slot is re-initialized, in which case the old_handle
       // may now also point before the evacuation area. For that reason,
      // re-initialization of external pointer slots is forbidden.
-      DCHECK_GE(handle_to_index(old_handle), first_block_of_evacuation_area);
-      DCHECK_LT(handle_to_index(new_handle), first_block_of_evacuation_area);
-      Address entry_to_evacuate = load(handle_to_index(old_handle));
-      store(i, clear_mark_bit(entry_to_evacuate));
+      DCHECK_GE(HandleToIndex(old_handle), first_block_of_evacuation_area);
+      DCHECK_LT(HandleToIndex(new_handle), first_block_of_evacuation_area);
+      Entry entry_to_evacuate = Load(HandleToIndex(old_handle));
+      entry_to_evacuate.ClearMarkBit();
+      Store(i, entry_to_evacuate);
       *handle_location = new_handle;

 #ifdef DEBUG
@@ -191,8 +193,9 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
       // barriers, so we'd like to avoid them. See the compaction algorithm
       // explanation in external-pointer-table.h for more details.
       constexpr Address kClobberedEntryMarker = static_cast<Address>(-1);
-      DCHECK_NE(entry_to_evacuate, kClobberedEntryMarker);
-      store(handle_to_index(old_handle), kClobberedEntryMarker);
+      const Entry kClobberedEntry = Entry::Decode(kClobberedEntryMarker);
+      DCHECK_NE(entry_to_evacuate, kClobberedEntry);
+      Store(HandleToIndex(old_handle), kClobberedEntry);
 #endif  // DEBUG

       // While we know that the old entry is now free, we don't add it to (the
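Taken together, the SweepAndCompact fragments above resolve an evacuation entry in three steps: read the handle location recorded in the evacuation entry, copy the live entry (mark bit cleared) into the evacuation slot, and repoint the on-heap handle. A single-threaded sketch (sweeping runs with no other table users), with the simplifying assumption that a handle equals its table index; the real code converts with HandleToIndex/IndexToHandle:

// Single-threaded sketch; plain loads/stores are fine during sweeping.
#include <cstdint>
#include <vector>

constexpr uint64_t kMarkBit = uint64_t{1} << 62;  // assumed mark-bit position
using ExternalPointerHandle = uint32_t;

void ResolveEvacuationEntry(std::vector<uint64_t>& table, uint32_t new_index,
                            ExternalPointerHandle* handle_location) {
  uint32_t old_index = *handle_location;          // assumed: handle == index
  uint64_t entry_to_evacuate = table[old_index];
  table[new_index] = entry_to_evacuate & ~kMarkBit;  // copy, mark bit cleared
  *handle_location = new_index;                      // repoint on-heap handle
}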
@@ -201,14 +204,15 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
       // that the blocks out of which entries are evacuated will all be
       // decommitted anyway after this loop, which is usually the case unless
       // compaction was already aborted during marking.
-    } else if (!is_marked(entry)) {
+    } else if (!entry.IsMarked()) {
       current_freelist_size++;
-      Address entry =
-          make_freelist_entry(current_freelist_head, current_freelist_size);
-      store(i, entry);
+      Entry entry = Entry::MakeFreelistEntry(current_freelist_head,
+                                             current_freelist_size);
+      Store(i, entry);
       current_freelist_head = i;
     } else {
-      store(i, clear_mark_bit(entry));
+      entry.ClearMarkBit();
+      Store(i, entry);
     }

     if (last_in_use_block == i) {
@@ -311,9 +315,9 @@ uint32_t ExternalPointerTable::Grow(Isolate* isolate) {
   uint32_t current_freelist_size = 1;
   for (uint32_t i = start; i < last; i++) {
     uint32_t next_entry = i + 1;
-    store(i, make_freelist_entry(next_entry, current_freelist_size++));
+    Store(i, Entry::MakeFreelistEntry(next_entry, current_freelist_size++));
   }
-  store(last, make_freelist_entry(0, current_freelist_size));
+  Store(last, Entry::MakeFreelistEntry(0, current_freelist_size));

   // This must be a release store to prevent the preceding stores to the
   // freelist from being reordered past this store. See
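Grow above threads every entry of the newly committed region onto the freelist: each entry names its successor and records the running list size, and the last entry terminates the chain with 0. A sketch reusing the packing helper assumed earlier:

// Link the entries [start, last] of a freshly grown region onto a freelist.
#include <cstdint>
#include <vector>

// Same assumed packing as in the freelist sketch above (illustrative only).
constexpr uint64_t MakeFreelistEntry(uint32_t next, uint32_t size) {
  return (uint64_t{0xff} << 48) | (uint64_t{size} << 32) | next;
}

void LinkNewBlock(std::vector<uint64_t>& table, uint32_t start, uint32_t last) {
  uint32_t current_freelist_size = 1;
  for (uint32_t i = start; i < last; i++) {
    table[i] = MakeFreelistEntry(i + 1, current_freelist_size++);
  }
  table[last] = MakeFreelistEntry(0, current_freelist_size);  // 0 = list end
}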