Commit 0db34dbe authored by mlippautz, committed by Commit bot

Revert of [heap] Concurrency support for heap book-keeping info (patchset #4 id:60001 of https://codereview.chromium.org/1340923004/ )

Reason for revert:
crashing: http://build.chromium.org/p/client.v8/builders/V8%20Win32%20-%20debug%20-%203/builds/4716

Original issue's description:
> [heap] Concurrency support for heap book-keeping info.
>
> Adds concurrency support for:
> - MemoryChunk: Fragmentation counters
> - MemoryChunk: High-water mark
> - MemoryAllocator: Lowest and highest ever allocated addresses, size, and
>   capacity
>
> R=hpayer@chromium.org
> BUG=chromium:524425
> LOG=N
>
> Committed: https://crrev.com/63190721cda4966e01d71e92a730ce48ea789fbc
> Cr-Commit-Position: refs/heads/master@{#30749}

TBR=hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1340323002

Cr-Commit-Position: refs/heads/master@{#30752}
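For context, the reverted patch replaced plain counters with atomic book-keeping so that the high-water mark and the allocated-address range could be updated from multiple threads. The idiom it used (visible in the removed UpdateHighWaterMark code below) is a compare-and-swap loop that only ever moves the value monotonically. A minimal standalone sketch of that pattern, using std::atomic rather than V8's AtomicValue wrapper (the function name here is illustrative, not V8 API):

#include <atomic>
#include <cstdint>

// Sketch only: raise a shared high-water mark without a lock. The CAS retries
// until either the stored value is already >= new_mark or the update succeeds,
// so concurrent callers can never lower the mark.
inline void RaiseHighWaterMark(std::atomic<intptr_t>* mark, intptr_t new_mark) {
  intptr_t old_mark = mark->load(std::memory_order_relaxed);
  while (new_mark > old_mark &&
         !mark->compare_exchange_weak(old_mark, new_mark,
                                      std::memory_order_relaxed)) {
    // compare_exchange_weak reloads old_mark on failure; loop and re-check.
  }
}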
parent 4d6c4a3e
@@ -30,11 +30,6 @@ class AtomicNumber {
     base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
   }
 
-  V8_INLINE T operator=(T value) {
-    SetValue(value);
-    return value;
-  }
-
  private:
   STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
@@ -46,8 +41,6 @@ class AtomicNumber {
 template <typename T>
 class AtomicValue {
  public:
-  AtomicValue() : value_(0) {}
-
   explicit AtomicValue(T initial)
       : value_(cast_helper<T>::to_storage_type(initial)) {}
...
@@ -322,7 +322,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
 
 void MemoryAllocator::TearDown() {
   // Check that spaces were torn down before MemoryAllocator.
-  DCHECK(size_.Value() == 0);
+  DCHECK(size_ == 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
@@ -347,9 +347,9 @@ void MemoryAllocator::FreeNewSpaceMemory(Address addr,
   LOG(isolate_, DeleteEvent("NewSpace", addr));
 
   DCHECK(reservation->IsReserved());
-  const intptr_t size = static_cast<intptr_t>(reservation->size());
-  DCHECK(size_.Value() >= size);
-  size_.Increment(-size);
+  const size_t size = reservation->size();
+  DCHECK(size_ >= size);
+  size_ -= size;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   FreeMemory(reservation, NOT_EXECUTABLE);
 }
@@ -392,7 +392,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
   base::VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
-  size_.Increment(static_cast<intptr_t>(reservation.size()));
+  size_ += reservation.size();
   Address base =
       RoundUp(static_cast<Address>(reservation.address()), alignment);
   controller->TakeControl(&reservation);
@@ -490,7 +490,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->skip_list_ = NULL;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
-  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
+  chunk->high_water_mark_ = static_cast<int>(area_start - base);
   chunk->set_parallel_sweeping(SWEEPING_DONE);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
@@ -636,8 +636,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                  CodePageGuardSize();
 
     // Check executable memory limit.
-    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
-        capacity_executable_) {
+    if ((size_executable_ + chunk_size) > capacity_executable_) {
       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                 "V8 Executable Allocation capacity exceeded"));
       return NULL;
@@ -661,16 +660,16 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
     DCHECK(
         IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
     if (base == NULL) return NULL;
-    size_.Increment(static_cast<intptr_t>(chunk_size));
+    size_ += chunk_size;
     // Update executable memory size.
-    size_executable_.Increment(static_cast<intptr_t>(chunk_size));
+    size_executable_ += chunk_size;
   } else {
     base = AllocateAlignedMemory(chunk_size, commit_size,
                                  MemoryChunk::kAlignment, executable,
                                  &reservation);
     if (base == NULL) return NULL;
     // Update executable memory size.
-    size_executable_.Increment(static_cast<intptr_t>(chunk_size));
+    size_executable_ += reservation.size();
   }
 
   if (Heap::ShouldZapGarbage()) {
@@ -757,20 +756,20 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
 
-  intptr_t size;
+  size_t size;
   base::VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
-    size = static_cast<intptr_t>(reservation->size());
+    size = reservation->size();
   } else {
-    size = static_cast<intptr_t>(chunk->size());
+    size = chunk->size();
   }
-  DCHECK(size_.Value() >= size);
-  size_.Increment(-size);
+  DCHECK(size_ >= size);
+  size_ -= size;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
 
   if (chunk->executable() == EXECUTABLE) {
-    DCHECK(size_executable_.Value() >= size);
-    size_executable_.Increment(-size);
+    DCHECK(size_executable_ >= size);
+    size_executable_ -= size;
   }
 
   chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -870,14 +869,13 @@ void MemoryAllocator::RemoveMemoryAllocationCallback(
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
-  intptr_t size = Size();
-  float pct = static_cast<float>(capacity_ - size) / capacity_;
+  float pct = static_cast<float>(capacity_ - size_) / capacity_;
   PrintF("  capacity: %" V8_PTR_PREFIX
          "d"
          ", used: %" V8_PTR_PREFIX
          "d"
          ", available: %%%d\n\n",
-         capacity_, size, static_cast<int>(pct * 100));
+         capacity_, size_, static_cast<int>(pct * 100));
 }
 #endif
...
@@ -6,7 +6,6 @@
 #define V8_HEAP_SPACES_H_
 
 #include "src/allocation.h"
-#include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
@@ -561,14 +560,14 @@ class MemoryChunk {
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
       + kIntSize          // int progress_bar_
-      + kPointerSize      // AtomicValue high_water_mark_
+      + kIntSize          // int high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord parallel_sweeping_
-      + 5 * kPointerSize  // AtomicNumber free-list statistics
+      + 5 * kIntSize      // int free-list statistics
       + kPointerSize      // base::AtomicWord next_chunk_
       + kPointerSize;     // base::AtomicWord prev_chunk_
 
   // We add some more space to the computed header size to amount for missing
   // alignment requirements in our computation.
@@ -675,23 +674,21 @@ class MemoryChunk {
   bool CommitArea(size_t requested);
 
   // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+  size_t CommittedPhysicalMemory() { return high_water_mark_; }
 
   // Should be called when memory chunk is about to be freed.
   void ReleaseAllocatedMemory();
 
   static inline void UpdateHighWaterMark(Address mark) {
-    if (mark == nullptr) return;
+    if (mark == NULL) return;
     // Need to subtract one from the mark because when a chunk is full the
     // top points to the next address after the chunk, which effectively belongs
     // to another chunk. See the comment to Page::FromAllocationTop.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
-    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
-    intptr_t old_mark = 0;
-    do {
-      old_mark = chunk->high_water_mark_.Value();
-    } while ((new_mark > old_mark) &&
-             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
+    int new_mark = static_cast<int>(mark - chunk->address());
+    if (new_mark > chunk->high_water_mark_) {
+      chunk->high_water_mark_ = new_mark;
+    }
   }
 
  protected:
@@ -722,17 +719,17 @@ class MemoryChunk {
   int progress_bar_;
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  AtomicValue<intptr_t> high_water_mark_;
+  int high_water_mark_;
 
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
 
   // PagedSpace free-list statistics.
-  AtomicNumber<intptr_t> available_in_small_free_list_;
-  AtomicNumber<intptr_t> available_in_medium_free_list_;
-  AtomicNumber<intptr_t> available_in_large_free_list_;
-  AtomicNumber<intptr_t> available_in_huge_free_list_;
-  AtomicNumber<intptr_t> non_available_small_blocks_;
+  int available_in_small_free_list_;
+  int available_in_medium_free_list_;
+  int available_in_large_free_list_;
+  int available_in_huge_free_list_;
+  int non_available_small_blocks_;
 
   // next_chunk_ holds a pointer of type MemoryChunk
   base::AtomicWord next_chunk_;
@@ -831,22 +828,21 @@ class Page : public MemoryChunk {
   void ResetFreeListStatistics();
 
   int LiveBytesFromFreeList() {
-    return static_cast<int>(
-        area_size() - non_available_small_blocks() -
-        available_in_small_free_list() - available_in_medium_free_list() -
-        available_in_large_free_list() - available_in_huge_free_list());
+    return area_size() - non_available_small_blocks_ -
+           available_in_small_free_list_ - available_in_medium_free_list_ -
+           available_in_large_free_list_ - available_in_huge_free_list_;
   }
 
 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
-  type name() { return name##_.Value(); }         \
-  void set_##name(type name) { name##_.SetValue(name); } \
-  void add_##name(type name) { name##_.Increment(name); }
+  type name() { return name##_; }                 \
+  void set_##name(type name) { name##_ = name; }  \
+  void add_##name(type name) { name##_ += name; }
 
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)
 
 #undef FRAGMENTATION_STATS_ACCESSORS
@@ -1132,25 +1128,21 @@ class MemoryAllocator {
   // together.
   void Free(MemoryChunk* chunk);
 
-  // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_.Value(); }
-
-  // Returns allocated executable spaces in bytes.
-  intptr_t SizeExecutable() { return size_executable_.Value(); }
-
   // Returns the maximum available bytes of heaps.
-  intptr_t Available() {
-    intptr_t size = Size();
-    return capacity_ < size ? 0 : capacity_ - size;
-  }
+  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+
+  // Returns allocated spaces in bytes.
+  intptr_t Size() { return size_; }
 
   // Returns the maximum available executable bytes of heaps.
   intptr_t AvailableExecutable() {
-    intptr_t executable_size = SizeExecutable();
-    if (capacity_executable_ < executable_size) return 0;
-    return capacity_executable_ - executable_size;
+    if (capacity_executable_ < size_executable_) return 0;
+    return capacity_executable_ - size_executable_;
   }
 
+  // Returns allocated executable spaces in bytes.
+  intptr_t SizeExecutable() { return size_executable_; }
+
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
@@ -1158,9 +1150,9 @@ class MemoryAllocator {
 
   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
-  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
-    return address < lowest_ever_allocated_.Value() ||
-           address >= highest_ever_allocated_.Value();
+  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
+    return address < lowest_ever_allocated_ ||
+           address >= highest_ever_allocated_;
   }
 
 #ifdef DEBUG
@@ -1240,22 +1232,22 @@ class MemoryAllocator {
   Isolate* isolate_;
 
   // Maximum space size in bytes.
-  intptr_t capacity_;
+  size_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  intptr_t capacity_executable_;
+  size_t capacity_executable_;
 
   // Allocated space size in bytes.
-  AtomicNumber<intptr_t> size_;
+  size_t size_;
   // Allocated executable space size in bytes.
-  AtomicNumber<intptr_t> size_executable_;
+  size_t size_executable_;
 
   // We keep the lowest and highest addresses allocated as a quick way
   // of determining that pointers are outside the heap. The estimate is
   // conservative, i.e. not all addrsses in 'allocated' space are allocated
   // to our heap. The range is [lowest, highest[, inclusive on the low end
   // and exclusive on the high end.
-  AtomicValue<void*> lowest_ever_allocated_;
-  AtomicValue<void*> highest_ever_allocated_;
+  void* lowest_ever_allocated_;
+  void* highest_ever_allocated_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1278,16 +1270,8 @@ class MemoryAllocator {
                                          PagedSpace* owner);
 
   void UpdateAllocatedSpaceLimits(void* low, void* high) {
-    // The use of atomic primitives does not guarantee correctness (wrt.
-    // desired semantics) by default. The loop here ensures that we update the
-    // values only if they did not change in between.
-    void* ptr = nullptr;
-    do {
-      ptr = lowest_ever_allocated_.Value();
-    } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
-    do {
-      ptr = highest_ever_allocated_.Value();
-    } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
+    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
   }
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
...
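The size_ and size_executable_ counters that this revert turns back into plain size_t fields were, in the reverted patch, wrapped in AtomicNumber so they could be adjusted and read from several threads. A rough standalone equivalent of that counter using std::atomic (a sketch of the intent only; it is not V8's actual AtomicNumber from src/atomic-utils.h, which is built on base/atomicops.h):

#include <atomic>

// Sketch of an AtomicNumber-style counter: relaxed increments with
// acquire/release loads and stores, suitable for simple size accounting.
template <typename T>
class AtomicCounter {
 public:
  AtomicCounter() : value_(0) {}
  explicit AtomicCounter(T initial) : value_(initial) {}

  T Value() const { return value_.load(std::memory_order_acquire); }
  void SetValue(T new_value) { value_.store(new_value, std::memory_order_release); }
  void Increment(T delta) { value_.fetch_add(delta, std::memory_order_relaxed); }

 private:
  std::atomic<T> value_;
};

// Usage mirroring the reverted MemoryAllocator accounting:
//   size_.Increment(static_cast<intptr_t>(chunk_size));
//   DCHECK(size_.Value() >= size);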