Commit 3005fe4a authored by sgjesse@chromium.org

Tracks the maximum usage of executable memory allocated by V8 and allows the histogram data to be gathered and reported.

This patch contains only the usage tracking logic from 3030048 (already LGTM'd). It does not implement the RWX limit.

BUG=52122
TEST=Check the V8.ExecutableMemoryMax histogram in the Chrome about:histograms page
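
The tracking added in MemoryAllocator::AllocateRawMemory is a running-maximum pattern: whenever the executable allocation total exceeds the largest value observed so far, the new peak is recorded as a sample in the V8.ExecutableMemoryMax histogram. Below is a minimal standalone sketch of that pattern, using a hypothetical ReportPeak() function in place of the V8-internal StatsTable::AddHistogramSample():

#include <cstdio>

// Hypothetical stand-in for StatsTable::AddHistogramSample().
static void ReportPeak(const char* histogram, int sample) {
  std::printf("%s <- %d bytes\n", histogram, sample);
}

static int size_executable = 0;               // current executable allocation
static int size_executable_max_observed = 0;  // peak seen so far

// Called on every executable allocation; mirrors the bookkeeping the patch
// adds to MemoryAllocator::AllocateRawMemory.
static void OnExecutableAlloc(int alloced) {
  size_executable += alloced;
  if (size_executable_max_observed < size_executable) {
    size_executable_max_observed = size_executable;
    ReportPeak("V8.ExecutableMemoryMax", size_executable);
  }
}

// Called on every executable free; mirrors FreeRawMemory().
static void OnExecutableFree(int length) {
  size_executable -= length;
}

int main() {
  OnExecutableAlloc(4096);   // new peak (4096), sample recorded
  OnExecutableAlloc(8192);   // new peak (12288), sample recorded
  OnExecutableFree(4096);    // below peak, nothing recorded
  OnExecutableAlloc(2048);   // still below peak, nothing recorded
  return 0;
}

Because only new peaks are sampled, the histogram captures the high-water mark of executable memory rather than every individual allocation.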

Review URL: http://codereview.chromium.org/3161015


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5299 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 85718e89
@@ -220,21 +220,22 @@ void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
 void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
-  watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
+  watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
 }
 
 bool Page::IsWatermarkValid() {
-  return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
+  return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
 }
 
 void Page::InvalidateWatermark(bool value) {
   if (value) {
-    flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
+    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+        watermark_invalidated_mark_;
   } else {
-    flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
-             (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
+    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+             (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
   }
 
   ASSERT(IsWatermarkValid() == !value);
@@ -242,15 +243,15 @@ void Page::InvalidateWatermark(bool value) {
 
 bool Page::GetPageFlag(PageFlag flag) {
-  return (flags_ & flag) != 0;
+  return (flags_ & (1 << flag)) != 0;
 }
 
 void Page::SetPageFlag(PageFlag flag, bool value) {
   if (value) {
-    flags_ |= flag;
+    flags_ |= (1 << flag);
   } else {
-    flags_ &= ~flag;
+    flags_ &= ~(1 << flag);
   }
 }
@@ -289,6 +290,15 @@ void Page::SetIsLargeObjectPage(bool is_large_object_page) {
   SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
 }
 
+bool Page::IsPageExecutable() {
+  return GetPageFlag(IS_EXECUTABLE);
+}
+
+void Page::SetIsPageExecutable(bool is_page_executable) {
+  SetPageFlag(IS_EXECUTABLE, is_page_executable);
+}
+
 // -----------------------------------------------------------------------------
 // MemoryAllocator

@@ -41,7 +41,7 @@ namespace internal {
          && (info).top <= (space).high()              \
          && (info).limit == (space).high())
 
-intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
+intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;
 
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
@@ -266,6 +266,7 @@ void CodeRange::TearDown() {
 //
 int MemoryAllocator::capacity_ = 0;
 int MemoryAllocator::size_ = 0;
+int MemoryAllocator::size_executable_ = 0;
 
 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
@@ -292,6 +293,8 @@ int MemoryAllocator::Pop() {
 }
 
+void *executable_memory_histogram = NULL;
+
 bool MemoryAllocator::Setup(int capacity) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
@@ -308,6 +311,9 @@ bool MemoryAllocator::Setup(int capacity) {
   if (max_nof_chunks_ > kMaxNofChunks) return false;
 
   size_ = 0;
+  size_executable_ = 0;
+  executable_memory_histogram =
+      StatsTable::CreateHistogram("V8.ExecutableMemoryMax", 0, MB * 512, 50);
   ChunkInfo info;  // uninitialized element.
   for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
     chunks_.Add(info);
@@ -353,6 +359,16 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
   }
   int alloced = static_cast<int>(*allocated);
   size_ += alloced;
+
+  if (executable == EXECUTABLE) {
+    size_executable_ += alloced;
+    static int size_executable_max_observed_ = 0;
+    if (size_executable_max_observed_ < size_executable_) {
+      size_executable_max_observed_ = size_executable_;
+      StatsTable::AddHistogramSample(executable_memory_histogram,
+                                     size_executable_);
+    }
+  }
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), alloced);
 #endif
@@ -361,7 +377,9 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
 }
 
-void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+void MemoryAllocator::FreeRawMemory(void* mem,
+                                    size_t length,
+                                    Executability executable) {
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), length);
 #endif
@@ -372,6 +390,7 @@ void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
   }
   Counters::memory_allocated.Decrement(static_cast<int>(length));
   size_ -= static_cast<int>(length);
+  if (executable == EXECUTABLE) size_executable_ -= length;
   ASSERT(size_ >= 0);
 }
@@ -425,7 +444,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
   if (*allocated_pages == 0) {
-    FreeRawMemory(chunk, chunk_size);
+    FreeRawMemory(chunk, chunk_size, owner->executable());
     LOG(DeleteEvent("PagedChunk", chunk));
     return Page::FromAddress(NULL);
   }
@@ -591,7 +610,7 @@ void MemoryAllocator::DeleteChunk(int chunk_id) {
     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
   } else {
     LOG(DeleteEvent("PagedChunk", c.address()));
-    FreeRawMemory(c.address(), c.size());
+    FreeRawMemory(c.address(), c.size(), c.owner()->executable());
   }
   c.init(NULL, 0, NULL);
   Push(chunk_id);
@@ -2552,7 +2571,7 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
   if (mem == NULL) return NULL;
   LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
   if (*chunk_size < requested) {
-    MemoryAllocator::FreeRawMemory(mem, *chunk_size);
+    MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
     LOG(DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
@@ -2590,7 +2609,12 @@ void LargeObjectSpace::TearDown() {
     LargeObjectChunk* chunk = first_chunk_;
     first_chunk_ = first_chunk_->next();
     LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
-    MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
+    Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+    Executability executable =
+        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
+    MemoryAllocator::FreeRawMemory(chunk->address(),
+                                   chunk->size(),
+                                   executable);
   }
 
   size_ = 0;
@@ -2654,6 +2678,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
+  page->SetIsPageExecutable(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
@@ -2768,6 +2793,10 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       previous = current;
       current = current->next();
     } else {
+      Page* page = Page::FromAddress(RoundUp(current->address(),
+                                     Page::kPageSize));
+      Executability executable =
+          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
       Address chunk_address = current->address();
       size_t chunk_size = current->size();
@@ -2783,7 +2812,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
      MarkCompactCollector::ReportDeleteIfNeeded(object);
      size_ -= static_cast<int>(chunk_size);
      page_count_--;
-     MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
+     MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
     }
   }

@@ -197,6 +197,10 @@ class Page {
   inline void SetIsLargeObjectPage(bool is_large_object_page);
 
+  inline bool IsPageExecutable();
+  inline void SetIsPageExecutable(bool is_page_executable);
+
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
@@ -256,13 +260,16 @@ class Page {
   STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
 
   enum PageFlag {
-    IS_NORMAL_PAGE = 1 << 0,
-    WAS_IN_USE_BEFORE_MC = 1 << 1,
+    IS_NORMAL_PAGE = 0,
+    WAS_IN_USE_BEFORE_MC,
 
     // Page allocation watermark was bumped by preallocation during scavenge.
     // Correct watermark can be retrieved by CachedAllocationWatermark() method
-    WATERMARK_INVALIDATED = 1 << 2
+    WATERMARK_INVALIDATED,
+    IS_EXECUTABLE,
+    NUM_PAGE_FLAGS  // Must be last
   };
 
+  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
+
   // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
   // scavenge we just invalidate the watermark on each old space page after
@@ -291,7 +298,7 @@ class Page {
   inline void ClearGCFields();
 
-  static const int kAllocationWatermarkOffsetShift = 3;
+  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
   static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
   static const uint32_t kAllocationWatermarkOffsetMask =
       ((1 << kAllocationWatermarkOffsetBits) - 1) <<
@@ -557,7 +564,9 @@ class MemoryAllocator : public AllStatic {
   static void* AllocateRawMemory(const size_t requested,
                                  size_t* allocated,
                                  Executability executable);
-  static void FreeRawMemory(void* buf, size_t length);
+  static void FreeRawMemory(void* buf,
+                            size_t length,
+                            Executability executable);
 
   // Returns the maximum available bytes of heaps.
   static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -565,6 +574,9 @@ class MemoryAllocator : public AllStatic {
   // Returns allocated spaces in bytes.
   static int Size() { return size_; }
 
+  // Returns allocated executable spaces in bytes.
+  static int SizeExecutable() { return size_executable_; }
+
   // Returns maximum available bytes that the old space can have.
   static int MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
@@ -628,6 +640,8 @@ class MemoryAllocator : public AllStatic {
   // Allocated space size in bytes.
   static int size_;
 
+  // Allocated executable space size in bytes.
+  static int size_executable_;
+
   // The initial chunk of virtual memory.
   static VirtualMemory* initial_chunk_;
@@ -2058,7 +2072,7 @@ class LargeObjectChunk {
   LargeObjectChunk* next() { return next_; }
   void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
 
-  size_t size() { return size_; }
+  size_t size() { return size_ & ~Page::kPageFlagMask; }
   void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
 
   // Returns the object in this chunk.