Commit a7c85f33 authored by Yang Guo, committed by Commit Bot

Remove unused statistics collection code.

The removed pieces are the NumberAndSizeInfo/HistogramInfo helpers, the per-space ReportStatistics() debug reports, the NewSpace allocation/promotion histograms, and the matching debug-only Isolate fields.

Bug: chromium:797045
Change-Id: I21171f72c2989819bbc2f22ca9349fd972176095
Reviewed-on: https://chromium-review.googlesource.com/842482
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50290}
parent b44f8205
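
For context while reading the diff: the deleted machinery kept one histogram bucket per instance type, each bucket holding an object count and a byte total; a heap walk cleared the buckets, bumped one bucket per object, and finally printed the non-empty ones. Below is a minimal standalone sketch of that pattern, not the V8 code itself; the InstanceType enum, type names, FakeObject, and Histogram class are illustrative stand-ins.

#include <cstdio>
#include <vector>

// One histogram bucket: an object count plus a byte total, mirroring the
// removed NumberAndSizeInfo/HistogramInfo pair.
struct Bucket {
  const char* name = nullptr;
  int number = 0;
  int bytes = 0;
  void Clear() { number = 0; bytes = 0; }
};

// Illustrative stand-ins for V8's INSTANCE_TYPE_LIST / LAST_TYPE.
enum InstanceType { kString, kJSObject, kCode, kLastType = kCode };
constexpr const char* kTypeNames[] = {"STRING_TYPE", "JS_OBJECT_TYPE", "CODE_TYPE"};

// Illustrative object; the removed code iterated real HeapObjects instead.
struct FakeObject {
  InstanceType type;
  int size;
};

class Histogram {
 public:
  Histogram() {
    for (int i = 0; i <= kLastType; i++) buckets_[i].name = kTypeNames[i];
  }
  // Same role as the removed ClearHistograms(): reset every bucket.
  void Clear() {
    for (int i = 0; i <= kLastType; i++) buckets_[i].Clear();
  }
  // Same role as CollectHistogramInfo(): bump the bucket for one object.
  void Record(const FakeObject& obj) {
    buckets_[obj.type].number += 1;
    buckets_[obj.type].bytes += obj.size;
  }
  // Same role as ReportHistogram(): print the non-empty buckets.
  void Report() const {
    std::printf("\n  Object Histogram:\n");
    for (int i = 0; i <= kLastType; i++) {
      if (buckets_[i].number > 0) {
        std::printf("    %-34s%10d (%10d bytes)\n", buckets_[i].name,
                    buckets_[i].number, buckets_[i].bytes);
      }
    }
  }
 private:
  Bucket buckets_[kLastType + 1];
};

int main() {
  Histogram histogram;
  histogram.Clear();
  std::vector<FakeObject> heap = {{kString, 24}, {kString, 40}, {kJSObject, 64}};
  for (const FakeObject& obj : heap) histogram.Record(obj);
  histogram.Report();
  return 0;
}

In the removed code the same three roles were played by ClearHistograms(), CollectHistogramInfo(), and ReportHistogram() operating on isolate->heap_histograms().
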
@@ -526,39 +526,6 @@ struct CommentStatistic {
};
#endif
class NumberAndSizeInfo BASE_EMBEDDED {
public:
NumberAndSizeInfo() : number_(0), bytes_(0) {}
int number() const { return number_; }
void increment_number(int num) { number_ += num; }
int bytes() const { return bytes_; }
void increment_bytes(int size) { bytes_ += size; }
void clear() {
number_ = 0;
bytes_ = 0;
}
private:
int number_;
int bytes_;
};
// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to the log file.
class HistogramInfo : public NumberAndSizeInfo {
public:
HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
private:
const char* name_;
};
class Heap {
public:
// Declare all the root indices. This defines the root list order.
@@ -1103,15 +1103,6 @@ void MemoryAllocator::ZapBlock(Address start, size_t size) {
}
}
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
size_t size = Size();
float pct = static_cast<float>(capacity_ - size) / capacity_;
PrintF(" capacity: %zu , used: %" PRIuS ", available: %%%d\n\n",
capacity_, size, static_cast<int>(pct * 100));
}
#endif
size_t MemoryAllocator::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
@@ -1882,29 +1873,11 @@ bool NewSpace::SetUp(size_t initial_semispace_capacity,
DCHECK(!from_space_.is_committed()); // No need to use memory yet.
ResetAllocationInfo();
// Allocate and set up the histogram arrays if necessary.
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
#define SET_NAME(name) \
allocated_histogram_[name].set_name(#name); \
promoted_histogram_[name].set_name(#name);
INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
return true;
}
void NewSpace::TearDown() {
if (allocated_histogram_) {
DeleteArray(allocated_histogram_);
allocated_histogram_ = nullptr;
}
if (promoted_histogram_) {
DeleteArray(promoted_histogram_);
promoted_histogram_ = nullptr;
}
allocation_info_.Reset(nullptr, nullptr);
to_space_.TearDown();
@@ -2559,85 +2532,6 @@ void SemiSpaceIterator::Initialize(Address start, Address end) {
limit_ = end;
}
#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
// We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME
#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM
isolate->js_spill_information()->Clear();
}
static int CollectHistogramInfo(HeapObject* obj) {
Isolate* isolate = obj->GetIsolate();
InstanceType type = obj->map()->instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
DCHECK_NOT_NULL(isolate->heap_histograms()[type].name());
isolate->heap_histograms()[type].increment_number(1);
isolate->heap_histograms()[type].increment_bytes(obj->Size());
if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
JSObject::cast(obj)
->IncrementSpillStatistics(isolate->js_spill_information());
}
return obj->Size();
}
static void ReportHistogram(Isolate* isolate, bool print_spill) {
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
if (isolate->heap_histograms()[i].number() > 0) {
PrintF(" %-34s%10d (%10d bytes)\n",
isolate->heap_histograms()[i].name(),
isolate->heap_histograms()[i].number(),
isolate->heap_histograms()[i].bytes());
}
}
PrintF("\n");
// Summarize string types.
int string_number = 0;
int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
string_number += isolate->heap_histograms()[type].number(); \
string_bytes += isolate->heap_histograms()[type].bytes();
STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
if (string_number > 0) {
PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
string_bytes);
}
if (FLAG_collect_heap_spill_statistics && print_spill) {
isolate->js_spill_information()->Print();
}
}
#endif // DEBUG
void NewSpace::RecordAllocation(HeapObject* obj) {
InstanceType type = obj->map()->instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
allocated_histogram_[type].increment_number(1);
allocated_histogram_[type].increment_bytes(obj->Size());
}
void NewSpace::RecordPromotion(HeapObject* obj) {
InstanceType type = obj->map()->instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
promoted_histogram_[type].increment_number(1);
promoted_histogram_[type].increment_bytes(obj->Size());
}
size_t NewSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -3173,23 +3067,6 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
return SweepAndRetryAllocation(size_in_bytes);
}
#ifdef DEBUG
void PagedSpace::ReportStatistics() {
int pct = static_cast<int>(Available() * 100 / Capacity());
PrintF(" capacity: %" PRIuS ", waste: %" PRIuS
", available: %" PRIuS ", %%%d\n",
Capacity(), Waste(), Available(), pct);
heap()->mark_compact_collector()->EnsureSweepingCompleted();
ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next())
CollectHistogramInfo(obj);
ReportHistogram(heap()->isolate(), true);
}
#endif
// -----------------------------------------------------------------------------
// MapSpace implementation
@@ -3529,25 +3406,6 @@ void LargeObjectSpace::Print() {
}
}
void LargeObjectSpace::ReportStatistics() {
PrintF(" size: %" PRIuS "\n", size_);
int num_objects = 0;
ClearHistograms(heap()->isolate());
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
num_objects++;
CollectHistogramInfo(obj);
}
PrintF(
" number of objects %d, "
"size of objects %" PRIuS "\n",
num_objects, objects_size_);
if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
@@ -1398,11 +1398,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
CodeRange* code_range() { return code_range_; }
Unmapper* unmapper() { return &unmapper_; }
#ifdef DEBUG
// Reports statistic info of the space.
void ReportStatistics();
#endif
private:
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
@@ -2190,9 +2185,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// Print meta info and objects in this space.
void Print() override;
// Reports statistics for the space
void ReportStatistics();
// Report code object related statistics
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
@@ -2529,9 +2521,7 @@ class NewSpace : public SpaceWithLinearArea {
: SpaceWithLinearArea(heap, NEW_SPACE, NOT_EXECUTABLE),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
allocated_histogram_(nullptr),
promoted_histogram_(nullptr) {}
reservation_() {}
inline bool Contains(HeapObject* o);
inline bool ContainsSlow(Address a);
@@ -2724,12 +2714,6 @@ class NewSpace : public SpaceWithLinearArea {
void Print() override { to_space_.Print(); }
#endif
// Record the allocation or promotion of a heap object. Note that we don't
// record every single allocation, but only those that happen in the
// to space during a scavenge GC.
void RecordAllocation(HeapObject* obj);
void RecordPromotion(HeapObject* obj);
// Return whether the operation succeeded.
bool CommitFromSpaceIfNeeded() {
if (from_space_.is_committed()) return true;
@@ -2769,9 +2753,6 @@ class NewSpace : public SpaceWithLinearArea {
SemiSpace from_space_;
VirtualMemory reservation_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsInlineAllocation() override { return true; }
@@ -2964,7 +2945,6 @@ class LargeObjectSpace : public Space {
#ifdef DEBUG
void Print() override;
void ReportStatistics();
#endif
private:
@@ -2543,9 +2543,6 @@ Isolate::Isolate(bool enable_serializer)
thread_manager_->isolate_ = this;
#ifdef DEBUG
// heap_histograms_ initializes itself.
memset(&js_spill_information_, 0, sizeof(js_spill_information_));
non_disposed_isolates_.Increment(1);
#endif // DEBUG
@@ -1008,12 +1008,6 @@ class Isolate {
static size_t non_disposed_isolates() {
return non_disposed_isolates_.Value();
}
HistogramInfo* heap_histograms() { return heap_histograms_; }
JSObject::SpillInformation* js_spill_information() {
return &js_spill_information_;
}
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
@@ -1577,8 +1571,6 @@ class Isolate {
#ifdef DEBUG
static base::AtomicNumber<size_t> non_disposed_isolates_;
// A static array of histogram info for each type.
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
#endif
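
A closing note on the ReportStatistics() debug helpers deleted above: apart from dumping the object histogram, they only printed a utilization line (capacity, waste, available, and an integer percent free). A small sketch of that computation, with made-up numbers standing in for the real space counters (SpaceStats and ReportUtilization are hypothetical names, not V8 API):

#include <cstddef>
#include <cstdio>

// Stand-ins for the counters the removed code read from
// PagedSpace::Capacity(), Waste(), and Available().
struct SpaceStats {
  size_t capacity;
  size_t waste;
  size_t available;
};

// Same shape as the header line the removed PagedSpace::ReportStatistics()
// printed: sizes plus an integer percent of the capacity still available.
void ReportUtilization(const SpaceStats& s) {
  int pct = static_cast<int>(s.available * 100 / s.capacity);
  std::printf("  capacity: %zu, waste: %zu, available: %zu, %%%d\n",
              s.capacity, s.waste, s.available, pct);
}

int main() {
  ReportUtilization({8 * 1024 * 1024, 4096, 1 * 1024 * 1024});
  return 0;
}

The %%%d in the format string is taken from the removed code; it prints a literal percent sign in front of the number, e.g. "%12" when an eighth of the capacity is still available.
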