Commit 2e316502 authored by mlippautz, committed by Commit bot

[heap] Removes spaces.h include from heap.h

Together with the presubmit rule that prohibits src/heap/* includes (except for
heap.h), this now properly hides all heap internals.
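The mechanical change enabling this: `Heap::new_space_` becomes a pointer to a forward-declared `NewSpace` rather than an embedded member, so heap.h no longer needs the full class definition from spaces.h. A minimal sketch of the include-hiding pattern, using simplified hypothetical stand-ins rather than the real V8 declarations:

```cpp
// --- heap.h (sketch): a forward declaration suffices for a pointer member.
class NewSpace;

class Heap {
 public:
  bool SetUp();
  void TearDown();
  NewSpace* new_space() { return new_space_; }

 private:
  // Was `NewSpace new_space_;`, which forced heap.h to include spaces.h
  // so the compiler could see the complete type.
  NewSpace* new_space_ = nullptr;
};

// --- heap.cc (sketch): only here is the full definition needed.
class NewSpace {  // stands in for #include "src/heap/spaces.h"
 public:
  explicit NewSpace(Heap* /*heap*/) {}
  bool SetUp() { return true; }
  void TearDown() {}
};

bool Heap::SetUp() {
  new_space_ = new NewSpace(this);
  return new_space_->SetUp();
}

void Heap::TearDown() {
  new_space_->TearDown();
  delete new_space_;
  new_space_ = nullptr;
}
```

Every `new_space_.foo()` call site in heap.cc and heap-inl.h then becomes `new_space_->foo()`, which accounts for the bulk of the diff below.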

R=ulan@chromium.org
BUG=

Review-Url: https://codereview.chromium.org/2314783002
Cr-Commit-Position: refs/heads/master@{#39211}
parent 7e60d08d
src/heap/heap-inl.h
@@ -172,11 +172,11 @@ Space* Heap::space(int idx) {
 }
 
 Address* Heap::NewSpaceAllocationTopAddress() {
-  return new_space_.allocation_top_address();
+  return new_space_->allocation_top_address();
 }
 
 Address* Heap::NewSpaceAllocationLimitAddress() {
-  return new_space_.allocation_limit_address();
+  return new_space_->allocation_limit_address();
 }
 
 Address* Heap::OldSpaceAllocationTopAddress() {
@@ -190,7 +190,7 @@ Address* Heap::OldSpaceAllocationLimitAddress() {
 
 bool Heap::HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
   if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
 
-  intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
+  intptr_t adjusted_allocation_limit = limit - new_space_->Capacity();
 
   if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
@@ -333,7 +333,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
     if (large_object) {
       space = LO_SPACE;
     } else {
-      allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
+      allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
       if (allocation.To(&object)) {
         OnAllocationEvent(object, size_in_bytes);
       }
@@ -472,6 +472,11 @@ void Heap::FinalizeExternalString(String* string) {
   }
 }
 
+Address Heap::NewSpaceTop() { return new_space_->top(); }
+
+bool Heap::DeoptMaybeTenuredAllocationSites() {
+  return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+}
 
 bool Heap::InNewSpace(Object* object) {
   // Inlined check from NewSpace::Contains.
@@ -500,7 +505,7 @@ bool Heap::InToSpace(Object* object) {
 
 bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
 
 bool Heap::InNewSpaceSlow(Address address) {
-  return new_space_.ContainsSlow(address);
+  return new_space_->ContainsSlow(address);
 }
 
 bool Heap::InOldSpaceSlow(Address address) {
@@ -517,7 +522,7 @@ bool Heap::OldGenerationAllocationLimitReached() {
 template <PromotionMode promotion_mode>
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   Page* page = Page::FromAddress(old_address);
-  Address age_mark = new_space_.age_mark();
+  Address age_mark = new_space_->age_mark();
 
   if (promotion_mode == PROMOTE_MARKED) {
     MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
src/heap/heap.cc
@@ -96,7 +96,7 @@ Heap::Heap()
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
-      new_space_(this),
+      new_space_(nullptr),
       old_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
@@ -189,7 +189,7 @@ Heap::Heap()
 intptr_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.Capacity() + OldGenerationCapacity();
+  return new_space_->Capacity() + OldGenerationCapacity();
 }
 
 intptr_t Heap::OldGenerationCapacity() {
@@ -211,14 +211,14 @@ intptr_t Heap::CommittedOldGenerationMemory() {
 intptr_t Heap::CommittedMemory() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
+  return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
 }
 
 size_t Heap::CommittedPhysicalMemory() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.CommittedPhysicalMemory() +
+  return new_space_->CommittedPhysicalMemory() +
          old_space_->CommittedPhysicalMemory() +
          code_space_->CommittedPhysicalMemory() +
         map_space_->CommittedPhysicalMemory() +
@@ -300,7 +300,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
   // and does not count available bytes already in the old space or code
   // space.  Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+  if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
     isolate_->counters()
         ->gc_compactor_caused_by_oldspace_exhaustion()
         ->Increment();
@@ -321,18 +321,18 @@ void Heap::ReportStatisticsBeforeGC() {
   // compiled --log-gc is set.  The following logic is used to avoid
   // double logging.
 #ifdef DEBUG
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
   if (FLAG_heap_stats) {
     ReportHeapStatistics("Before GC");
   } else if (FLAG_log_gc) {
-    new_space_.ReportStatistics();
+    new_space_->ReportStatistics();
   }
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
 #else
   if (FLAG_log_gc) {
-    new_space_.CollectStatistics();
-    new_space_.ReportStatistics();
-    new_space_.ClearHistograms();
+    new_space_->CollectStatistics();
+    new_space_->ReportStatistics();
+    new_space_->ClearHistograms();
   }
 #endif  // DEBUG
 }
@@ -349,8 +349,8 @@ void Heap::PrintShortHeapStatistics() {
                ", available: %6" V8PRIdPTR
                " KB"
                ", committed: %6" V8PRIdPTR " KB\n",
-               new_space_.Size() / KB, new_space_.Available() / KB,
-               new_space_.CommittedMemory() / KB);
+               new_space_->Size() / KB, new_space_->Available() / KB,
+               new_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR
@@ -399,13 +399,13 @@ void Heap::ReportStatisticsAfterGC() {
   // NewSpace statistics are logged exactly once when --log-gc is turned on.
 #if defined(DEBUG)
   if (FLAG_heap_stats) {
-    new_space_.CollectStatistics();
+    new_space_->CollectStatistics();
     ReportHeapStatistics("After GC");
   } else if (FLAG_log_gc) {
-    new_space_.ReportStatistics();
+    new_space_->ReportStatistics();
   }
 #else
-  if (FLAG_log_gc) new_space_.ReportStatistics();
+  if (FLAG_log_gc) new_space_->ReportStatistics();
 #endif  // DEBUG
   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
        ++i) {
@@ -423,7 +423,7 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
   deferred_counters_[feature]++;
 }
 
-bool Heap::UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
 
 void Heap::GarbageCollectionPrologue() {
   {
@@ -455,7 +455,7 @@ void Heap::GarbageCollectionPrologue() {
   ReportStatisticsBeforeGC();
 #endif  // DEBUG
 
-  if (new_space_.IsAtMaximumCapacity()) {
+  if (new_space_->IsAtMaximumCapacity()) {
     maximum_size_scavenges_++;
   } else {
     maximum_size_scavenges_ = 0;
@@ -898,7 +898,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
     }
   }
   set_current_gc_flags(kNoGCFlags);
-  new_space_.Shrink();
+  new_space_->Shrink();
   UncommitFromSpace();
 }
@@ -949,7 +949,7 @@ void Heap::EnsureFillerObjectAtTop() {
   // evacuation of a non-full new space (or if we are on the last page) there
   // may be uninitialized memory behind top. We fill the remainder of the page
   // with a filler.
-  Address to_top = new_space_.top();
+  Address to_top = new_space_->top();
   Page* page = Page::FromAddress(to_top - kPointerSize);
   if (page->Contains(to_top)) {
     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
@@ -1231,7 +1231,7 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
 
 void Heap::EnsureFromSpaceIsCommitted() {
-  if (new_space_.CommitFromSpaceIfNeeded()) return;
+  if (new_space_->CommitFromSpaceIfNeeded()) return;
 
   // Committing memory to from space failed.
   // Memory is exhausted and we will die.
@@ -1506,18 +1506,18 @@ void Heap::MarkCompactPrologue() {
 
 void Heap::CheckNewSpaceExpansionCriteria() {
   if (FLAG_experimental_new_space_growth_heuristic) {
-    if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-        survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
+    if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+        survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
       // Grow the size of new space if there is room to grow, and more than 10%
       // have survived the last scavenge.
-      new_space_.Grow();
+      new_space_->Grow();
       survived_since_last_expansion_ = 0;
     }
-  } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-             survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+  } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+             survived_since_last_expansion_ > new_space_->TotalCapacity()) {
     // Grow the size of new space if there is room to grow, and enough data
     // has survived scavenge since the last expansion.
-    new_space_.Grow();
+    new_space_->Grow();
     survived_since_last_expansion_ = 0;
   }
 }
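To make the experimental growth heuristic above concrete (illustrative numbers, not from the source), the integer-percentage check works out as in this small sketch:

```cpp
#include <cstdio>

int main() {
  // Hypothetical values: 2 MB of to-space capacity, 256 KB survived.
  long total_capacity = 2 * 1024 * 1024;     // new_space_->TotalCapacity()
  long survived_last_scavenge = 256 * 1024;  // survived_last_scavenge_
  // Integer arithmetic, exactly as the heuristic computes it:
  long pct = survived_last_scavenge * 100 / total_capacity;  // == 12
  printf("survival %ld%% -> %s\n", pct,
         pct >= 10 ? "grow new space" : "keep current size");
  return 0;
}
```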
@@ -1640,8 +1640,8 @@ void Heap::Scavenge() {
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
-  new_space_.Flip();
-  new_space_.ResetAllocationInfo();
+  new_space_->Flip();
+  new_space_->ResetAllocationInfo();
 
   // We need to sweep newly copied objects which can be either in the
   // to space or promoted to the old generation.  For to-space
@@ -1660,7 +1660,7 @@ void Heap::Scavenge() {
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceStart();
+  Address new_space_front = new_space_->ToSpaceStart();
   promotion_queue_.Initialize();
 
   PromotionMode promotion_mode = CurrentPromotionMode();
@@ -1757,16 +1757,17 @@ void Heap::Scavenge() {
   ScavengeWeakObjectRetainer weak_object_retainer(this);
   ProcessYoungWeakReferences(&weak_object_retainer);
 
-  DCHECK(new_space_front == new_space_.top());
+  DCHECK(new_space_front == new_space_->top());
 
   // Set age mark.
-  new_space_.set_age_mark(new_space_.top());
+  new_space_->set_age_mark(new_space_->top());
 
   ArrayBufferTracker::FreeDeadInNewSpace(this);
 
   // Update how much has survived scavenge.
-  IncrementYoungSurvivorsCounter(static_cast<int>(
-      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+  IncrementYoungSurvivorsCounter(
+      static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) +
+                       new_space_->Size()));
 
   LOG(isolate_, ResourceEvent("scavenge", "end"));
@@ -1930,11 +1931,11 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front,
                          PromotionMode promotion_mode) {
   do {
-    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+    SemiSpace::AssertValidRange(new_space_front, new_space_->top());
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
-    while (new_space_front != new_space_.top()) {
+    while (new_space_front != new_space_->top()) {
       if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
         if (promotion_mode == PROMOTE_MARKED) {
@@ -1973,7 +1974,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
 
     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
-  } while (new_space_front != new_space_.top());
+  } while (new_space_front != new_space_->top());
 
   return new_space_front;
 }
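The comment in DoScavenge above describes a worklist that is never allocated: objects copied into to-space land at `top()`, so the half-open interval [`new_space_front`, `top()`) holds exactly the copied-but-unscanned objects. A self-contained toy model of that technique (plain C++, not V8 code; a vector stands in for to-space):

```cpp
#include <cstdio>
#include <vector>

// Toy model of the implicit scavenge queue: "copying" an object appends to
// to_space (advancing top), "scanning" advances front. The indices
// [front, to_space.size()) model the copied-but-unscanned interval.
int main() {
  std::vector<int> to_space = {0};  // one root object, id 0
  size_t front = 0;
  while (front != to_space.size()) {         // queue is non-empty
    int object = to_space[front++];          // "pop": advance past object
    if (object < 3) {                        // scanning may copy children,
      to_space.push_back(object * 2 + 1);    // which bumps "top" and keeps
      to_space.push_back(object * 2 + 2);    // the loop spinning
    }
    printf("scanned object %d\n", object);
  }
  return 0;
}
```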
@@ -4191,7 +4192,7 @@ void Heap::ReduceNewSpaceSize() {
   if (ShouldReduceMemory() ||
       ((allocation_throughput != 0) &&
        (allocation_throughput < kLowAllocationThroughput))) {
-    new_space_.Shrink();
+    new_space_->Shrink();
     UncommitFromSpace();
   }
 }
@@ -4555,7 +4556,7 @@ void Heap::ReportHeapStatistics(const char* title) {
   PrintF("Heap statistics : ");
   memory_allocator()->ReportStatistics();
   PrintF("To space : ");
-  new_space_.ReportStatistics();
+  new_space_->ReportStatistics();
   PrintF("Old space : ");
   old_space_->ReportStatistics();
   PrintF("Code space : ");
@@ -4574,7 +4575,7 @@ bool Heap::Contains(HeapObject* value) {
     return false;
   }
   return HasBeenSetUp() &&
-         (new_space_.ToSpaceContains(value) || old_space_->Contains(value) ||
+         (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
           code_space_->Contains(value) || map_space_->Contains(value) ||
          lo_space_->Contains(value));
 }
@@ -4584,7 +4585,7 @@ bool Heap::ContainsSlow(Address addr) {
     return false;
   }
   return HasBeenSetUp() &&
-         (new_space_.ToSpaceContainsSlow(addr) ||
+         (new_space_->ToSpaceContainsSlow(addr) ||
           old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
          map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
 }
@@ -4597,7 +4598,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
 
   switch (space) {
     case NEW_SPACE:
-      return new_space_.ToSpaceContains(value);
+      return new_space_->ToSpaceContains(value);
     case OLD_SPACE:
       return old_space_->Contains(value);
     case CODE_SPACE:
@@ -4619,7 +4620,7 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
 
   switch (space) {
     case NEW_SPACE:
-      return new_space_.ToSpaceContainsSlow(addr);
+      return new_space_->ToSpaceContainsSlow(addr);
     case OLD_SPACE:
       return old_space_->ContainsSlow(addr);
     case CODE_SPACE:
@@ -4682,7 +4683,7 @@ void Heap::Verify() {
   VerifySmisVisitor smis_visitor;
   IterateSmiRoots(&smis_visitor);
 
-  new_space_.Verify();
+  new_space_->Verify();
 
   old_space_->Verify(&visitor);
   map_space_->Verify(&visitor);
@@ -4701,9 +4702,9 @@ void Heap::Verify() {
 
 void Heap::ZapFromSpace() {
-  if (!new_space_.IsFromSpaceCommitted()) return;
-  for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
-                                      new_space_.FromSpaceEnd())) {
+  if (!new_space_->IsFromSpaceCommitted()) return;
+  for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
+                                      new_space_->FromSpaceEnd())) {
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -5088,8 +5089,8 @@ bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
   *stats->start_marker = HeapStats::kStartMarker;
   *stats->end_marker = HeapStats::kEndMarker;
-  *stats->new_space_size = new_space_.SizeAsInt();
-  *stats->new_space_capacity = new_space_.Capacity();
+  *stats->new_space_size = new_space_->SizeAsInt();
+  *stats->new_space_capacity = new_space_->Capacity();
   *stats->old_space_size = old_space_->SizeOfObjects();
   *stats->old_space_capacity = old_space_->Capacity();
   *stats->code_space_size = code_space_->SizeOfObjects();
@@ -5211,7 +5212,7 @@ intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
   CHECK(old_gen_size > 0);
   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
   limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
-  limit += new_space_.Capacity();
+  limit += new_space_->Capacity();
   intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
   return Min(limit, halfway_to_the_max);
 }
@@ -5352,8 +5353,11 @@ bool Heap::SetUp() {
   // Initialize incremental marking.
   incremental_marking_ = new IncrementalMarking(this);
 
+  new_space_ = new NewSpace(this);
+  if (new_space_ == nullptr) return false;
+
   // Set up new space.
-  if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
+  if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
     return false;
   }
   new_space_top_after_last_gc_ = new_space()->top();
@@ -5538,7 +5542,7 @@ void Heap::TearDown() {
     PrintF("maximum_committed_by_heap=%" V8PRIdPTR " ",
            MaximumCommittedMemory());
     PrintF("maximum_committed_by_new_space=%" V8PRIdPTR " ",
-           new_space_.MaximumCommittedMemory());
+           new_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_old_space=%" V8PRIdPTR " ",
            old_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_code_space=%" V8PRIdPTR " ",
@@ -5599,7 +5603,9 @@ void Heap::TearDown() {
   delete tracer_;
   tracer_ = nullptr;
 
-  new_space_.TearDown();
+  new_space_->TearDown();
+  delete new_space_;
+  new_space_ = nullptr;
 
   if (old_space_ != NULL) {
     delete old_space_;
src/heap/heap.h
@@ -16,9 +16,8 @@
 #include "src/base/atomic-utils.h"
 #include "src/globals.h"
 #include "src/heap-symbols.h"
-// TODO(mstarzinger): One more include to kill!
-#include "src/heap/spaces.h"
 #include "src/list.h"
 #include "src/objects.h"
 
 namespace v8 {
 namespace internal {
@@ -326,7 +325,9 @@ class HistogramTimer;
 class Isolate;
 class MemoryAllocator;
 class MemoryReducer;
+class ObjectIterator;
 class ObjectStats;
+class Page;
 class PagedSpace;
 class Scavenger;
 class ScavengeJob;
@@ -401,6 +402,95 @@ class PromotionQueue {
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
 
+class AllocationResult {
+ public:
+  // Implicit constructor from Object*.
+  AllocationResult(Object* object)  // NOLINT
+      : object_(object) {
+    // AllocationResults can't return Smis, which are used to represent
+    // failure and the space to retry in.
+    CHECK(!object->IsSmi());
+  }
+
+  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
+
+  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+    return AllocationResult(space);
+  }
+
+  inline bool IsRetry() { return object_->IsSmi(); }
+
+  template <typename T>
+  bool To(T** obj) {
+    if (IsRetry()) return false;
+    *obj = T::cast(object_);
+    return true;
+  }
+
+  Object* ToObjectChecked() {
+    CHECK(!IsRetry());
+    return object_;
+  }
+
+  inline AllocationSpace RetrySpace();
+
+ private:
+  explicit AllocationResult(AllocationSpace space)
+      : object_(Smi::FromInt(static_cast<int>(space))) {}
+
+  Object* object_;
+};
+
+STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
+
+#ifdef DEBUG
+struct CommentStatistic {
+  const char* comment;
+  int size;
+  int count;
+  void Clear() {
+    comment = NULL;
+    size = 0;
+    count = 0;
+  }
+  // Must be small, since an iteration is used for lookup.
+  static const int kMaxComments = 64;
+};
+#endif
+
+class NumberAndSizeInfo BASE_EMBEDDED {
+ public:
+  NumberAndSizeInfo() : number_(0), bytes_(0) {}
+
+  int number() const { return number_; }
+  void increment_number(int num) { number_ += num; }
+
+  int bytes() const { return bytes_; }
+  void increment_bytes(int size) { bytes_ += size; }
+
+  void clear() {
+    number_ = 0;
+    bytes_ = 0;
+  }
+
+ private:
+  int number_;
+  int bytes_;
+};
+
+// HistogramInfo class for recording a single "bar" of a histogram.  This
+// class is used for collecting statistics to print to the log file.
+class HistogramInfo : public NumberAndSizeInfo {
+ public:
+  HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}
+
+  const char* name() { return name_; }
+  void set_name(const char* name) { name_ = name; }
+
+ private:
+  const char* name_;
+};
+
 class Heap {
  public:
   // Declare all the root indices. This defines the root list order.
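For readers encountering `AllocationResult` here for the first time: success wraps a `HeapObject*`, while failure is encoded as a Smi naming the `AllocationSpace` to retry in, which is what makes the single-word `STATIC_ASSERT` above hold. A hedged usage sketch (the allocator call and `RetryAfterGC` are hypothetical stand-ins; compare the real call site in `Heap::AllocateRaw` in heap-inl.h above):

```cpp
// Hypothetical caller, mirroring the AllocateRaw call site shown earlier.
AllocationResult allocation = AllocateSomething(size_in_bytes);  // assumed
HeapObject* object = nullptr;
if (allocation.To(&object)) {
  // Success: `object` points at the new, still-uninitialized storage.
  OnAllocationEvent(object, size_in_bytes);
} else {
  // Failure: object_ holds Smi::FromInt(space), so IsRetry() is true and
  // RetrySpace() recovers which space to collect before retrying.
  RetryAfterGC(allocation.RetrySpace());  // hypothetical helper
}
```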
@@ -785,9 +875,7 @@ class Heap {
   void DeoptMarkedAllocationSites();
 
-  bool DeoptMaybeTenuredAllocationSites() {
-    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
-  }
+  inline bool DeoptMaybeTenuredAllocationSites();
 
   void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
                                              Handle<WeakCell> code);
@@ -861,9 +949,9 @@ class Heap {
   // Getters for spaces. =======================================================
   // ===========================================================================
 
-  Address NewSpaceTop() { return new_space_.top(); }
+  inline Address NewSpaceTop();
 
-  NewSpace* new_space() { return &new_space_; }
+  NewSpace* new_space() { return new_space_; }
   OldSpace* old_space() { return old_space_; }
   OldSpace* code_space() { return code_space_; }
   MapSpace* map_space() { return map_space_; }
@@ -2001,7 +2089,7 @@ class Heap {
 
   int global_ic_age_;
 
-  NewSpace new_space_;
+  NewSpace* new_space_;
   OldSpace* old_space_;
   OldSpace* code_space_;
   MapSpace* map_space_;
src/heap/spaces.h
@@ -17,6 +17,7 @@
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
 #include "src/globals.h"
+#include "src/heap/heap.h"
 #include "src/heap/marking.h"
 #include "src/list.h"
 #include "src/objects.h"
@@ -1868,50 +1869,6 @@ class FreeList {
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
 
-class AllocationResult {
- public:
-  // Implicit constructor from Object*.
-  AllocationResult(Object* object)  // NOLINT
-      : object_(object) {
-    // AllocationResults can't return Smis, which are used to represent
-    // failure and the space to retry in.
-    CHECK(!object->IsSmi());
-  }
-
-  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
-
-  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
-    return AllocationResult(space);
-  }
-
-  inline bool IsRetry() { return object_->IsSmi(); }
-
-  template <typename T>
-  bool To(T** obj) {
-    if (IsRetry()) return false;
-    *obj = T::cast(object_);
-    return true;
-  }
-
-  Object* ToObjectChecked() {
-    CHECK(!IsRetry());
-    return object_;
-  }
-
-  inline AllocationSpace RetrySpace();
-
- private:
-  explicit AllocationResult(AllocationSpace space)
-      : object_(Smi::FromInt(static_cast<int>(space))) {}
-
-  Object* object_;
-};
-
-STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
-
 // LocalAllocationBuffer represents a linear allocation area that is created
 // from a given {AllocationResult} and can be used to allocate memory without
 // synchronization.
@@ -2247,41 +2204,6 @@ class PagedSpace : public Space {
   friend class HeapTester;
 };
 
-class NumberAndSizeInfo BASE_EMBEDDED {
- public:
-  NumberAndSizeInfo() : number_(0), bytes_(0) {}
-
-  int number() const { return number_; }
-  void increment_number(int num) { number_ += num; }
-
-  int bytes() const { return bytes_; }
-  void increment_bytes(int size) { bytes_ += size; }
-
-  void clear() {
-    number_ = 0;
-    bytes_ = 0;
-  }
-
- private:
-  int number_;
-  int bytes_;
-};
-
-// HistogramInfo class for recording a single "bar" of a histogram.  This
-// class is used for collecting statistics to print to the log file.
-class HistogramInfo : public NumberAndSizeInfo {
- public:
-  HistogramInfo() : NumberAndSizeInfo() {}
-
-  const char* name() { return name_; }
-  void set_name(const char* name) { name_ = name; }
-
- private:
-  const char* name_;
-};
-
 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
 // -----------------------------------------------------------------------------
@@ -3022,20 +2944,6 @@ class MemoryChunkIterator BASE_EMBEDDED {
   LargePageIterator lo_iterator_;
 };
 
-#ifdef DEBUG
-struct CommentStatistic {
-  const char* comment;
-  int size;
-  int count;
-  void Clear() {
-    comment = NULL;
-    size = 0;
-    count = 0;
-  }
-  // Must be small, since an iteration is used for lookup.
-  static const int kMaxComments = 64;
-};
-#endif
-
 }  // namespace internal
 }  // namespace v8
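A closing design note on the `Heap::SetUp()`/`Heap::TearDown()` hunks above: the commit manages `new_space_` with raw `new`/`delete`, matching the existing style of `old_space_` and the other space pointers. The same ownership could be expressed with `std::unique_ptr` while still keeping spaces.h out of the header; a speculative sketch, not what the commit does:

```cpp
#include <memory>

class NewSpace;  // forward declaration still suffices in the header

class Heap {
 public:
  Heap();
  // Must be defined in heap.cc, where NewSpace is a complete type;
  // otherwise unique_ptr cannot instantiate the deleter.
  ~Heap();

 private:
  std::unique_ptr<NewSpace> new_space_;  // freed automatically
};

// heap.cc (sketch): NewSpace is complete here.
class NewSpace {};
Heap::Heap() : new_space_(new NewSpace()) {}
Heap::~Heap() = default;
```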