Commit a02dbe42 authored by erik.corry@gmail.com

Revert 10413-10416 initial memory use reduction due to test failures.
Review URL: http://codereview.chromium.org/9178014

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10417 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 07b46f47
@@ -42,11 +42,10 @@ namespace internal {
 static const int kEventsBufferSize = 256*KB;
 static const int kTickSamplesBufferChunkSize = 64*KB;
 static const int kTickSamplesBufferChunksCount = 16;
-static const int kProfilerStackSize = 32 * KB;
 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
-    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
+    : Thread("v8:ProfEvntProc"),
       generator_(generator),
       running_(true),
       ticks_buffer_(sizeof(TickSampleEventRecord),
...
@@ -126,9 +126,6 @@ ShellOptions Shell::options;
 const char* Shell::kPrompt = "d8> ";
-const int MB = 1024 * 1024;
 #ifndef V8_SHARED
 bool CounterMap::Match(void* key1, void* key2) {
   const char* name1 = reinterpret_cast<const char*>(key1);
@@ -1194,11 +1191,14 @@ Handle<String> SourceGroup::ReadFile(const char* name) {
 #ifndef V8_SHARED
 i::Thread::Options SourceGroup::GetThreadOptions() {
+  i::Thread::Options options;
+  options.name = "IsolateThread";
   // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
   // which is not enough to parse the big literal expressions used in tests.
   // The stack size should be at least StackGuard::kLimitSize + some
-  // OS-specific padding for thread startup code.  2Mbytes seems to be enough.
-  return i::Thread::Options("IsolateThread", 2 * MB);
+  // OS-specific padding for thread startup code.
+  options.stack_size = 2 << 20;  // 2 Mb seems to be enough
+  return options;
 }
...
@@ -1086,7 +1086,6 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
   MemoryChunk* chunk =
       Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
-                                                            desc.instr_size,
                                                             EXECUTABLE,
                                                             NULL);
   if (chunk == NULL) {
...
@@ -505,6 +505,7 @@ Isolate* Heap::isolate() {
 #define GC_GREEDY_CHECK() { }
 #endif
 // Calls the FUNCTION_CALL function and retries it up to three times
 // to guarantee that any allocations performed during the call will
 // succeed if there's enough memory.
...
@@ -582,11 +582,8 @@ void Heap::ReserveSpace(
   PagedSpace* map_space = Heap::map_space();
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
-  bool one_old_space_gc_has_been_performed = false;
   bool gc_performed = true;
-  bool old_space_gc_performed;
   while (gc_performed) {
-    old_space_gc_performed = false;
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
       Heap::CollectGarbage(NEW_SPACE);
@@ -595,27 +592,22 @@ void Heap::ReserveSpace(
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
       Heap::CollectGarbage(OLD_POINTER_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
       Heap::CollectGarbage(OLD_DATA_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
       Heap::CollectGarbage(CODE_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
       Heap::CollectGarbage(MAP_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
       Heap::CollectGarbage(CELL_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
     // large-object allocations that are only just larger than the page size.
@@ -625,17 +617,10 @@ void Heap::ReserveSpace(
     // allocation in the other spaces.
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
-    // If we already did one GC in order to make space in old space, there is
-    // no sense in doing another one.  We will attempt to force through the
-    // large object space allocation, which comes directly from the OS,
-    // regardless of any soft limit.
-    if (!one_old_space_gc_has_been_performed &&
-        !(lo_space->ReserveSpace(large_object_size))) {
+    if (!(lo_space->ReserveSpace(large_object_size))) {
       Heap::CollectGarbage(LO_SPACE);
       gc_performed = true;
     }
-    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
   }
 }
...
@@ -287,7 +287,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
   // It's difficult to filter out slots recorded for large objects.
   if (chunk->owner()->identity() == LO_SPACE &&
-      chunk->size() > Page::kPageSize &&
+      chunk->size() > static_cast<size_t>(Page::kPageSize) &&
       is_compacting) {
     chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
   }
...
@@ -2887,8 +2887,7 @@ static void SweepPrecisely(PagedSpace* space,
   for ( ; live_objects != 0; live_objects--) {
     Address free_end = object_address + offsets[live_index++] * kPointerSize;
     if (free_end != free_start) {
-      space->AddToFreeLists(free_start,
-                            static_cast<int>(free_end - free_start));
+      space->Free(free_start, static_cast<int>(free_end - free_start));
     }
     HeapObject* live_object = HeapObject::FromAddress(free_end);
     ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -2914,8 +2913,7 @@ static void SweepPrecisely(PagedSpace* space,
     cells[cell_index] = 0;
   }
   if (free_start != p->ObjectAreaEnd()) {
-    space->AddToFreeLists(free_start,
-                          static_cast<int>(p->ObjectAreaEnd() - free_start));
+    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
   }
   p->ResetLiveBytes();
 }
@@ -3208,8 +3206,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     Page* p = evacuation_candidates_[i];
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->AddToFreeLists(p->ObjectAreaStart(),
-                          p->ObjectAreaEnd() - p->ObjectAreaStart());
+    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();
@@ -3526,8 +3523,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   }
   size_t size = block_address - p->ObjectAreaStart();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->AddToFreeLists(
-        p->ObjectAreaStart(), static_cast<int>(size)));
+    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
+                                                static_cast<int>(size)));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -3536,8 +3533,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->ObjectAreaStart();
-  freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(),
-                                       static_cast<int>(size));
+  freed_bytes += space->Free(p->ObjectAreaStart(),
+                             static_cast<int>(size));
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3566,8 +3563,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
       // so now we need to find the start of the first live object at the
       // end of the free space.
       free_end = StartOfLiveObject(block_address, cell);
-      freed_bytes += space->AddToFreeLists(
-          free_start, static_cast<int>(free_end - free_start));
+      freed_bytes += space->Free(free_start,
+                                 static_cast<int>(free_end - free_start));
     }
   }
   // Update our undigested record of where the current free area started.
@@ -3581,8 +3578,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->AddToFreeLists(
-        free_start, static_cast<int>(block_address - free_start));
+    freed_bytes += space->Free(free_start,
+                               static_cast<int>(block_address - free_start));
   }
   p->ResetLiveBytes();
...
@@ -464,8 +464,15 @@ class Thread::PlatformData : public Malloced {
 Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData),
+      stack_size_(0) {
+  set_name(name);
 }
@@ -710,10 +717,8 @@ class SignalSender : public Thread {
     FULL_INTERVAL
   };
-  static const int kSignalSenderStackSize = 32 * KB;
   explicit SignalSender(int interval)
-      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+      : Thread("SignalSender"),
         interval_(interval) {}
   static void AddActiveSampler(Sampler* sampler) {
...
@@ -720,8 +720,15 @@ class Thread::PlatformData : public Malloced {
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData()),
+      stack_size_(0) {
+  set_name(name);
 }
@@ -1028,10 +1035,8 @@ class SignalSender : public Thread {
     FULL_INTERVAL
   };
-  static const int kSignalSenderStackSize = 32 * KB;
   explicit SignalSender(int interval)
-      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+      : Thread("SignalSender"),
         vm_tgid_(getpid()),
         interval_(interval) {}
...
@@ -473,11 +473,17 @@ class Thread::PlatformData : public Malloced {
   pthread_t thread_;  // Thread handle for pthread.
 };
 Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData),
+      stack_size_(0) {
+  set_name(name);
 }
@@ -730,13 +736,10 @@ class Sampler::PlatformData : public Malloced {
   thread_act_t profiled_thread_;
 };
 class SamplerThread : public Thread {
  public:
-  static const int kSamplerThreadStackSize = 32 * KB;
   explicit SamplerThread(int interval)
-      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+      : Thread("SamplerThread"),
         interval_(interval) {}
   static void AddActiveSampler(Sampler* sampler) {
...
@@ -512,8 +512,15 @@ class Thread::PlatformData : public Malloced {
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData()),
+      stack_size_(0) {
+  set_name(name);
 }
@@ -782,10 +789,8 @@ class SignalSender : public Thread {
     FULL_INTERVAL
   };
-  static const int kSignalSenderStackSize = 32 * KB;
   explicit SignalSender(int interval)
-      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+      : Thread("SignalSender"),
         vm_tgid_(getpid()),
         interval_(interval) {}
...
@@ -369,11 +369,17 @@ class Thread::PlatformData : public Malloced {
   pthread_t thread_;  // Thread handle for pthread.
 };
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData()),
+      stack_size_(0) {
+  set_name(name);
 }
@@ -620,10 +626,8 @@ class SignalSender : public Thread {
     FULL_INTERVAL
   };
-  static const int kSignalSenderStackSize = 32 * KB;
   explicit SignalSender(int interval)
-      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+      : Thread("SignalSender"),
         interval_(interval) {}
   static void InstallSignalHandler() {
...
@@ -1526,9 +1526,16 @@ class Thread::PlatformData : public Malloced {
 // handle until it is started.
 Thread::Thread(const Options& options)
-    : stack_size_(options.stack_size()) {
+    : stack_size_(options.stack_size) {
   data_ = new PlatformData(kNoThread);
-  set_name(options.name());
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : stack_size_(0) {
+  data_ = new PlatformData(kNoThread);
+  set_name(name);
 }
@@ -1894,10 +1901,8 @@ class Sampler::PlatformData : public Malloced {
 class SamplerThread : public Thread {
  public:
-  static const int kSamplerThreadStackSize = 32 * KB;
   explicit SamplerThread(int interval)
-      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+      : Thread("SamplerThread"),
         interval_(interval) {}
   static void AddActiveSampler(Sampler* sampler) {
...
@@ -412,22 +412,16 @@ class Thread {
     LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
   };
-  class Options {
-   public:
-    Options() : name_("v8:<unknown>"), stack_size_(0) {}
-    Options(const char* name, int stack_size = 0)
-        : name_(name), stack_size_(stack_size) {}
-    const char* name() const { return name_; }
-    int stack_size() const { return stack_size_; }
-   private:
-    const char* name_;
-    int stack_size_;
+  struct Options {
+    Options() : name("v8:<unknown>"), stack_size(0) {}
+    const char* name;
+    int stack_size;
   };
   // Create new thread.
   explicit Thread(const Options& options);
+  explicit Thread(const char* name);
   virtual ~Thread();
   // Start new thread by calling the Run() method in the new thread.
...
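Aside: the struct-based Options restored above is exactly what the d8.cc hunk earlier in this commit uses. The following standalone sketch only mirrors the two construction paths; it is not the real v8::internal::Thread, and the class body, the 64-byte name buffer and main() are illustrative assumptions.

#include <cstdio>
#include <cstring>

class Thread {
 public:
  struct Options {
    Options() : name("v8:<unknown>"), stack_size(0) {}
    const char* name;
    int stack_size;
  };
  // Mirrors Thread(const Options&): caller supplies name and stack size.
  explicit Thread(const Options& options)
      : stack_size_(options.stack_size) { set_name(options.name); }
  // Mirrors Thread(const char* name): platform-default (0) stack size.
  explicit Thread(const char* name) : stack_size_(0) { set_name(name); }
  const char* name() const { return name_; }
  int stack_size() const { return stack_size_; }
 private:
  void set_name(const char* name) {
    std::strncpy(name_, name, sizeof(name_) - 1);
    name_[sizeof(name_) - 1] = '\0';
  }
  char name_[64];
  int stack_size_;
};

int main() {
  Thread sampler("SignalSender");   // name only, default stack size
  Thread::Options options;
  options.name = "IsolateThread";
  options.stack_size = 2 << 20;     // 2 MB, as in the d8.cc hunk above
  Thread isolate_thread(options);
  std::printf("%s: %d bytes\n", sampler.name(), sampler.stack_size());
  std::printf("%s: %d bytes\n", isolate_thread.name(), isolate_thread.stack_size());
  return 0;
}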
@@ -612,7 +612,6 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
     pages_[LO_SPACE].Add(address);
   }
   last_object_address_ = address;
-  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
   return address;
 }
@@ -623,12 +622,7 @@ HeapObject* Deserializer::GetAddressFromEnd(int space) {
   int offset = source_->GetInt();
   ASSERT(!SpaceIsLarge(space));
   offset <<= kObjectAlignmentBits;
-  Address address = high_water_[space] - offset;
-  // This assert will fail if kMinimumSpaceSizes is too small for a space,
-  // because we rely on the fact that all allocation is linear when the VM
-  // is very young.
-  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
-  return HeapObject::FromAddress(address);
+  return HeapObject::FromAddress(high_water_[space] - offset);
 }
...
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "isolate.h" #include "isolate.h"
#include "spaces.h"
#ifndef V8_SNAPSHOT_H_ #ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_ #define V8_SNAPSHOT_H_
...@@ -87,21 +86,6 @@ class Snapshot { ...@@ -87,21 +86,6 @@ class Snapshot {
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot); DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
}; };
// These are the sizes of the spaces that are needed in order to unpack the
// VM boot snapshot.
const intptr_t kMinimumSpaceSizes[LAST_SPACE + 1] = {
0, // New space.
512 * 1024, // Old pointer space.
128 * 1024, // Old data space.
256 * 1024, // Code space.
64 * 1024, // Map space.
64 * 1024, // Cell space.
0 // Large object space.
};
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_SNAPSHOT_H_ #endif // V8_SNAPSHOT_H_
@@ -164,12 +164,12 @@ Page* Page::Initialize(Heap* heap,
                        Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(chunk->size() <= kPageSize);
+  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
   ASSERT(chunk->owner() == owner);
-  int object_bytes =
-      static_cast<int>(page->ObjectAreaEnd() - page->ObjectAreaStart());
-  owner->IncreaseCapacity(object_bytes);
-  owner->AddToFreeLists(page->ObjectAreaStart(), object_bytes);
+  owner->IncreaseCapacity(Page::kObjectAreaSize);
+  owner->Free(page->ObjectAreaStart(),
+              static_cast<int>(page->ObjectAreaEnd() -
+                               page->ObjectAreaStart()));
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
@@ -257,7 +257,6 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
   if (new_top > allocation_info_.limit) return NULL;
   allocation_info_.top = new_top;
-  ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart());
   return HeapObject::FromAddress(current_top);
 }
...
@@ -502,9 +502,11 @@ class MemoryChunk {
   static const int kObjectStartOffset = kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-  intptr_t size() const { return size_; }
-  void set_size(size_t size) { size_ = size; }
+  size_t size() const { return size_; }
+  void set_size(size_t size) {
+    size_ = size;
+  }
   Executability executable() {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
@@ -656,7 +658,7 @@ class Page : public MemoryChunk {
   Address ObjectAreaStart() { return address() + kObjectStartOffset; }
   // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + size(); }
+  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
@@ -675,10 +677,6 @@ class Page : public MemoryChunk {
     return address() + offset;
   }
-  // Expand the committed area for pages that are small.  This
-  // happens primarily when the VM is newly booted.
-  void CommitMore(intptr_t space_needed);
   // ---------------------------------------------------------------------
   // Page size in bytes.  This must be a multiple of the OS page size.
@@ -848,10 +846,12 @@ class CodeRange {
     FreeBlock(Address start_arg, size_t size_arg)
         : start(start_arg), size(size_arg) {
       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
     }
     FreeBlock(void* start_arg, size_t size_arg)
         : start(static_cast<Address>(start_arg)), size(size_arg) {
       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
     }
     Address start;
@@ -947,9 +947,7 @@ class MemoryAllocator {
   void TearDown();
-  Page* AllocatePage(intptr_t object_area_size,
-                     PagedSpace* owner,
-                     Executability executable);
+  Page* AllocatePage(PagedSpace* owner, Executability executable);
   LargePage* AllocateLargePage(intptr_t object_size,
                                Executability executable,
@@ -958,14 +956,10 @@ class MemoryAllocator {
   void Free(MemoryChunk* chunk);
   // Returns the maximum available bytes of heaps.
-  intptr_t Available() {
-    return capacity_ < memory_allocator_reserved_ ?
-        0 :
-        capacity_ - memory_allocator_reserved_;
-  }
+  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
   // Returns allocated spaces in bytes.
-  intptr_t Size() { return memory_allocator_reserved_; }
+  intptr_t Size() { return size_; }
   // Returns the maximum available executable bytes of heaps.
   intptr_t AvailableExecutable() {
@@ -987,7 +981,6 @@ class MemoryAllocator {
 #endif
   MemoryChunk* AllocateChunk(intptr_t body_size,
-                             intptr_t committed_body_size,
                              Executability executable,
                              Space* space);
@@ -995,7 +988,6 @@ class MemoryAllocator {
                                size_t alignment,
                                VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t requested,
-                                size_t committed,
                                 size_t alignment,
                                 Executability executable,
                                 VirtualMemory* controller);
@@ -1015,12 +1007,6 @@ class MemoryAllocator {
   // and false otherwise.
   bool UncommitBlock(Address start, size_t size);
-  void AllocationBookkeeping(Space* owner,
-                             Address base,
-                             intptr_t reserved_size,
-                             intptr_t committed_size,
-                             Executability executable);
   // Zaps a contiguous block of memory [start..(start+size)[ thus
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
@@ -1048,7 +1034,7 @@ class MemoryAllocator {
   size_t capacity_executable_;
   // Allocated space size in bytes.
-  size_t memory_allocator_reserved_;
+  size_t size_;
   // Allocated executable space size in bytes.
   size_t size_executable_;
@@ -1393,15 +1379,9 @@ class FreeList BASE_EMBEDDED {
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
-  FreeListNode* PickNodeFromList(FreeListNode** list,
-                                 int* node_size,
-                                 int minimum_size);
+  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
-  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
-  FreeListNode* FindAbuttingNode(int size_in_bytes,
-                                 int* node_size,
-                                 Address limit,
-                                 FreeListNode** list_head);
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
   PagedSpace* owner_;
   Heap* heap_;
@@ -1501,8 +1481,6 @@ class PagedSpace : public Space {
   // free bytes that were not found at all due to lazy sweeping.
   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
-  virtual int ObjectAlignment() { return kObjectAlignment; }
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top; }
   Address limit() { return allocation_info_.limit; }
@@ -1517,7 +1495,7 @@ class PagedSpace : public Space {
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
   // no attempt to add area to free list is made.
-  int AddToFreeLists(Address start, int size_in_bytes) {
+  int Free(Address start, int size_in_bytes) {
     int wasted = free_list_.Free(start, size_in_bytes);
     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
     return size_in_bytes - wasted;
@@ -1525,7 +1503,6 @@ class PagedSpace : public Space {
   // Set space allocation info.
   void SetTop(Address top, Address limit) {
-    ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
     ASSERT(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
     allocation_info_.top = top;
@@ -1596,7 +1573,6 @@ class PagedSpace : public Space {
     return !first_unswept_page_->is_valid();
   }
-  inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
@@ -1669,6 +1645,12 @@ class PagedSpace : public Space {
   // Normal allocation information.
   AllocationInfo allocation_info_;
+  // Bytes of each page that cannot be allocated.  Possibly non-zero
+  // for pages in spaces with only fixed-size objects.  Always zero
+  // for pages in spaces with variable sized objects (those pages are
+  // padded with free-list nodes).
+  int page_extra_;
   bool was_swept_conservatively_;
   // The first page to be swept when the lazy sweeper advances. Is set
@@ -1680,11 +1662,10 @@ class PagedSpace : public Space {
   // done conservatively.
   intptr_t unswept_free_bytes_;
-  // Expands the space by allocating a page.  Returns false if it cannot
-  // allocate a page from OS, or if the hard heap size limit has been hit.  The
-  // new page will have at least enough committed space to satisfy the object
-  // size indicated by the allocation_size argument;
-  bool Expand(intptr_t allocation_size);
+  // Expands the space by allocating a fixed number of pages. Returns false if
+  // it cannot allocate requested number of pages from OS, or if the hard heap
+  // size limit has been hit.
+  bool Expand();
   // Generic fast case allocation function that tries linear allocation at the
   // address denoted by top in allocation_info_.
@@ -1839,9 +1820,8 @@ class SemiSpace : public Space {
       anchor_(this),
       current_page_(NULL) { }
-  // Sets up the semispace using the given chunk.  After this, call Commit()
-  // to make the semispace usable.
-  void SetUp(Address start, int initial_capacity, int maximum_capacity);
+  // Sets up the semispace using the given chunk.
+  bool SetUp(Address start, int initial_capacity, int maximum_capacity);
   // Tear down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
@@ -2345,7 +2325,14 @@ class OldSpace : public PagedSpace {
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) { }
+      : PagedSpace(heap, max_capacity, id, executable) {
+    page_extra_ = 0;
+  }
+
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd();
+  }
  public:
   TRACK_MEMORY("OldSpace")
@@ -2372,11 +2359,16 @@ class FixedSpace : public PagedSpace {
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name) { }
+        name_(name) {
+    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+  }
+
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd() - page_extra_;
+  }
-  int object_size_in_bytes() { return object_size_in_bytes_; }
-  virtual int ObjectAlignment() { return object_size_in_bytes_; }
+  int object_size_in_bytes() { return object_size_in_bytes_; }
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();
...
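One way to read the FixedSpace change above: page_extra_ records the tail of each page that can never hold a whole fixed-size object, and PageAllocationLimit() pulls the allocation limit in by that amount. A tiny standalone sketch of that arithmetic follows; the two constants are made-up values, not V8's real page geometry.

#include <cstdio>

int main() {
  // Hypothetical numbers, chosen only to show the remainder calculation.
  const int kObjectAreaSize = 8 * 1024;  // usable bytes per page (assumed)
  const int kObjectSize = 88;            // fixed object size (assumed)
  // What the FixedSpace constructor computes for page_extra_:
  int page_extra = kObjectAreaSize % kObjectSize;
  // PageAllocationLimit() would return ObjectAreaEnd() - page_extra,
  // i.e. the last page_extra bytes of every page are never handed out.
  std::printf("page_extra = %d bytes per page\n", page_extra);
  return 0;
}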
@@ -496,6 +496,7 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
   Address map_aligned_end = MapEndAlign(end);
   ASSERT(map_aligned_start == start);
+  ASSERT(map_aligned_end == end);
   FindPointersToNewSpaceInMaps(map_aligned_start,
                                map_aligned_end,
@@ -523,57 +524,52 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
     RegionCallback region_callback,
     ObjectSlotCallback slot_callback) {
   Address visitable_start = page->ObjectAreaStart();
+  Address end_of_page = page->ObjectAreaEnd();
   Address visitable_end = visitable_start;
   Object* free_space_map = heap_->free_space_map();
   Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
-  while (true) {  // While the page grows (doesn't normally happen).
-    Address end_of_page = page->ObjectAreaEnd();
-    while (visitable_end < end_of_page) {
-      Object* o = *reinterpret_cast<Object**>(visitable_end);
-      // Skip fillers but not things that look like fillers in the special
-      // garbage section which can contain anything.
-      if (o == free_space_map ||
-          o == two_pointer_filler_map ||
-          (visitable_end == space->top() && visitable_end != space->limit())) {
-        if (visitable_start != visitable_end) {
-          // After calling this the special garbage section may have moved.
-          (this->*region_callback)(visitable_start,
-                                   visitable_end,
-                                   slot_callback);
-          if (visitable_end >= space->top() && visitable_end < space->limit()) {
-            visitable_end = space->limit();
-            visitable_start = visitable_end;
-            continue;
-          }
-        }
-        if (visitable_end == space->top() && visitable_end != space->limit()) {
-          visitable_start = visitable_end = space->limit();
-        } else {
-          // At this point we are either at the start of a filler or we are at
-          // the point where the space->top() used to be before the
-          // visit_pointer_region call above.  Either way we can skip the
-          // object at the current spot:  We don't promise to visit objects
-          // allocated during heap traversal, and if space->top() moved then it
-          // must be because an object was allocated at this point.
-          visitable_start =
-              visitable_end + HeapObject::FromAddress(visitable_end)->Size();
-          visitable_end = visitable_start;
-        }
-      } else {
-        ASSERT(o != free_space_map);
-        ASSERT(o != two_pointer_filler_map);
-        ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
-        visitable_end += kPointerSize;
+  while (visitable_end < end_of_page) {
+    Object* o = *reinterpret_cast<Object**>(visitable_end);
+    // Skip fillers but not things that look like fillers in the special
+    // garbage section which can contain anything.
+    if (o == free_space_map ||
+        o == two_pointer_filler_map ||
+        (visitable_end == space->top() && visitable_end != space->limit())) {
+      if (visitable_start != visitable_end) {
+        // After calling this the special garbage section may have moved.
+        (this->*region_callback)(visitable_start,
+                                 visitable_end,
+                                 slot_callback);
+        if (visitable_end >= space->top() && visitable_end < space->limit()) {
+          visitable_end = space->limit();
+          visitable_start = visitable_end;
+          continue;
+        }
       }
+      if (visitable_end == space->top() && visitable_end != space->limit()) {
+        visitable_start = visitable_end = space->limit();
+      } else {
+        // At this point we are either at the start of a filler or we are at
+        // the point where the space->top() used to be before the
+        // visit_pointer_region call above.  Either way we can skip the
+        // object at the current spot:  We don't promise to visit objects
+        // allocated during heap traversal, and if space->top() moved then it
+        // must be because an object was allocated at this point.
+        visitable_start =
+            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+        visitable_end = visitable_start;
+      }
+    } else {
+      ASSERT(o != free_space_map);
+      ASSERT(o != two_pointer_filler_map);
+      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+      visitable_end += kPointerSize;
     }
-    ASSERT(visitable_end >= end_of_page);
-    // If the page did not grow we are done.
-    if (end_of_page == page->ObjectAreaEnd()) break;
   }
-  ASSERT(visitable_end == page->ObjectAreaEnd());
+  ASSERT(visitable_end == end_of_page);
   if (visitable_start != visitable_end) {
     (this->*region_callback)(visitable_start,
                              visitable_end,
...
@@ -153,9 +153,11 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
 }
-template<typename int_type>
-inline int RoundUpToPowerOf2(int_type x_argument) {
-  uintptr_t x = static_cast<uintptr_t>(x_argument);
+// Returns the smallest power of two which is >= x. If you pass in a
+// number that is already a power of two, it is returned as is.
+// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
+// figure 3-3, page 48, where the function is called clp2.
+inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   ASSERT(x <= 0x80000000u);
   x = x - 1;
   x = x | (x >> 1);
@@ -163,7 +165,7 @@ inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   x = x | (x >> 4);
   x = x | (x >> 8);
   x = x | (x >> 16);
-  return static_cast<int_type>(x + 1);
+  return x + 1;
 }
...
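The restored comment above credits the clp2 routine from Hacker's Delight. For readers who want to try it outside the V8 tree, here is a standalone copy of the same bit-smearing trick: subtract one, smear the highest set bit into every lower position, then add one. The ASSERT is replaced by a plain assert, and the x >> 2 step, which falls between the two hunks above, is included. The main() and sample values are illustrative only.

#include <cassert>
#include <cstdint>
#include <cstdio>

inline uint32_t RoundUpToPowerOf2(uint32_t x) {
  assert(x <= 0x80000000u);
  x = x - 1;
  x = x | (x >> 1);
  x = x | (x >> 2);
  x = x | (x >> 4);
  x = x | (x >> 8);
  x = x | (x >> 16);
  return x + 1;
}

int main() {
  std::printf("%u\n", static_cast<unsigned>(RoundUpToPowerOf2(1)));   // 1
  std::printf("%u\n", static_cast<unsigned>(RoundUpToPowerOf2(17)));  // 32
  std::printf("%u\n", static_cast<unsigned>(RoundUpToPowerOf2(64)));  // 64 (already a power of two)
  return 0;
}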
@@ -1236,14 +1236,17 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
        obj = iterator.next()) {
     size_of_objects_2 += obj->Size();
   }
-  // Delta must be within 1% of the larger result.
+  // Delta must be within 5% of the larger result.
+  // TODO(gc): Tighten this up by distinguishing between byte
+  // arrays that are real and those that merely mark free space
+  // on the heap.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_1 / 100, delta);
+    CHECK_GT(size_of_objects_1 / 20, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
...
@@ -526,25 +526,12 @@ static intptr_t MemoryInUse() {
 TEST(BootUpMemoryUse) {
   intptr_t initial_memory = MemoryInUse();
-  FLAG_crankshaft = false;  // Avoid flakiness.
   // Only Linux has the proc filesystem and only if it is mapped.  If it's not
   // there we just skip the test.
   if (initial_memory >= 0) {
     InitializeVM();
     intptr_t booted_memory = MemoryInUse();
-    if (sizeof(initial_memory) == 8) {
-      if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(booted_memory - initial_memory, 3700 * 1024);  // 3640.
-      } else {
-        CHECK_LE(booted_memory - initial_memory, 3300 * 1024);  // 3276.
-      }
-    } else {
-      if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(booted_memory - initial_memory, 2300 * 1024);  // 2276.
-      } else {
-        CHECK_LE(booted_memory - initial_memory, 2500 * 1024);  // 2416
-      }
-    }
+    CHECK_LE(booted_memory - initial_memory, 16 * 1024 * 1024);
   }
 }
...
@@ -140,8 +140,8 @@ TEST(MemoryAllocator) {
                          heap->MaxReserved(),
                          OLD_POINTER_SPACE,
                          NOT_EXECUTABLE);
-  Page* first_page = memory_allocator->AllocatePage(
-      Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
+  Page* first_page =
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
   first_page->InsertAfter(faked_space.anchor()->prev_page());
   CHECK(first_page->is_valid());
@@ -154,8 +154,7 @@ TEST(MemoryAllocator) {
     // Again, we should get n or n - 1 pages.
     Page* other =
-        memory_allocator->AllocatePage(
-            Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
+        memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
     CHECK(other->is_valid());
     total_pages++;
     other->InsertAfter(first_page);
...