Commit 7cd44cea authored by bak@chromium.org's avatar bak@chromium.org

- Removed a few indirections by making the two SemiSpaces part of
  NewSpace and made NewSpace statically allocated.
- Eliminated indirection in MigrateObject.

Review URL: http://codereview.chromium.org/7619

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@517 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c63477df
......@@ -375,7 +375,6 @@ enum StateTag {
#define OBJECT_SIZE_ALIGN(value) \
((value + kObjectAlignmentMask) & ~kObjectAlignmentMask)
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
......
......@@ -51,7 +51,7 @@ Object* Heap::AllocateRaw(int size_in_bytes,
Counters::objs_since_last_young.Increment();
#endif
if (NEW_SPACE == space) {
return new_space_->AllocateRaw(size_in_bytes);
return new_space_.AllocateRaw(size_in_bytes);
}
Object* result;
......@@ -100,17 +100,17 @@ Object* Heap::AllocateRawMap(int size_in_bytes) {
bool Heap::InNewSpace(Object* object) {
return new_space_->Contains(object);
return new_space_.Contains(object);
}
bool Heap::InFromSpace(Object* object) {
return new_space_->FromSpaceContains(object);
return new_space_.FromSpaceContains(object);
}
bool Heap::InToSpace(Object* object) {
return new_space_->ToSpaceContains(object);
return new_space_.ToSpaceContains(object);
}
......@@ -118,14 +118,14 @@ bool Heap::ShouldBePromoted(Address old_address, int object_size) {
// An object should be promoted if:
// - the object has survived a scavenge operation or
// - to space is already 25% full.
return old_address < new_space_->age_mark()
|| (new_space_->Size() + object_size) >= (new_space_->Capacity() >> 2);
return old_address < new_space_.age_mark()
|| (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
}
void Heap::RecordWrite(Address address, int offset) {
if (new_space_->Contains(address)) return;
ASSERT(!new_space_->FromSpaceContains(address));
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
Page::SetRSet(address, offset);
}
......
This diff is collapsed.
......@@ -244,11 +244,11 @@ class Heap : public AllStatic {
// Return the starting address and a mask for the new space. And-masking an
// address with the mask will result in the start address of the new space
// for all addresses in either semispace.
static Address NewSpaceStart() { return new_space_->start(); }
static uint32_t NewSpaceMask() { return new_space_->mask(); }
static Address NewSpaceTop() { return new_space_->top(); }
static Address NewSpaceStart() { return new_space_.start(); }
static uint32_t NewSpaceMask() { return new_space_.mask(); }
static Address NewSpaceTop() { return new_space_.top(); }
static NewSpace* new_space() { return new_space_; }
static NewSpace* new_space() { return &new_space_; }
static OldSpace* old_pointer_space() { return old_pointer_space_; }
static OldSpace* old_data_space() { return old_data_space_; }
static OldSpace* code_space() { return code_space_; }
......@@ -256,10 +256,10 @@ class Heap : public AllStatic {
static LargeObjectSpace* lo_space() { return lo_space_; }
static Address* NewSpaceAllocationTopAddress() {
return new_space_->allocation_top_address();
return new_space_.allocation_top_address();
}
static Address* NewSpaceAllocationLimitAddress() {
return new_space_->allocation_limit_address();
return new_space_.allocation_limit_address();
}
// Allocates and initializes a new JavaScript object based on a
......@@ -727,7 +727,7 @@ class Heap : public AllStatic {
static const int kMaxMapSpaceSize = 8*MB;
static NewSpace* new_space_;
static NewSpace new_space_;
static OldSpace* old_pointer_space_;
static OldSpace* old_data_space_;
static OldSpace* code_space_;
......@@ -839,7 +839,7 @@ class Heap : public AllStatic {
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
static HeapObject* MigrateObject(HeapObject** source_p,
static HeapObject* MigrateObject(HeapObject* source,
HeapObject* target,
int size);
......
......@@ -308,7 +308,7 @@ Object* NewSpace::AllocateRawInternal(int size_in_bytes,
alloc_info->top = new_top;
#ifdef DEBUG
SemiSpace* space =
(alloc_info == &allocation_info_) ? to_space_ : from_space_;
(alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
ASSERT(space->low() <= alloc_info->top
&& alloc_info->top <= space->high()
&& alloc_info->limit == space->high());
......
......@@ -36,9 +36,9 @@ namespace v8 { namespace internal {
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
ASSERT((space)->low() <= (info).top \
&& (info).top <= (space)->high() \
&& (info).limit == (space)->high())
ASSERT((space).low() <= (info).top \
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
// ----------------------------------------------------------------------------
......@@ -760,16 +760,19 @@ void PagedSpace::Print() { }
// -----------------------------------------------------------------------------
// NewSpace implementation
NewSpace::NewSpace(int initial_semispace_capacity,
int maximum_semispace_capacity,
AllocationSpace id)
: Space(id, NOT_EXECUTABLE) {
bool NewSpace::Setup(Address start, int size) {
// Setup new space based on the preallocated memory block defined by
// start and size. The provided space is divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
int maximum_semispace_capacity = Heap::SemiSpaceSize();
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
maximum_capacity_ = maximum_semispace_capacity;
capacity_ = initial_semispace_capacity;
to_space_ = new SemiSpace(capacity_, maximum_capacity_, id);
from_space_ = new SemiSpace(capacity_, maximum_capacity_, id);
// Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
......@@ -781,19 +784,16 @@ NewSpace::NewSpace(int initial_semispace_capacity,
INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif
}
bool NewSpace::Setup(Address start, int size) {
ASSERT(size == 2 * maximum_capacity_);
ASSERT(IsAddressAligned(start, size, 0));
if (to_space_ == NULL
|| !to_space_->Setup(start, maximum_capacity_)) {
if (!to_space_.Setup(start, capacity_, maximum_capacity_)) {
return false;
}
if (from_space_ == NULL
|| !from_space_->Setup(start + maximum_capacity_, maximum_capacity_)) {
if (!from_space_.Setup(start + maximum_capacity_,
capacity_,
maximum_capacity_)) {
return false;
}
......@@ -802,8 +802,8 @@ bool NewSpace::Setup(Address start, int size) {
object_mask_ = address_mask_ | kHeapObjectTag;
object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
allocation_info_.top = to_space_->low();
allocation_info_.limit = to_space_->high();
allocation_info_.top = to_space_.low();
allocation_info_.limit = to_space_.high();
mc_forwarding_info_.top = NULL;
mc_forwarding_info_.limit = NULL;
......@@ -831,22 +831,13 @@ void NewSpace::TearDown() {
mc_forwarding_info_.top = NULL;
mc_forwarding_info_.limit = NULL;
if (to_space_ != NULL) {
to_space_->TearDown();
delete to_space_;
to_space_ = NULL;
}
if (from_space_ != NULL) {
from_space_->TearDown();
delete from_space_;
from_space_ = NULL;
}
to_space_.TearDown();
from_space_.TearDown();
}
void NewSpace::Flip() {
SemiSpace* tmp = from_space_;
SemiSpace tmp = from_space_;
from_space_ = to_space_;
to_space_ = tmp;
}
......@@ -857,24 +848,24 @@ bool NewSpace::Double() {
// TODO(1240712): Failure to double the from space can result in
// semispaces of different sizes. In the event of that failure, the
// to space doubling should be rolled back before returning false.
if (!to_space_->Double() || !from_space_->Double()) return false;
if (!to_space_.Double() || !from_space_.Double()) return false;
capacity_ *= 2;
allocation_info_.limit = to_space_->high();
allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
return true;
}
void NewSpace::ResetAllocationInfo() {
allocation_info_.top = to_space_->low();
allocation_info_.limit = to_space_->high();
allocation_info_.top = to_space_.low();
allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::MCResetRelocationInfo() {
mc_forwarding_info_.top = from_space_->low();
mc_forwarding_info_.limit = from_space_->high();
mc_forwarding_info_.top = from_space_.low();
mc_forwarding_info_.limit = from_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}
......@@ -883,7 +874,7 @@ void NewSpace::MCCommitRelocationInfo() {
// Assumes that the spaces have been flipped so that mc_forwarding_info_ is
// valid allocation info for the to space.
allocation_info_.top = mc_forwarding_info_.top;
allocation_info_.limit = to_space_->high();
allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
......@@ -897,7 +888,7 @@ void NewSpace::Verify() {
// There should be objects packed in from the low address up to the
// allocation pointer.
Address current = to_space_->low();
Address current = to_space_.low();
while (current < top()) {
HeapObject* object = HeapObject::FromAddress(current);
......@@ -931,22 +922,24 @@ void NewSpace::Verify() {
// -----------------------------------------------------------------------------
// SemiSpace implementation
SemiSpace::SemiSpace(int initial_capacity,
int maximum_capacity,
AllocationSpace id)
: Space(id, NOT_EXECUTABLE), capacity_(initial_capacity),
maximum_capacity_(maximum_capacity), start_(NULL), age_mark_(NULL) {
}
bool SemiSpace::Setup(Address start,
int initial_capacity,
int maximum_capacity) {
// Creates a space in the young generation. The constructor does not
// allocate memory from the OS. A SemiSpace is given a contiguous chunk of
// memory of size 'capacity' when set up, and does not grow or shrink
// otherwise. In the mark-compact collector, the memory region of the from
// space is used as the marking stack. It requires contiguous memory
// addresses.
capacity_ = initial_capacity;
maximum_capacity_ = maximum_capacity;
bool SemiSpace::Setup(Address start, int size) {
ASSERT(size == maximum_capacity_);
if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
return false;
}
start_ = start;
address_mask_ = ~(size - 1);
address_mask_ = ~(maximum_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTag;
object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
......@@ -1002,7 +995,7 @@ void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
ASSERT(space->ToSpaceContains(start));
ASSERT(space->ToSpaceLow() <= end
&& end <= space->ToSpaceHigh());
space_ = space->to_space_;
space_ = &space->to_space_;
current_ = start;
limit_ = end;
size_func_ = size_func;
......
......@@ -878,19 +878,14 @@ class HistogramInfo BASE_EMBEDDED {
class SemiSpace : public Space {
public:
// Creates a space in the young generation. The constructor does not
// allocate memory from the OS. A SemiSpace is given a contiguous chunk of
// memory of size 'capacity' when set up, and does not grow or shrink
// otherwise. In the mark-compact collector, the memory region of the from
// space is used as the marking stack. It requires contiguous memory
// addresses.
SemiSpace(int initial_capacity,
int maximum_capacity,
AllocationSpace id);
virtual ~SemiSpace() {}
// Constructor.
SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
start_ = NULL;
age_mark_ = NULL;
}
// Sets up the semispace using the given chunk.
bool Setup(Address start, int size);
bool Setup(Address start, int initial_capacity, int maximum_capacity);
// Tear down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
......@@ -1016,16 +1011,8 @@ class SemiSpaceIterator : public ObjectIterator {
class NewSpace : public Space {
public:
// Create a new space with a given allocation capacity (ie, the capacity of
// *one* of the semispaces). The constructor does not allocate heap memory
// from the OS. When the space is set up, it is given a contiguous chunk of
// memory of size 2 * semispace_capacity. To support fast containment
// testing in the new space, the size of this chunk must be a power of two
// and it must be aligned to its size.
NewSpace(int initial_semispace_capacity,
int maximum_semispace_capacity,
AllocationSpace id);
virtual ~NewSpace() {}
// Constructor.
NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
// Sets up the new space using the given chunk.
bool Setup(Address start, int size);
......@@ -1036,7 +1023,7 @@ class NewSpace : public Space {
// True if the space has been set up but not torn down.
bool HasBeenSetup() {
return to_space_->HasBeenSetup() && from_space_->HasBeenSetup();
return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
}
// Flip the pair of spaces.
......@@ -1069,12 +1056,12 @@ class NewSpace : public Space {
// Return the address of the allocation pointer in the active semispace.
Address top() { return allocation_info_.top; }
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_->low(); }
Address bottom() { return to_space_.low(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_->age_mark(); }
Address age_mark() { return from_space_.age_mark(); }
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_->set_age_mark(mark); }
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
// The start address of the space and a bit mask. Anding an address in the
// new space with the mask will result in the start address.
......@@ -1105,36 +1092,36 @@ class NewSpace : public Space {
void MCCommitRelocationInfo();
// Get the extent of the inactive semispace (for use as a marking stack).
Address FromSpaceLow() { return from_space_->low(); }
Address FromSpaceHigh() { return from_space_->high(); }
Address FromSpaceLow() { return from_space_.low(); }
Address FromSpaceHigh() { return from_space_.high(); }
// Get the extent of the active semispace (to sweep newly copied objects
// during a scavenge collection).
Address ToSpaceLow() { return to_space_->low(); }
Address ToSpaceHigh() { return to_space_->high(); }
Address ToSpaceLow() { return to_space_.low(); }
Address ToSpaceHigh() { return to_space_.high(); }
// Offsets from the beginning of the semispaces.
int ToSpaceOffsetForAddress(Address a) {
return to_space_->SpaceOffsetForAddress(a);
return to_space_.SpaceOffsetForAddress(a);
}
int FromSpaceOffsetForAddress(Address a) {
return from_space_->SpaceOffsetForAddress(a);
return from_space_.SpaceOffsetForAddress(a);
}
// True if the object is a heap object in the address range of the
// respective semispace (not necessarily below the allocation pointer of the
// semispace).
bool ToSpaceContains(Object* o) { return to_space_->Contains(o); }
bool FromSpaceContains(Object* o) { return from_space_->Contains(o); }
bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
bool ToSpaceContains(Address a) { return to_space_->Contains(a); }
bool FromSpaceContains(Address a) { return from_space_->Contains(a); }
bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
#ifdef DEBUG
// Verify the active semispace.
virtual void Verify();
// Print the active semispace.
virtual void Print() { to_space_->Print(); }
virtual void Print() { to_space_.Print(); }
#endif
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
......@@ -1158,8 +1145,8 @@ class NewSpace : public Space {
int maximum_capacity_;
// The semispaces.
SemiSpace* to_space_;
SemiSpace* from_space_;
SemiSpace to_space_;
SemiSpace from_space_;
// Start address and bit mask for containment testing.
Address start_;
......
......@@ -157,27 +157,23 @@ TEST(NewSpace) {
CHECK(Heap::ConfigureHeapDefault());
CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
NewSpace* s = new NewSpace(Heap::InitialSemiSpaceSize(),
Heap::SemiSpaceSize(),
NEW_SPACE);
CHECK(s != NULL);
NewSpace new_space;
void* chunk =
MemoryAllocator::ReserveInitialChunk(2 * Heap::YoungGenerationSize());
CHECK(chunk != NULL);
Address start = RoundUp(static_cast<Address>(chunk),
Heap::YoungGenerationSize());
CHECK(s->Setup(start, Heap::YoungGenerationSize()));
CHECK(s->HasBeenSetup());
CHECK(new_space.Setup(start, Heap::YoungGenerationSize()));
CHECK(new_space.HasBeenSetup());
while (s->Available() >= Page::kMaxHeapObjectSize) {
Object* obj = s->AllocateRaw(Page::kMaxHeapObjectSize);
while (new_space.Available() >= Page::kMaxHeapObjectSize) {
Object* obj = new_space.AllocateRaw(Page::kMaxHeapObjectSize);
CHECK(!obj->IsFailure());
CHECK(s->Contains(HeapObject::cast(obj)));
CHECK(new_space.Contains(HeapObject::cast(obj)));
}
s->TearDown();
delete s;
new_space.TearDown();
MemoryAllocator::TearDown();
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment