Commit a2f85d6c authored by hpayer@chromium.org's avatar hpayer@chromium.org

Promotion is the backup strategy when semi-space copy fails and vice versa.

BUG=
R=ishell@chromium.org

Review URL: https://codereview.chromium.org/356613004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22002 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 3247a71c
...@@ -388,8 +388,6 @@ bool Heap::OldGenerationAllocationLimitReached() { ...@@ -388,8 +388,6 @@ bool Heap::OldGenerationAllocationLimitReached() {
bool Heap::ShouldBePromoted(Address old_address, int object_size) { bool Heap::ShouldBePromoted(Address old_address, int object_size) {
// An object should be promoted if the object has survived a
// scavenge operation.
NewSpacePage* page = NewSpacePage::FromAddress(old_address); NewSpacePage* page = NewSpacePage::FromAddress(old_address);
Address age_mark = new_space_.age_mark(); Address age_mark = new_space_.age_mark();
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
......
...@@ -1970,14 +1970,49 @@ class ScavengingVisitor : public StaticVisitorBase { ...@@ -1970,14 +1970,49 @@ class ScavengingVisitor : public StaticVisitorBase {
} }
} }
// Tries to evacuate |object| by copying it into to-space (the new
// semi-space). On success, updates *slot to point at the copy and returns
// true; returns false if the to-space allocation fails (e.g. due to
// fragmentation), in which case the caller falls back to promotion into
// the old generation (see the commit message: promotion is the backup
// strategy when semi-space copy fails, and vice versa).
//
// |map|         - the object's map; used only to reach the Heap.
// |slot|        - the slot holding the reference being updated; may alias
//                 memory inside |target| (see the ordering note below).
// |object|      - the live new-space object being copied.
// |object_size| - the object's size in bytes.
template<int alignment>
static inline bool SemiSpaceCopyObject(Map* map,
HeapObject** slot,
HeapObject* object,
int object_size) {
Heap* heap = map->GetHeap();
int allocation_size = object_size;
if (alignment != kObjectAlignment) {
// Only double alignment is supported beyond the default; reserve one
// extra pointer-sized word so the payload can be realigned below.
ASSERT(alignment == kDoubleAlignment);
allocation_size += kPointerSize;
}
ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
// Raw bump-pointer allocation in to-space; may fail, which is handled
// by returning false rather than retrying here.
AllocationResult allocation =
heap->new_space()->AllocateRaw(allocation_size);
HeapObject* target = NULL;  // Initialization to please compiler.
if (allocation.To(&target)) {
if (alignment != kObjectAlignment) {
// Shift |target| within the padded allocation to a double-aligned
// address.
target = EnsureDoubleAligned(heap, target, allocation_size);
}
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
*slot = target;
MigrateObject(heap, object, target, object_size);
// Advance the promotion queue's limit past the new to-space top so the
// queue does not collide with freshly copied objects.
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
heap->IncrementSemiSpaceCopiedObjectSize(object_size);
return true;
}
return false;
}
template<ObjectContents object_contents, int alignment> template<ObjectContents object_contents, int alignment>
static inline void EvacuateObject(Map* map, static inline bool PromoteObject(Map* map,
HeapObject** slot, HeapObject** slot,
HeapObject* object, HeapObject* object,
int object_size) { int object_size) {
SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); Heap* heap = map->GetHeap();
SLOW_ASSERT(object->Size() == object_size);
int allocation_size = object_size; int allocation_size = object_size;
if (alignment != kObjectAlignment) { if (alignment != kObjectAlignment) {
...@@ -1985,73 +2020,68 @@ class ScavengingVisitor : public StaticVisitorBase { ...@@ -1985,73 +2020,68 @@ class ScavengingVisitor : public StaticVisitorBase {
allocation_size += kPointerSize; allocation_size += kPointerSize;
} }
Heap* heap = map->GetHeap(); AllocationResult allocation;
if (heap->ShouldBePromoted(object->address(), object_size)) { if (object_contents == DATA_OBJECT) {
AllocationResult allocation; ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
allocation = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
if (object_contents == DATA_OBJECT) { HeapObject* target = NULL; // Initialization to please compiler.
ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); if (allocation.To(&target)) {
allocation = heap->old_data_space()->AllocateRaw(allocation_size); if (alignment != kObjectAlignment) {
} else { target = EnsureDoubleAligned(heap, target, allocation_size);
ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
} }
HeapObject* target = NULL; // Initialization to please compiler. // Order is important: slot might be inside of the target if target
if (allocation.To(&target)) { // was allocated over a dead object and slot comes from the store
if (alignment != kObjectAlignment) { // buffer.
target = EnsureDoubleAligned(heap, target, allocation_size); *slot = target;
MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) {
if (map->instance_type() == JS_FUNCTION_TYPE) {
heap->promotion_queue()->insert(
target, JSFunction::kNonWeakFieldsEndOffset);
} else {
heap->promotion_queue()->insert(target, object_size);
} }
}
heap->IncrementPromotedObjectsSize(object_size);
return true;
}
return false;
}
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
*slot = target;
MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) { template<ObjectContents object_contents, int alignment>
if (map->instance_type() == JS_FUNCTION_TYPE) { static inline void EvacuateObject(Map* map,
heap->promotion_queue()->insert( HeapObject** slot,
target, JSFunction::kNonWeakFieldsEndOffset); HeapObject* object,
} else { int object_size) {
heap->promotion_queue()->insert(target, object_size); SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
} SLOW_ASSERT(object->Size() == object_size);
} Heap* heap = map->GetHeap();
heap->IncrementPromotedObjectsSize(object_size); if (!heap->ShouldBePromoted(object->address(), object_size)) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
return; return;
} }
} }
ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
AllocationResult allocation =
heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
// Allocation in the other semi-space may fail due to fragmentation.
// In that case we allocate in the old generation.
if (allocation.IsRetry()) {
if (object_contents == DATA_OBJECT) {
ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
allocation = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
}
HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
if (alignment != kObjectAlignment) { if (PromoteObject<object_contents, alignment>(
target = EnsureDoubleAligned(heap, target, allocation_size); map, slot, object, object_size)) {
return;
} }
// Order is important: slot might be inside of the target if target // If promotion failed, we try to copy the object to the other semi-space
// was allocated over a dead object and slot comes from the store if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
// buffer.
*slot = target; UNREACHABLE();
MigrateObject(heap, object, target, object_size);
heap->IncrementSemiSpaceCopiedObjectSize(object_size);
return;
} }
......
...@@ -1200,10 +1200,8 @@ class Heap { ...@@ -1200,10 +1200,8 @@ class Heap {
void VisitExternalResources(v8::ExternalResourceVisitor* visitor); void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
// Helper function that governs the promotion policy from new space to // An object should be promoted if the object has survived a
// old. If the object's old address lies below the new space's age // scavenge operation.
// mark or if we've already filled the bottom 1/16th of the to space,
// we try to promote this object.
inline bool ShouldBePromoted(Address old_address, int object_size); inline bool ShouldBePromoted(Address old_address, int object_size);
void ClearJSFunctionResultCaches(); void ClearJSFunctionResultCaches();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment