Commit 51571d8f authored by hpayer@chromium.org

Do not left-trim arrays when concurrent sweeping is active.

BUG=
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/207613004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20238 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 95f61f92
...@@ -567,7 +567,7 @@ BUILTIN(ArrayShift) { ...@@ -567,7 +567,7 @@ BUILTIN(ArrayShift) {
first = isolate->factory()->undefined_value(); first = isolate->factory()->undefined_value();
} }
if (!heap->lo_space()->Contains(*elms_obj)) { if (!heap->CanMoveObjectStart(*elms_obj)) {
array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1)); array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
} else { } else {
// Shift the elements. // Shift the elements.
...@@ -891,8 +891,24 @@ BUILTIN(ArraySplice) { ...@@ -891,8 +891,24 @@ BUILTIN(ArraySplice) {
heap->MoveElements(*elms, delta, 0, actual_start); heap->MoveElements(*elms, delta, 0, actual_start);
} }
elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta)); if (heap->CanMoveObjectStart(*elms_obj)) {
// On the fast path we move the start of the object in memory.
elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta));
} else {
// This is the slow path. We are going to move the elements to the left
// by copying them. For trimmed values we store the hole.
if (elms_obj->IsFixedDoubleArray()) {
Handle<FixedDoubleArray> elms =
Handle<FixedDoubleArray>::cast(elms_obj);
MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
FillWithHoles(*elms, len - delta, len);
} else {
Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
heap->MoveElements(*elms, 0, delta, len - delta);
FillWithHoles(heap, *elms, len - delta, len);
}
}
elms_changed = true; elms_changed = true;
} else { } else {
if (elms_obj->IsFixedDoubleArray()) { if (elms_obj->IsFixedDoubleArray()) {
......
...@@ -3968,6 +3968,21 @@ void Heap::CreateFillerObjectAt(Address addr, int size) { ...@@ -3968,6 +3968,21 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
} }
// Returns true if it is safe to relocate the start address of |object|
// in memory (as done when left-trimming a FixedArray).
bool Heap::CanMoveObjectStart(HeapObject* object) {
  Address address = object->address();
  // Query both old-space predicates up front (matches the original
  // evaluation order; both are evaluated unconditionally).
  bool in_old_pointer = InOldPointerSpace(address);
  bool in_old_data = InOldDataSpace(address);
  // Objects in large-object space are never moved.
  if (lo_space()->Contains(object)) return false;
  // Outside of old space the start can always be moved.
  if (!in_old_pointer && !in_old_data) return true;
  // In old space, moving the object start is only safe when the page is
  // not (or no longer) being swept by a concurrent sweeper thread, i.e.
  // its sweeping state is at most PARALLEL_SWEEPING_FINALIZE.
  return Page::FromAddress(address)->parallel_sweeping() <=
         MemoryChunk::PARALLEL_SWEEPING_FINALIZE;
}
void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) { void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
if (incremental_marking()->IsMarking() && if (incremental_marking()->IsMarking() &&
Marking::IsBlack(Marking::MarkBitFrom(address))) { Marking::IsBlack(Marking::MarkBitFrom(address))) {
......
...@@ -1177,6 +1177,8 @@ class Heap { ...@@ -1177,6 +1177,8 @@ class Heap {
// when shortening objects. // when shortening objects.
void CreateFillerObjectAt(Address addr, int size); void CreateFillerObjectAt(Address addr, int size);
bool CanMoveObjectStart(HeapObject* object);
enum InvocationMode { FROM_GC, FROM_MUTATOR }; enum InvocationMode { FROM_GC, FROM_MUTATOR };
// Maintain marking consistency for IncrementalMarking. // Maintain marking consistency for IncrementalMarking.
......
...@@ -4086,6 +4086,7 @@ void MarkCompactCollector::SweepInParallel(PagedSpace* space) { ...@@ -4086,6 +4086,7 @@ void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
if (p->TryParallelSweeping()) { if (p->TryParallelSweeping()) {
SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p); SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
free_list->Concatenate(&private_free_list); free_list->Concatenate(&private_free_list);
p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
} }
} }
} }
...@@ -4284,10 +4285,11 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { ...@@ -4284,10 +4285,11 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
PageIterator it(space); PageIterator it(space);
while (it.has_next()) { while (it.has_next()) {
Page* p = it.next(); Page* p = it.next();
if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_IN_PROGRESS) { if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE); p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
p->MarkSweptConservatively(); p->MarkSweptConservatively();
} }
ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
} }
} }
......
...@@ -468,13 +468,16 @@ class MemoryChunk { ...@@ -468,13 +468,16 @@ class MemoryChunk {
intptr_t GetFlags() { return flags_; } intptr_t GetFlags() { return flags_; }
// PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
// PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept or was
// swept by a sweeper thread.
// PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
// sweeping must not be performed on that page. // sweeping must not be performed on that page.
// PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
// page and will not touch the page memory anymore.
// PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
// sweeper thread.
// PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
enum ParallelSweepingState { enum ParallelSweepingState {
PARALLEL_SWEEPING_DONE, PARALLEL_SWEEPING_DONE,
PARALLEL_SWEEPING_FINALIZE,
PARALLEL_SWEEPING_IN_PROGRESS, PARALLEL_SWEEPING_IN_PROGRESS,
PARALLEL_SWEEPING_PENDING PARALLEL_SWEEPING_PENDING
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment