Commit 47684fe8 authored by jgruber, committed by Commit bot

[heap] Don't allocate immovable code in LO space during serialization

Background: the first page of each space is implicitly immovable.
Recently, our builtin code objects have grown large enough to fill
up the first page of code space during initialization. Once that
happens, every subsequent request for immovable code falls back to
large object space, where each allocation occupies its own 512K page.
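
In code, the pre-CL fallback (the lines removed in the heap.cc hunk
below) looked roughly like this; a condensed sketch, not the verbatim
source:

  // Immovable code that landed outside the first (implicitly immovable)
  // code space page, and not already in LO space, was discarded and
  // re-allocated in LO space, costing a 512K page per code object.
  if (!code_space_->FirstPage()->Contains(address) &&
      MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
    CreateFillerObjectAt(result->address(), object_size,
                         ClearRecordedSlots::kNo);
    allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
  }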

This CL mitigates these effects by marking the page containing the
allocation as immovable during snapshot creation, instead of falling
back to LO space.
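
The replacement logic, again as a condensed sketch of the heap.cc hunk
below rather than the verbatim source:

  if (!Heap::IsImmovable(result) &&
      !code_space_->FirstPage()->Contains(address)) {
    if (isolate()->serializer_enabled()) {
      // Snapshot creation: pin the page the object already sits on.
      chunk->MarkNeverEvacuate();
    } else {
      // Runtime: discard and re-allocate in LO space, as before.
    }
  }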

On snapshot builds, this should just work: deserialized pages are
trimmed and marked immovable when deserialization finishes.

However, non-snapshot builds and allocations of immovable CEntryStub
code at runtime are still affected.

BUG=v8:5831

Review-Url: https://codereview.chromium.org/2635973002
Cr-Commit-Position: refs/heads/master@{#42411}
parent c2e8cb19
src/code-stubs.cc
@@ -193,8 +193,7 @@ Handle<Code> CodeStub::GetCode() {
   }

   Activate(code);
-  DCHECK(!NeedsImmovableCode() ||
-         heap->lo_space()->Contains(code) ||
+  DCHECK(!NeedsImmovableCode() || Heap::IsImmovable(code) ||
          heap->code_space()->FirstPage()->Contains(code->address()));
   return Handle<Code>(code, isolate());
 }
src/heap/heap.cc
@@ -3160,6 +3160,11 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
   return Page::FromAddress(address)->SweepingDone();
 }

+bool Heap::IsImmovable(HeapObject* object) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+  return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
+}
+
 void Heap::AdjustLiveBytes(HeapObject* object, int by) {
   // As long as the inspected object is black and we are currently not iterating
   // the heap using HeapIterator, we can update the live byte count. We cannot
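
The new predicate classifies an object as immovable when its page has
been pinned (NeverEvacuate) or when it lives in large object space. A
minimal, hypothetical call site for illustration (the variable name is
a placeholder, not part of this CL):

  HeapObject* obj = ...;  // any heap object
  if (Heap::IsImmovable(obj)) {
    // The GC will never relocate obj, so its address may be embedded
    // directly into generated code.
  }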
@@ -3401,18 +3406,24 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
   if (!allocation.To(&result)) return allocation;

   if (immovable) {
     Address address = result->address();
+    MemoryChunk* chunk = MemoryChunk::FromAddress(address);
     // Code objects which should stay at a fixed address are allocated either
     // in the first page of code space (objects on the first page of each space
-    // are never moved) or in large object space.
-    if (!code_space_->FirstPage()->Contains(address) &&
-        MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
-      // Discard the first code allocation, which was on a page where it could
-      // be moved.
-      CreateFillerObjectAt(result->address(), object_size,
-                           ClearRecordedSlots::kNo);
-      allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
-      if (!allocation.To(&result)) return allocation;
-      OnAllocationEvent(result, object_size);
+    // are never moved), in large object space, or (during snapshot creation)
+    // the containing page is marked as immovable.
+    if (!Heap::IsImmovable(result) &&
+        !code_space_->FirstPage()->Contains(address)) {
+      if (isolate()->serializer_enabled()) {
+        chunk->MarkNeverEvacuate();
+      } else {
+        // Discard the first code allocation, which was on a page where it could
+        // be moved.
+        CreateFillerObjectAt(result->address(), object_size,
+                             ClearRecordedSlots::kNo);
+        allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+        if (!allocation.To(&result)) return allocation;
+        OnAllocationEvent(result, object_size);
+      }
     }
   }
src/heap/heap.h
@@ -753,6 +753,8 @@ class Heap {
   bool CanMoveObjectStart(HeapObject* object);

+  static bool IsImmovable(HeapObject* object);
+
   // Maintain consistency of live bytes during incremental marking.
   void AdjustLiveBytes(HeapObject* object, int by);
src/isolate.h
@@ -1474,6 +1474,7 @@ class Isolate {
   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
+  friend class HeapTester;
   friend class OptimizingCompileDispatcher;
   friend class SweeperThread;
   friend class ThreadManager;
test/cctest/heap/heap-tester.h
@@ -34,6 +34,7 @@
   V(Regress589413)                          \
   V(Regress658718)                          \
   V(Regress670675)                          \
+  V(Regress5831)                            \
   V(WriteBarriersInCopyJSObject)

 #define HEAP_TEST(Name) \
test/cctest/heap/test-heap.cc
@@ -28,6 +28,7 @@
 #include <stdlib.h>
 #include <utility>

+#include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/context-measure.h"
 #include "src/deoptimizer.h"
@@ -6837,5 +6838,57 @@ HEAP_TEST(Regress670675) {
   DCHECK(marking->IsStopped());
 }

+HEAP_TEST(Regress5831) {
+  CcTest::InitializeVM();
+  Heap* heap = CcTest::heap();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope handle_scope(isolate);
+
+  // Used to ensure that the first code space page remains filled.
+  Handle<FixedArray> array = isolate->factory()->NewFixedArray(32);
+
+  {
+    // Ensure that the first code space page is full.
+    CEntryStub stub(isolate, 1);
+    Handle<Code> code = stub.GetCode();
+
+    int i = 0;
+    array = FixedArray::SetAndGrow(array, i++, code);
+
+    while (heap->code_space()->FirstPage()->Contains(code->address())) {
+      code = isolate->factory()->CopyCode(code);
+      array = FixedArray::SetAndGrow(array, i++, code);
+    }
+  }
+
+  class ImmovableCEntryStub : public i::CEntryStub {
+   public:
+    explicit ImmovableCEntryStub(i::Isolate* isolate)
+        : i::CEntryStub(isolate, 3, i::kSaveFPRegs, i::kArgvOnStack, true) {}
+    bool NeedsImmovableCode() override { return true; }
+  };
+
+  ImmovableCEntryStub stub(isolate);
+
+  {
+    // Make sure the code object has not yet been generated.
+    Code* code;
+    CHECK(!stub.FindCodeInCache(&code));
+  }
+
+  // Fake a serializer run.
+  isolate->serializer_enabled_ = true;
+
+  // Generate the code.
+  Handle<Code> code = stub.GetCode();
+  CHECK(code->Size() <= i::kMaxRegularHeapObjectSize);
+  CHECK(!heap->code_space()->FirstPage()->Contains(code->address()));
+
+  // Ensure it's not in large object space.
+  MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
+  CHECK(chunk->owner()->identity() != LO_SPACE);
+  CHECK(chunk->NeverEvacuate());
+}
+
 }  // namespace internal
 }  // namespace v8
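
The regression test proceeds in three steps: fill the first code space
page with CEntryStub copies (kept alive through a FixedArray), fake an
active serializer via the new HeapTester friend access, then request an
immovable stub and check that it ended up on a pinned regular page
rather than in LO space. Assuming a standard cctest build, it could be
run with something like (path illustrative):

  out/x64.release/cctest test-heap/Regress5831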