Commit 6bfee50e authored by jkummerow, committed by Commit bot

[deserializer] Make large object deserialization GC safe

When black allocation is turned on at deserialization time, slots in
deserialized objects must be visited by the incremental marker. For
spaces with reservations this has always been done; this patch adds the
same treatment for large object space, which has special handling.

Additionally, we must ensure that no incremental marking steps that
might cause marking to finish are performed while an
AlwaysAllocateScope is active.

BUG=chromium:718859

Review-Url: https://codereview.chromium.org/2868103002
Cr-Commit-Position: refs/heads/master@{#45231}
parent 61101232
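
The hunks below implement both halves of the fix. As a rough, self-contained model of the large-object half (all types here are simplified stand-ins, with std::vector in place of V8's List and a no-op IterateBlackObject; this is not V8's real API), the deserializer records every large object it allocates and the heap later re-visits exactly those objects whenever black allocation is active:

```cpp
#include <vector>

// Simplified stand-in for a heap object; real V8 objects carry a map,
// size, and pointer fields that the marker must visit.
struct HeapObject {};

class IncrementalMarking {
 public:
  bool black_allocation() const { return black_allocation_; }
  void StartBlackAllocation() { black_allocation_ = true; }
  // In V8 this re-visits the object's slots so the marker records any
  // references; here it is just a placeholder.
  void IterateBlackObject(HeapObject* object) { (void)object; }

 private:
  bool black_allocation_ = false;
};

class Heap {
 public:
  IncrementalMarking* incremental_marking() { return &marking_; }

  // Mirrors the CL: reservation-backed spaces are walked chunk by chunk
  // (omitted here), while large objects arrive as an explicit list
  // because large object space does not use reservations.
  void RegisterDeserializedObjectsForBlackAllocation(
      std::vector<HeapObject*>* large_objects) {
    if (!incremental_marking()->black_allocation()) return;
    for (HeapObject* object : *large_objects) {
      incremental_marking()->IterateBlackObject(object);
    }
  }

 private:
  IncrementalMarking marking_;
};

int main() {
  Heap heap;
  heap.incremental_marking()->StartBlackAllocation();
  HeapObject large_code_object;
  std::vector<HeapObject*> deserialized_large_objects = {&large_code_object};
  // A deserializer would call this once all objects are materialized.
  heap.RegisterDeserializedObjectsForBlackAllocation(
      &deserialized_large_objects);
  return 0;
}
```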
src/heap/heap.cc
@@ -4238,34 +4238,39 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
   }
 }
 
-void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
+void Heap::RegisterDeserializedObjectsForBlackAllocation(
+    Reservation* reservations, List<HeapObject*>* large_objects) {
   // TODO(hpayer): We do not have to iterate reservations on black objects
   // for marking. We just have to execute the special visiting side effect
   // code that adds objects to global data structures, e.g. for array buffers.
-  if (incremental_marking()->black_allocation()) {
-    // Iterate black objects in old space, code space, map space, and large
-    // object space for side effects.
-    for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
-      const Heap::Reservation& res = reservations[i];
-      for (auto& chunk : res) {
-        Address addr = chunk.start;
-        while (addr < chunk.end) {
-          HeapObject* obj = HeapObject::FromAddress(addr);
-          // There might be grey objects due to black to grey transitions in
-          // incremental marking. E.g. see VisitNativeContextIncremental.
-          DCHECK(
-              ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)));
-          if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
-            incremental_marking()->IterateBlackObject(obj);
-          }
-          addr += obj->Size();
-        }
-      }
-    }
-    // We potentially deserialized wrappers which require registering with the
-    // embedder as the marker will not find them.
-    local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+  if (!incremental_marking()->black_allocation()) return;
+
+  // Iterate black objects in old space, code space, map space, and large
+  // object space for side effects.
+  for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+    const Heap::Reservation& res = reservations[i];
+    for (auto& chunk : res) {
+      Address addr = chunk.start;
+      while (addr < chunk.end) {
+        HeapObject* obj = HeapObject::FromAddress(addr);
+        // There might be grey objects due to black to grey transitions in
+        // incremental marking. E.g. see VisitNativeContextIncremental.
+        DCHECK(ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)));
+        if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
+          incremental_marking()->IterateBlackObject(obj);
+        }
+        addr += obj->Size();
+      }
+    }
+  }
+  // We potentially deserialized wrappers which require registering with the
+  // embedder as the marker will not find them.
+  local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+
+  // Large object space doesn't use reservations, so it needs custom handling.
+  for (HeapObject* object : *large_objects) {
+    incremental_marking()->IterateBlackObject(object);
+  }
 }
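
Two details of the rewrite are worth noting: the black-allocation check is inverted into an early return, which removes one nesting level from the reservation walk, and the large objects are passed as an explicit list because large object space places each object on its own chunk rather than allocating out of a reservation, so the chunk-walking loop above never sees them.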

@@ -5508,7 +5513,9 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
 // The kSoftLimit means that incremental marking should be started soon.
 // The kHardLimit means that incremental marking should be started immediately.
 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
-  if (!incremental_marking()->CanBeActivated() ||
+  // Code using an AlwaysAllocateScope assumes that the GC state does not
+  // change; that implies that no marking steps must be performed.
+  if (!incremental_marking()->CanBeActivated() || always_allocate() ||
       PromotedSpaceSizeOfObjects() <=
           IncrementalMarking::kActivationThreshold) {
     // Incremental marking is disabled or it is too early to start.

src/heap/heap.h
@@ -1245,7 +1245,8 @@ class Heap {
   void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
 
-  void RegisterReservationsForBlackAllocation(Reservation* reservations);
+  void RegisterDeserializedObjectsForBlackAllocation(
+      Reservation* reservations, List<HeapObject*>* large_objects);
 
   IncrementalMarking* incremental_marking() { return incremental_marking_; }

src/heap/incremental-marking.cc
@@ -1118,8 +1118,10 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
 }
 
 void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
+  // Code using an AlwaysAllocateScope assumes that the GC state does not
+  // change; that implies that no marking steps must be performed.
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
-      (state_ != SWEEPING && state_ != MARKING)) {
+      (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
     return;
   }
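
The new heap_->always_allocate() guard is backed by an RAII scope. A minimal sketch of that pattern follows (a plain counter for illustration; V8 keeps the count on the Heap and updates it atomically in the real code):

```cpp
// While at least one AlwaysAllocateScope is alive, the heap reports
// always_allocate() == true and allocation-triggered marking steps must
// bail out, keeping the GC state stable for the scope's duration.
class Heap {
 public:
  bool always_allocate() const { return always_allocate_scope_count_ > 0; }

 private:
  friend class AlwaysAllocateScope;
  int always_allocate_scope_count_ = 0;
};

class AlwaysAllocateScope {
 public:
  explicit AlwaysAllocateScope(Heap* heap) : heap_(heap) {
    ++heap_->always_allocate_scope_count_;
  }
  ~AlwaysAllocateScope() { --heap_->always_allocate_scope_count_; }

 private:
  Heap* heap_;
};

void AdvanceIncrementalMarkingOnAllocation(Heap* heap) {
  if (heap->always_allocate()) return;  // GC state must not change here.
  // ...otherwise perform an incremental marking step...
}

int main() {
  Heap heap;
  {
    AlwaysAllocateScope scope(&heap);
    AdvanceIncrementalMarkingOnAllocation(&heap);  // bails out immediately
  }
  AdvanceIncrementalMarkingOnAllocation(&heap);  // free to step again
  return 0;
}
```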

src/snapshot/deserializer.cc
@@ -148,7 +148,8 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
   DeserializeDeferredObjects();
   DeserializeEmbedderFields(embedder_fields_deserializer);
 
-  isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
+  isolate->heap()->RegisterDeserializedObjectsForBlackAllocation(
+      reservations_, &deserialized_large_objects_);
 
   // There's no code deserialized here. If this assert fires then that's
   // changed and logging should be added to notify the profiler et al of the
@@ -172,7 +173,8 @@ MaybeHandle<HeapObject> Deserializer::DeserializeObject(Isolate* isolate) {
     DeserializeDeferredObjects();
     FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
     result = Handle<HeapObject>(HeapObject::cast(root));
-    isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
+    isolate->heap()->RegisterDeserializedObjectsForBlackAllocation(
+        reservations_, &deserialized_large_objects_);
   }
   CommitPostProcessedObjects(isolate);
   return scope.CloseAndEscape(result);

test/cctest/test-serialize.cc
@@ -1126,6 +1126,87 @@ TEST(CodeSerializerLargeCodeObject) {
   source.Dispose();
 }
 
+TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
+  FLAG_serialize_toplevel = true;
+  FLAG_always_opt = false;
+  // This test relies on (full-codegen) code objects going to large object
+  // space. Once FCG goes away, it must either be redesigned (to put some
+  // other large deserialized object into LO space), or it can be deleted.
+  FLAG_ignition = false;
+  const char* filter_flag = "--turbo-filter=NOTHING";
+  FlagList::SetFlagsFromString(filter_flag, StrLength(filter_flag));
+  FLAG_black_allocation = true;
+  FLAG_manual_evacuation_candidates_selection = true;
+
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  Vector<const uint8_t> source = ConstructSource(
+      STATIC_CHAR_VECTOR("var j=1; if (j == 0) {"),
+      STATIC_CHAR_VECTOR("for (var i = 0; i < Object.prototype; i++);"),
+      STATIC_CHAR_VECTOR("} j=7; var s = 'happy_hippo'; j"), 2100);
+  Handle<String> source_str =
+      isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
+
+  // Create a string on an evacuation candidate in old space.
+  Handle<String> moving_object;
+  Page* ec_page;
+  {
+    AlwaysAllocateScope always_allocate(isolate);
+    heap::SimulateFullSpace(heap->old_space());
+    moving_object = isolate->factory()->InternalizeString(
+        isolate->factory()->NewStringFromAsciiChecked("happy_hippo"));
+    ec_page = Page::FromAddress(moving_object->address());
+  }
+
+  Handle<JSObject> global(isolate->context()->global_object());
+  ScriptData* cache = NULL;
+
+  Handle<SharedFunctionInfo> orig =
+      CompileScript(isolate, source_str, Handle<String>(), &cache,
+                    v8::ScriptCompiler::kProduceCodeCache);
+
+  CHECK(heap->InSpace(orig->abstract_code(), LO_SPACE));
+
+  // Pretend that incremental marking is on when deserialization begins.
+  heap::ForceEvacuationCandidate(ec_page);
+  heap::SimulateIncrementalMarking(heap, false);
+  IncrementalMarking* marking = heap->incremental_marking();
+  marking->StartBlackAllocationForTesting();
+  CHECK(marking->IsCompacting());
+  CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*moving_object));
+
+  Handle<SharedFunctionInfo> copy;
+  {
+    DisallowCompilation no_compile_expected(isolate);
+    copy = CompileScript(isolate, source_str, Handle<String>(), &cache,
+                         v8::ScriptCompiler::kConsumeCodeCache);
+  }
+  CHECK_NE(*orig, *copy);
+
+  // We should have missed a write barrier. Complete incremental marking
+  // to flush out the bug.
+  heap::SimulateIncrementalMarking(heap, true);
+  CcTest::CollectAllGarbage();
+
+  Handle<JSFunction> copy_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          copy, isolate->native_context());
+
+  Handle<Object> copy_result =
+      Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+
+  int result_int;
+  CHECK(copy_result->ToInt32(&result_int));
+  CHECK_EQ(7, result_int);
+
+  delete cache;
+  source.Dispose();
+}
+
 TEST(CodeSerializerLargeStrings) {
   FLAG_serialize_toplevel = true;
   LocalContext context;
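
The regression test stages the bug end to end: it compiles a script whose full-codegen code object is large enough for large object space, starts incremental marking with black allocation and forces an evacuation candidate holding the "happy_hippo" string, then deserializes the code cache in that state. Finishing incremental marking and running a full GC afterwards is what flushes out the missed write barrier: without the large-object registration above, the marker never visits the deserialized code object's slots, so the evacuated string's new location is never recorded and the final call would not cleanly return 7.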