Commit fc663faa authored by Hannes Payer, committed by Commit Bot

Abort optimized code compilation gracefully when code allocation fails.

Currently we throw a fatal out-of-memory error.

Bug: chromium:840329
Change-Id: I736dee890b6a338b458c9a4cc1c3fbb95e95742b
Reviewed-on: https://chromium-review.googlesource.com/1050285
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53123}
parent 93bcce68
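
In short: where NewCode() would eventually hit the heap's fatal out-of-memory path, TryNewCode() returns an empty MaybeHandle<Code> that the compiler can unwind from. A minimal sketch of the two calling conventions (illustrative only; the factory pointer and arguments are placeholders):

    // Old path: NewCode() either returns a valid handle or never returns,
    // because allocation failure ends in a fatal out-of-memory error.
    Handle<Code> code = factory->NewCode(desc, kind, self_reference);

    // New path: TryNewCode() may fail; the caller checks and bails out.
    Handle<Code> code;
    if (!factory->TryNewCode(desc, kind, self_reference).ToHandle(&code)) {
      // Abort optimized compilation gracefully instead of crashing.
    }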
@@ -383,19 +383,24 @@ Handle<Code> CodeGenerator::FinalizeCode() {
     unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
   }
 
-  Handle<Code> result = isolate()->factory()->NewCode(
+  MaybeHandle<Code> maybe_code = isolate()->factory()->TryNewCode(
       desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
       source_positions, deopt_data, kMovable, info()->stub_key(), true,
       frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
       handler_table_offset_);
+  Handle<Code> code;
+  if (!maybe_code.ToHandle(&code)) {
+    tasm()->AbortedCodeGeneration();
+    return Handle<Code>();
+  }
   isolate()->counters()->total_compiled_code_size()->Increment(
-      result->raw_instruction_size());
+      code->raw_instruction_size());
   LOG_CODE_EVENT(isolate(),
-                 CodeLinePosInfoRecordEvent(result->raw_instruction_start(),
+                 CodeLinePosInfoRecordEvent(code->raw_instruction_start(),
                                             *source_positions));
-  return result;
+  return code;
 }
......
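
The key detail above is the bailout: when TryNewCode() yields an empty handle, the code generator notifies its assembler via tasm()->AbortedCodeGeneration() and returns an empty Handle<Code>. A hypothetical caller (not part of this diff; names and the status value are illustrative) would then treat the empty handle as a compilation failure rather than an OOM crash:

    Handle<Code> code = code_generator->FinalizeCode();
    if (code.is_null()) {
      // Code-space allocation failed; keep running unoptimized code.
      return CompilationJob::FAILED;  // illustrative status value
    }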
This diff is collapsed.
@@ -735,6 +735,20 @@ class V8_EXPORT_PRIVATE Factory {
       int safepoint_table_offset = 0,
       int handler_table_offset = 0);
 
+  // Like NewCode, this function allocates a new code object (fully
+  // initialized). It may return an empty handle if the allocation does not
+  // succeed.
+  V8_WARN_UNUSED_RESULT MaybeHandle<Code> TryNewCode(
+      const CodeDesc& desc, Code::Kind kind, Handle<Object> self_reference,
+      int32_t builtin_index = Builtins::kNoBuiltinId,
+      MaybeHandle<ByteArray> maybe_source_position_table =
+          MaybeHandle<ByteArray>(),
+      MaybeHandle<DeoptimizationData> maybe_deopt_data =
+          MaybeHandle<DeoptimizationData>(),
+      Movability movability = kMovable, uint32_t stub_key = 0,
+      bool is_turbofanned = false, int stack_slots = 0,
+      int safepoint_table_offset = 0, int handler_table_offset = 0);
+
   // Allocates a new, empty code object for use by builtin deserialization. The
   // given {size} argument specifies the size of the entire code object.
   // Can only be used when code space is unprotected and requires manual
......
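
TryNewCode() mirrors NewCode()'s signature but returns MaybeHandle<Code>, and V8_WARN_UNUSED_RESULT makes ignoring that result a compile-time warning. The standard consumption patterns look like this (a sketch; the variables are placeholders):

    MaybeHandle<Code> maybe_code = factory->TryNewCode(desc, kind, self_ref);
    Handle<Code> code;
    if (maybe_code.ToHandle(&code)) {
      // Success: |code| is a usable handle.
    } else {
      // Failure: the handle is empty; propagate the failure upward.
    }
    // Alternatively, maybe_code.ToHandleChecked() restores the old crashing
    // behavior by asserting that the handle is non-empty.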
@@ -4522,10 +4522,36 @@ void Heap::DisableInlineAllocation() {
   }
 }
 
-HeapObject* Heap::AllocateRawWithRetry(int size, AllocationSpace space,
-                                       AllocationAlignment alignment) {
-  AllocationResult alloc = AllocateRaw(size, space, alignment);
+HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
+                                      int object_size) {
+  // Code objects which should stay at a fixed address are allocated either
+  // in the first page of code space, in large object space, or (during
+  // snapshot creation) the containing page is marked as immovable.
+  DCHECK(heap_object);
+  DCHECK(code_space_->Contains(heap_object));
+  DCHECK_GE(object_size, 0);
+  if (!Heap::IsImmovable(heap_object)) {
+    if (isolate()->serializer_enabled() ||
+        code_space_->FirstPage()->Contains(heap_object->address())) {
+      MemoryChunk::FromAddress(heap_object->address())->MarkNeverEvacuate();
+    } else {
+      // Discard the first code allocation, which was on a page where it could
+      // be moved.
+      CreateFillerObjectAt(heap_object->address(), object_size,
+                           ClearRecordedSlots::kNo);
+      heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
+      UnprotectAndRegisterMemoryChunk(heap_object);
+      ZapCodeObject(heap_object->address(), object_size);
+      OnAllocationEvent(heap_object, object_size);
+    }
+  }
+  return heap_object;
+}
+
+HeapObject* Heap::AllocateRawWithLigthRetry(int size, AllocationSpace space,
+                                            AllocationAlignment alignment) {
   HeapObject* result;
+  AllocationResult alloc = AllocateRaw(size, space, alignment);
   if (alloc.To(&result)) {
     DCHECK(result != exception());
     return result;
@@ -4540,6 +4566,15 @@ HeapObject* Heap::AllocateRawWithRetry(int size, AllocationSpace space,
       return result;
     }
   }
+  return nullptr;
+}
+
+HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
+                                             AllocationAlignment alignment) {
+  AllocationResult alloc;
+  HeapObject* result = AllocateRawWithLigthRetry(size, space, alignment);
+  if (result) return result;
   isolate()->counters()->gc_last_resort_from_handles()->Increment();
   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
   {
......
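
The split above layers the allocation policy: AllocateRawWithLigthRetry() (sic) performs the GC-and-retry loop and returns nullptr on exhaustion, while AllocateRawWithRetryOrFail() reuses it and only then escalates to the last-resort GC and, ultimately, a fatal error. Presumably the collapsed factory.cc diff maps that nullptr onto an empty MaybeHandle; a sketch of that wiring (an assumption, since the file is not shown):

    // Inside a TryNewCode()-style allocation path (assumed, not shown above):
    HeapObject* result =
        heap->AllocateRawWithLigthRetry(object_size, CODE_SPACE);
    if (result == nullptr) return MaybeHandle<Code>();  // graceful failure
    // ... otherwise initialize the Code object exactly as NewCode() would ...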
@@ -2157,7 +2157,22 @@ class Heap {
       int size_in_bytes, AllocationSpace space,
       AllocationAlignment aligment = kWordAligned);
 
-  HeapObject* AllocateRawWithRetry(
+  // This method will try to perform an allocation of a given size in a given
+  // space. If the allocation fails, a regular full garbage collection is
+  // triggered and the allocation is retried. This is performed multiple times.
+  // If after that retry procedure the allocation still fails nullptr is
+  // returned.
+  HeapObject* AllocateRawWithLigthRetry(
       int size, AllocationSpace space,
       AllocationAlignment alignment = kWordAligned);
+
+  // This method will try to perform an allocation of a given size in a given
+  // space. If the allocation fails, a regular full garbage collection is
+  // triggered and the allocation is retried. This is performed multiple times.
+  // If after that retry procedure the allocation still fails a "hammer"
+  // garbage collection is triggered which tries to significantly reduce memory.
+  // If the allocation still fails after that a fatal error is thrown.
+  HeapObject* AllocateRawWithRetryOrFail(
+      int size, AllocationSpace space,
+      AllocationAlignment alignment = kWordAligned);
+
   HeapObject* AllocateRawCodeInLargeObjectSpace(int size);
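
From a caller's perspective the contract difference is simple: the OrFail variant never returns nullptr, the light variant can. An illustrative call site (assumed, not part of this diff):

    // Ordinary object allocation keeps the crashing guarantee:
    HeapObject* obj = AllocateRawWithRetryOrFail(size, OLD_SPACE);
    DCHECK_NOT_NULL(obj);

    // Code allocation tolerates failure so compilation can be aborted:
    HeapObject* code = AllocateRawWithLigthRetry(size, CODE_SPACE);
    if (code == nullptr) {
      // Give up on this optimized code object.
    }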
@@ -2166,6 +2181,11 @@ class Heap {
   V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map* map,
                                                   AllocationSpace space);
 
+  // Takes a code object and checks if it is on memory which is not subject to
+  // compaction. This method will return a new code object on an immovable
+  // memory location if the original code object was movable.
+  HeapObject* EnsureImmovableCode(HeapObject* heap_object, int object_size);
+
   // Allocates a partial map for bootstrapping.
   V8_WARN_UNUSED_RESULT AllocationResult
   AllocatePartialMap(InstanceType instance_type, int instance_size);
......
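
EnsureImmovableCode() complements the allocation change for callers that request kImmovable code: the first allocation is made normally, and only if it landed on a movable page is it discarded and redone in large object space. A hypothetical call site (an assumption; the real one is in the collapsed factory.cc diff):

    HeapObject* result =
        heap->AllocateRawWithLigthRetry(object_size, CODE_SPACE);
    if (result == nullptr) return MaybeHandle<Code>();
    if (movability == kImmovable) {
      // Either marks the page as never-evacuated or reallocates the object
      // in large object space at a fixed address.
      result = heap->EnsureImmovableCode(result, object_size);
    }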