Commit fdc1745e authored by ulan, committed by Commit bot

Respect old space allocation limit in PagedSpace::AllocateRaw.

BUG=v8:3976
LOG=NO

Review URL: https://codereview.chromium.org/1025643002

Cr-Commit-Position: refs/heads/master@{#27364}
parent 9cbf6c7d
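
In short: old-space allocation is supposed to fail once the old generation allocation limit is reached, so that the caller triggers a mark-sweep collection; before this patch the slow path behind PagedSpace::AllocateRaw could fall through and keep expanding the space past that limit. A minimal sketch of the caller-side retry protocol this relies on (the call-site code is illustrative, not taken from the V8 sources):

// Sketch of the soft-limit retry protocol (illustrative call site).
AllocationResult result = old_space->AllocateRaw(size_in_bytes);
if (result.IsRetry()) {
  // The allocation failed on purpose: the soft limit was reached.
  // Collect garbage and retry; with this patch the limit is actually
  // enforced, so this path is reachable again.
  heap->CollectGarbage(old_space->identity(), "allocation limit reached");
  result = old_space->AllocateRaw(size_in_bytes);
}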
@@ -698,18 +698,12 @@ void Heap::CompletelyClearInstanceofCache() {
 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
     : heap_(isolate->heap()), daf_(isolate) {
-  // We shouldn't hit any nested scopes, because that requires
-  // non-handle code to call handle code. The code still works but
-  // performance will degrade, so we want to catch this situation
-  // in debug mode.
-  DCHECK(heap_->always_allocate_scope_depth_ == 0);
   heap_->always_allocate_scope_depth_++;
 }

 AlwaysAllocateScope::~AlwaysAllocateScope() {
   heap_->always_allocate_scope_depth_--;
-  DCHECK(heap_->always_allocate_scope_depth_ == 0);
 }
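With the comment and the DCHECKs removed, AlwaysAllocateScope becomes a plain RAII depth counter and may now nest; the new scope added to Heap::Scavenge below relies on that. A sketch of the resulting semantics (illustrative, not V8 source):

// While any AlwaysAllocateScope is alive the depth counter is non-zero,
// so allocations are not failed on purpose to trigger a collection.
{
  AlwaysAllocateScope outer(isolate);    // depth 0 -> 1
  {
    AlwaysAllocateScope inner(isolate);  // depth 1 -> 2, legal after this patch
    // Allocations here ignore the old space soft limit.
  }                                      // depth back to 1
}                                        // depth back to 0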
@@ -1516,6 +1516,10 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 void Heap::Scavenge() {
   RelocationLock relocation_lock(this);

+  // There are soft limits in the allocation code, designed to trigger a mark
+  // sweep collection by failing allocations. There is no sense in trying to
+  // trigger one during scavenge: scavenge allocations should always succeed.
+  AlwaysAllocateScope scope(isolate());

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
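The scope is effective because the allocation slow path consults the depth counter before failing on the limit. The accessor is roughly the following (paraphrased from heap.h of that era, so treat it as a sketch):

// Heap::always_allocate(), approximately as declared in heap.h:
bool always_allocate() { return always_allocate_scope_depth_ != 0; }

Inside Scavenge the counter is therefore non-zero, the limit check in PagedSpace::SlowAllocateRaw (next hunk) is skipped, and scavenge-time allocations cannot fail on purpose.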
@@ -2618,7 +2618,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements from their free-lists.
     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
-    if (object != NULL) return object;
+    return object;
   }

   // Try to expand the space and allocate in the new next page.
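This one-line change is the actual fix. The hunk sits inside the limit check of PagedSpace::SlowAllocateRaw; paraphrasing the surrounding code of that era (the identifiers are real, the exact shape is approximate):

// Free-list allocation failed. Fail the allocation if we have hit the old
// generation size limit that should cause a garbage collection.
if (!heap()->always_allocate() &&
    heap()->OldGenerationAllocationLimitReached()) {
  // Last chance: wait for concurrent sweeper threads and retry.
  HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
  // Before the patch, a NULL result fell through to the expansion code
  // below, silently growing old space past its limit. Now NULL is
  // returned, the allocation fails, and the caller triggers a mark-sweep
  // collection instead.
  return object;
}

// Only reached when the limit permits it (or an AlwaysAllocateScope is
// active): try to expand the space and allocate in the new next page.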