Commit 8bef2652 authored by Hannes Payer, committed by Commit Bot

[heap] Add a CodePageCollectionMemoryModificationScope to collect unprotected code pages on the fly.

Bug: chromium:774108
Change-Id: I95bfe672df13a6b8f40ba258c9ea42c6b18f6138
Reviewed-on: https://chromium-review.googlesource.com/949482
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51765}
parent 903c631f
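For orientation before the diff: the new scope is an RAII helper that, while it is alive, lets the allocator register code pages it has made writable and flips every registered page back to read+execute when the scope ends. The sketch below is a simplified, self-contained illustration of that collect-and-reprotect pattern; the names (CodePage, PageRegistry, PageCollectionScope) are hypothetical stand-ins, not the actual V8 API.

// Minimal sketch of the collect-and-reprotect pattern; all names here are
// hypothetical stand-ins, not V8's real types.
#include <unordered_set>

struct CodePage {
  void SetReadAndWritable() { /* e.g. mprotect(..., PROT_READ | PROT_WRITE) */ }
  void SetReadAndExecutable() { /* e.g. mprotect(..., PROT_READ | PROT_EXEC) */ }
};

class PageRegistry {
 public:
  void Enable() { enabled_ = true; }
  void Disable() { enabled_ = false; }

  // Unprotect a page only the first time it is registered; repeated
  // registrations of the same page are cheap no-ops.
  void UnprotectAndRegister(CodePage* page) {
    if (enabled_ && pages_.insert(page).second) page->SetReadAndWritable();
  }

  // Flip every collected page back to read+execute and forget them.
  void ProtectAll() {
    for (CodePage* page : pages_) page->SetReadAndExecutable();
    pages_.clear();
  }

 private:
  bool enabled_ = false;
  std::unordered_set<CodePage*> pages_;
};

// RAII scope mirroring the role of CodePageCollectionMemoryModificationScope:
// collection is enabled for the scope's lifetime and undone on destruction.
class PageCollectionScope {
 public:
  explicit PageCollectionScope(PageRegistry* registry) : registry_(registry) {
    registry_->Enable();
  }
  ~PageCollectionScope() {
    registry_->ProtectAll();
    registry_->Disable();
  }

 private:
  PageRegistry* registry_;
};

int main() {
  PageRegistry registry;
  CodePage page;
  {
    PageCollectionScope scope(&registry);  // pages can now be collected
    registry.UnprotectAndRegister(&page);  // allocator touches a code page
    // ... write generated code into the page here ...
  }                                        // scope exit: page re-protected
}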
@@ -300,7 +300,14 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
// NEW_SPACE is not allowed here.
UNREACHABLE();
}
if (allocation.To(&object)) {
if (space == CODE_SPACE) {
// Unprotect the memory chunk of the object if it was not unprotected
// already.
UnprotectAndRegisterMemoryChunk(object);
ZapCodeObject(object->address(), size_in_bytes);
}
OnAllocationEvent(object, size_in_bytes);
}
@@ -644,6 +651,7 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
: heap_(heap) {
DCHECK(!heap_->unprotected_memory_chunks_registry_enabled());
if (heap_->write_protect_code_memory()) {
heap_->increment_code_space_memory_modification_scope_depth();
heap_->code_space()->SetReadAndWritable();
@@ -659,6 +667,7 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
}
CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
DCHECK(!heap_->unprotected_memory_chunks_registry_enabled());
if (heap_->write_protect_code_memory()) {
heap_->decrement_code_space_memory_modification_scope_depth();
heap_->code_space()->SetReadAndExecutable();
@@ -673,6 +682,24 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
}
}
CodePageCollectionMemoryModificationScope::
CodePageCollectionMemoryModificationScope(Heap* heap)
: heap_(heap) {
if (heap_->write_protect_code_memory() &&
!heap_->code_space_memory_modification_scope_depth()) {
heap_->EnableUnprotectedMemoryChunksRegistry();
}
}
CodePageCollectionMemoryModificationScope::
~CodePageCollectionMemoryModificationScope() {
if (heap_->write_protect_code_memory() &&
!heap_->code_space_memory_modification_scope_depth()) {
heap_->ProtectUnprotectedMemoryChunks();
heap_->DisableUnprotectedMemoryChunksRegistry();
}
}
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
MemoryChunk* chunk)
: chunk_(chunk),
......
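The constructor and destructor in the hunk above only enable the registry when no CodeSpaceMemoryModificationScope is active, i.e. the page-collection scope is a no-op if the whole code space is already writable. A hedged, standalone sketch of that nesting rule follows; the names are illustrative and the write_protect_code_memory() check is omitted for brevity.

#include <cassert>

// Illustrative stand-ins, not the V8 API.
struct State {
  int code_space_scope_depth = 0;  // CodeSpaceMemoryModificationScope depth
  bool registry_enabled = false;   // unprotected-chunk registry on/off
};

struct CodeSpaceScope {  // makes the whole code space writable
  explicit CodeSpaceScope(State* s) : s_(s) { s_->code_space_scope_depth++; }
  ~CodeSpaceScope() { s_->code_space_scope_depth--; }
  State* s_;
};

struct CollectionScope {  // per-page collection, suppressed when nested
  explicit CollectionScope(State* s) : s_(s) {
    if (s_->code_space_scope_depth == 0) s_->registry_enabled = true;
  }
  ~CollectionScope() {
    if (s_->code_space_scope_depth == 0) s_->registry_enabled = false;
  }
  State* s_;
};

int main() {
  State state;
  {
    CodeSpaceScope outer(&state);
    CollectionScope inner(&state);  // no-op: outer scope already active
    assert(!state.registry_enabled);
  }
  {
    CollectionScope alone(&state);  // active: no outer scope
    assert(state.registry_enabled);
  }
  assert(!state.registry_enabled);
}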
@@ -233,7 +233,8 @@ Heap::Heap()
use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr)
pending_layout_change_object_(nullptr),
unprotected_memory_chunks_registry_enabled_(false)
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
,
allocation_timeout_(0)
@@ -2162,6 +2163,27 @@ void Heap::ComputeFastPromotionMode(double survival_rate) {
}
}
void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
if (unprotected_memory_chunks_registry_enabled_ &&
unprotected_memory_chunks_.insert(chunk).second) {
chunk->SetReadAndWritable();
}
}
void Heap::UnprotectAndRegisterMemoryChunk(HeapObject* object) {
UnprotectAndRegisterMemoryChunk(MemoryChunk::FromAddress(object->address()));
}
void Heap::ProtectUnprotectedMemoryChunks() {
DCHECK(unprotected_memory_chunks_registry_enabled_);
for (auto chunk = unprotected_memory_chunks_.begin();
chunk != unprotected_memory_chunks_.end(); chunk++) {
CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
(*chunk)->SetReadAndExecutable();
}
unprotected_memory_chunks_.clear();
}
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -3187,6 +3209,7 @@ AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
HeapObject* result = nullptr;
if (!allocation.To(&result)) return allocation;
if (movability == kImmovable) {
Address address = result->address();
MemoryChunk* chunk = MemoryChunk::FromAddress(address);
@@ -3205,6 +3228,9 @@ AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
if (!allocation.To(&result)) return allocation;
OnAllocationEvent(result, object_size);
// The old allocation was discarded. We have to unprotect the new
// allocation target.
UnprotectAndRegisterMemoryChunk(result);
}
}
}
@@ -3239,7 +3265,8 @@ AllocationResult Heap::AllocateCode(
int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
Code* code = nullptr;
CodeSpaceMemoryModificationScope code_allocation(this);
CodePageCollectionMemoryModificationScope code_allocation(this);
AllocationResult allocation = AllocateCode(object_size, movability);
if (!allocation.To(&code)) return allocation;
@@ -4976,6 +5003,14 @@ void Heap::ZapFromSpace() {
}
}
void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
reinterpret_cast<Object**>(start_address)[i] = Smi::FromInt(kCodeZapValue);
}
#endif
}
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
......
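The heap.cc hunk above leans on std::unordered_set::insert returning a pair whose .second member says whether the element was newly inserted, so UnprotectAndRegisterMemoryChunk only pays the page-protection switch once per chunk. A tiny standalone example of that idiom:

#include <cassert>
#include <unordered_set>

int main() {
  std::unordered_set<int*> registered;
  int chunk = 0;
  // First insertion: .second is true, so the expensive work would run.
  assert(registered.insert(&chunk).second);
  // Second insertion of the same pointer: .second is false, work is skipped.
  assert(!registered.insert(&chunk).second);
}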
@@ -8,6 +8,7 @@
#include <cmath>
#include <map>
#include <unordered_map>
#include <unordered_set>
#include <vector>
// Clients of this interface shouldn't depend on lots of heap internals.
@@ -818,6 +819,23 @@ class Heap {
code_space_memory_modification_scope_depth_--;
}
void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
void UnprotectAndRegisterMemoryChunk(HeapObject* object);
void ProtectUnprotectedMemoryChunks();
void EnableUnprotectedMemoryChunksRegistry() {
unprotected_memory_chunks_registry_enabled_ = true;
}
void DisableUnprotectedMemoryChunksRegistry() {
unprotected_memory_chunks_registry_enabled_ = false;
}
bool unprotected_memory_chunks_registry_enabled() {
return unprotected_memory_chunks_registry_enabled_;
}
inline HeapState gc_state() { return gc_state_; }
void SetGCState(HeapState state);
@@ -1861,6 +1879,9 @@ class Heap {
// Fill in bogus values in from space
void ZapFromSpace();
// Zaps the memory of a code object.
void ZapCodeObject(Address start_address, int size_in_bytes);
// Deopts all code that contains allocation instructions which are tenured or
// not tenured. Moreover it clears the pretenuring allocation site statistics.
void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
@@ -2601,6 +2622,9 @@ class Heap {
HeapObject* pending_layout_change_object_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
bool unprotected_memory_chunks_registry_enabled_;
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
@@ -2707,6 +2731,18 @@ class CodeSpaceMemoryModificationScope {
Heap* heap_;
};
// The CodePageCollectionMemoryModificationScope can only be used by the main
// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
// already active.
class CodePageCollectionMemoryModificationScope {
public:
explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
inline ~CodePageCollectionMemoryModificationScope();
private:
Heap* heap_;
};
// The CodePageMemoryModificationScope does not check if transitions to
// writeable and back to executable are actually allowed, i.e. the MemoryChunk
// was registered to be executable. It can be used by concurrent threads.
......
@@ -1774,6 +1774,13 @@ void PagedSpace::FreeLinearAllocationArea() {
InlineAllocationStep(current_top, nullptr, nullptr, 0);
SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
// The code page of the linear allocation area needs to be unprotected
// because we are going to write a filler into that memory area below.
if (identity() == CODE_SPACE) {
heap_->UnprotectAndRegisterMemoryChunk(
MemoryChunk::FromAddress(current_top));
}
Free(current_top, current_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
@@ -1849,13 +1856,6 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_GE(new_node_size, size_in_bytes);
#ifdef DEBUG
for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
reinterpret_cast<Object**>(new_node->address())[i] =
Smi::FromInt(kCodeZapValue);
}
#endif
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
@@ -1863,7 +1863,8 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
IncreaseAllocatedBytes(new_node_size, Page::FromAddress(new_node->address()));
Page* page = Page::FromAddress(new_node->address());
IncreaseAllocatedBytes(new_node_size, page);
Address start = new_node->address();
Address end = new_node->address() + new_node_size;
@@ -1871,6 +1872,9 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
if (limit != end) {
if (identity() == CODE_SPACE) {
heap_->UnprotectAndRegisterMemoryChunk(page);
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
......
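The DEBUG zap loop removed from RefillLinearAllocationAreaFromFreeList above is the code-zapping that moved into Heap::ZapCodeObject, which now runs right after a CODE_SPACE allocation in AllocateRaw. Below is a standalone sketch of that debug fill; the constant is only a placeholder pattern, and the Smi tagging done by the real Smi::FromInt(kCodeZapValue) is omitted.

#include <cstdint>
#include <cstdlib>

// Stand-ins for V8's Address/kPointerSize; kCodeZapValue here is not the
// real value, and V8's Smi tagging of the written words is left out.
using Address = uintptr_t;
constexpr int kPointerSize = sizeof(void*);
constexpr intptr_t kCodeZapValue = 0x0badc0de;

// Fill a freshly allocated code object with a recognizable pattern so that
// accidental use of uninitialized code memory is easy to diagnose.
void ZapCodeObject(Address start_address, int size_in_bytes) {
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<intptr_t*>(start_address)[i] = kCodeZapValue;
  }
}

int main() {
  const int size = 64;
  void* block = std::malloc(size);
  ZapCodeObject(reinterpret_cast<Address>(block), size);
  std::free(block);
}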