Commit 14917b65 authored by Hannes Payer, committed by Commit Bot

[heap] Implemented write protected code pages.

This CL provides the basic infrastructure on the GC side for write protected code pages.

The only thing missing on the GC side is the out-of-line free list implementation. In this
CL sweeper threads and the mutator need to synchronize when page protection mode changes.
This would not be necessary if the sweeper used an out-of-line free list.

Code allocation is currently protected by a CodeSpaceMemoryModificationScope. This may
go away with a unification of code space allocation and initialization that will happen
later.

One thing missing in this CL: freshly added pages are still read+write+executable. This
also needs to change: WIP

Bug: chromium:774108,v8:6792
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: Ib8d1ed1c09cc144791e462277bf43a4641e1490d
Reviewed-on: https://chromium-review.googlesource.com/716379
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48774}
parent 1ec60472
......@@ -519,7 +519,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return;
}
case v8::ArrayBuffer::Allocator::Protection::kReadWrite: {
base::OS::Unprotect(data, length);
base::OS::SetReadAndWritable(data, length, true);
return;
}
}
......
......@@ -170,7 +170,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
......@@ -260,7 +260,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
......@@ -289,7 +289,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
......
......@@ -116,14 +116,13 @@ void OS::Free(void* address, const size_t size) {
DCHECK_EQ(result, 0);
}
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {
void OS::SetReadAndExecutable(void* address, const size_t size) {
#if V8_OS_CYGWIN
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
CHECK_NOT_NULL(
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect));
#else
mprotect(address, size, PROT_READ | PROT_EXEC);
CHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_EXEC));
#endif
}
......@@ -141,12 +140,12 @@ void OS::Guard(void* address, const size_t size) {
#endif // !V8_OS_FUCHSIA
// Make a region of memory readable and writable.
void OS::Unprotect(void* address, const size_t size) {
void OS::SetReadAndWritable(void* address, const size_t size, bool commit) {
#if V8_OS_CYGWIN
DWORD oldprotect;
VirtualProtect(address, size, PAGE_READWRITE, &oldprotect);
CHECK_NOT_NULL(VirtualProtect(address, size, PAGE_READWRITE, &oldprotect));
#else
mprotect(address, size, PROT_READ | PROT_WRITE);
CHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE));
#endif
}
......
......@@ -814,9 +814,10 @@ intptr_t OS::CommitPageSize() {
return 4096;
}
void OS::ProtectCode(void* address, const size_t size) {
void OS::SetReadAndExecutable(void* address, const size_t size) {
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
CHECK_NE(NULL,
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect));
}
void OS::Guard(void* address, const size_t size) {
......@@ -824,9 +825,13 @@ void OS::Guard(void* address, const size_t size) {
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
}
void OS::Unprotect(void* address, const size_t size) {
LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
USE(result);
void OS::SetReadAndWritable(void* address, const size_t size, bool commit) {
if (commit) {
CHECK(VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE));
} else {
DWORD oldprotect;
CHECK_NE(NULL, VirtualProtect(address, size, PAGE_READWRITE, &oldprotect));
}
}
// static
......
......@@ -177,18 +177,18 @@ class V8_BASE_EXPORT OS {
// PROT_NONE, which also prevents it from being committed.
static void* AllocateGuarded(const size_t requested);
// This is the granularity at which the ProtectCode(...) call can set page
// permissions.
// This is the granularity at which the SetReadAndExecutable(...) call can
// set page permissions.
static intptr_t CommitPageSize();
// Mark code segments non-writable.
static void ProtectCode(void* address, const size_t size);
// Mark a region of memory executable and readable but not writable.
static void SetReadAndExecutable(void* address, const size_t size);
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
// Make a region of memory readable and writable.
static void Unprotect(void* address, const size_t size);
// Make a region of memory non-executable but readable and writable.
static void SetReadAndWritable(void* address, const size_t size, bool commit);
// Generate a random address to be used for hinting mmap().
static void* GetRandomMmapAddr();
......
......@@ -1810,6 +1810,7 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Kind kind,
}
int obj_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
CodeSpaceMemoryModificationScope code_allocation(isolate()->heap());
Handle<Code> code = NewCodeRaw(obj_size, immovable);
DCHECK(!isolate()->heap()->memory_allocator()->code_range()->valid() ||
isolate()->heap()->memory_allocator()->code_range()->contains(
......
......@@ -607,6 +607,7 @@ DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
DEFINE_BOOL(write_protect_code_memory, false, "write protect code memory")
#ifdef V8_CONCURRENT_MARKING
#define V8_CONCURRENT_MARKING_BOOL true
#else
......
......@@ -590,6 +590,35 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_count_.Decrement(1);
}
// Makes every page of the heap's code space writable for the lifetime of
// this scope. No-op unless FLAG_write_protect_code_memory is enabled.
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
: heap_(heap) {
if (FLAG_write_protect_code_memory) {
heap_->code_space()->SetReadAndWritable();
}
}
// Restores read+execute protection on all code-space pages.
CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
if (FLAG_write_protect_code_memory) {
heap_->code_space()->SetReadAndExecutable();
}
}
// Makes a single chunk writable for the lifetime of this scope. Only acts
// when FLAG_write_protect_code_memory is enabled AND the chunk is an
// executable (code) page; for any other chunk both ctor and dtor are no-ops.
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
MemoryChunk* chunk)
: chunk_(chunk) {
if (FLAG_write_protect_code_memory &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
chunk_->SetReadAndWritable();
}
}
// Restores read+execute protection on the chunk (same guard as the ctor).
CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
if (FLAG_write_protect_code_memory &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
chunk_->SetReadAndExecutable();
}
}
} // namespace internal
} // namespace v8
......
......@@ -1914,6 +1914,8 @@ void Heap::Scavenge() {
job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
}
CodeSpaceMemoryModificationScope code_modification(this);
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
this, [&job](MemoryChunk* chunk) {
job.AddItem(new PageScavengingItem(chunk));
......@@ -3046,6 +3048,7 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
AllocationResult Heap::CopyCode(Code* code) {
CodeSpaceMemoryModificationScope code_modification(this);
AllocationResult allocation;
HeapObject* result = nullptr;
......
......@@ -2519,6 +2519,23 @@ class AlwaysAllocateScope {
Heap* heap_;
};
// RAII scope that switches the whole code space to read+write on
// construction and back to read+execute on destruction (only when
// FLAG_write_protect_code_memory is enabled). Non-copy semantics are not
// enforced here; callers create it on the stack around code mutation.
class CodeSpaceMemoryModificationScope {
public:
explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
inline ~CodeSpaceMemoryModificationScope();
private:
Heap* heap_;  // Heap whose code space is unprotected; not owned.
};
// RAII scope that switches one executable MemoryChunk to read+write on
// construction and back to read+execute on destruction. No-op for
// non-executable chunks or when FLAG_write_protect_code_memory is off.
class CodePageMemoryModificationScope {
public:
explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
inline ~CodePageMemoryModificationScope();
private:
MemoryChunk* chunk_;  // Chunk being unprotected; not owned.
};
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
......
......@@ -3733,6 +3733,7 @@ void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
CodeSpaceMemoryModificationScope code_modifcation(heap());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
......@@ -4419,6 +4420,11 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
base::LockGuard<base::RecursiveMutex> guard(page->mutex());
// If this page was already swept in the meantime, we can return here.
if (page->SweepingDone()) return 0;
// If the page is a code page, the CodePageMemoryModificationScope changes
// the page protection mode from read+execute to read+write while sweeping.
CodePageMemoryModificationScope code_page_scope(page);
DCHECK_EQ(Page::kSweepingPending,
page->concurrent_sweeping_state().Value());
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
......
......@@ -528,6 +528,48 @@ void MemoryChunk::InitializationMemoryFence() {
#endif
}
// Drops one write-unprotect request on this executable chunk. The page
// protection is only actually flipped back to read+execute when the last
// outstanding request is released (counter reaches zero), so nested
// modification scopes compose correctly.
void MemoryChunk::SetReadAndExecutable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
if (write_unprotect_counter_ == 0) {
// This is a corner case that may happen when we have a
// CodeSpaceMemoryModificationScope open and this page was newly
// added.
return;
}
write_unprotect_counter_--;
DCHECK_LE(write_unprotect_counter_, 1);
if (write_unprotect_counter_ == 0) {
// Only the area from CodePageAreaStartOffset onward is re-protected;
// the page header before that offset is left untouched.
Address protect_start =
address() + MemoryAllocator::CodePageAreaStartOffset();
size_t protect_size = size() - MemoryAllocator::CodePageAreaStartOffset();
DCHECK(
IsAddressAligned(protect_start, MemoryAllocator::GetCommitPageSize()));
base::OS::SetReadAndExecutable(protect_start, protect_size);
}
}
// Registers one write-unprotect request on this executable chunk. The page
// protection is only flipped to read+write on the first request (counter
// transitions 0 -> 1); further nested requests just bump the counter.
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
write_unprotect_counter_++;
DCHECK_LE(write_unprotect_counter_, 2);
if (write_unprotect_counter_ == 1) {
// Only the area from CodePageAreaStartOffset onward changes protection;
// the page header before that offset is left untouched. The memory is
// already committed, hence commit == false.
Address unprotect_start =
address() + MemoryAllocator::CodePageAreaStartOffset();
size_t unprotect_size = size() - MemoryAllocator::CodePageAreaStartOffset();
DCHECK(IsAddressAligned(unprotect_start,
MemoryAllocator::GetCommitPageSize()));
base::OS::SetReadAndWritable(unprotect_start, unprotect_size, false);
}
}
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
......@@ -554,6 +596,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
chunk->mutex_ = new base::RecursiveMutex();
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
......@@ -1202,6 +1246,10 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete mutex_;
mutex_ = nullptr;
}
if (page_protection_change_mutex_ != nullptr) {
delete page_protection_change_mutex_;
page_protection_change_mutex_ = nullptr;
}
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
......@@ -1731,6 +1779,20 @@ void PagedSpace::ReleasePage(Page* page) {
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
page->SetReadAndExecutable();
}
}
void PagedSpace::SetReadAndWritable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
page->SetReadAndWritable();
}
}
std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
}
......
......@@ -366,6 +366,8 @@ class MemoryChunk {
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::RecursiveMutex* mutex_
+ kPointerSize // base::AtomicWord concurrent_sweeping_
+ kPointerSize // base::Mutex* page_protection_change_mutex_
+ kPointerSize // uintptr_t write_unprotect_counter_
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kPointerSize // AtomicValue next_chunk_
......@@ -627,6 +629,9 @@ class MemoryChunk {
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
void SetReadAndExecutable();
void SetReadAndWritable();
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
......@@ -679,6 +684,16 @@ class MemoryChunk {
base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
base::Mutex* page_protection_change_mutex_;
// This field is only relevant for code pages. It depicts the number of
// times a component requested this page to be read+writeable. The
// counter is decremented when a component resets to read+executable.
// If Value() == 0 => The memory is read and executable.
// If Value() >= 1 => The memory is read and writable.
// The maximum value can right now only be 2.
uintptr_t write_unprotect_counter_;
// Bytes allocated on the page, which include all objects on the page
// and the linear allocation area.
size_t allocated_bytes_;
......@@ -2115,6 +2130,9 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
void SetReadAndExecutable();
void SetReadAndWritable();
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
......
......@@ -40,7 +40,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
......@@ -452,7 +452,7 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
......
......@@ -545,7 +545,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
......@@ -573,7 +573,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
......
......@@ -547,7 +547,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
......@@ -575,7 +575,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
......
......@@ -42,7 +42,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
......
......@@ -38,7 +38,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
......
......@@ -96,6 +96,7 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
BuiltinSnapshotData builtin_snapshot_data(builtin_data);
CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
Code* code = builtin_deserializer.DeserializeBuiltin(builtin_id);
DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
......
......@@ -33,7 +33,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
base::OS::SetReadAndExecutable(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment