Commit 6e689c91 authored by Hannes Payer, committed by Commit Bot

[heap] Incrementally sweep code pages to avoid rwx code page memory.

CQ_INCLUDE_TRYBOTS=master.tryserver.v8:v8_linux64_tsan_rel;master.tryserver.v8:v8_linux64_tsan_concurrent_marking_rel_ng;master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel;master.tryserver.chromium.android:android_optional_gpu_tests_rel

Bug: chromium:774108,v8:6792
Change-Id: Ie02287467ef4e47d00058327db7eaf6c97d2fda1
Reviewed-on: https://chromium-review.googlesource.com/782559
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49606}
parent 932aafb9
......@@ -627,7 +627,7 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
}
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
MemoryChunk* chunk, CodePageModificationMode mode)
MemoryChunk* chunk)
: chunk_(chunk),
scope_active_(chunk_->heap()->write_protect_code_memory() &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
......@@ -635,11 +635,7 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
(chunk_->owner()->identity() == LO_SPACE &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)));
if (mode == READ_WRITE_EXECUTABLE) {
chunk_->SetReadWriteAndExecutable();
} else {
chunk_->SetReadAndWritable();
}
chunk_->SetReadAndWritable();
}
}
......
......@@ -872,15 +872,13 @@ void Heap::ProcessPretenuringFeedback() {
void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
CodePageMemoryModificationScope modification_scope(
chunk, CodePageMemoryModificationScope::READ_WRITE);
CodePageMemoryModificationScope modification_scope(chunk);
code->InvalidateEmbeddedObjects();
}
void Heap::InvalidateCodeDeoptimizationData(Code* code) {
MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
CodePageMemoryModificationScope modification_scope(
chunk, CodePageMemoryModificationScope::READ_WRITE);
CodePageMemoryModificationScope modification_scope(chunk);
code->set_deoptimization_data(empty_fixed_array());
}
......
......@@ -2633,11 +2633,7 @@ class CodeSpaceMemoryModificationScope {
class CodePageMemoryModificationScope {
public:
enum CodePageModificationMode { READ_WRITE, READ_WRITE_EXECUTABLE };
// TODO(hpayer): Remove set_executable from the constructor. Code pages should
// never be executable and writable at the same time.
inline CodePageMemoryModificationScope(MemoryChunk* chunk,
CodePageModificationMode mode);
explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
inline ~CodePageMemoryModificationScope();
private:
......
......@@ -31,6 +31,7 @@
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
......@@ -678,6 +679,8 @@ class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
for (int i = 0; i < num_spaces; i++) {
const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
// Do not sweep code space concurrently.
if (static_cast<AllocationSpace>(space_id) == CODE_SPACE) continue;
DCHECK_GE(space_id, FIRST_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
......@@ -694,6 +697,33 @@ class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
// Foreground (main-thread) task that sweeps one pending CODE_SPACE page per
// invocation and reschedules itself until no pages remain, so code pages are
// never swept concurrently.
class MarkCompactCollector::Sweeper::IncrementalSweeperTask final
    : public CancelableTask {
 public:
  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
      : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}

  virtual ~IncrementalSweeperTask() {}

 private:
  void RunInternal() final {
    VMState<GC> state(isolate_);
    TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
    // Clear the pending flag first so a follow-up task may be scheduled.
    sweeper_->incremental_sweeper_pending_ = false;
    if (!sweeper_->sweeping_in_progress()) return;
    const bool space_fully_swept =
        sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE);
    if (!space_fully_swept) sweeper_->ScheduleIncrementalSweepingTask();
  }

  Isolate* const isolate_;
  Sweeper* const sweeper_;

  DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};
void MarkCompactCollector::Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_.Value());
sweeping_in_progress_ = true;
......@@ -724,6 +754,7 @@ void MarkCompactCollector::Sweeper::StartSweeperTasks() {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
});
ScheduleIncrementalSweepingTask();
}
}
......@@ -4440,6 +4471,14 @@ void MarkCompactCollector::Sweeper::SweepSpaceFromTask(
}
}
// Sweeps at most one pending page of the given space on the calling thread.
// Returns true when no pages are left to sweep for that space.
bool MarkCompactCollector::Sweeper::SweepSpaceIncrementallyFromTask(
    AllocationSpace identity) {
  Page* const page = GetSweepingPageSafe(identity);
  if (page != nullptr) {
    ParallelSweepPage(page, identity);
  }
  return sweeping_list_[identity].empty();
}
int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
int required_freed_bytes,
int max_pages) {
......@@ -4471,10 +4510,8 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
if (page->SweepingDone()) return 0;
// If the page is a code page, the CodePageMemoryModificationScope changes
// the page protection mode from rx -> rwx while sweeping.
// TODO(hpayer): Allow only rx -> rw transitions.
CodePageMemoryModificationScope code_page_scope(
page, CodePageMemoryModificationScope::READ_WRITE_EXECUTABLE);
// the page protection mode from rx -> rw while sweeping.
CodePageMemoryModificationScope code_page_scope(page);
DCHECK_EQ(Page::kSweepingPending,
page->concurrent_sweeping_state().Value());
......@@ -4506,6 +4543,16 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
return max_freed;
}
void MarkCompactCollector::Sweeper::ScheduleIncrementalSweepingTask() {
if (!incremental_sweeper_pending_) {
incremental_sweeper_pending_ = true;
IncrementalSweeperTask* task =
new IncrementalSweeperTask(heap_->isolate(), this);
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
}
}
void MarkCompactCollector::Sweeper::AddPage(
AllocationSpace space, Page* page,
MarkCompactCollector::Sweeper::AddPageMode mode) {
......
......@@ -681,6 +681,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
marking_state_(marking_state),
num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
incremental_sweeper_pending_(false),
sweeping_in_progress_(false),
num_sweeping_tasks_(0),
stop_sweeper_tasks_(false) {}
......@@ -693,6 +694,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity);
void ScheduleIncrementalSweepingTask();
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
......@@ -707,6 +710,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Page* GetSweptPageSafe(PagedSpace* space);
private:
class IncrementalSweeperTask;
class SweeperTask;
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
......@@ -729,6 +733,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void SweepSpaceFromTask(AllocationSpace identity);
// Sweeps incrementally one page from the given space. Returns true if
// there are no more pages to sweep in the given space.
bool SweepSpaceIncrementallyFromTask(AllocationSpace identity);
void AbortAndWaitForTasks();
Page* GetSweepingPageSafe(AllocationSpace space);
......@@ -743,6 +751,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
bool incremental_sweeper_pending_;
bool sweeping_in_progress_;
// Counter is actively maintained by the concurrent tasks to avoid querying
// the semaphore for maintaining a task counter on the main thread.
......
......@@ -96,8 +96,7 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
}
void Scavenger::ScavengePage(MemoryChunk* page) {
CodePageMemoryModificationScope memory_modification_scope(
page, CodePageMemoryModificationScope::READ_WRITE);
CodePageMemoryModificationScope memory_modification_scope(page);
RememberedSet<OLD_TO_NEW>::Iterate(
page,
[this](Address addr) { return CheckAndScavengeObject(heap_, addr); },
......
......@@ -568,24 +568,6 @@ void MemoryChunk::SetReadAndWritable() {
}
}
void MemoryChunk::SetReadWriteAndExecutable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
write_unprotect_counter_++;
DCHECK_LE(write_unprotect_counter_, 3);
Address unprotect_start =
address() + MemoryAllocator::CodePageAreaStartOffset();
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
CHECK(
base::OS::SetPermissions(unprotect_start, unprotect_size,
base::OS::MemoryPermission::kReadWriteExecute));
}
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
......
......@@ -636,9 +636,6 @@ class MemoryChunk {
void SetReadAndExecutable();
void SetReadAndWritable();
// TODO(hpayer): Remove this method. Memory should never be rwx.
void SetReadWriteAndExecutable();
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment