Commit 84ee9470 authored by ulan, committed by Commit bot

Workaround for glibc semaphore bug.

Instead of dynamically creating semaphore for each page parallel job,
we create one semaphore for MarkCompact and reuse it.

This patch also removes all instrumentation code that was added to
help with investigation.

BUG=chromium:609249
LOG=NO

Review-Url: https://codereview.chromium.org/1998213002
Cr-Commit-Position: refs/heads/master@{#36407}
parent 80b936ae
......@@ -34,11 +34,10 @@ Semaphore::~Semaphore() {
USE(result);
}
void Semaphore::Signal(const char* caller) {
void Semaphore::Signal() {
kern_return_t result = semaphore_signal(native_handle_);
DCHECK_EQ(KERN_SUCCESS, result);
USE(result);
USE(caller);
}
......@@ -104,12 +103,9 @@ Semaphore::~Semaphore() {
USE(result);
}
void Semaphore::Signal(const char* caller) {
void Semaphore::Signal() {
int result = sem_post(&native_handle_);
if (result != 0) {
V8_Fatal(__FILE__, __LINE__,
"Semaphore signal failure: %d called by '%s'\n", errno, caller);
}
CHECK_EQ(0, result);
}
......@@ -177,12 +173,11 @@ Semaphore::~Semaphore() {
USE(result);
}
void Semaphore::Signal(const char* caller) {
void Semaphore::Signal() {
LONG dummy;
BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
DCHECK(result);
USE(result);
USE(caller);
}
......
......@@ -37,8 +37,7 @@ class Semaphore final {
~Semaphore();
// Increments the semaphore counter.
// TODO(ulan): remove caller parameter once crbug.com/609249 is fixed.
void Signal(const char* caller = nullptr);
void Signal();
// Suspends the calling thread until the semaphore counter is non zero
// and then decrements the semaphore counter.
......
......@@ -2159,7 +2159,7 @@ void Debug::EnqueueCommandMessage(Vector<const uint16_t> command,
client_data);
isolate_->logger()->DebugTag("Put command on command_queue.");
command_queue_.Put(message);
command_received_.Signal("Debug::EnqueueCommandMessage");
command_received_.Signal();
// Set the debug command break flag to have the command processed.
if (!in_debug_scope()) isolate_->stack_guard()->RequestDebugCommand();
......
......@@ -49,19 +49,21 @@ STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: // NOLINT
heap_(heap),
page_parallel_job_semaphore_(0),
#ifdef DEBUG
state_(IDLE),
#endif
marking_parity_(ODD_MARKING_PARITY),
was_marked_incrementally_(false),
evacuation_(false),
heap_(heap),
compacting_(false),
black_allocation_(false),
have_code_to_deoptimize_(false),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
code_flusher_(nullptr),
embedder_heap_tracer_(nullptr),
have_code_to_deoptimize_(false),
compacting_(false),
sweeper_(heap) {
}
......@@ -480,7 +482,7 @@ class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
}
pending_sweeper_tasks_->Signal("SweeperTask::Run");
pending_sweeper_tasks_->Signal();
}
Sweeper* sweeper_;
......@@ -585,7 +587,7 @@ bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
base::TimeDelta::FromSeconds(0))) {
return false;
}
pending_sweeper_tasks_semaphore_.Signal("Sweeper::IsSweepingCompleted");
pending_sweeper_tasks_semaphore_.Signal();
return true;
}
......@@ -3352,7 +3354,8 @@ class EvacuationJobTraits {
void MarkCompactCollector::EvacuatePagesInParallel() {
PageParallelJob<EvacuationJobTraits> job(
heap_, heap_->isolate()->cancelable_task_manager());
heap_, heap_->isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
int abandoned_pages = 0;
intptr_t live_bytes = 0;
......@@ -3721,9 +3724,9 @@ int NumberOfPointerUpdateTasks(int pages) {
}
template <PointerDirection direction>
void UpdatePointersInParallel(Heap* heap) {
void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
PageParallelJob<PointerUpdateJobTraits<direction> > job(
heap, heap->isolate()->cancelable_task_manager());
heap, heap->isolate()->cancelable_task_manager(), semaphore);
RememberedSet<direction>::IterateMemoryChunks(
heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
int num_pages = job.NumberOfPages();
......@@ -3752,9 +3755,9 @@ class ToSpacePointerUpdateJobTraits {
}
};
void UpdateToSpacePointersInParallel(Heap* heap) {
void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
PageParallelJob<ToSpacePointerUpdateJobTraits> job(
heap, heap->isolate()->cancelable_task_manager());
heap, heap->isolate()->cancelable_task_manager(), semaphore);
Address space_start = heap->new_space()->bottom();
Address space_end = heap->new_space()->top();
NewSpacePageIterator it(space_start, space_end);
......@@ -3778,17 +3781,17 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
UpdateToSpacePointersInParallel(heap_);
UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
UpdatePointersInParallel<OLD_TO_NEW>(heap_);
UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
}
{
Heap* heap = this->heap();
TRACE_GC(heap->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
UpdatePointersInParallel<OLD_TO_OLD>(heap_);
UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
}
{
......
......@@ -644,27 +644,6 @@ class MarkCompactCollector {
int* target_fragmentation_percent,
int* max_evacuated_bytes);
#ifdef DEBUG
enum CollectorState {
IDLE,
PREPARE_GC,
MARK_LIVE_OBJECTS,
SWEEP_SPACES,
ENCODE_FORWARDING_ADDRESSES,
UPDATE_POINTERS,
RELOCATE_OBJECTS
};
// The current stage of the collector.
CollectorState state_;
#endif
MarkingParity marking_parity_;
bool was_marked_incrementally_;
bool evacuation_;
// Finishes GC, performs heap verification if enabled.
void Finish();
......@@ -850,6 +829,38 @@ class MarkCompactCollector {
#endif
Heap* heap_;
base::Semaphore page_parallel_job_semaphore_;
#ifdef DEBUG
enum CollectorState {
IDLE,
PREPARE_GC,
MARK_LIVE_OBJECTS,
SWEEP_SPACES,
ENCODE_FORWARDING_ADDRESSES,
UPDATE_POINTERS,
RELOCATE_OBJECTS
};
// The current stage of the collector.
CollectorState state_;
#endif
MarkingParity marking_parity_;
bool was_marked_incrementally_;
bool evacuation_;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
bool black_allocation_;
bool have_code_to_deoptimize_;
base::VirtualMemory* marking_deque_memory_;
size_t marking_deque_memory_committed_;
MarkingDeque marking_deque_;
......@@ -859,17 +870,9 @@ class MarkCompactCollector {
EmbedderHeapTracer* embedder_heap_tracer_;
bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<Page*> newspace_evacuation_candidates_;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
bool black_allocation_;
Sweeper sweeper_;
friend class Heap;
......
......@@ -33,14 +33,20 @@ class Isolate;
template <typename JobTraits>
class PageParallelJob {
public:
PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager)
// PageParallelJob cannot dynamically create a semaphore because of a bug in
// glibc. See http://crbug.com/609249 and
// https://sourceware.org/bugzilla/show_bug.cgi?id=12674.
// The caller must provide a semaphore with value 0 and ensure that
// the lifetime of the semaphore is the same as the lifetime of the Isolate.
// It is guaranteed that the semaphore value will be 0 after Run() call.
PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager,
base::Semaphore* semaphore)
: heap_(heap),
cancelable_task_manager_(cancelable_task_manager),
items_(nullptr),
num_items_(0),
num_tasks_(0),
pending_tasks_(new base::Semaphore(0)),
finished_tasks_(new base::AtomicNumber<int>(0)) {}
pending_tasks_(semaphore) {}
~PageParallelJob() {
Item* item = items_;
......@@ -49,8 +55,6 @@ class PageParallelJob {
delete item;
item = next;
}
delete pending_tasks_;
delete finished_tasks_;
}
void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
......@@ -85,7 +89,7 @@ class PageParallelJob {
start_index -= num_items_;
}
Task* task = new Task(heap_, items_, num_items_, start_index,
pending_tasks_, per_task_data_callback(i), this);
pending_tasks_, per_task_data_callback(i));
task_ids[i] = task->id();
if (i > 0) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
......@@ -97,18 +101,12 @@ class PageParallelJob {
// Contribute on main thread.
main_task->Run();
delete main_task;
int aborted_tasks = 0;
// Wait for background tasks.
for (int i = 0; i < num_tasks_; i++) {
if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
pending_tasks_->Wait();
} else {
++aborted_tasks;
}
}
int finished_tasks = finished_tasks_->Value();
// TODO(ulan): Remove this check after investigation of crbug.com/609249.
CHECK_EQ(aborted_tasks + finished_tasks, num_tasks_);
if (JobTraits::NeedSequentialFinalization) {
Item* item = items_;
while (item != nullptr) {
......@@ -120,8 +118,6 @@ class PageParallelJob {
}
}
void NotifyFinishedTask() { finished_tasks_->Increment(1); }
private:
static const int kMaxNumberOfTasks = 10;
......@@ -139,16 +135,14 @@ class PageParallelJob {
class Task : public CancelableTask {
public:
Task(Heap* heap, Item* items, int num_items, int start_index,
base::Semaphore* on_finish, typename JobTraits::PerTaskData data,
PageParallelJob<JobTraits>* job)
base::Semaphore* on_finish, typename JobTraits::PerTaskData data)
: CancelableTask(heap->isolate()),
heap_(heap),
items_(items),
num_items_(num_items),
start_index_(start_index),
on_finish_(on_finish),
data_(data),
job_(job) {}
data_(data) {}
virtual ~Task() {}
......@@ -173,8 +167,7 @@ class PageParallelJob {
current = items_;
}
}
job_->NotifyFinishedTask();
on_finish_->Signal("PageParallelJob::Task::RunInternal");
on_finish_->Signal();
}
Heap* heap_;
......@@ -183,7 +176,6 @@ class PageParallelJob {
int start_index_;
base::Semaphore* on_finish_;
typename JobTraits::PerTaskData data_;
PageParallelJob<JobTraits>* job_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
......@@ -193,7 +185,6 @@ class PageParallelJob {
int num_items_;
int num_tasks_;
base::Semaphore* pending_tasks_;
base::AtomicNumber<int>* finished_tasks_;
DISALLOW_COPY_AND_ASSIGN(PageParallelJob);
};
......
......@@ -355,8 +355,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
// v8::Task overrides.
void Run() override {
unmapper_->PerformFreeMemoryOnQueuedChunks();
unmapper_->pending_unmapping_tasks_semaphore_.Signal(
"Unmapper::UnmapFreeMemoryTask::Run");
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
Unmapper* unmapper_;
......
......@@ -560,7 +560,7 @@ class Profiler: public base::Thread {
} else {
buffer_[head_] = *sample;
head_ = Succ(head_);
buffer_semaphore_.Signal("Profiler::Insert"); // Tell we have an element.
buffer_semaphore_.Signal(); // Tell we have an element.
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment