Commit 6cbe01ed authored by yangguo@chromium.org

Revert "Flush parallel recompilation queues on context dispose notification."

This reverts r15833.

R=jkummerow@chromium.org
BUG=

Review URL: https://codereview.chromium.org/19647018

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15841 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c3c0be71
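
For context (this note and sketch are not part of the diff): r15833 made the embedder's context-dispose notification flush the parallel recompilation queues, and this revert returns Heap::NotifyContextDisposed() to only bumping the disposed-context counter and flushing monomorphic ICs. A minimal sketch of how an embedder reaches that path, assuming the public v8.h API of this era; the helper function is hypothetical:

#include <v8.h>

// Hypothetical embedder hook, shown only to illustrate the entry point.
void OnDocumentTeardown() {
  // Forwards to i::Heap::NotifyContextDisposed(). With r15833 applied this
  // also flushed the OptimizingCompilerThread queues; after this revert it
  // no longer does.
  v8::V8::ContextDisposedNotification();
}
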
@@ -770,6 +770,7 @@ void Context::Exit() {
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
isolate->set_context(last_context);
isolate->set_context_exit_happened(true);
}
......
@@ -673,11 +673,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
function_info->allows_lazy_compilation() &&
!function_info->optimization_disabled() &&
!isolate()->DebuggerHasBreakPoints()) {
if (FLAG_parallel_recompilation) {
result->MarkForParallelRecompilation();
} else {
result->MarkForLazyRecompilation();
}
result->MarkForLazyRecompilation();
}
return result;
}
......
@@ -703,16 +703,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
int Heap::NotifyContextDisposed() {
if (FLAG_parallel_recompilation) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
flush_monomorphic_ics_ = true;
return ++contexts_disposed_;
}
void Heap::PerformScavenge() {
GCTracer tracer(this, NULL, NULL);
if (incremental_marking()->IsStopped()) {
......
@@ -1252,7 +1252,10 @@ class Heap {
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
int NotifyContextDisposed();
int NotifyContextDisposed() {
flush_monomorphic_ics_ = true;
return ++contexts_disposed_;
}
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
......
@@ -1777,6 +1777,7 @@ Isolate::Isolate()
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
context_exit_happened_(false),
initialized_from_snapshot_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),
......
@@ -1059,6 +1059,13 @@ class Isolate {
thread_local_top_.top_lookup_result_ = top;
}
bool context_exit_happened() {
return context_exit_happened_;
}
void set_context_exit_happened(bool context_exit_happened) {
context_exit_happened_ = context_exit_happened;
}
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
@@ -1306,6 +1313,10 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.
bool context_exit_happened_;
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_;
......
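
The context_exit_happened_ flag restored here is an idle-GC hint: Context::Exit() sets it (see the Context::Exit() hunk at the top of this diff), and the heap can then afford a more aggressive collection, since a just-exited context typically leaves a lot of garbage unreachable. A standalone model of that heuristic, going only by the comment above; this is not the actual Heap code, and whether the real code resets the flag after use is an assumption:

// Illustrative model only, not V8 code.
struct IdleGCHint {
  bool context_exit_happened = false;  // set on Context::Exit()

  // Consulted on an idle notification: a recently exited context makes a
  // full collection more likely to pay off.
  bool PreferFullCollection() {
    if (!context_exit_happened) return false;
    context_exit_happened = false;  // consume the hint (assumption)
    return true;
  }
};
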
@@ -1290,7 +1290,6 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
shared_info->DisableOptimization("LiveEdit");
}
if (shared_info->debug_info()->IsDebugInfo()) {
......
@@ -9234,7 +9234,10 @@ void JSFunction::MarkForParallelRecompilation() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(FLAG_parallel_recompilation);
if (!FLAG_parallel_recompilation) {
JSFunction::MarkForLazyRecompilation();
return;
}
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Marking ");
PrintName();
......
@@ -60,23 +60,12 @@ void OptimizingCompilerThread::Run() {
OS::Sleep(FLAG_parallel_recompilation_delay);
}
switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
case CONTINUE:
break;
case STOP:
if (FLAG_trace_parallel_recompilation) {
time_spent_total_ = OS::Ticks() - epoch;
}
stop_semaphore_->Signal();
return;
case FLUSH:
// Reset input queue semaphore.
delete input_queue_semaphore_;
input_queue_semaphore_ = OS::CreateSemaphore(0);
// Signal for main thread to start flushing.
stop_semaphore_->Signal();
// Return to start of consumer loop.
continue;
if (Acquire_Load(&stop_thread_)) {
stop_semaphore_->Signal();
if (FLAG_trace_parallel_recompilation) {
time_spent_total_ = OS::Ticks() - epoch;
}
return;
}
int64_t compiling_start = 0;
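
The FLUSH handling removed from Run() here, together with the FlushQueue()/Flush() bodies removed in the next hunk, implemented a handshake: the main thread switches the worker from CONTINUE to FLUSH, signals the input-queue semaphore, drains the input queue itself, waits on stop_semaphore_ for an acknowledgement, resets the flag to CONTINUE, and finally drains the output queue. Below is a self-contained C++11 model of that handshake using standard primitives instead of V8's semaphores; all names and the int job type are illustrative, not V8's.

#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>

enum class StopFlag { CONTINUE, STOP, FLUSH };

struct CompilerThreadModel {
  std::mutex mutex;
  std::condition_variable input_ready;   // stands in for input_queue_semaphore_
  std::condition_variable acknowledged;  // stands in for stop_semaphore_
  std::deque<int> input_queue, output_queue;
  StopFlag stop_flag = StopFlag::CONTINUE;
  bool ack = false;

  void Run() {  // runs on the worker thread
    for (;;) {
      std::unique_lock<std::mutex> lock(mutex);
      input_ready.wait(lock, [&] {
        return !input_queue.empty() || stop_flag != StopFlag::CONTINUE;
      });
      if (stop_flag == StopFlag::STOP) {
        ack = true;
        acknowledged.notify_one();
        return;
      }
      if (stop_flag == StopFlag::FLUSH) {
        // Acknowledge, then wait until the main thread has finished flushing
        // and reset the flag back to CONTINUE.
        ack = true;
        acknowledged.notify_one();
        input_ready.wait(lock, [&] { return stop_flag != StopFlag::FLUSH; });
        continue;
      }
      int job = input_queue.front();
      input_queue.pop_front();
      lock.unlock();
      // ... compile `job` here ...
      lock.lock();
      output_queue.push_back(job);
    }
  }

  // Main thread: discard queued and finished-but-uninstalled work.
  void Flush() {
    std::unique_lock<std::mutex> lock(mutex);
    stop_flag = StopFlag::FLUSH;
    input_queue.clear();
    input_ready.notify_one();
    acknowledged.wait(lock, [&] { return ack; });
    ack = false;
    stop_flag = StopFlag::CONTINUE;
    output_queue.clear();
    input_ready.notify_one();  // release the worker from its FLUSH wait
  }

  void Stop() {  // main thread
    std::unique_lock<std::mutex> lock(mutex);
    stop_flag = StopFlag::STOP;
    input_ready.notify_one();
    acknowledged.wait(lock, [&] { return ack; });
  }
};

int main() {
  CompilerThreadModel model;
  std::thread worker([&] { model.Run(); });
  {
    std::lock_guard<std::mutex> lock(model.mutex);
    model.input_queue.push_back(1);
    model.input_ready.notify_one();
  }
  model.Flush();  // e.g. on a context-dispose notification
  model.Stop();
  worker.join();
  return 0;
}
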
@@ -113,41 +102,9 @@ void OptimizingCompilerThread::CompileNext() {
}
void OptimizingCompilerThread::FlushQueue(
UnboundQueue<OptimizingCompiler*>* queue,
bool restore_function_code) {
ASSERT(!IsOptimizerThread());
OptimizingCompiler* optimizing_compiler;
// The optimizing compiler is allocated in the CompilationInfo's zone.
while (queue->Dequeue(&optimizing_compiler)) {
CompilationInfo* info = optimizing_compiler->info();
if (restore_function_code) {
Handle<JSFunction> function = info->closure();
function->ReplaceCode(function->shared()->code());
}
delete info;
}
}
void OptimizingCompilerThread::Flush() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
input_queue_semaphore_->Signal();
FlushQueue(&input_queue_, true);
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
stop_semaphore_->Wait();
Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
FlushQueue(&output_queue_, true);
}
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
input_queue_semaphore_->Signal();
stop_semaphore_->Wait();
@@ -157,8 +114,14 @@ void OptimizingCompilerThread::Stop() {
while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
InstallOptimizedFunctions();
} else {
FlushQueue(&input_queue_, false);
FlushQueue(&output_queue_, false);
OptimizingCompiler* optimizing_compiler;
// The optimizing compiler is allocated in the CompilationInfo's zone.
while (input_queue_.Dequeue(&optimizing_compiler)) {
delete optimizing_compiler->info();
}
while (output_queue_.Dequeue(&optimizing_compiler)) {
delete optimizing_compiler->info();
}
}
if (FLAG_trace_parallel_recompilation) {
......
@@ -54,13 +54,13 @@ class OptimizingCompilerThread : public Thread {
install_mutex_(OS::CreateMutex()),
time_spent_compiling_(0),
time_spent_total_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
}
void Run();
void Stop();
void Flush();
void CompileNext();
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
@@ -92,12 +92,6 @@ class OptimizingCompilerThread : public Thread {
}
private:
enum StopFlag { CONTINUE, STOP, FLUSH };
void FlushQueue(UnboundQueue<OptimizingCompiler*>* queue,
bool restore_function_code);
void CompileNext();
#ifdef DEBUG
int thread_id_;
Mutex* thread_id_mutex_;
......
@@ -8460,7 +8460,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
while (function->IsInRecompileQueue() ||
while (function->IsMarkedForParallelRecompilation() ||
function->IsInRecompileQueue() ||
function->IsMarkedForInstallingRecompiledCode()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
......
@@ -77,27 +77,23 @@ class AlwaysOptimizeAllowNativesSyntaxNoInlining {
// Utility class to set --allow-natives-syntax and --nouse-inlining when
// constructed and return to their default state when destroyed.
class AllowNativesSyntaxNoInliningNoParallel {
class AllowNativesSyntaxNoInlining {
public:
AllowNativesSyntaxNoInliningNoParallel()
AllowNativesSyntaxNoInlining()
: allow_natives_syntax_(i::FLAG_allow_natives_syntax),
use_inlining_(i::FLAG_use_inlining),
parallel_recompilation_(i::FLAG_parallel_recompilation) {
use_inlining_(i::FLAG_use_inlining) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_use_inlining = false;
i::FLAG_parallel_recompilation = false;
}
~AllowNativesSyntaxNoInliningNoParallel() {
~AllowNativesSyntaxNoInlining() {
i::FLAG_allow_natives_syntax = allow_natives_syntax_;
i::FLAG_use_inlining = use_inlining_;
i::FLAG_parallel_recompilation = parallel_recompilation_;
}
private:
bool allow_natives_syntax_;
bool use_inlining_;
bool parallel_recompilation_;
};
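
The revert also restores the original helper name and drops the saved FLAG_parallel_recompilation member. These test helpers use the usual save-on-construction / restore-on-destruction pattern for V8 flags; below is a generic sketch of the same RAII idiom (illustrative only, not part of this CL; the FLAG_* names in the usage comment assume V8's internal flag variables):

// Saves a flag's current value and restores it when the scope ends.
template <typename T>
class ScopedFlag {
 public:
  ScopedFlag(T* flag, T value) : flag_(flag), saved_(*flag) { *flag_ = value; }
  ~ScopedFlag() { *flag_ = saved_; }
 private:
  ScopedFlag(const ScopedFlag&);      // not copyable
  void operator=(const ScopedFlag&);  // not assignable
  T* flag_;
  T saved_;
};

// Usage against V8's internal flags would look roughly like:
//   ScopedFlag<bool> natives(&i::FLAG_allow_natives_syntax, true);
//   ScopedFlag<bool> inlining(&i::FLAG_use_inlining, false);
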
@@ -347,7 +343,7 @@ TEST(DeoptimizeBinaryOperationADDString) {
const char* f_source = "function f(x, y) { return x + y; };";
{
AllowNativesSyntaxNoInliningNoParallel options;
AllowNativesSyntaxNoInlining options;
// Compile function f and collect to type feedback to insert binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -405,7 +401,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
binary_op);
char* f_source = f_source_buffer.start();
AllowNativesSyntaxNoInliningNoParallel options;
AllowNativesSyntaxNoInlining options;
// Compile function f and collect to type feedback to insert binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -497,7 +493,7 @@ TEST(DeoptimizeCompare) {
const char* f_source = "function f(x, y) { return x < y; };";
{
AllowNativesSyntaxNoInliningNoParallel options;
AllowNativesSyntaxNoInlining options;
// Compile function f and collect to type feedback to insert compare ic
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -544,7 +540,7 @@ TEST(DeoptimizeLoadICStoreIC) {
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
AllowNativesSyntaxNoInliningNoParallel options;
AllowNativesSyntaxNoInlining options;
// Compile functions and collect to type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -624,7 +620,7 @@ TEST(DeoptimizeLoadICStoreICNested) {
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
AllowNativesSyntaxNoInliningNoParallel options;
AllowNativesSyntaxNoInlining options;
// Compile functions and collect to type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
......