Commit d04a7d6d authored by jarin@chromium.org

More lazy deoptimization in Turbofan (binops, loads/stores)

Deoptimizing binary operations, (Load|Store)(Property|Named), and
constructors.

This also fixes safepoint lookup to account for lazily deoptimized code.

BUG=
R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/453383002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23029 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 6c47bc72
@@ -49,9 +49,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
   return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
-      this->info_->zone(), descriptor, stack_parameter_count);
+      zone, descriptor, stack_parameter_count, can_deoptimize);
 }
......
@@ -49,9 +49,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
   return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
-      this->info_->zone(), descriptor, stack_parameter_count);
+      zone, descriptor, stack_parameter_count, can_deoptimize);
 }
......
@@ -78,9 +78,11 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
   Node* BuildArgumentsObject(Variable* arguments);
 
   // Builders for variable load and assignment.
-  Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op);
+  Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op,
+                                BailoutId bailout_id);
   Node* BuildVariableDelete(Variable* var);
-  Node* BuildVariableLoad(Variable* var, ContextualMode mode = CONTEXTUAL);
+  Node* BuildVariableLoad(Variable* var, BailoutId bailout_id,
+                          ContextualMode mode = CONTEXTUAL);
 
   // Builders for accessing the function context.
   Node* BuildLoadBuiltinsObject();
@@ -170,6 +172,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
   void VisitForInAssignment(Expression* expr, Node* value);
 
   void BuildLazyBailout(Node* node, BailoutId ast_id);
+  void BuildLazyBailoutWithPushedNode(Node* node, BailoutId ast_id);
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
@@ -282,6 +285,7 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
   // Plug a node into this expression context. Call this function in tail
   // position in the Visit functions for expressions.
   virtual void ProduceValue(Node* value) = 0;
+  virtual void ProduceValueWithLazyBailout(Node* value) = 0;
 
   // Unplugs a node from this expression context. Call this to retrieve the
   // result of another Visit function that already plugged the context.
@@ -291,7 +295,8 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
   void ReplaceValue() { ProduceValue(ConsumeValue()); }
 
  protected:
-  AstContext(AstGraphBuilder* owner, Expression::Context kind);
+  AstContext(AstGraphBuilder* owner, Expression::Context kind,
+             BailoutId bailout_id);
   virtual ~AstContext();
 
   AstGraphBuilder* owner() const { return owner_; }
@@ -303,6 +308,8 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
   int original_height_;
 #endif
 
+  BailoutId bailout_id_;
+
  private:
   Expression::Context kind_;
   AstGraphBuilder* owner_;
@@ -313,10 +320,11 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
 // Context to evaluate expression for its side effects only.
 class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext {
  public:
-  explicit AstEffectContext(AstGraphBuilder* owner)
-      : AstContext(owner, Expression::kEffect) {}
+  explicit AstEffectContext(AstGraphBuilder* owner, BailoutId bailout_id)
+      : AstContext(owner, Expression::kEffect, bailout_id) {}
   virtual ~AstEffectContext();
   virtual void ProduceValue(Node* value) V8_OVERRIDE;
+  virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
   virtual Node* ConsumeValue() V8_OVERRIDE;
 };
@@ -324,10 +332,11 @@ class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext {
 // Context to evaluate expression for its value (and side effects).
 class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext {
  public:
-  explicit AstValueContext(AstGraphBuilder* owner)
-      : AstContext(owner, Expression::kValue) {}
+  explicit AstValueContext(AstGraphBuilder* owner, BailoutId bailout_id)
+      : AstContext(owner, Expression::kValue, bailout_id) {}
   virtual ~AstValueContext();
   virtual void ProduceValue(Node* value) V8_OVERRIDE;
+  virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
   virtual Node* ConsumeValue() V8_OVERRIDE;
 };
@@ -335,10 +344,11 @@ class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext {
 // Context to evaluate expression for a condition value (and side effects).
 class AstGraphBuilder::AstTestContext V8_FINAL : public AstContext {
  public:
-  explicit AstTestContext(AstGraphBuilder* owner)
-      : AstContext(owner, Expression::kTest) {}
+  explicit AstTestContext(AstGraphBuilder* owner, BailoutId bailout_id)
+      : AstContext(owner, Expression::kTest, bailout_id) {}
   virtual ~AstTestContext();
   virtual void ProduceValue(Node* value) V8_OVERRIDE;
+  virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
   virtual Node* ConsumeValue() V8_OVERRIDE;
 };
......
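Taken together, the header changes above thread a BailoutId through every expression context so that a context can plug a value and then attach the lazy-bailout point itself. A standalone sketch of that pattern with toy types (in the real code ProduceValueWithLazyBailout is virtual and context-specific; BuildLazyBailout here is a hypothetical stand-in for the graph builder's method):

    #include <cstdio>

    using BailoutId = int;
    struct Node { const char* label; };

    // Hypothetical stand-in for the graph builder's BuildLazyBailout().
    static void BuildLazyBailout(Node* node, BailoutId id) {
      std::printf("lazy bailout after %s at ast id %d\n", node->label, id);
    }

    // Each context remembers the bailout id it was created with, so call
    // sites need not re-supply it when plugging a value.
    class AstContext {
     public:
      explicit AstContext(BailoutId bailout_id) : bailout_id_(bailout_id) {}
      virtual ~AstContext() = default;
      virtual void ProduceValue(Node* value) = 0;
      void ProduceValueWithLazyBailout(Node* value) {
        ProduceValue(value);
        BuildLazyBailout(value, bailout_id_);
      }

     private:
      BailoutId bailout_id_;
    };

    class AstValueContext final : public AstContext {
     public:
      using AstContext::AstContext;
      void ProduceValue(Node* value) override {
        std::printf("pushed %s\n", value->label);
      }
    };

    int main() {
      Node add{"JSAdd"};
      AstValueContext ctx(/*bailout_id=*/42);
      ctx.ProduceValueWithLazyBailout(&add);  // push value, then record bailout
    }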
@@ -215,7 +215,9 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
   for (int i = 0; i < deopt_count; i++) {
     FrameStateDescriptor* descriptor = code()->GetDeoptimizationEntry(i);
     data->SetAstId(i, descriptor->bailout_id());
-    data->SetTranslationIndex(i, Smi::FromInt(0));
+    CHECK_NE(NULL, deoptimization_states_[i]);
+    data->SetTranslationIndex(
+        i, Smi::FromInt(deoptimization_states_[i]->translation_id_));
     data->SetArgumentsStackHeight(i, Smi::FromInt(0));
     data->SetPc(i, Smi::FromInt(-1));
   }
......
@@ -45,9 +45,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
   return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
-      this->info_->zone(), descriptor, stack_parameter_count);
+      zone, descriptor, stack_parameter_count, can_deoptimize);
 }
......
@@ -292,6 +292,14 @@ REPLACE_UNIMPLEMENTED(JSDebugger)
 #undef REPLACE_UNIMPLEMENTED
 
+static CallDescriptor::DeoptimizationSupport DeoptimizationSupportForNode(
+    Node* node) {
+  return OperatorProperties::CanLazilyDeoptimize(node->op())
+             ? CallDescriptor::kCanDeoptimize
+             : CallDescriptor::kCannotDeoptimize;
+}
+
+
 void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
                                              bool pure) {
   BinaryOpICStub stub(isolate(), Token::ADD);  // TODO(mstarzinger): Hack.
@@ -324,7 +332,8 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
 void JSGenericLowering::ReplaceWithICStubCall(Node* node,
                                               HydrogenCodeStub* stub) {
   CodeStubInterfaceDescriptor* d = stub->GetInterfaceDescriptor();
-  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d);
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+      d, 0, DeoptimizationSupportForNode(node));
   Node* stub_code = CodeConstant(stub->GetCode());
   PatchInsertInput(node, 0, stub_code);
   PatchOperator(node, common()->Call(desc));
@@ -355,12 +364,8 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
   Operator::Property props = node->op()->properties();
   const Runtime::Function* fun = Runtime::FunctionForId(f);
   int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
-  CallDescriptor::DeoptimizationSupport deopt =
-      OperatorProperties::CanLazilyDeoptimize(node->op())
-          ? CallDescriptor::kCanDeoptimize
-          : CallDescriptor::kCannotDeoptimize;
-  CallDescriptor* desc =
-      linkage()->GetRuntimeCallDescriptor(f, nargs, props, deopt);
+  CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
+      f, nargs, props, DeoptimizationSupportForNode(node));
   Node* ref = ExternalConstant(ExternalReference(f, isolate()));
   Node* arity = Int32Constant(nargs);
   if (!centrystub_constant_.is_set()) {
@@ -508,7 +513,8 @@ Node* JSGenericLowering::LowerJSCallConstruct(Node* node) {
   int arity = OpParameter<int>(node);
   CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
   CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
-  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, arity);
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+      d, arity, DeoptimizationSupportForNode(node));
   Node* stub_code = CodeConstant(stub.GetCode());
   Node* construct = NodeProperties::GetValueInput(node, 0);
   PatchInsertInput(node, 0, stub_code);
@@ -524,7 +530,8 @@ Node* JSGenericLowering::LowerJSCallFunction(Node* node) {
   CallParameters p = OpParameter<CallParameters>(node);
   CallFunctionStub stub(isolate(), p.arity - 2, p.flags);
   CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
-  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, p.arity - 1);
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+      d, p.arity - 1, DeoptimizationSupportForNode(node));
   Node* stub_code = CodeConstant(stub.GetCode());
   PatchInsertInput(node, 0, stub_code);
   PatchOperator(node, common()->Call(desc));
......
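All four lowering sites above now build their call descriptors through one helper instead of repeating the predicate-to-enum ternary, so stub, runtime, construct and function calls get the same answer for the same node. A reduced, self-contained sketch of that refactor (types simplified from V8's CallDescriptor machinery):

    #include <cstdio>

    enum class DeoptimizationSupport { kCanDeoptimize, kCannotDeoptimize };

    struct Operator { bool can_lazily_deoptimize; };
    struct Node { Operator* op; };

    // Stand-in for OperatorProperties::CanLazilyDeoptimize(node->op()).
    static bool CanLazilyDeoptimize(const Operator* op) {
      return op->can_lazily_deoptimize;
    }

    // One helper instead of an inline ternary at each lowering site.
    static DeoptimizationSupport DeoptimizationSupportForNode(Node* node) {
      return CanLazilyDeoptimize(node->op)
                 ? DeoptimizationSupport::kCanDeoptimize
                 : DeoptimizationSupport::kCannotDeoptimize;
    }

    int main() {
      Operator add{true};
      Node n{&add};
      bool can = DeoptimizationSupportForNode(&n) ==
                 DeoptimizationSupport::kCanDeoptimize;
      std::printf("%s\n", can ? "kCanDeoptimize" : "kCannotDeoptimize");
    }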
@@ -5,6 +5,7 @@
 #ifndef V8_COMPILER_JS_OPERATOR_H_
 #define V8_COMPILER_JS_OPERATOR_H_
 
+#include "src/compiler/linkage.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/unique.h"
......
@@ -128,7 +128,8 @@ class LinkageHelper {
   template <typename LinkageTraits>
   static CallDescriptor* GetStubCallDescriptor(
       Zone* zone, CodeStubInterfaceDescriptor* descriptor,
-      int stack_parameter_count) {
+      int stack_parameter_count,
+      CallDescriptor::DeoptimizationSupport can_deoptimize) {
     int register_parameter_count = descriptor->GetEnvironmentParameterCount();
     int parameter_count = register_parameter_count + stack_parameter_count;
     const int code_count = 1;
@@ -165,9 +166,8 @@ class LinkageHelper {
         locations,                // locations
         Operator::kNoProperties,  // properties
         kNoCalleeSaved,           // callee-saved registers
-        CallDescriptor::kCannotDeoptimize,  // deoptimization
+        can_deoptimize,           // deoptimization
         CodeStub::MajorName(descriptor->MajorKey(), false));
-    // TODO(jarin) should deoptimize!
   }
......
@@ -102,6 +102,14 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 }
 
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+    CallDescriptor::DeoptimizationSupport can_deoptimize) {
+  return GetStubCallDescriptor(descriptor, stack_parameter_count,
+                               can_deoptimize, this->info_->zone());
+}
+
+
 //==============================================================================
 // Provide unimplemented methods on unsupported architectures, to at least link.
 //==============================================================================
@@ -122,7 +130,8 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
   UNIMPLEMENTED();
   return NULL;
 }
......
@@ -147,8 +147,13 @@ class Linkage : public ZoneObject {
                                            Operator::Property properties,
                                            CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);
 
-  CallDescriptor* GetStubCallDescriptor(CodeStubInterfaceDescriptor* descriptor,
-                                        int stack_parameter_count = 0);
+  CallDescriptor* GetStubCallDescriptor(
+      CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count = 0,
+      CallDescriptor::DeoptimizationSupport can_deoptimize =
+          CallDescriptor::kCannotDeoptimize);
+  static CallDescriptor* GetStubCallDescriptor(
+      CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+      CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);
 
   // Creates a call descriptor for simplified C calls that is appropriate
   // for the host platform. This simplified calling convention only supports
......
@@ -7,6 +7,7 @@
 #include "src/v8.h"
 
 #include "src/compiler/js-operator.h"
 #include "src/compiler/opcodes.h"
+#include "src/compiler/operator-properties.h"
@@ -59,6 +60,10 @@ inline int OperatorProperties::GetControlInputCount(Operator* op) {
 #undef OPCODE_CASE
       return static_cast<ControlOperator*>(op)->ControlInputCount();
     default:
+      // If a node can lazily deoptimize, it needs control dependency.
+      if (CanLazilyDeoptimize(op)) {
+        return 1;
+      }
       // Operators that have write effects must have a control
       // dependency. Effect dependencies only ensure the correct order of
       // write/read operations without consideration of control flow. Without an
@@ -130,17 +135,52 @@ inline bool OperatorProperties::IsScheduleRoot(Operator* op) {
 }
 
 inline bool OperatorProperties::CanLazilyDeoptimize(Operator* op) {
-  if (op->opcode() == IrOpcode::kCall) {
-    CallOperator* call_op = reinterpret_cast<CallOperator*>(op);
-    CallDescriptor* descriptor = call_op->parameter();
-    return descriptor->CanLazilyDeoptimize();
-  }
-  if (op->opcode() == IrOpcode::kJSCallRuntime) {
-    // TODO(jarin) At the moment, we only support lazy deoptimization for
-    // the %DeoptimizeFunction runtime function.
-    Runtime::FunctionId function =
-        reinterpret_cast<Operator1<Runtime::FunctionId>*>(op)->parameter();
-    return function == Runtime::kDeoptimizeFunction;
-  }
+  // TODO(jarin) This function allows turning on lazy deoptimization
+  // incrementally. It will change as we turn on lazy deopt for
+  // more nodes.
+  if (!FLAG_turbo_deoptimization) {
+    return false;
+  }
+
+  switch (op->opcode()) {
+    case IrOpcode::kCall: {
+      CallOperator* call_op = reinterpret_cast<CallOperator*>(op);
+      CallDescriptor* descriptor = call_op->parameter();
+      return descriptor->CanLazilyDeoptimize();
+    }
+    case IrOpcode::kJSCallRuntime: {
+      Runtime::FunctionId function =
+          reinterpret_cast<Operator1<Runtime::FunctionId>*>(op)->parameter();
+      // TODO(jarin) At the moment, we only support lazy deoptimization for
+      // the %DeoptimizeFunction runtime function.
+      return function == Runtime::kDeoptimizeFunction;
+    }
+
+    // JS function calls
+    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSCallConstruct:
+
+    // Binary operations
+    case IrOpcode::kJSBitwiseOr:
+    case IrOpcode::kJSBitwiseXor:
+    case IrOpcode::kJSBitwiseAnd:
+    case IrOpcode::kJSShiftLeft:
+    case IrOpcode::kJSShiftRight:
+    case IrOpcode::kJSShiftRightLogical:
+    case IrOpcode::kJSAdd:
+    case IrOpcode::kJSSubtract:
+    case IrOpcode::kJSMultiply:
+    case IrOpcode::kJSDivide:
+    case IrOpcode::kJSModulus:
+
+    case IrOpcode::kJSLoadProperty:
+    case IrOpcode::kJSStoreProperty:
+    case IrOpcode::kJSLoadNamed:
+    case IrOpcode::kJSStoreNamed:
+      return true;
+
+    default:
+      return false;
+  }
   return false;
 }
......
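The rewritten CanLazilyDeoptimize is the heart of the commit: behind the --turbo-deoptimization flag, a switch whitelists exactly the opcodes with lazy-deopt support (calls, the binary operations, property and named loads/stores), and everything else, compares included, stays off; that is also why the DeoptimizeCompare skip survives in cctest.status below. A reduced standalone sketch of this incremental gating style, with a toy opcode set:

    #include <cstdio>

    static bool FLAG_turbo_deoptimization = false;

    enum class IrOpcode {
      kJSAdd, kJSSubtract, kJSLoadProperty, kJSStoreProperty, kJSEqual
    };

    // Reduced model of the whitelist: the default case keeps every opcode
    // not yet audited for lazy deopt turned off, so support can be grown
    // one case at a time.
    static bool CanLazilyDeoptimize(IrOpcode opcode) {
      if (!FLAG_turbo_deoptimization) return false;
      switch (opcode) {
        case IrOpcode::kJSAdd:
        case IrOpcode::kJSSubtract:
        case IrOpcode::kJSLoadProperty:
        case IrOpcode::kJSStoreProperty:
          return true;
        default:
          return false;  // e.g. compares such as kJSEqual are not listed yet
      }
    }

    int main() {
      FLAG_turbo_deoptimization = true;
      std::printf("%d %d\n", CanLazilyDeoptimize(IrOpcode::kJSAdd),   // 1
                  CanLazilyDeoptimize(IrOpcode::kJSEqual));           // 0
    }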
@@ -744,6 +744,70 @@ void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
       if (!AllocationOk()) return;
     }
   }
+
+  // Meet register constraints for the instruction in the end.
+  if (!code()->IsGapAt(end)) {
+    MeetRegisterConstraintsForLastInstructionInBlock(block);
+  }
 }
+
+
+void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
+    BasicBlock* block) {
+  int end = block->last_instruction_index();
+  Instruction* last_instruction = InstructionAt(end);
+  for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
+    InstructionOperand* output_operand = last_instruction->OutputAt(i);
+    DCHECK(!output_operand->IsConstant());
+    UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
+    int output_vreg = output->virtual_register();
+    LiveRange* range = LiveRangeFor(output_vreg);
+    bool assigned = false;
+    if (output->HasFixedPolicy()) {
+      AllocateFixed(output, -1, false);
+      // This value is produced on the stack, we never need to spill it.
+      if (output->IsStackSlot()) {
+        range->SetSpillOperand(output);
+        range->SetSpillStartIndex(end);
+        assigned = true;
+      }
+
+      BasicBlock::Successors successors = block->successors();
+      for (BasicBlock::Successors::iterator succ = successors.begin();
+           succ != successors.end(); ++succ) {
+        DCHECK((*succ)->PredecessorCount() == 1);
+        int gap_index = (*succ)->first_instruction_index() + 1;
+        DCHECK(code()->IsGapAt(gap_index));
+
+        // Create an unconstrained operand for the same virtual register
+        // and insert a gap move from the fixed output to the operand.
+        UnallocatedOperand* output_copy =
+            new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+        output_copy->set_virtual_register(output_vreg);
+
+        code()->AddGapMove(gap_index, output, output_copy);
+      }
+    }
+
+    if (!assigned) {
+      BasicBlock::Successors successors = block->successors();
+      for (BasicBlock::Successors::iterator succ = successors.begin();
+           succ != successors.end(); ++succ) {
+        DCHECK((*succ)->PredecessorCount() == 1);
+        int gap_index = (*succ)->first_instruction_index() + 1;
+        range->SetSpillStartIndex(gap_index);
+
+        // This move to spill operand is not a real use. Liveness analysis
+        // and splitting of live ranges do not account for it.
+        // Thus it should be inserted to a lifetime position corresponding to
+        // the instruction end.
+        GapInstruction* gap = code()->GapAt(gap_index);
+        ParallelMove* move =
+            gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
+        move->AddMove(output, range->GetSpillOperand(), code_zone());
+      }
+    }
+  }
+}
@@ -786,6 +850,8 @@ void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
       code()->AddGapMove(gap_index, first_output, output_copy);
     }
 
+    // Make sure we add a gap move for spilling (if we have not done
+    // so already).
     if (!assigned) {
       range->SetSpillStartIndex(gap_index);
......
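The new MeetRegisterConstraintsForLastInstructionInBlock exists because a lazily deoptimizing call can now end a block while producing a fixed output (for example, the return register): that value has to be made available to every successor, so an unconstrained copy of the virtual register is inserted at each successor's first gap. A heavily simplified toy model of that copy insertion (hypothetical structs, not V8's ParallelMove machinery; single-predecessor successors as the DCHECKs above require):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy model: a "gap move" copies one named operand to another at a
    // given instruction index; real V8 uses ParallelMoves in gaps.
    struct GapMove {
      int gap_index;
      std::string from, to;
    };

    struct Block {
      int first_instruction_index;
      std::vector<Block*> successors;
    };

    // For a fixed output produced by the last instruction of `block`,
    // insert a move into an unconstrained virtual register at the start
    // of each successor.
    static void CopyFixedOutputToSuccessors(const Block& block,
                                            const std::string& fixed_output,
                                            int output_vreg,
                                            std::vector<GapMove>* moves) {
      for (Block* succ : block.successors) {
        int gap_index = succ->first_instruction_index + 1;
        moves->push_back(
            {gap_index, fixed_output, "v" + std::to_string(output_vreg)});
      }
    }

    int main() {
      Block b1{10, {}}, b2{20, {}}, end{0, {&b1, &b2}};
      std::vector<GapMove> moves;
      CopyFixedOutputToSuccessors(end, "rax", 7, &moves);
      for (const GapMove& m : moves)
        std::printf("gap %d: %s -> %s\n", m.gap_index, m.from.c_str(),
                    m.to.c_str());
    }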
@@ -391,6 +391,7 @@ class RegisterAllocator BASE_EMBEDDED {
   void MeetRegisterConstraints(BasicBlock* block);
   void MeetConstraintsBetween(Instruction* first, Instruction* second,
                               int gap_index);
+  void MeetRegisterConstraintsForLastInstructionInBlock(BasicBlock* block);
   void ResolvePhis(BasicBlock* block);
 
   // Helper methods for building intervals.
......
@@ -64,9 +64,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
   return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
-      this->info_->zone(), descriptor, stack_parameter_count);
+      zone, descriptor, stack_parameter_count, can_deoptimize);
 }
......
@@ -447,8 +447,11 @@ static int FindPatchAddressForReturnAddress(Code* code, int pc) {
   int patch_count = input_data->ReturnAddressPatchCount();
   for (int i = 0; i < patch_count; i++) {
     int return_pc = input_data->ReturnAddressPc(i)->value();
-    if (pc == return_pc) {
-      return input_data->PatchedAddressPc(i)->value();
+    int patch_pc = input_data->PatchedAddressPc(i)->value();
+    // If the supplied pc matches the return pc or if the address
+    // has been already patched, return the patch pc.
+    if (pc == return_pc || pc == patch_pc) {
+      return patch_pc;
     }
   }
   return -1;
......
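The guard above makes the patch-table lookup idempotent: a return address that has already been redirected still resolves to its patch address instead of falling through to -1. A small self-contained model of the lookup:

    #include <cstdio>
    #include <vector>

    // Hypothetical pair mirroring one row of the return-address patch table.
    struct PatchRow {
      int return_pc;   // original pc after the call
      int patched_pc;  // pc the return address was redirected to
    };

    // Accept either the original or the already-patched pc, as the fixed
    // deoptimizer lookup above does.
    static int FindPatchAddress(const std::vector<PatchRow>& rows, int pc) {
      for (const PatchRow& r : rows) {
        if (pc == r.return_pc || pc == r.patched_pc) return r.patched_pc;
      }
      return -1;
    }

    int main() {
      std::vector<PatchRow> rows = {{100, 400}, {160, 420}};
      std::printf("%d %d %d\n", FindPatchAddress(rows, 100),  // 400
                  FindPatchAddress(rows, 400),                // 400, idempotent
                  FindPatchAddress(rows, 50));                // -1
    }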
@@ -10726,7 +10726,26 @@ int Code::SourceStatementPosition(Address pc) {
 SafepointEntry Code::GetSafepointEntry(Address pc) {
   SafepointTable table(this);
-  return table.FindEntry(pc);
+  SafepointEntry entry = table.FindEntry(pc);
+  if (entry.is_valid() || !is_turbofanned()) {
+    return entry;
+  }
+
+  // If the code is turbofanned, we might be looking for
+  // an address that was patched by lazy deoptimization.
+  // In that case look through the patch table, try to
+  // lookup the original address there, and then use this
+  // to find the safepoint entry.
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(deoptimization_data());
+  intptr_t offset = pc - instruction_start();
+  for (int i = 0; i < deopt_data->ReturnAddressPatchCount(); i++) {
+    if (deopt_data->PatchedAddressPc(i)->value() == offset) {
+      int original_offset = deopt_data->ReturnAddressPc(i)->value();
+      return table.FindEntry(instruction_start() + original_offset);
+    }
+  }
+  return SafepointEntry();
 }
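Safepoints are keyed by the original return pcs, so once lazy deoptimization has patched a return address the direct probe misses; the fallback above maps the patched offset back to the original one and retries. A self-contained model of that two-step lookup (toy tables standing in for SafepointTable and DeoptimizationInputData):

    #include <cstdio>
    #include <map>
    #include <vector>

    struct PatchRow { int return_pc; int patched_pc; };

    // Toy safepoint table keyed by the ORIGINAL return pc.
    static int FindSafepoint(const std::map<int, int>& safepoints, int pc) {
      auto it = safepoints.find(pc);
      return it == safepoints.end() ? -1 : it->second;
    }

    // Lookup with the lazy-deopt fallback: if the direct probe misses, try
    // to interpret pc as a patched address and retry with the original pc.
    static int GetSafepointEntry(const std::map<int, int>& safepoints,
                                 const std::vector<PatchRow>& patches,
                                 int pc) {
      int entry = FindSafepoint(safepoints, pc);
      if (entry != -1) return entry;
      for (const PatchRow& p : patches) {
        if (p.patched_pc == pc) return FindSafepoint(safepoints, p.return_pc);
      }
      return -1;
    }

    int main() {
      std::map<int, int> safepoints = {{100, 1}, {160, 2}};  // pc -> entry id
      std::vector<PatchRow> patches = {{160, 420}};
      std::printf("%d %d\n", GetSafepointEntry(safepoints, patches, 100),  // 1
                  GetSafepointEntry(safepoints, patches, 420));            // 2
    }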
@@ -11128,7 +11147,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
   os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
   if (0 != deopt_count) {
     os << " index ast id argc pc";
-    if (FLAG_print_code_verbose) os << "commands";
+    if (FLAG_print_code_verbose) os << " commands";
     os << "\n";
   }
   for (int i = 0; i < deopt_count; i++) {
@@ -11158,7 +11177,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
          Translation::BEGIN !=
              (opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
     Vector<char> buf2 = Vector<char>::New(128);
-    SNPrintF(buf2, "%24s %s ", "", Translation::StringFor(opcode));
+    SNPrintF(buf2, "%27s %s ", "", Translation::StringFor(opcode));
     os << buf2.start();
     switch (opcode) {
@@ -11284,11 +11303,11 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
   if (return_address_patch_count != 0) {
     os << "Return address patch data (count = " << return_address_patch_count
        << ")\n";
-    os << "index pc patched_pc\n";
+    os << " index pc patched_pc\n";
   }
   for (int i = 0; i < return_address_patch_count; i++) {
     Vector<char> buf = Vector<char>::New(128);
-    SNPrintF(buf, "%6d %6d %10d", i, ReturnAddressPc(i)->value(),
+    SNPrintF(buf, "%6d %6d %12d\n", i, ReturnAddressPc(i)->value(),
              PatchedAddressPc(i)->value());
     os << buf.start();
   }
......
@@ -107,13 +107,7 @@
   'test-debug/DebugBreakLoop': [PASS, NO_VARIANTS],
 
   # Support for lazy deoptimization is missing.
-  'test-deoptimization/DeoptimizeSimple': [PASS, NO_VARIANTS],
-  'test-deoptimization/DeoptimizeSimpleNested': [PASS, NO_VARIANTS],
-  'test-deoptimization/DeoptimizeSimpleWithArguments': [PASS, NO_VARIANTS],
-  'test-deoptimization/DeoptimizeBinaryOperation*': [PASS, NO_VARIANTS],
   'test-deoptimization/DeoptimizeCompare': [PASS, NO_VARIANTS],
-  'test-deoptimization/DeoptimizeLoadICStoreIC': [PASS, NO_VARIANTS],
-  'test-deoptimization/DeoptimizeLoadICStoreICNested': [PASS, NO_VARIANTS],
 
   # Support for breakpoints requires using LoadICs and StoreICs.
   'test-debug/BreakPointICStore': [PASS, NO_VARIANTS],
......
@@ -1713,6 +1713,8 @@ static Handle<JSFunction> Compile(const char* source) {
 TEST(BuildScheduleTrivialLazyDeoptCall) {
+  FLAG_turbo_deoptimization = true;
+
   HandleAndZoneScope scope;
   Isolate* isolate = scope.main_isolate();
   Graph graph(scope.main_zone());
......
@@ -113,6 +113,8 @@ static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
 TEST(DeoptimizeSimple) {
+  i::FLAG_turbo_deoptimization = true;
+
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -151,6 +153,8 @@ TEST(DeoptimizeSimple) {
 TEST(DeoptimizeSimpleWithArguments) {
+  i::FLAG_turbo_deoptimization = true;
+
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -190,6 +194,8 @@ TEST(DeoptimizeSimpleWithArguments) {
 TEST(DeoptimizeSimpleNested) {
+  i::FLAG_turbo_deoptimization = true;
+
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -215,6 +221,7 @@ TEST(DeoptimizeSimpleNested) {
 TEST(DeoptimizeRecursive) {
+  i::FLAG_turbo_deoptimization = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -242,6 +249,7 @@ TEST(DeoptimizeRecursive) {
 TEST(DeoptimizeMultiple) {
+  i::FLAG_turbo_deoptimization = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -270,6 +278,7 @@ TEST(DeoptimizeMultiple) {
 TEST(DeoptimizeConstructor) {
+  i::FLAG_turbo_deoptimization = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -308,6 +317,7 @@ TEST(DeoptimizeConstructor) {
 TEST(DeoptimizeConstructorMultiple) {
+  i::FLAG_turbo_deoptimization = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -337,6 +347,7 @@ TEST(DeoptimizeConstructorMultiple) {
 TEST(DeoptimizeBinaryOperationADDString) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   AllowNativesSyntaxNoInlining options;
   LocalContext env;
@@ -428,6 +439,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
 TEST(DeoptimizeBinaryOperationADD) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -441,6 +453,7 @@ TEST(DeoptimizeBinaryOperationADD) {
 TEST(DeoptimizeBinaryOperationSUB) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -454,6 +467,7 @@ TEST(DeoptimizeBinaryOperationSUB) {
 TEST(DeoptimizeBinaryOperationMUL) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -467,6 +481,7 @@ TEST(DeoptimizeBinaryOperationMUL) {
 TEST(DeoptimizeBinaryOperationDIV) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -480,6 +495,7 @@ TEST(DeoptimizeBinaryOperationDIV) {
 TEST(DeoptimizeBinaryOperationMOD) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -493,6 +509,7 @@ TEST(DeoptimizeBinaryOperationMOD) {
 TEST(DeoptimizeCompare) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -537,6 +554,7 @@ TEST(DeoptimizeCompare) {
 TEST(DeoptimizeLoadICStoreIC) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -617,6 +635,7 @@ TEST(DeoptimizeLoadICStoreIC) {
 TEST(DeoptimizeLoadICStoreICNested) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
......