Commit c6b122e8 authored by yangguo, committed by Commit bot

Revert of [WIP][turbofan] Instruction scheduler for Turbofan. (patchset #7 id:120001 of https://codereview.chromium.org/1375253002/ )

Reason for revert:
Does not compile

https://build.chromium.org/p/client.v8/builders/V8%20Arm%20-%20debug%20builder/builds/6870/steps/compile/logs/stdio

Original issue's description:
> [turbofan] Instruction scheduler for Turbofan.
>
> Implement machine instruction scheduling after instruction selection.
>
> Currently only works for arm64.
>
> R=danno@chromium.org, bmeurer@chromium.org, titzer@chromium.org
>
> Committed: https://crrev.com/e11bba3acd5188f0e12686b6fcf3e0ab22989216
> Cr-Commit-Position: refs/heads/master@{#32858}

TBR=jarin@chromium.org,bmeurer@chromium.org,baptiste.afsa@arm.com
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/1526913002

Cr-Commit-Position: refs/heads/master@{#32860}
parent 44e401f1
BUILD.gn
@@ -750,8 +750,6 @@ source_set("v8_base") {
     "src/compiler/greedy-allocator.cc",
     "src/compiler/greedy-allocator.h",
     "src/compiler/instruction-codes.h",
-    "src/compiler/instruction-scheduler.cc",
-    "src/compiler/instruction-scheduler.h",
     "src/compiler/instruction-selector-impl.h",
     "src/compiler/instruction-selector.cc",
     "src/compiler/instruction-selector.h",
@@ -1298,7 +1296,6 @@ source_set("v8_base") {
     "src/crankshaft/ia32/lithium-ia32.h",
     "src/compiler/ia32/code-generator-ia32.cc",
     "src/compiler/ia32/instruction-codes-ia32.h",
-    "src/compiler/ia32/instruction-scheduler-ia32.cc",
     "src/compiler/ia32/instruction-selector-ia32.cc",
     "src/debug/ia32/debug-ia32.cc",
     "src/full-codegen/ia32/full-codegen-ia32.cc",
@@ -1330,7 +1327,6 @@ source_set("v8_base") {
   sources += [
     "src/compiler/x64/code-generator-x64.cc",
     "src/compiler/x64/instruction-codes-x64.h",
-    "src/compiler/x64/instruction-scheduler-x64.cc",
     "src/compiler/x64/instruction-selector-x64.cc",
     "src/crankshaft/x64/lithium-codegen-x64.cc",
     "src/crankshaft/x64/lithium-codegen-x64.h",
@@ -1389,7 +1385,6 @@ source_set("v8_base") {
     "src/arm/simulator-arm.h",
     "src/compiler/arm/code-generator-arm.cc",
     "src/compiler/arm/instruction-codes-arm.h",
-    "src/compiler/arm/instruction-scheduler-arm.cc",
     "src/compiler/arm/instruction-selector-arm.cc",
     "src/crankshaft/arm/lithium-arm.cc",
     "src/crankshaft/arm/lithium-arm.h",
@@ -1442,7 +1437,6 @@ source_set("v8_base") {
     "src/arm64/utils-arm64.h",
     "src/compiler/arm64/code-generator-arm64.cc",
     "src/compiler/arm64/instruction-codes-arm64.h",
-    "src/compiler/arm64/instruction-scheduler-arm64.cc",
     "src/compiler/arm64/instruction-selector-arm64.cc",
     "src/crankshaft/arm64/delayed-masm-arm64.cc",
     "src/crankshaft/arm64/delayed-masm-arm64.h",
@@ -1467,7 +1461,6 @@ source_set("v8_base") {
   sources += [
     "src/compiler/mips/code-generator-mips.cc",
     "src/compiler/mips/instruction-codes-mips.h",
-    "src/compiler/mips/instruction-scheduler-mips.cc",
     "src/compiler/mips/instruction-selector-mips.cc",
     "src/crankshaft/mips/lithium-codegen-mips.cc",
     "src/crankshaft/mips/lithium-codegen-mips.h",
@@ -1509,7 +1502,6 @@ source_set("v8_base") {
   sources += [
     "compiler/mips64/code-generator-mips64.cc",
     "compiler/mips64/instruction-codes-mips64.h",
-    "compiler/mips64/instruction-scheduler-mips64.cc",
     "compiler/mips64/instruction-selector-mips64.cc",
     "src/crankshaft/mips64/lithium-codegen-mips64.cc",
     "src/crankshaft/mips64/lithium-codegen-mips64.h",
...

src/compiler/arm/code-generator-arm.cc
@@ -490,7 +490,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchNop:
-    case kArchThrowTerminator:
       // don't emit code for nops.
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
...

src/compiler/arm/instruction-scheduler-arm.cc (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kArmAdd:
case kArmAnd:
case kArmBic:
case kArmClz:
case kArmCmp:
case kArmCmn:
case kArmTst:
case kArmTeq:
case kArmOrr:
case kArmEor:
case kArmSub:
case kArmRsb:
case kArmMul:
case kArmMla:
case kArmMls:
case kArmSmmul:
case kArmSmmla:
case kArmUmull:
case kArmSdiv:
case kArmUdiv:
case kArmMov:
case kArmMvn:
case kArmBfc:
case kArmUbfx:
case kArmSxtb:
case kArmSxth:
case kArmSxtab:
case kArmSxtah:
case kArmUxtb:
case kArmUxth:
case kArmUxtab:
case kArmUxtah:
case kArmVcmpF32:
case kArmVaddF32:
case kArmVsubF32:
case kArmVmulF32:
case kArmVmlaF32:
case kArmVmlsF32:
case kArmVdivF32:
case kArmVabsF32:
case kArmVnegF32:
case kArmVsqrtF32:
case kArmVcmpF64:
case kArmVaddF64:
case kArmVsubF64:
case kArmVmulF64:
case kArmVmlaF64:
case kArmVmlsF64:
case kArmVdivF64:
case kArmVmodF64:
case kArmVabsF64:
case kArmVnegF64:
case kArmVsqrtF64:
case kArmVrintmF32:
case kArmVrintmF64:
case kArmVrintpF32:
case kArmVrintpF64:
case kArmVrintzF32:
case kArmVrintzF64:
case kArmVrintaF64:
case kArmVrintnF32:
case kArmVrintnF64:
case kArmVcvtF32F64:
case kArmVcvtF64F32:
case kArmVcvtF64S32:
case kArmVcvtF64U32:
case kArmVcvtS32F64:
case kArmVcvtU32F64:
case kArmVmovLowU32F64:
case kArmVmovLowF64U32:
case kArmVmovHighU32F64:
case kArmVmovHighF64U32:
case kArmVmovF64U32U32:
return kNoOpcodeFlags;
case kArmVldrF32:
case kArmVldrF64:
case kArmLdrb:
case kArmLdrsb:
case kArmLdrh:
case kArmLdrsh:
case kArmLdr:
return kIsLoadOperation;
case kArmVstrF32:
case kArmVstrF64:
case kArmStrb:
case kArmStrh:
case kArmStr:
case kArmPush:
case kArmPoke:
return kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture-independent code.
UNREACHABLE();
}
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// TODO(all): Add instruction cost modeling.
return 1;
}
} // namespace compiler
} // namespace internal
} // namespace v8
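A side note on the #define CASE(Name) case k##Name: pattern used in each of these per-architecture files: COMMON_ARCH_OPCODE_LIST is an X-macro, so passing it CASE stamps out one case label per architecture-independent opcode, letting the target file route all of those opcodes to the single UNREACHABLE() at the end of the switch above. Below is a minimal stand-alone sketch of the technique; the opcode names and the OpcodeName() helper are invented for illustration and are not V8 code.

#include <iostream>

// An X-macro list: each client decides what V(Name) expands to.
#define COMMON_OPCODE_LIST(V) \
  V(Nop)                      \
  V(Jmp)                      \
  V(Ret)

// Expand the list once into an enum...
#define DECLARE(Name) k##Name,
enum Opcode { COMMON_OPCODE_LIST(DECLARE) };
#undef DECLARE

// ...and expand the same list again into switch cases.
const char* OpcodeName(Opcode op) {
  switch (op) {
#define CASE(Name) \
  case k##Name:    \
    return #Name;
    COMMON_OPCODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() {
  std::cout << OpcodeName(kJmp) << "\n";  // prints "Jmp"
}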
src/compiler/arm64/code-generator-arm64.cc
@@ -589,7 +589,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       AssembleArchLookupSwitch(instr);
       break;
     case kArchNop:
-    case kArchThrowTerminator:
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
...

src/compiler/arm64/instruction-scheduler-arm64.cc (deleted)
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kArm64Add:
case kArm64Add32:
case kArm64And:
case kArm64And32:
case kArm64Bic:
case kArm64Bic32:
case kArm64Clz:
case kArm64Clz32:
case kArm64Cmp:
case kArm64Cmp32:
case kArm64Cmn:
case kArm64Cmn32:
case kArm64Tst:
case kArm64Tst32:
case kArm64Or:
case kArm64Or32:
case kArm64Orn:
case kArm64Orn32:
case kArm64Eor:
case kArm64Eor32:
case kArm64Eon:
case kArm64Eon32:
case kArm64Sub:
case kArm64Sub32:
case kArm64Mul:
case kArm64Mul32:
case kArm64Smull:
case kArm64Umull:
case kArm64Madd:
case kArm64Madd32:
case kArm64Msub:
case kArm64Msub32:
case kArm64Mneg:
case kArm64Mneg32:
case kArm64Idiv:
case kArm64Idiv32:
case kArm64Udiv:
case kArm64Udiv32:
case kArm64Imod:
case kArm64Imod32:
case kArm64Umod:
case kArm64Umod32:
case kArm64Not:
case kArm64Not32:
case kArm64Lsl:
case kArm64Lsl32:
case kArm64Lsr:
case kArm64Lsr32:
case kArm64Asr:
case kArm64Asr32:
case kArm64Ror:
case kArm64Ror32:
case kArm64Mov32:
case kArm64Sxtb32:
case kArm64Sxth32:
case kArm64Sxtw:
case kArm64Sbfx32:
case kArm64Ubfx:
case kArm64Ubfx32:
case kArm64Ubfiz32:
case kArm64Bfi:
case kArm64Float32Cmp:
case kArm64Float32Add:
case kArm64Float32Sub:
case kArm64Float32Mul:
case kArm64Float32Div:
case kArm64Float32Max:
case kArm64Float32Min:
case kArm64Float32Abs:
case kArm64Float32Sqrt:
case kArm64Float32RoundDown:
case kArm64Float64Cmp:
case kArm64Float64Add:
case kArm64Float64Sub:
case kArm64Float64Mul:
case kArm64Float64Div:
case kArm64Float64Mod:
case kArm64Float64Max:
case kArm64Float64Min:
case kArm64Float64Abs:
case kArm64Float64Neg:
case kArm64Float64Sqrt:
case kArm64Float64RoundDown:
case kArm64Float64RoundTiesAway:
case kArm64Float64RoundTruncate:
case kArm64Float64RoundTiesEven:
case kArm64Float64RoundUp:
case kArm64Float32RoundTiesEven:
case kArm64Float32RoundTruncate:
case kArm64Float32RoundUp:
case kArm64Float32ToFloat64:
case kArm64Float64ToFloat32:
case kArm64Float64ToInt32:
case kArm64Float64ToUint32:
case kArm64Float32ToInt64:
case kArm64Float64ToInt64:
case kArm64Float32ToUint64:
case kArm64Float64ToUint64:
case kArm64Int32ToFloat64:
case kArm64Int64ToFloat32:
case kArm64Int64ToFloat64:
case kArm64Uint32ToFloat64:
case kArm64Uint64ToFloat32:
case kArm64Uint64ToFloat64:
case kArm64Float64ExtractLowWord32:
case kArm64Float64ExtractHighWord32:
case kArm64Float64InsertLowWord32:
case kArm64Float64InsertHighWord32:
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
return kNoOpcodeFlags;
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
return kIsBlockTerminator;
case kArm64LdrS:
case kArm64LdrD:
case kArm64Ldrb:
case kArm64Ldrsb:
case kArm64Ldrh:
case kArm64Ldrsh:
case kArm64LdrW:
case kArm64Ldr:
return kIsLoadOperation;
case kArm64ClaimForCallArguments:
case kArm64Poke:
case kArm64PokePair:
case kArm64StrS:
case kArm64StrD:
case kArm64Strb:
case kArm64Strh:
case kArm64StrW:
case kArm64Str:
return kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture-independent code.
UNREACHABLE();
}
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for arm64 instructions. The latencies have been
// determined empirically.
switch (instr->arch_opcode()) {
case kArm64Float32ToFloat64:
case kArm64Float64ToFloat32:
case kArm64Float64ToInt32:
case kArm64Float64ToUint32:
case kArm64Int32ToFloat64:
case kArm64Uint32ToFloat64:
return 3;
case kArm64Float64Add:
case kArm64Float64Sub:
return 2;
case kArm64Float64Mul:
return 3;
case kArm64Float64Div:
return 6;
case kArm64Lsl:
case kArm64Lsl32:
case kArm64Lsr:
case kArm64Lsr32:
case kArm64Asr:
case kArm64Asr32:
case kArm64Ror:
case kArm64Ror32:
return 3;
case kCheckedLoadInt8:
case kCheckedLoadUint8:
case kCheckedLoadInt16:
case kCheckedLoadUint16:
case kCheckedLoadWord32:
case kCheckedLoadWord64:
case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
case kArm64LdrS:
case kArm64LdrD:
case kArm64Ldrb:
case kArm64Ldrsb:
case kArm64Ldrh:
case kArm64Ldrsh:
case kArm64LdrW:
case kArm64Ldr:
return 5;
default:
return 1;
}
}
} // namespace compiler
} // namespace internal
} // namespace v8
src/compiler/ia32/code-generator-ia32.cc
@@ -451,7 +451,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       AssembleArchTableSwitch(instr);
       break;
     case kArchNop:
-    case kArchThrowTerminator:
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
...

src/compiler/ia32/instruction-scheduler-ia32.cc (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kIA32Add:
case kIA32And:
case kIA32Cmp:
case kIA32Test:
case kIA32Or:
case kIA32Xor:
case kIA32Sub:
case kIA32Imul:
case kIA32ImulHigh:
case kIA32UmulHigh:
case kIA32Idiv:
case kIA32Udiv:
case kIA32Not:
case kIA32Neg:
case kIA32Shl:
case kIA32Shr:
case kIA32Sar:
case kIA32Ror:
case kIA32Lzcnt:
case kIA32Tzcnt:
case kIA32Popcnt:
case kIA32Lea:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
case kSSEFloat32Sub:
case kSSEFloat32Mul:
case kSSEFloat32Div:
case kSSEFloat32Max:
case kSSEFloat32Min:
case kSSEFloat32Abs:
case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat64Cmp:
case kSSEFloat64Add:
case kSSEFloat64Sub:
case kSSEFloat64Mul:
case kSSEFloat64Div:
case kSSEFloat64Mod:
case kSSEFloat64Max:
case kSSEFloat64Min:
case kSSEFloat64Abs:
case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat32ToFloat64:
case kSSEFloat64ToFloat32:
case kSSEFloat64ToInt32:
case kSSEFloat64ToUint32:
case kSSEInt32ToFloat64:
case kSSEUint32ToFloat64:
case kSSEFloat64ExtractLowWord32:
case kSSEFloat64ExtractHighWord32:
case kSSEFloat64InsertLowWord32:
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
case kAVXFloat32Add:
case kAVXFloat32Sub:
case kAVXFloat32Mul:
case kAVXFloat32Div:
case kAVXFloat32Max:
case kAVXFloat32Min:
case kAVXFloat64Add:
case kAVXFloat64Sub:
case kAVXFloat64Mul:
case kAVXFloat64Div:
case kAVXFloat64Max:
case kAVXFloat64Min:
case kAVXFloat64Abs:
case kAVXFloat64Neg:
case kAVXFloat32Abs:
case kAVXFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
case kIA32Movsxbl:
case kIA32Movzxbl:
case kIA32Movb:
case kIA32Movsxwl:
case kIA32Movzxwl:
case kIA32Movw:
case kIA32Movl:
case kIA32Movss:
case kIA32Movsd:
// Moves are used for memory load/store operations.
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kIA32StackCheck:
return kIsLoadOperation;
case kIA32Push:
case kIA32Poke:
return kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture-independent code.
UNREACHABLE();
}
UNREACHABLE();
return kNoOpcodeFlags;
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// TODO(all): Add instruction cost modeling.
return 1;
}
} // namespace compiler
} // namespace internal
} // namespace v8
src/compiler/instruction-codes.h
@@ -39,43 +39,39 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

 // Target-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
-#define COMMON_ARCH_OPCODE_LIST(V) \
-  V(ArchCallCodeObject)            \
-  V(ArchTailCallCodeObject)        \
-  V(ArchCallJSFunction)            \
-  V(ArchTailCallJSFunction)        \
-  V(ArchPrepareCallCFunction)      \
-  V(ArchCallCFunction)             \
-  V(ArchPrepareTailCall)           \
-  V(ArchLazyBailout)               \
-  V(ArchJmp)                       \
-  V(ArchLookupSwitch)              \
-  V(ArchTableSwitch)               \
-  V(ArchNop)                       \
-  V(ArchThrowTerminator)           \
-  V(ArchDeoptimize)                \
-  V(ArchRet)                       \
-  V(ArchStackPointer)              \
-  V(ArchFramePointer)              \
-  V(ArchTruncateDoubleToI)         \
-  V(ArchStoreWithWriteBarrier)     \
-  V(CheckedLoadInt8)               \
-  V(CheckedLoadUint8)              \
-  V(CheckedLoadInt16)              \
-  V(CheckedLoadUint16)             \
-  V(CheckedLoadWord32)             \
-  V(CheckedLoadWord64)             \
-  V(CheckedLoadFloat32)            \
-  V(CheckedLoadFloat64)            \
-  V(CheckedStoreWord8)             \
-  V(CheckedStoreWord16)            \
-  V(CheckedStoreWord32)            \
-  V(CheckedStoreWord64)            \
-  V(CheckedStoreFloat32)           \
-  V(CheckedStoreFloat64)
-
-#define ARCH_OPCODE_LIST(V)  \
-  COMMON_ARCH_OPCODE_LIST(V) \
+#define ARCH_OPCODE_LIST(V)    \
+  V(ArchCallCodeObject)        \
+  V(ArchTailCallCodeObject)    \
+  V(ArchCallJSFunction)        \
+  V(ArchTailCallJSFunction)    \
+  V(ArchPrepareCallCFunction)  \
+  V(ArchCallCFunction)         \
+  V(ArchPrepareTailCall)       \
+  V(ArchLazyBailout)           \
+  V(ArchJmp)                   \
+  V(ArchLookupSwitch)          \
+  V(ArchTableSwitch)           \
+  V(ArchNop)                   \
+  V(ArchDeoptimize)            \
+  V(ArchRet)                   \
+  V(ArchStackPointer)          \
+  V(ArchFramePointer)          \
+  V(ArchTruncateDoubleToI)     \
+  V(ArchStoreWithWriteBarrier) \
+  V(CheckedLoadInt8)           \
+  V(CheckedLoadUint8)          \
+  V(CheckedLoadInt16)          \
+  V(CheckedLoadUint16)         \
+  V(CheckedLoadWord32)         \
+  V(CheckedLoadWord64)         \
+  V(CheckedLoadFloat32)        \
+  V(CheckedLoadFloat64)        \
+  V(CheckedStoreWord8)         \
+  V(CheckedStoreWord16)        \
+  V(CheckedStoreWord32)        \
+  V(CheckedStoreWord64)        \
+  V(CheckedStoreFloat32)       \
+  V(CheckedStoreFloat64)       \
   TARGET_ARCH_OPCODE_LIST(V)

 enum ArchOpcode {
...

src/compiler/instruction-scheduler.cc (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
#include "src/base/adapters.h"
namespace v8 {
namespace internal {
namespace compiler {
InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
Zone* zone,
Instruction* instr)
: instr_(instr),
successors_(zone),
unscheduled_predecessors_count_(0),
latency_(GetInstructionLatency(instr)),
total_latency_(-1),
start_cycle_(-1) {
}
void InstructionScheduler::ScheduleGraphNode::AddSuccessor(
ScheduleGraphNode* node) {
successors_.push_back(node);
node->unscheduled_predecessors_count_++;
}
InstructionScheduler::InstructionScheduler(Zone* zone,
InstructionSequence* sequence)
: zone_(zone),
sequence_(sequence),
graph_(zone),
last_side_effect_instr_(nullptr),
pending_loads_(zone),
last_live_in_reg_marker_(nullptr) {
}
void InstructionScheduler::StartBlock(RpoNumber rpo) {
DCHECK(graph_.empty());
DCHECK(last_side_effect_instr_ == nullptr);
DCHECK(pending_loads_.empty());
DCHECK(last_live_in_reg_marker_ == nullptr);
sequence()->StartBlock(rpo);
}
void InstructionScheduler::EndBlock(RpoNumber rpo) {
ScheduleBlock();
sequence()->EndBlock(rpo);
graph_.clear();
last_side_effect_instr_ = nullptr;
pending_loads_.clear();
last_live_in_reg_marker_ = nullptr;
}
void InstructionScheduler::AddInstruction(Instruction* instr) {
ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
if (IsBlockTerminator(instr)) {
// Make sure that basic block terminators are not moved by adding them
// as a successor of every instruction.
for (auto node : graph_) {
node->AddSuccessor(new_node);
}
} else if (IsFixedRegisterParameter(instr)) {
if (last_live_in_reg_marker_ != nullptr) {
last_live_in_reg_marker_->AddSuccessor(new_node);
}
last_live_in_reg_marker_ = new_node;
} else {
if (last_live_in_reg_marker_ != nullptr) {
last_live_in_reg_marker_->AddSuccessor(new_node);
}
// Instructions with side effects and memory operations can't be
// reordered with respect to each other.
if (HasSideEffect(instr)) {
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
for (auto load : pending_loads_) {
load->AddSuccessor(new_node);
}
pending_loads_.clear();
last_side_effect_instr_ = new_node;
} else if (IsLoadOperation(instr)) {
// Load operations can't be reordered with instructions that have side
// effects, but independent loads can be reordered with respect to each other.
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
}
// Look for operand dependencies.
for (auto node : graph_) {
if (HasOperandDependency(node->instruction(), instr)) {
node->AddSuccessor(new_node);
}
}
}
graph_.push_back(new_node);
}
bool InstructionScheduler::CompareNodes(ScheduleGraphNode *node1,
ScheduleGraphNode *node2) const {
return node1->total_latency() > node2->total_latency();
}
void InstructionScheduler::ScheduleBlock() {
ZoneLinkedList<ScheduleGraphNode*> ready_list(zone());
// Compute total latencies so that we can schedule the critical path first.
ComputeTotalLatencies();
// Add nodes which don't have dependencies to the ready list.
for (auto node : graph_) {
if (!node->HasUnscheduledPredecessor()) {
ready_list.push_back(node);
}
}
// Go through the ready list and schedule the instructions.
int cycle = 0;
while (!ready_list.empty()) {
auto candidate = ready_list.end();
for (auto iterator = ready_list.begin(); iterator != ready_list.end();
++iterator) {
// Look for the best candidate to schedule.
// We only consider instructions that have all their operands ready and
// we try to schedule the critical path first (we look for the instruction
// with the highest latency on the path to reach the end of the graph).
if (cycle >= (*iterator)->start_cycle()) {
if ((candidate == ready_list.end()) ||
CompareNodes(*iterator, *candidate)) {
candidate = iterator;
}
}
}
if (candidate != ready_list.end()) {
sequence()->AddInstruction((*candidate)->instruction());
for (auto successor : (*candidate)->successors()) {
successor->DropUnscheduledPredecessor();
successor->set_start_cycle(
std::max(successor->start_cycle(),
cycle + (*candidate)->latency()));
if (!successor->HasUnscheduledPredecessor()) {
ready_list.push_back(successor);
}
}
ready_list.erase(candidate);
}
cycle++;
}
}
int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kArchNop:
case kArchStackPointer:
case kArchFramePointer:
case kArchTruncateDoubleToI:
return kNoOpcodeFlags;
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
case kArchCallCFunction:
case kArchCallCodeObject:
case kArchCallJSFunction:
case kArchLazyBailout:
return kHasSideEffect;
case kArchTailCallCodeObject:
case kArchTailCallJSFunction:
return kHasSideEffect | kIsBlockTerminator;
case kArchDeoptimize:
case kArchJmp:
case kArchLookupSwitch:
case kArchTableSwitch:
case kArchRet:
case kArchThrowTerminator:
return kIsBlockTerminator;
case kCheckedLoadInt8:
case kCheckedLoadUint8:
case kCheckedLoadInt16:
case kCheckedLoadUint16:
case kCheckedLoadWord32:
case kCheckedLoadWord64:
case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
return kIsLoadOperation;
case kCheckedStoreWord8:
case kCheckedStoreWord16:
case kCheckedStoreWord32:
case kCheckedStoreWord64:
case kCheckedStoreFloat32:
case kCheckedStoreFloat64:
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
#define CASE(Name) case k##Name:
TARGET_ARCH_OPCODE_LIST(CASE)
#undef CASE
return GetTargetInstructionFlags(instr);
}
UNREACHABLE();
return kNoOpcodeFlags;
}
bool InstructionScheduler::HasOperandDependency(
const Instruction* instr1, const Instruction* instr2) const {
for (size_t i = 0; i < instr1->OutputCount(); ++i) {
for (size_t j = 0; j < instr2->InputCount(); ++j) {
const InstructionOperand* output = instr1->OutputAt(i);
const InstructionOperand* input = instr2->InputAt(j);
if (output->IsUnallocated() && input->IsUnallocated() &&
(UnallocatedOperand::cast(output)->virtual_register() ==
UnallocatedOperand::cast(input)->virtual_register())) {
return true;
}
if (output->IsConstant() && input->IsUnallocated() &&
(ConstantOperand::cast(output)->virtual_register() ==
UnallocatedOperand::cast(input)->virtual_register())) {
return true;
}
}
}
// TODO(bafsa): Do we need to look for anti-dependencies/output-dependencies?
return false;
}
bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
(instr->flags_mode() == kFlags_branch));
}
void InstructionScheduler::ComputeTotalLatencies() {
for (auto node : base::Reversed(graph_)) {
int max_latency = 0;
for (auto successor : node->successors()) {
DCHECK(successor->total_latency() != -1);
if (successor->total_latency() > max_latency) {
max_latency = successor->total_latency();
}
}
node->set_total_latency(max_latency + node->latency());
}
}
} // namespace compiler
} // namespace internal
} // namespace v8
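ScheduleBlock() above is a greedy critical-path list scheduler: ComputeTotalLatencies() walks the graph backwards to find, for each node, the latency of the longest path from that node to the end of the block, and the main loop then repeatedly emits the ready instruction with the highest total latency whose operands are available at the current cycle. The stand-alone toy below mirrors that loop with plain STL containers instead of V8's Zone types; the Node struct, latencies, and edges in main() are invented for illustration and assume nodes are already in topological order, as a block's instructions are.

#include <algorithm>
#include <cstdio>
#include <vector>

// Toy critical-path list scheduler over a dependency DAG, mirroring the
// shape of InstructionScheduler::ScheduleBlock() (STL-only sketch).
struct Node {
  int id;
  int latency;                  // cycles until this node's result is ready
  std::vector<int> successors;  // nodes that must be scheduled after this one
  int unscheduled_preds;        // predecessors not yet emitted
  int total_latency;            // longest-path latency to the end of the graph
  int start_cycle;              // earliest cycle at which operands are ready
};

void Schedule(std::vector<Node>& graph) {
  // Walk backwards (successors first) to compute critical-path latencies.
  for (auto it = graph.rbegin(); it != graph.rend(); ++it) {
    int max_succ = 0;
    for (int s : it->successors)
      max_succ = std::max(max_succ, graph[s].total_latency);
    it->total_latency = max_succ + it->latency;
  }
  for (Node& n : graph)
    for (int s : n.successors) graph[s].unscheduled_preds++;

  std::vector<int> ready;  // nodes whose predecessors have all been emitted
  for (Node& n : graph)
    if (n.unscheduled_preds == 0) ready.push_back(n.id);

  for (int cycle = 0; !ready.empty(); ++cycle) {
    // Among the nodes whose operands are available this cycle, pick the one
    // on the longest path to the end of the graph (the critical path).
    int best = -1;
    for (int i = 0; i < static_cast<int>(ready.size()); ++i) {
      const Node& n = graph[ready[i]];
      if (cycle >= n.start_cycle &&
          (best < 0 || n.total_latency > graph[ready[best]].total_latency)) {
        best = i;
      }
    }
    if (best < 0) continue;  // nothing ready yet; let the cycle count advance
    Node& n = graph[ready[best]];
    std::printf("cycle %d: emit node %d\n", cycle, n.id);
    for (int s : n.successors) {
      graph[s].start_cycle = std::max(graph[s].start_cycle, cycle + n.latency);
      if (--graph[s].unscheduled_preds == 0) ready.push_back(s);
    }
    ready.erase(ready.begin() + best);
  }
}

int main() {
  // Two independent defs (node 0 slow, node 1 fast) feeding one use (node 2);
  // the made-up latencies put node 0 on the critical path, so it goes first.
  std::vector<Node> g = {{0, 5, {2}, 0, 0, 0},
                         {1, 1, {2}, 0, 0, 0},
                         {2, 1, {}, 0, 0, 0}};
  Schedule(g);
}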
src/compiler/instruction-scheduler.h (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_INSTRUCTION_SCHEDULER_H_
#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_
#include "src/compiler/instruction.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
// A set of flags describing properties of the instructions so that the
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
kNoOpcodeFlags = 0,
kIsBlockTerminator = 1, // The instruction marks the end of a basic block
// e.g.: jump and return instructions.
kHasSideEffect = 2, // The instruction has some side effects (memory
// store, function call...)
kIsLoadOperation = 4, // The instruction is a memory load.
};
class InstructionScheduler final : public ZoneObject {
public:
InstructionScheduler(Zone* zone, InstructionSequence* sequence);
void StartBlock(RpoNumber rpo);
void EndBlock(RpoNumber rpo);
void AddInstruction(Instruction* instr);
static bool SchedulerSupported();
private:
// A scheduling graph node.
// Represents an instruction and its dependencies.
class ScheduleGraphNode: public ZoneObject {
public:
ScheduleGraphNode(Zone* zone, Instruction* instr);
// Mark the instruction represented by 'node' as dependent on this one.
// The current instruction will be registered as an unscheduled predecessor
// of 'node' (i.e. it must be scheduled before 'node').
void AddSuccessor(ScheduleGraphNode* node);
// Check if all the predecessors of this instruction have been scheduled.
bool HasUnscheduledPredecessor() {
return unscheduled_predecessors_count_ != 0;
}
// Record that we have scheduled one of the predecessors of this node.
void DropUnscheduledPredecessor() {
DCHECK(unscheduled_predecessors_count_ > 0);
unscheduled_predecessors_count_--;
}
Instruction* instruction() { return instr_; }
ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
int latency() const { return latency_; }
int total_latency() const { return total_latency_; }
void set_total_latency(int latency) { total_latency_ = latency; }
int start_cycle() const { return start_cycle_; }
void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }
private:
Instruction* instr_;
ZoneDeque<ScheduleGraphNode*> successors_;
// Number of unscheduled predecessors for this node.
int unscheduled_predecessors_count_;
// Estimate of the instruction latency (the number of cycles it takes for
// the instruction to complete).
int latency_;
// The sum of all the latencies on the path from this node to the end of
// the graph (i.e. a node with no successor).
int total_latency_;
// The scheduler keeps a nominal cycle count to keep track of when the
// result of an instruction is available. This field is updated by the
// scheduler to indicate when the value of all the operands of this
// instruction will be available.
int start_cycle_;
};
// Compare the two nodes and return true if node1 is a better candidate than
// node2 (i.e. node1 should be scheduled before node2).
bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
// Perform scheduling for the current block.
void ScheduleBlock();
// Return the scheduling properties of the given instruction.
int GetInstructionFlags(const Instruction* instr) const;
int GetTargetInstructionFlags(const Instruction* instr) const;
// Return true if instr2 uses any value defined by instr1.
bool HasOperandDependency(const Instruction* instr1,
const Instruction* instr2) const;
// Return true if the instruction is a basic block terminator.
bool IsBlockTerminator(const Instruction* instr) const;
// Check whether the given instruction has side effects (e.g. function call,
// memory store).
bool HasSideEffect(const Instruction* instr) const {
return GetInstructionFlags(instr) & kHasSideEffect;
}
// Return true if the instruction is a memory load.
bool IsLoadOperation(const Instruction* instr) const {
return GetInstructionFlags(instr) & kIsLoadOperation;
}
// Identify nops used as a definition point for live-in registers at
// function entry.
bool IsFixedRegisterParameter(const Instruction* instr) const {
return (instr->arch_opcode() == kArchNop) &&
(instr->OutputCount() == 1) &&
(instr->OutputAt(0)->IsUnallocated()) &&
UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy();
}
void ComputeTotalLatencies();
static int GetInstructionLatency(const Instruction* instr);
Zone* zone() { return zone_; }
InstructionSequence* sequence() { return sequence_; }
Zone* zone_;
InstructionSequence* sequence_;
ZoneVector<ScheduleGraphNode*> graph_;
// Last side effect instruction encountered while building the graph.
ScheduleGraphNode* last_side_effect_instr_;
// Set of load instructions encountered since the last instruction with
// side effects. These loads will be added as predecessors of the next
// instruction with side effects.
ZoneVector<ScheduleGraphNode*> pending_loads_;
// Live-in register markers are nop instructions which are emitted at the
// beginning of a basic block so that the register allocator will find a
// defining instruction for live-in values. They must not be moved.
// All these nops are chained together and added as a predecessor of every
// other instruction in the basic block.
ScheduleGraphNode* last_live_in_reg_marker_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_INSTRUCTION_SCHEDULER_H_
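Since the three ArchOpcodeFlags values above are distinct bits, GetInstructionFlags() can return any combination of them; the ia32 and x64 handlers, for example, return kIsLoadOperation | kHasSideEffect for arithmetic instructions that carry a memory operand. A tiny stand-alone illustration of building and querying such a mask (it mirrors the enum above but is not V8 code):

#include <cassert>

// Mirrors the ArchOpcodeFlags bitmask convention from the header above.
enum ArchOpcodeFlags {
  kNoOpcodeFlags = 0,
  kIsBlockTerminator = 1,
  kHasSideEffect = 2,
  kIsLoadOperation = 4,
};

int main() {
  // An instruction with a memory operand both reads memory and must not be
  // reordered across stores, so it carries both bits.
  int flags = kIsLoadOperation | kHasSideEffect;
  assert((flags & kIsLoadOperation) != 0);    // treated as a load...
  assert((flags & kHasSideEffect) != 0);      // ...and as having side effects
  assert((flags & kIsBlockTerminator) == 0);  // but it doesn't end the block
}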
src/compiler/instruction-selector.cc
@@ -30,13 +30,12 @@ InstructionSelector::InstructionSelector(
       source_position_mode_(source_position_mode),
       features_(features),
       schedule_(schedule),
-      current_block_(nullptr),
+      current_block_(NULL),
       instructions_(zone),
       defined_(node_count, false, zone),
       used_(node_count, false, zone),
       virtual_registers_(node_count,
-                         InstructionOperand::kInvalidVirtualRegister, zone),
-      scheduler_(nullptr) {
+                         InstructionOperand::kInvalidVirtualRegister, zone) {
   instructions_.reserve(node_count);
 }
@@ -63,55 +62,17 @@ void InstructionSelector::SelectInstructions() {
   }

   // Schedule the selected instructions.
-  if (FLAG_turbo_instruction_scheduling &&
-      InstructionScheduler::SchedulerSupported()) {
-    scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
-  }
-
   for (auto const block : *blocks) {
     InstructionBlock* instruction_block =
         sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
     size_t end = instruction_block->code_end();
     size_t start = instruction_block->code_start();
     DCHECK_LE(end, start);
-    StartBlock(RpoNumber::FromInt(block->rpo_number()));
+    sequence()->StartBlock(RpoNumber::FromInt(block->rpo_number()));
     while (start-- > end) {
-      AddInstruction(instructions_[start]);
+      sequence()->AddInstruction(instructions_[start]);
     }
-    EndBlock(RpoNumber::FromInt(block->rpo_number()));
+    sequence()->EndBlock(RpoNumber::FromInt(block->rpo_number()));
   }
 }
-
-void InstructionSelector::StartBlock(RpoNumber rpo) {
-  if (FLAG_turbo_instruction_scheduling &&
-      InstructionScheduler::SchedulerSupported()) {
-    DCHECK(scheduler_ != nullptr);
-    scheduler_->StartBlock(rpo);
-  } else {
-    sequence()->StartBlock(rpo);
-  }
-}
-
-void InstructionSelector::EndBlock(RpoNumber rpo) {
-  if (FLAG_turbo_instruction_scheduling &&
-      InstructionScheduler::SchedulerSupported()) {
-    DCHECK(scheduler_ != nullptr);
-    scheduler_->EndBlock(rpo);
-  } else {
-    sequence()->EndBlock(rpo);
-  }
-}
-
-void InstructionSelector::AddInstruction(Instruction* instr) {
-  if (FLAG_turbo_instruction_scheduling &&
-      InstructionScheduler::SchedulerSupported()) {
-    DCHECK(scheduler_ != nullptr);
-    scheduler_->AddInstruction(instr);
-  } else {
-    sequence()->AddInstruction(instr);
-  }
-}
@@ -1483,7 +1444,7 @@ void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {

 void InstructionSelector::VisitThrow(Node* value) {
   OperandGenerator g(this);
-  Emit(kArchThrowTerminator, g.NoOutput());  // TODO(titzer)
+  Emit(kArchNop, g.NoOutput());  // TODO(titzer)
 }
...

src/compiler/instruction-selector.h
@@ -9,7 +9,6 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/instruction.h"
-#include "src/compiler/instruction-scheduler.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
 #include "src/zone-containers.h"
@@ -47,10 +46,6 @@ class InstructionSelector final {
   // Visit code for the entire graph with the included schedule.
   void SelectInstructions();

-  void StartBlock(RpoNumber rpo);
-  void EndBlock(RpoNumber rpo);
-  void AddInstruction(Instruction* instr);
-
   // ===========================================================================
   // ============= Architecture-independent code emission methods. =============
   // ===========================================================================
@@ -258,7 +253,6 @@ class InstructionSelector final {
   BoolVector defined_;
   BoolVector used_;
   IntVector virtual_registers_;
-  InstructionScheduler* scheduler_;
 };

 }  // namespace compiler
...

src/compiler/mips/code-generator-mips.cc
@@ -584,7 +584,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       AssembleArchTableSwitch(instr);
       break;
     case kArchNop:
-    case kArchThrowTerminator:
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
...

src/compiler/mips/instruction-scheduler-mips.cc (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return false; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
UNIMPLEMENTED();
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
UNIMPLEMENTED();
}
} // namespace compiler
} // namespace internal
} // namespace v8
src/compiler/mips64/code-generator-mips64.cc
@@ -594,7 +594,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       AssembleArchTableSwitch(instr);
       break;
     case kArchNop:
-    case kArchThrowTerminator:
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
...

src/compiler/mips64/instruction-scheduler-mips64.cc (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return false; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
UNIMPLEMENTED();
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
UNIMPLEMENTED();
}
} // namespace compiler
} // namespace internal
} // namespace v8
src/compiler/ppc/code-generator-ppc.cc
@@ -755,7 +755,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
     case kArchNop:
-    case kArchThrowTerminator:
       // don't emit code for nops.
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
...

src/compiler/ppc/instruction-scheduler-ppc.cc (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return false; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
UNIMPLEMENTED();
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
UNIMPLEMENTED();
}
} // namespace compiler
} // namespace internal
} // namespace v8
src/compiler/x64/code-generator-x64.cc
@@ -692,7 +692,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       AssembleArchTableSwitch(instr);
       break;
     case kArchNop:
-    case kArchThrowTerminator:
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
...

src/compiler/x64/instruction-scheduler-x64.cc (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kX64Add:
case kX64Add32:
case kX64And:
case kX64And32:
case kX64Cmp:
case kX64Cmp32:
case kX64Test:
case kX64Test32:
case kX64Or:
case kX64Or32:
case kX64Xor:
case kX64Xor32:
case kX64Sub:
case kX64Sub32:
case kX64Imul:
case kX64Imul32:
case kX64ImulHigh32:
case kX64UmulHigh32:
case kX64Idiv:
case kX64Idiv32:
case kX64Udiv:
case kX64Udiv32:
case kX64Not:
case kX64Not32:
case kX64Neg:
case kX64Neg32:
case kX64Shl:
case kX64Shl32:
case kX64Shr:
case kX64Shr32:
case kX64Sar:
case kX64Sar32:
case kX64Ror:
case kX64Ror32:
case kX64Lzcnt:
case kX64Lzcnt32:
case kX64Tzcnt:
case kX64Tzcnt32:
case kX64Popcnt:
case kX64Popcnt32:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
case kSSEFloat32Sub:
case kSSEFloat32Mul:
case kSSEFloat32Div:
case kSSEFloat32Abs:
case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat32Max:
case kSSEFloat32Min:
case kSSEFloat32ToFloat64:
case kSSEFloat64Cmp:
case kSSEFloat64Add:
case kSSEFloat64Sub:
case kSSEFloat64Mul:
case kSSEFloat64Div:
case kSSEFloat64Mod:
case kSSEFloat64Abs:
case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat64Max:
case kSSEFloat64Min:
case kSSEFloat64ToFloat32:
case kSSEFloat64ToInt32:
case kSSEFloat64ToUint32:
case kSSEFloat64ToInt64:
case kSSEFloat32ToInt64:
case kSSEFloat64ToUint64:
case kSSEFloat32ToUint64:
case kSSEInt32ToFloat64:
case kSSEInt64ToFloat32:
case kSSEInt64ToFloat64:
case kSSEUint64ToFloat32:
case kSSEUint64ToFloat64:
case kSSEUint32ToFloat64:
case kSSEFloat64ExtractLowWord32:
case kSSEFloat64ExtractHighWord32:
case kSSEFloat64InsertLowWord32:
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
case kAVXFloat32Cmp:
case kAVXFloat32Add:
case kAVXFloat32Sub:
case kAVXFloat32Mul:
case kAVXFloat32Div:
case kAVXFloat32Max:
case kAVXFloat32Min:
case kAVXFloat64Cmp:
case kAVXFloat64Add:
case kAVXFloat64Sub:
case kAVXFloat64Mul:
case kAVXFloat64Div:
case kAVXFloat64Max:
case kAVXFloat64Min:
case kAVXFloat64Abs:
case kAVXFloat64Neg:
case kAVXFloat32Abs:
case kAVXFloat32Neg:
case kX64BitcastFI:
case kX64BitcastDL:
case kX64BitcastIF:
case kX64BitcastLD:
case kX64Lea32:
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
case kX64Movsxbl:
case kX64Movzxbl:
case kX64Movsxwl:
case kX64Movzxwl:
case kX64Movsxlq:
DCHECK(instr->InputCount() >= 1);
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
: kIsLoadOperation;
case kX64Movb:
case kX64Movw:
return kHasSideEffect;
case kX64Movl:
if (instr->HasOutput()) {
DCHECK(instr->InputCount() >= 1);
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
: kIsLoadOperation;
} else {
return kHasSideEffect;
}
case kX64Movq:
case kX64Movsd:
case kX64Movss:
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64StackCheck:
return kIsLoadOperation;
case kX64Push:
case kX64Poke:
return kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture-independent code.
UNREACHABLE();
}
UNREACHABLE();
return kNoOpcodeFlags;
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// TODO(all): Add instruction cost modeling.
return 1;
}
} // namespace compiler
} // namespace internal
} // namespace v8
src/compiler/x87/code-generator-x87.cc
@@ -481,7 +481,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       AssembleArchTableSwitch(instr);
       break;
     case kArchNop:
-    case kArchThrowTerminator:
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
...

src/compiler/x87/instruction-scheduler-x87.cc (deleted)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return false; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
UNIMPLEMENTED();
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
UNIMPLEMENTED();
}
} // namespace compiler
} // namespace internal
} // namespace v8
src/flag-definitions.h
@@ -467,8 +467,6 @@ DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
 DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
 DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
 DEFINE_BOOL(trace_turbo_escape, false, "enable tracing in escape analysis")
-DEFINE_BOOL(turbo_instruction_scheduling, false,
-            "enable instruction scheduling in TurboFan")

 #if defined(V8_WASM)
 // Flags for native WebAssembly.
...

tools/gyp/v8.gyp
@@ -531,8 +531,6 @@
         '../../src/compiler/instruction-selector-impl.h',
         '../../src/compiler/instruction-selector.cc',
         '../../src/compiler/instruction-selector.h',
-        '../../src/compiler/instruction-scheduler.cc',
-        '../../src/compiler/instruction-scheduler.h',
         '../../src/compiler/instruction.cc',
         '../../src/compiler/instruction.h',
         '../../src/compiler/interpreter-assembler.cc',
@@ -1102,7 +1100,6 @@
         '../../src/arm/simulator-arm.h',
         '../../src/compiler/arm/code-generator-arm.cc',
         '../../src/compiler/arm/instruction-codes-arm.h',
-        '../../src/compiler/arm/instruction-scheduler-arm.cc',
         '../../src/compiler/arm/instruction-selector-arm.cc',
         '../../src/crankshaft/arm/lithium-arm.cc',
         '../../src/crankshaft/arm/lithium-arm.h',
@@ -1156,7 +1153,6 @@
         '../../src/arm64/utils-arm64.h',
         '../../src/compiler/arm64/code-generator-arm64.cc',
         '../../src/compiler/arm64/instruction-codes-arm64.h',
-        '../../src/compiler/arm64/instruction-scheduler-arm64.cc',
         '../../src/compiler/arm64/instruction-selector-arm64.cc',
         '../../src/crankshaft/arm64/delayed-masm-arm64.cc',
         '../../src/crankshaft/arm64/delayed-masm-arm64.h',
@@ -1198,7 +1194,6 @@
         '../../src/ia32/macro-assembler-ia32.h',
         '../../src/compiler/ia32/code-generator-ia32.cc',
         '../../src/compiler/ia32/instruction-codes-ia32.h',
-        '../../src/compiler/ia32/instruction-scheduler-ia32.cc',
         '../../src/compiler/ia32/instruction-selector-ia32.cc',
         '../../src/crankshaft/ia32/lithium-codegen-ia32.cc',
         '../../src/crankshaft/ia32/lithium-codegen-ia32.h',
@@ -1237,7 +1232,6 @@
         '../../src/x87/macro-assembler-x87.h',
         '../../src/compiler/x87/code-generator-x87.cc',
         '../../src/compiler/x87/instruction-codes-x87.h',
-        '../../src/compiler/x87/instruction-scheduler-x87.cc',
         '../../src/compiler/x87/instruction-selector-x87.cc',
         '../../src/crankshaft/x87/lithium-codegen-x87.cc',
         '../../src/crankshaft/x87/lithium-codegen-x87.h',
@@ -1280,7 +1274,6 @@
         '../../src/mips/simulator-mips.h',
         '../../src/compiler/mips/code-generator-mips.cc',
         '../../src/compiler/mips/instruction-codes-mips.h',
-        '../../src/compiler/mips/instruction-scheduler-mips.cc',
         '../../src/compiler/mips/instruction-selector-mips.cc',
         '../../src/crankshaft/mips/lithium-codegen-mips.cc',
         '../../src/crankshaft/mips/lithium-codegen-mips.h',
@@ -1323,7 +1316,6 @@
         '../../src/mips64/simulator-mips64.h',
         '../../src/compiler/mips64/code-generator-mips64.cc',
         '../../src/compiler/mips64/instruction-codes-mips64.h',
-        '../../src/compiler/mips64/instruction-scheduler-mips64.cc',
         '../../src/compiler/mips64/instruction-selector-mips64.cc',
         '../../src/crankshaft/mips64/lithium-codegen-mips64.cc',
         '../../src/crankshaft/mips64/lithium-codegen-mips64.h',
@@ -1381,7 +1373,6 @@
           'sources': [
             '../../src/compiler/x64/code-generator-x64.cc',
             '../../src/compiler/x64/instruction-codes-x64.h',
-            '../../src/compiler/x64/instruction-scheduler-x64.cc',
             '../../src/compiler/x64/instruction-selector-x64.cc',
           ],
         }],
@@ -1389,7 +1380,6 @@
           'sources': [  ### gcmole(arch:ppc) ###
             '../../src/compiler/ppc/code-generator-ppc.cc',
             '../../src/compiler/ppc/instruction-codes-ppc.h',
-            '../../src/compiler/ppc/instruction-scheduler-ppc.cc',
             '../../src/compiler/ppc/instruction-selector-ppc.cc',
             '../../src/crankshaft/ppc/lithium-ppc.cc',
             '../../src/crankshaft/ppc/lithium-ppc.h',
...