Commit 6db78c80 authored by bmeurer's avatar bmeurer Committed by Commit bot

[turbofan] Drop V8_TURBOFAN_BACKEND and V8_TURBOFAN_TARGET defines.

TurboFan is now a requirement and supported by all backends, so we don't
need those macros (plus all the machinery on top) anymore.

R=jarin@chromium.org

Review URL: https://codereview.chromium.org/1282763002

Cr-Commit-Position: refs/heads/master@{#30082}
parent 0c674828
......@@ -654,61 +654,6 @@ void CodeGenerator::MarkLazyDeoptSite() {
last_lazy_deopt_pc_ = masm()->pc_offset();
}
#if !V8_TURBOFAN_BACKEND
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
UNIMPLEMENTED();
}
void CodeGenerator::AssembleArchBranch(Instruction* instr,
BranchInfo* branch) {
UNIMPLEMENTED();
}
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
UNIMPLEMENTED();
}
void CodeGenerator::AssembleArchJump(RpoNumber target) { UNIMPLEMENTED(); }
void CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
UNIMPLEMENTED();
}
void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
UNIMPLEMENTED();
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
UNIMPLEMENTED();
}
void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
UNIMPLEMENTED();
}
#endif // !V8_TURBOFAN_BACKEND
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: masm_(gen->masm()), next_(gen->ools_) {
......
......@@ -777,8 +777,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
#if V8_TURBOFAN_BACKEND
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
......@@ -827,10 +825,8 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
}
#endif // V8_TURBOFAN_BACKEND
// 32 bit targets do not implement the following instructions.
#if !V8_TURBOFAN_BACKEND_64
#if V8_TARGET_ARCH_32_BIT
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
......@@ -907,7 +903,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
#endif // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
#endif // V8_TARGET_ARCH_32_BIT
void InstructionSelector::VisitFinish(Node* node) {
......@@ -1124,42 +1120,6 @@ void InstructionSelector::AddFrameStateInputs(
DCHECK(value_index == descriptor->GetSize());
}
#if !V8_TURBOFAN_BACKEND
#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTailCall(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
UNIMPLEMENTED();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
return MachineOperatorBuilder::Flag::kNoFlags;
}
#endif // !V8_TURBOFAN_BACKEND
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -1076,9 +1076,6 @@ Handle<Code> Pipeline::GenerateCode() {
GraphReplayPrinter::PrintReplay(data.graph());
}
// Bailout here in case target architecture is not supported.
if (!SupportedTarget()) return Handle<Code>::null();
base::SmartPointer<Typer> typer;
if (info()->is_typing_enabled()) {
// Type the graph.
......@@ -1203,7 +1200,6 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
PipelineData* data = this->data_;
DCHECK_NOT_NULL(data->graph());
CHECK(SupportedBackend());
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());
......
......@@ -46,9 +46,6 @@ class Pipeline {
InstructionSequence* sequence,
bool run_verifier);
static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
private:
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
......
......@@ -30,29 +30,6 @@
# define V8_INFINITY INFINITY
#endif
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_X87
#define V8_TURBOFAN_BACKEND 1
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS64 || \
V8_TARGET_ARCH_PPC64
// 64-bit TurboFan backends support 64-bit integer arithmetic.
#define V8_TURBOFAN_BACKEND_64 1
#else
#define V8_TURBOFAN_BACKEND_64 0
#endif
#else
#define V8_TURBOFAN_BACKEND 0
#endif
#if V8_TURBOFAN_BACKEND
#define V8_TURBOFAN_TARGET 1
#else
#define V8_TURBOFAN_TARGET 0
#endif
namespace v8 {
namespace base {
......
......@@ -368,8 +368,6 @@ void Int32BinopInputShapeTester::RunRight(
}
#if V8_TURBOFAN_TARGET
TEST(ParametersEqual) {
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p1 = m.Parameter(1);
......@@ -572,5 +570,3 @@ TEST(RunBinopTester) {
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(-11.25, *i)); }
}
}
#endif // V8_TURBOFAN_TARGET
......@@ -20,8 +20,6 @@
#include "src/rewriter.h"
#include "src/scopes.h"
#define USE_CRANKSHAFT 0
namespace v8 {
namespace internal {
namespace compiler {
......@@ -156,7 +154,6 @@ class FunctionTester : public InitializedHandleScope {
Handle<JSFunction> Compile(Handle<JSFunction> function) {
// TODO(titzer): make this method private.
#if V8_TURBOFAN_TARGET
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
......@@ -181,19 +178,6 @@ class FunctionTester : public InitializedHandleScope {
CHECK(!code.is_null());
info.context()->native_context()->AddOptimizedCode(*code);
function->ReplaceCode(*code);
#elif USE_CRANKSHAFT
Handle<Code> unoptimized = Handle<Code>(function->code());
Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized,
Compiler::NOT_CONCURRENT);
CHECK(!code.is_null());
#if ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
code->Disassemble("test code", tracing_scope.file());
}
#endif
function->ReplaceCode(*code);
#endif
return function;
}
......@@ -212,7 +196,6 @@ class FunctionTester : public InitializedHandleScope {
// Compile the given machine graph instead of the source of the function
// and replace the JSFunction's code with the result.
Handle<JSFunction> CompileGraph(Graph* graph) {
CHECK(Pipeline::SupportedTarget());
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
......
......@@ -268,7 +268,6 @@ class GraphBuilderTester : public HandleAndZoneScope,
}
virtual byte* Generate() {
if (!Pipeline::SupportedBackend()) return NULL;
if (code_.is_null()) {
Zone* zone = graph()->zone();
CallDescriptor* desc =
......
......@@ -8,8 +8,6 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
......@@ -110,5 +108,3 @@ TEST(ProfileLoop) {
m.Expect(arraysize(expected), expected);
}
}
#endif // V8_TURBOFAN_TARGET
......@@ -8,8 +8,6 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
......@@ -459,4 +457,3 @@ TEST(BranchCombineFloat64Compares) {
}
}
}
#endif // V8_TURBOFAN_TARGET
......@@ -147,7 +147,6 @@ TEST(RunChangeTaggedToInt32) {
ChangesLoweringTester<int32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
if (Pipeline::SupportedTarget()) {
FOR_INT32_INPUTS(i) {
int32_t input = *i;
......@@ -167,7 +166,6 @@ TEST(RunChangeTaggedToInt32) {
int32_t result = t.Call(*number);
CHECK_EQ(input, result);
}
}
}
}
......@@ -177,7 +175,6 @@ TEST(RunChangeTaggedToUint32) {
ChangesLoweringTester<uint32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
if (Pipeline::SupportedTarget()) {
FOR_UINT32_INPUTS(i) {
uint32_t input = *i;
......@@ -198,7 +195,6 @@ TEST(RunChangeTaggedToUint32) {
CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
}
}
}
}
......@@ -211,7 +207,7 @@ TEST(RunChangeTaggedToFloat64) {
t.machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
&result);
if (Pipeline::SupportedTarget()) {
{
FOR_INT32_INPUTS(i) {
int32_t input = *i;
......@@ -234,7 +230,7 @@ TEST(RunChangeTaggedToFloat64) {
}
}
if (Pipeline::SupportedTarget()) {
{
FOR_FLOAT64_INPUTS(i) {
double input = *i;
{
......@@ -257,13 +253,13 @@ TEST(RunChangeBoolToBit) {
ChangesLoweringTester<int32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeBoolToBit());
if (Pipeline::SupportedTarget()) {
{
Object* true_obj = t.heap()->true_value();
int32_t result = t.Call(true_obj);
CHECK_EQ(1, result);
}
if (Pipeline::SupportedTarget()) {
{
Object* false_obj = t.heap()->false_value();
int32_t result = t.Call(false_obj);
CHECK_EQ(0, result);
......@@ -275,122 +271,15 @@ TEST(RunChangeBitToBool) {
ChangesLoweringTester<Object*> t(kMachInt32);
t.BuildAndLower(t.simplified()->ChangeBitToBool());
if (Pipeline::SupportedTarget()) {
{
Object* result = t.Call(1);
Object* true_obj = t.heap()->true_value();
CHECK_EQ(true_obj, result);
}
if (Pipeline::SupportedTarget()) {
{
Object* result = t.Call(0);
Object* false_obj = t.heap()->false_value();
CHECK_EQ(false_obj, result);
}
}
#if V8_TURBOFAN_BACKEND
// TODO(titzer): disabled on ARM
TEST(RunChangeInt32ToTaggedSmi) {
ChangesLoweringTester<Object*> t;
int32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
t.machine()->Load(kMachInt32), &input);
if (Pipeline::SupportedTarget()) {
FOR_INT32_INPUTS(i) {
input = *i;
if (!Smi::IsValid(input)) continue;
Object* result = t.Call();
t.CheckNumber(static_cast<double>(input), result);
}
}
}
TEST(RunChangeUint32ToTaggedSmi) {
ChangesLoweringTester<Object*> t;
uint32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
t.machine()->Load(kMachUint32), &input);
if (Pipeline::SupportedTarget()) {
FOR_UINT32_INPUTS(i) {
input = *i;
if (input > static_cast<uint32_t>(Smi::kMaxValue)) continue;
Object* result = t.Call();
double expected = static_cast<double>(input);
t.CheckNumber(expected, result);
}
}
}
TEST(RunChangeInt32ToTagged) {
ChangesLoweringTester<Object*> t;
int32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
t.machine()->Load(kMachInt32), &input);
if (Pipeline::SupportedTarget()) {
for (int m = 0; m < 3; m++) { // Try 3 GC modes.
FOR_INT32_INPUTS(i) {
if (m == 0) CcTest::heap()->EnableInlineAllocation();
if (m == 1) CcTest::heap()->DisableInlineAllocation();
if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
input = *i;
Object* result = t.CallWithPotentialGC<Object>();
t.CheckNumber(static_cast<double>(input), result);
}
}
}
}
TEST(RunChangeUint32ToTagged) {
ChangesLoweringTester<Object*> t;
uint32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
t.machine()->Load(kMachUint32), &input);
if (Pipeline::SupportedTarget()) {
for (int m = 0; m < 3; m++) { // Try 3 GC modes.
FOR_UINT32_INPUTS(i) {
if (m == 0) CcTest::heap()->EnableInlineAllocation();
if (m == 1) CcTest::heap()->DisableInlineAllocation();
if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
input = *i;
Object* result = t.CallWithPotentialGC<Object>();
double expected = static_cast<double>(input);
t.CheckNumber(expected, result);
}
}
}
}
TEST(RunChangeFloat64ToTagged) {
ChangesLoweringTester<Object*> t;
double input;
t.BuildLoadAndLower(t.simplified()->ChangeFloat64ToTagged(),
t.machine()->Load(kMachFloat64), &input);
if (Pipeline::SupportedTarget()) {
for (int m = 0; m < 3; m++) { // Try 3 GC modes.
FOR_FLOAT64_INPUTS(i) {
if (m == 0) CcTest::heap()->EnableInlineAllocation();
if (m == 1) CcTest::heap()->DisableInlineAllocation();
if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
input = *i;
Object* result = t.CallWithPotentialGC<Object>();
t.CheckNumber(input, result);
}
}
}
}
#endif // V8_TURBOFAN_BACKEND
......@@ -19,8 +19,6 @@
#include "src/compiler/schedule.h"
#include "test/cctest/cctest.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
......@@ -113,5 +111,3 @@ TEST(TestLinkageRuntimeCall) {
TEST(TestLinkageStubCall) {
// TODO(titzer): test linkage creation for outgoing stub calls.
}
#endif // V8_TURBOFAN_TARGET
......@@ -21,13 +21,8 @@ static void RunPipeline(Zone* zone, const char* source) {
CompilationInfo info(&parse_info);
Pipeline pipeline(&info);
#if V8_TURBOFAN_TARGET
Handle<Code> code = pipeline.GenerateCode();
CHECK(Pipeline::SupportedTarget());
CHECK(!code.is_null());
#else
USE(pipeline);
#endif
}
......
......@@ -10,8 +10,6 @@
using namespace v8::internal;
using namespace v8::internal::compiler;
#if V8_TURBOFAN_TARGET
static void IsOptimized(const v8::FunctionCallbackInfo<v8::Value>& args) {
JavaScriptFrameIterator it(CcTest::i_isolate());
JavaScriptFrame* frame = it.frame();
......@@ -103,7 +101,6 @@ TEST(DeoptExceptionHandlerFinally) {
#endif
}
#endif
TEST(DeoptTrivial) {
FLAG_allow_natives_syntax = true;
......
......@@ -6,8 +6,6 @@
#include "test/cctest/compiler/function-tester.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
......@@ -574,5 +572,3 @@ TEST(InlineMutuallyRecursive) {
InstallAssertInlineCountHelper(CcTest::isolate());
T.CheckCall(T.Val(42), T.Val(1));
}
#endif // V8_TURBOFAN_TARGET
......@@ -13,8 +13,6 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
#if V8_TURBOFAN_TARGET
using namespace v8::base;
using namespace v8::internal;
using namespace v8::internal::compiler;
......@@ -89,7 +87,7 @@ TEST(CodeGenNop) {
}
#if V8_TURBOFAN_BACKEND_64
#if V8_TARGET_ARCH_64_BIT
static Node* Int64Input(RawMachineAssemblerTester<int64_t>* m, int index) {
switch (index) {
case 0:
......@@ -143,7 +141,7 @@ TEST(CodeGenInt64Binop) {
// TODO(titzer): add tests that run 64-bit integer operations.
#endif // V8_TURBOFAN_BACKEND_64
#endif // V8_TARGET_ARCH_64_BIT
TEST(RunGoto) {
......@@ -5281,5 +5279,3 @@ TEST(RunCallCFunction8) {
}
#endif // USE_SIMULATOR
#endif // V8_TURBOFAN_TARGET
......@@ -14,8 +14,6 @@
#include "src/parser.h"
#include "test/cctest/compiler/function-tester.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
......@@ -146,5 +144,3 @@ TEST(RunStringAddTFStub) {
Handle<Object> result = ft.Call(leftArg, rightArg).ToHandleChecked();
CHECK(String::Equals(ft.Val("linksrechts"), Handle<String>::cast(result)));
}
#endif // V8_TURBOFAN_TARGET
......@@ -21178,11 +21178,7 @@ TEST(TurboAsmDisablesNeuter) {
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
#if V8_TURBOFAN_TARGET
bool should_be_neuterable = !i::FLAG_turbo_asm;
#else
bool should_be_neuterable = true;
#endif
const char* load =
"function Module(stdlib, foreign, heap) {"
" 'use asm';"
......
......@@ -14,41 +14,25 @@ namespace compiler {
// The TARGET_TEST(Case, Name) macro works just like
// TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
#if V8_TURBOFAN_TARGET
#define TARGET_TEST(Case, Name) TEST(Case, Name)
#else
#define TARGET_TEST(Case, Name) TEST(Case, DISABLED_##Name)
#endif
// The TARGET_TEST_F(Case, Name) macro works just like
// TEST_F(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
#if V8_TURBOFAN_TARGET
#define TARGET_TEST_F(Case, Name) TEST_F(Case, Name)
#else
#define TARGET_TEST_F(Case, Name) TEST_F(Case, DISABLED_##Name)
#endif
// The TARGET_TEST_P(Case, Name) macro works just like
// TEST_P(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
#if V8_TURBOFAN_TARGET
#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
#else
#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
#endif
// The TARGET_TYPED_TEST(Case, Name) macro works just like
// TYPED_TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
#if V8_TURBOFAN_TARGET
#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, Name)
#else
#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
#endif
} // namespace compiler
} // namespace internal
......
......@@ -1004,7 +1004,6 @@ TEST_F(JSTypedLoweringTest, JSLoadDynamicContext) {
}
}
#if V8_TURBOFAN_TARGET
// -----------------------------------------------------------------------------
// JSAdd
......@@ -1104,8 +1103,6 @@ TEST_F(JSTypedLoweringTest, JSCreateLiteralObject) {
input0, input1, input2, _, context, frame_state, effect, control));
}
#endif // V8_TURBOFAN_TARGET
// -----------------------------------------------------------------------------
// JSCreateWithContext
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment