Commit 1adad897 authored by dcarney@chromium.org

[turbofan] Don't call out to C

R=bmeurer@chromium.org

BUG=

Review URL: https://codereview.chromium.org/587273002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24122 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent dd72c26a
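
For readers skimming the diff below: this change drops TurboFan's direct-C-call support. It removes the kArchCallAddress and kArchDrop arch opcodes and their handling in the ARM, ARM64, IA32 and x64 code generators and instruction selectors, the CallDescriptor::kCallAddress mapping in VisitCall, the RawMachineAssembler::CallC helper together with the MACHINE_ASSEMBLER_SUPPORTS_CALL_C guard, and the cctest cases that exercised C calls (plus their [SKIP] status entries). As a quick reminder of the API being deleted, here is a condensed sketch of the removed RunCallSeven test; it is reconstructed from the deleted test-run-machops hunks (only the indirect-call variant of the original loop is dropped), not new functionality.

// Condensed from the deleted RunCallSeven test: call a no-argument C
// function through the (now removed) RawMachineAssembler::CallC helper.
static int Seven() { return 7; }

TEST(RunCallSeven) {
  RawMachineAssemblerTester<int32_t> m;
  Node* function = m.PointerConstant(
      reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven)));
  // CallC builds a simplified C call descriptor and prepends the target
  // address as input 0 of the emitted call node (see the
  // raw-machine-assembler.h hunk below).
  m.Return(m.CallC(function, kMachInt32, NULL, NULL, 0));
  CHECK_EQ(7, m.Call());  // Seven() returns 7.
}
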
@@ -136,12 +136,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ArmOperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
case kArchCallAddress: {
DirectCEntryStub stub(isolate());
stub.GenerateCall(masm(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArchCallCodeObject: {
if (instr->InputAt(0)->IsImmediate()) {
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
@@ -169,13 +163,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArchDrop: {
int words = MiscField::decode(instr->opcode());
__ Drop(words);
DCHECK_LT(0, words);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArchJmp:
__ b(code_->GetLabel(i.InputBlock(0)));
DCHECK_EQ(LeaveCC, i.OutputSBit());
......
@@ -68,10 +68,8 @@ class ArmOperandGenerator FINAL : public OperandGenerator {
case kArmStrh:
return value >= -255 && value <= 255;
case kArchCallAddress:
case kArchCallCodeObject:
case kArchCallJSFunction:
case kArchDrop:
case kArchJmp:
case kArchNop:
case kArchRet:
@@ -803,9 +801,6 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallAddress:
opcode = kArchCallAddress;
break;
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
@@ -825,13 +820,6 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
// Caller clean up of stack for C-style calls.
if (descriptor->kind() == CallDescriptor::kCallAddress &&
!buffer.pushed_nodes.empty()) {
DCHECK(deoptimization == NULL && continuation == NULL);
Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
}
}
......
@@ -131,11 +131,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
switch (ArchOpcodeField::decode(opcode)) {
case kArchCallAddress: {
DirectCEntryStub stub(isolate());
stub.GenerateCall(masm(), i.InputRegister(0));
break;
}
case kArchCallCodeObject: {
if (instr->InputAt(0)->IsImmediate()) {
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
@@ -163,11 +158,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AddSafepointAndDeopt(instr);
break;
}
case kArchDrop: {
int words = MiscField::decode(instr->opcode());
__ Drop(words);
break;
}
case kArchJmp:
__ B(code_->GetLabel(i.InputBlock(0)));
break;
......
@@ -630,12 +630,8 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
InitializeCallBuffer(call, &buffer, true, false);
// Push the arguments to the stack.
bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
int aligned_push_count = buffer.pushed_nodes.size();
if (is_c_frame && pushed_count_uneven) {
aligned_push_count++;
}
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
@@ -650,8 +646,7 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
slot--;
}
// Now all pushes can be done in pairs.
@@ -669,9 +664,6 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallAddress:
opcode = kArchCallAddress;
break;
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
@@ -691,12 +683,6 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
// Caller clean up of stack for C-style calls.
if (is_c_frame && aligned_push_count > 0) {
DCHECK(deoptimization == NULL && continuation == NULL);
Emit(kArchDrop | MiscField::encode(aligned_push_count), NULL);
}
}
} // namespace compiler
......
@@ -111,15 +111,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
IA32OperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
case kArchCallAddress:
if (HasImmediateInput(instr, 0)) {
// TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
__ call(reinterpret_cast<byte*>(i.InputInt32(0)),
RelocInfo::RUNTIME_ENTRY);
} else {
__ call(i.InputRegister(0));
}
break;
case kArchCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
@@ -142,11 +133,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AddSafepointAndDeopt(instr);
break;
}
case kArchDrop: {
int words = MiscField::decode(instr->opcode());
__ add(esp, Immediate(kPointerSize * words));
break;
}
case kArchJmp:
__ jmp(code()->GetLabel(i.InputBlock(0)));
break;
......
@@ -531,9 +531,6 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallAddress:
opcode = kArchCallAddress;
break;
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
@@ -553,13 +550,6 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
// Caller clean up of stack for C-style calls.
if (descriptor->kind() == CallDescriptor::kCallAddress &&
buffer.pushed_nodes.size() > 0) {
DCHECK(deoptimization == NULL && continuation == NULL);
Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
}
}
} // namespace compiler
......
@@ -29,10 +29,8 @@ namespace compiler {
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define ARCH_OPCODE_LIST(V) \
V(ArchCallAddress) \
V(ArchCallCodeObject) \
V(ArchCallJSFunction) \
V(ArchDrop) \
V(ArchJmp) \
V(ArchNop) \
V(ArchRet) \
......
@@ -5,12 +5,6 @@
#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
#ifdef USE_SIMULATOR
#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0
#else
#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1
#endif
#include "src/v8.h"
#include "src/compiler/common-operator.h"
@@ -375,21 +369,6 @@ class RawMachineAssembler : public GraphBuilder {
return NewNode(machine()->TruncateInt64ToInt32(), a);
}
#ifdef MACHINE_ASSEMBLER_SUPPORTS_CALL_C
// Call to C.
Node* CallC(Node* function_address, MachineType return_type,
MachineType* arg_types, Node** args, int n_args) {
CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), machine_sig());
Node** passed_args = zone()->NewArray<Node*>(n_args + 1);
passed_args[0] = function_address;
for (int i = 0; i < n_args; ++i) {
passed_args[i + 1] = args[i];
}
return NewNode(common()->Call(descriptor), n_args + 1, passed_args);
}
#endif
// Parameters.
Node* Parameter(size_t index);
......
@@ -216,15 +216,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AddSafepointAndDeopt(instr);
break;
}
case kArchCallAddress:
if (HasImmediateInput(instr, 0)) {
Immediate64 imm = i.InputImmediate64(0);
DCHECK_EQ(kImm64Value, imm.type);
__ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64);
} else {
__ call(i.InputRegister(0));
}
break;
case kArchCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -236,11 +227,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AddSafepointAndDeopt(instr);
break;
}
case kArchDrop: {
int words = MiscField::decode(instr->opcode());
__ addq(rsp, Immediate(kPointerSize * words));
break;
}
case kArchJmp:
__ jmp(code_->GetLabel(i.InputBlock(0)));
break;
......
@@ -691,9 +691,6 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallAddress:
opcode = kArchCallAddress;
break;
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
@@ -713,15 +710,6 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
// Caller clean up of stack for C-style calls.
if (descriptor->kind() == CallDescriptor::kCallAddress &&
!buffer.pushed_nodes.empty()) {
DCHECK(deoptimization == NULL && continuation == NULL);
Emit(kArchDrop |
MiscField::encode(static_cast<int>(buffer.pushed_nodes.size())),
NULL);
}
}
} // namespace compiler
......
@@ -80,11 +80,6 @@
##############################################################################
# TurboFan compiler failures.
# TODO(dcarney): C calls are broken all over the place.
'test-run-machops/RunCall*': [SKIP],
'test-run-machops/RunLoadImmIndex': [SKIP],
'test-run-machops/RunSpillLotsOfThingsWithCall': [SKIP],
# TODO(sigurds): The schedule is borked with multiple inlinees,
# and cannot handle free-floating loops yet
'test-run-inlining/InlineTwiceDependentDiamond': [SKIP],
......
@@ -3567,82 +3567,6 @@ TEST(RunAddTree) {
}
#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
static int Seven() { return 7; }
static int UnaryMinus(int a) { return -a; }
static int APlusTwoB(int a, int b) { return a + 2 * b; }
TEST(RunCallSeven) {
for (int i = 0; i < 2; i++) {
bool call_direct = i == 0;
void* function_address =
reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
RawMachineAssemblerTester<int32_t> m;
Node** args = NULL;
MachineType* arg_types = NULL;
Node* function = call_direct
? m.PointerConstant(function_address)
: m.LoadFromPointer(&function_address, kMachPtr);
m.Return(m.CallC(function, kMachInt32, arg_types, args, 0));
CHECK_EQ(7, m.Call());
}
}
TEST(RunCallUnaryMinus) {
for (int i = 0; i < 2; i++) {
bool call_direct = i == 0;
void* function_address =
reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&UnaryMinus));
RawMachineAssemblerTester<int32_t> m(kMachInt32);
Node* args[] = {m.Parameter(0)};
MachineType arg_types[] = {kMachInt32};
Node* function = call_direct
? m.PointerConstant(function_address)
: m.LoadFromPointer(&function_address, kMachPtr);
m.Return(m.CallC(function, kMachInt32, arg_types, args, 1));
FOR_INT32_INPUTS(i) {
int a = *i;
CHECK_EQ(-a, m.Call(a));
}
}
}
TEST(RunCallAPlusTwoB) {
for (int i = 0; i < 2; i++) {
bool call_direct = i == 0;
void* function_address =
reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&APlusTwoB));
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* args[] = {m.Parameter(0), m.Parameter(1)};
MachineType arg_types[] = {kMachInt32, kMachInt32};
Node* function = call_direct
? m.PointerConstant(function_address)
: m.LoadFromPointer(&function_address, kMachPtr);
m.Return(m.CallC(function, kMachInt32, arg_types, args, 2));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int a = *i;
int b = *j;
int result = m.Call(a, b);
CHECK_EQ(a + 2 * b, result);
}
}
}
}
#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
static const int kFloat64CompareHelperTestCases = 15;
static const int kFloat64CompareHelperNodeType = 4;
@@ -4030,39 +3954,6 @@ TEST(RunNewSpaceConstantsInPhi) {
}
#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
TEST(RunSpillLotsOfThingsWithCall) {
static const int kInputSize = 1000;
RawMachineAssemblerTester<void> m;
Node* accs[kInputSize];
int32_t outputs[kInputSize];
Node* one = m.Int32Constant(1);
Node* acc = one;
for (int i = 0; i < kInputSize; i++) {
acc = m.Int32Add(acc, one);
accs[i] = acc;
}
// If the spill slot computation is wrong, it might load from the c frame
{
void* func = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
Node** args = NULL;
MachineType* arg_types = NULL;
m.CallC(m.PointerConstant(func), kMachInt32, arg_types, args, 0);
}
for (int i = 0; i < kInputSize; i++) {
m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
}
m.Return(one);
m.Call();
for (int i = 0; i < kInputSize; i++) {
CHECK_EQ(outputs[i], i + 2);
}
}
#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
TEST(RunInt32AddWithOverflowP) {
int32_t actual_val = -1;
RawMachineAssemblerTester<int32_t> m;
......