Commit ebb0f9e5 authored by chunyang.dai, committed by Commit bot

X87: enable the X87 TurboFan support.

    This patch includes the following changes:
     1. Enable the TurboFan backend support for the X87 platform. It depends on the previous CL: 3fdfebd2.
     2. Enable the test cases that were previously disabled because TurboFan for X87 was not yet enabled.

BUG=v8:4135
LOG=N

Review URL: https://codereview.chromium.org/1179763004

Cr-Commit-Position: refs/heads/master@{#29049}
parent 8e1c3a75
@@ -21,6 +21,8 @@
#include "src/compiler/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC
#include "src/compiler/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_X87
#include "src/compiler/x87/instruction-codes-x87.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/macro-assembler-x87.h"
namespace v8 {
namespace internal {
namespace compiler {
#define __ masm()->
// Adds X87 specific methods for decoding operands.
class X87OperandConverter : public InstructionOperandConverter {
public:
X87OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
Operand InputOperand(size_t index, int extra = 0) {
return ToOperand(instr_->InputAt(index), extra);
}
Immediate InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
Operand OutputOperand() { return ToOperand(instr_->Output()); }
Operand ToOperand(InstructionOperand* op, int extra = 0) {
if (op->IsRegister()) {
DCHECK(extra == 0);
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
DCHECK(extra == 0);
UNIMPLEMENTED();
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
// The linkage computes where all spill slots are located.
FrameOffset offset = linkage()->GetFrameOffset(
AllocatedOperand::cast(op)->index(), frame(), extra);
return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
}
Operand HighOperand(InstructionOperand* op) {
DCHECK(op->IsDoubleStackSlot());
return ToOperand(op, kPointerSize);
}
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
case Constant::kFloat32:
return Immediate(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Immediate(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
case Constant::kExternalReference:
return Immediate(constant.ToExternalReference());
case Constant::kHeapObject:
return Immediate(constant.ToHeapObject());
case Constant::kInt64:
break;
case Constant::kRpoNumber:
return Immediate::CodeRelativeOffset(ToLabel(operand));
}
UNREACHABLE();
return Immediate(-1);
}
static size_t NextOffset(size_t* offset) {
size_t i = *offset;
(*offset)++;
return i;
}
static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
STATIC_ASSERT(0 == static_cast<int>(times_1));
STATIC_ASSERT(1 == static_cast<int>(times_2));
STATIC_ASSERT(2 == static_cast<int>(times_4));
STATIC_ASSERT(3 == static_cast<int>(times_8));
int scale = static_cast<int>(mode - one);
DCHECK(scale >= 0 && scale < 4);
return static_cast<ScaleFactor>(scale);
}
Operand MemoryOperand(size_t* offset) {
AddressingMode mode = AddressingModeField::decode(instr_->opcode());
switch (mode) {
case kMode_MR: {
Register base = InputRegister(NextOffset(offset));
int32_t disp = 0;
return Operand(base, disp);
}
case kMode_MRI: {
Register base = InputRegister(NextOffset(offset));
int32_t disp = InputInt32(NextOffset(offset));
return Operand(base, disp);
}
case kMode_MR1:
case kMode_MR2:
case kMode_MR4:
case kMode_MR8: {
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1, mode);
int32_t disp = 0;
return Operand(base, index, scale, disp);
}
case kMode_MR1I:
case kMode_MR2I:
case kMode_MR4I:
case kMode_MR8I: {
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
int32_t disp = InputInt32(NextOffset(offset));
return Operand(base, index, scale, disp);
}
case kMode_M1:
case kMode_M2:
case kMode_M4:
case kMode_M8: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1, mode);
int32_t disp = 0;
return Operand(index, scale, disp);
}
case kMode_M1I:
case kMode_M2I:
case kMode_M4I:
case kMode_M8I: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1I, mode);
int32_t disp = InputInt32(NextOffset(offset));
return Operand(index, scale, disp);
}
case kMode_MI: {
int32_t disp = InputInt32(NextOffset(offset));
return Operand(Immediate(disp));
}
case kMode_None:
UNREACHABLE();
return Operand(no_reg, 0);
}
UNREACHABLE();
return Operand(no_reg, 0);
}
Operand MemoryOperand(size_t first_input = 0) {
return MemoryOperand(&first_input);
}
};
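// Illustration (not part of the original CL): for an instruction whose
// opcode encodes addressing mode kMode_MR1I, MemoryOperand() consumes three
// consecutive inputs -- base register, index register, then the 32-bit
// displacement. A hypothetical use:
//
//   size_t first = 2;  // assuming inputs 0 and 1 are non-address operands
//   Operand mem = i.MemoryOperand(&first);
//   // first is now 5, and mem addresses [base + index*1 + disp].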
namespace {
bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final { __ xor_(result_, result_); }
private:
Register const result_;
};
class OutOfLineLoadFloat final : public OutOfLineCode {
public:
OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
DCHECK(result_.code() == 0);
USE(result_);
__ fstp(0);
__ push(Immediate(0xffffffff));
__ push(Immediate(0x7fffffff));
__ fld_d(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kDoubleSize));
}
private:
X87Register const result_;
};
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
X87Register input)
: OutOfLineCode(gen), result_(result), input_(input) {}
void Generate() final {
UNIMPLEMENTED();
USE(result_);
USE(input_);
}
private:
Register const result_;
X87Register const input_;
};
} // namespace
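// The checked-load macros below emit an in-line bounds check whose failure
// path branches to an out-of-line stub that produces 0 (integers) or a quiet
// NaN (floats); checked stores simply branch over the store. Roughly
// (illustrative):
//
//   cmp offset, length
//   jae ool_entry       ; out of line: materialize 0 / NaN
//   <asm_instr>         ; the actual access
// ool_exit: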
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
do { \
auto result = i.OutputDoubleRegister(); \
auto offset = i.InputRegister(0); \
DCHECK(result.code() == 0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
__ j(above_equal, ool->entry()); \
__ fstp(0); \
__ asm_instr(i.MemoryOperand(2)); \
__ bind(ool->exit()); \
} while (false)
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, i.MemoryOperand(2)); \
__ bind(ool->exit()); \
} while (false)
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
Label done; \
DCHECK(i.InputDoubleRegister(2).code() == 0); \
__ j(above_equal, &done, Label::kNear); \
__ asm_instr(i.MemoryOperand(3)); \
__ bind(&done); \
} while (false)
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
Label done; \
__ j(above_equal, &done, Label::kNear); \
if (instr->InputAt(2)->IsRegister()) { \
__ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
} else { \
__ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
} \
__ bind(&done); \
} while (false)
void CodeGenerator::AssembleDeconstructActivationRecord() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ mov(esp, ebp);
__ pop(ebp);
int32_t bytes_to_pop =
descriptor->IsJSFunctionCall()
? static_cast<int32_t>(descriptor->JSParameterCount() *
kPointerSize)
: 0;
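// Pop the return address and store it bytes_to_pop higher up, then bump esp
// so the relocated return address ends up on top of the stack for the tail
// call.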
__ pop(Operand(esp, bytes_to_pop));
__ add(esp, Immediate(bytes_to_pop));
}
}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X87OperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
__ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
}
RecordCallPosition(instr);
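// The X87 backend keeps exactly one value live on the FPU stack. fninit()
// below clears the entire FPU state, so a double result in ST(0) must be
// spilled around it; otherwise reload the fld1() placeholder to re-establish
// the invariant.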
bool double_result =
instr->HasOutput() && instr->Output()->IsDoubleRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
}
__ fninit();
if (double_result) {
__ fld_d(Operand(esp, 0));
__ lea(esp, Operand(esp, kDoubleSize));
} else {
__ fld1();
}
break;
}
case kArchTailCallCodeObject: {
AssembleDeconstructActivationRecord();
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
__ jmp(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
}
break;
}
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
RecordCallPosition(instr);
bool double_result =
instr->HasOutput() && instr->Output()->IsDoubleRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
}
__ fninit();
if (double_result) {
__ fld_d(Operand(esp, 0));
__ lea(esp, Operand(esp, kDoubleSize));
} else {
__ fld1();
}
break;
}
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
AssembleDeconstructActivationRecord();
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
case kArchNop:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
break;
}
case kArchRet:
AssembleReturn();
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), ebp);
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), esp);
break;
case kArchTruncateDoubleToI: {
auto input = i.InputDoubleRegister(0);
USE(input);
DCHECK(input.code() == 0);
auto result_reg = i.OutputRegister();
__ TruncateX87TOSToI(result_reg);
break;
}
case kX87Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
} else {
__ add(i.InputRegister(0), i.InputOperand(1));
}
break;
case kX87And:
if (HasImmediateInput(instr, 1)) {
__ and_(i.InputOperand(0), i.InputImmediate(1));
} else {
__ and_(i.InputRegister(0), i.InputOperand(1));
}
break;
case kX87Cmp:
if (HasImmediateInput(instr, 1)) {
__ cmp(i.InputOperand(0), i.InputImmediate(1));
} else {
__ cmp(i.InputRegister(0), i.InputOperand(1));
}
break;
case kX87Test:
if (HasImmediateInput(instr, 1)) {
__ test(i.InputOperand(0), i.InputImmediate(1));
} else {
__ test(i.InputRegister(0), i.InputOperand(1));
}
break;
case kX87Imul:
if (HasImmediateInput(instr, 1)) {
__ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
} else {
__ imul(i.OutputRegister(), i.InputOperand(1));
}
break;
case kX87ImulHigh:
__ imul(i.InputRegister(1));
break;
case kX87UmulHigh:
__ mul(i.InputRegister(1));
break;
case kX87Idiv:
__ cdq();
__ idiv(i.InputOperand(1));
break;
case kX87Udiv:
__ Move(edx, Immediate(0));
__ div(i.InputOperand(1));
break;
case kX87Not:
__ not_(i.OutputOperand());
break;
case kX87Neg:
__ neg(i.OutputOperand());
break;
case kX87Or:
if (HasImmediateInput(instr, 1)) {
__ or_(i.InputOperand(0), i.InputImmediate(1));
} else {
__ or_(i.InputRegister(0), i.InputOperand(1));
}
break;
case kX87Xor:
if (HasImmediateInput(instr, 1)) {
__ xor_(i.InputOperand(0), i.InputImmediate(1));
} else {
__ xor_(i.InputRegister(0), i.InputOperand(1));
}
break;
case kX87Sub:
if (HasImmediateInput(instr, 1)) {
__ sub(i.InputOperand(0), i.InputImmediate(1));
} else {
__ sub(i.InputRegister(0), i.InputOperand(1));
}
break;
case kX87Shl:
if (HasImmediateInput(instr, 1)) {
__ shl(i.OutputOperand(), i.InputInt5(1));
} else {
__ shl_cl(i.OutputOperand());
}
break;
case kX87Shr:
if (HasImmediateInput(instr, 1)) {
__ shr(i.OutputOperand(), i.InputInt5(1));
} else {
__ shr_cl(i.OutputOperand());
}
break;
case kX87Sar:
if (HasImmediateInput(instr, 1)) {
__ sar(i.OutputOperand(), i.InputInt5(1));
} else {
__ sar_cl(i.OutputOperand());
}
break;
case kX87Ror:
if (HasImmediateInput(instr, 1)) {
__ ror(i.OutputOperand(), i.InputInt5(1));
} else {
__ ror_cl(i.OutputOperand());
}
break;
case kX87Lzcnt:
__ Lzcnt(i.OutputRegister(), i.InputOperand(0));
break;
case kX87LoadFloat64Constant: {
InstructionOperand* source = instr->InputAt(0);
InstructionOperand* destination = instr->Output();
DCHECK(source->IsConstant());
X87OperandConverter g(this, NULL);
Constant src_constant = g.ToConstant(source);
DCHECK_EQ(Constant::kFloat64, src_constant.type());
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ mov(MemOperand(esp, 0), Immediate(lower));
__ mov(MemOperand(esp, kInt32Size), Immediate(upper));
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
UNREACHABLE();
}
break;
}
case kX87Float32Cmp: {
__ fld_s(MemOperand(esp, kFloatSize));
__ fld_s(MemOperand(esp, 0));
__ FCmp();
__ lea(esp, Operand(esp, 2 * kFloatSize));
break;
}
case kX87Float32Add: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, kFloatSize));
__ faddp();
// Clear stack.
__ lea(esp, Operand(esp, 2 * kFloatSize));
// Restore the default value of control word.
__ X87SetFPUCW(0x037F);
break;
}
case kX87Float32Sub: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
__ fld_s(MemOperand(esp, 0));
__ fsubp();
// Clear stack.
__ lea(esp, Operand(esp, 2 * kFloatSize));
// Restore the default value of control word.
__ X87SetFPUCW(0x037F);
break;
}
case kX87Float32Mul: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
__ fld_s(MemOperand(esp, 0));
__ fmulp();
// Clear stack.
__ lea(esp, Operand(esp, 2 * kFloatSize));
// Restore the default value of control word.
__ X87SetFPUCW(0x037F);
break;
}
case kX87Float32Div: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
__ fld_s(MemOperand(esp, 0));
__ fdivp();
// Clear stack.
__ lea(esp, Operand(esp, 2 * kFloatSize));
// Restore the default value of control word.
__ X87SetFPUCW(0x037F);
break;
}
case kX87Float32Max: {
Label check_nan_left, check_zero, return_left, return_right;
Condition condition = below;
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
__ fld_s(MemOperand(esp, 0));
__ fld(1);
__ fld(1);
__ FCmp();
__ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
__ fld(0);
__ fldz();
__ FCmp();
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
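// Both operands are +-0: their sum is +0 unless both are -0, which is
// exactly the max semantics. fadd(1) puts the sum into ST(1), the slot
// that return_left keeps.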
__ fadd(1);
__ jmp(&return_left, Label::kNear);
__ bind(&check_nan_left);
__ fld(0);
__ fld(0);
__ FCmp(); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
__ fxch();
__ bind(&return_left);
__ fstp(0);
__ lea(esp, Operand(esp, 2 * kFloatSize));
break;
}
case kX87Float32Min: {
Label check_nan_left, check_zero, return_left, return_right;
Condition condition = above;
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
__ fld_s(MemOperand(esp, 0));
__ fld(1);
__ fld(1);
__ FCmp();
__ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
__ fld(0);
__ fldz();
__ FCmp();
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// At this point, both left and right are either 0 or -0.
// Spill both values to memory, OR their bit patterns so a single -0 forces
// the sign bit (min(0, -0) is -0), and reload the result into ST(1) to
// match the return_left convention below.
__ push(eax);
__ fld(1);
__ fld(1);
__ sub(esp, Immediate(2 * kPointerSize));
__ fstp_s(MemOperand(esp, 0));
__ fstp_s(MemOperand(esp, kPointerSize));
__ pop(eax);
__ or_(MemOperand(esp, 0), eax);
__ fstp(0);
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ fld(0);    // Duplicate: return_left pops the extra copy.
__ pop(eax);  // Drop the spill slot.
__ pop(eax);  // Restore the saved eax.
__ jmp(&return_left, Label::kNear);
__ bind(&check_nan_left);
__ fld(0);
__ fld(0);
__ FCmp(); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
__ fxch();
__ bind(&return_left);
__ fstp(0);
__ lea(esp, Operand(esp, 2 * kFloatSize));
break;
}
case kX87Float32Sqrt: {
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ fsqrt();
__ lea(esp, Operand(esp, kFloatSize));
break;
}
case kX87Float32Abs: {
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ fabs();
__ lea(esp, Operand(esp, kFloatSize));
break;
}
case kX87Float64Add: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
__ fld_d(MemOperand(esp, kDoubleSize));
__ faddp();
// Clear stack.
__ lea(esp, Operand(esp, 2 * kDoubleSize));
// Restore the default value of control word.
__ X87SetFPUCW(0x037F);
break;
}
case kX87Float64Sub: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
__ fsub_d(MemOperand(esp, 0));
// Clear stack.
__ lea(esp, Operand(esp, 2 * kDoubleSize));
// Restore the default value of control word.
__ X87SetFPUCW(0x037F);
break;
}
case kX87Float64Mul: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
__ fmul_d(MemOperand(esp, 0));
// Clear stack.
__ lea(esp, Operand(esp, 2 * kDoubleSize));
// Restore the default value of control word.
__ X87SetFPUCW(0x037F);
break;
}
case kX87Float64Div: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
__ fdiv_d(MemOperand(esp, 0));
// Clear stack.
__ lea(esp, Operand(esp, 2 * kDoubleSize));
// Restore the default value of control word.
__ X87SetFPUCW(0x037F);
break;
}
case kX87Float64Mod: {
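// x87 only offers the looping partial-remainder instruction (fprem) for
// this, so delegate to the C implementation behind
// mod_two_doubles_operation() instead.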
FrameScope frame_scope(&masm_, StackFrame::MANUAL);
__ mov(eax, esp);
__ PrepareCallCFunction(4, eax);
__ fstp(0);
__ fld_d(MemOperand(eax, 0));
__ fstp_d(Operand(esp, 1 * kDoubleSize));
__ fld_d(MemOperand(eax, kDoubleSize));
__ fstp_d(Operand(esp, 0));
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
4);
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
case kX87Float64Max: {
Label check_nan_left, check_zero, return_left, return_right;
Condition condition = below;
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
__ fld_d(MemOperand(esp, 0));
__ fld(1);
__ fld(1);
__ FCmp();
__ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
__ fld(0);
__ fldz();
__ FCmp();
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
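// Both operands are +-0: their sum is +0 unless both are -0, which is
// exactly the max semantics. fadd(1) puts the sum into ST(1), the slot
// that return_left keeps.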
__ fadd(1);
__ jmp(&return_left, Label::kNear);
__ bind(&check_nan_left);
__ fld(0);
__ fld(0);
__ FCmp(); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
__ fxch();
__ bind(&return_left);
__ fstp(0);
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
case kX87Float64Min: {
Label check_nan_left, check_zero, return_left, return_right;
Condition condition = above;
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
__ fld_d(MemOperand(esp, 0));
__ fld(1);
__ fld(1);
__ FCmp();
__ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
__ fld(0);
__ fldz();
__ FCmp();
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// At this point, both left and right are either 0 or -0.
// Spill both values to memory (a single-precision spill keeps the sign bit,
// which is all that differs between +-0), OR their bit patterns so a single
// -0 forces the sign bit (min(0, -0) is -0), and reload the result into
// ST(1) to match the return_left convention below.
__ push(eax);
__ fld(1);
__ fld(1);
__ sub(esp, Immediate(2 * kPointerSize));
__ fstp_s(MemOperand(esp, 0));
__ fstp_s(MemOperand(esp, kPointerSize));
__ pop(eax);
__ or_(MemOperand(esp, 0), eax);
__ fstp(0);
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ fld(0);    // Duplicate: return_left pops the extra copy.
__ pop(eax);  // Drop the spill slot.
__ pop(eax);  // Restore the saved eax.
__ jmp(&return_left, Label::kNear);
__ bind(&check_nan_left);
__ fld(0);
__ fld(0);
__ FCmp(); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
__ fxch();
__ bind(&return_left);
__ fstp(0);
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
case kX87Float64Abs: {
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
__ fabs();
__ lea(esp, Operand(esp, kDoubleSize));
break;
}
case kX87Int32ToFloat64: {
InstructionOperand* input = instr->InputAt(0);
DCHECK(input->IsRegister() || input->IsStackSlot());
__ fstp(0);
if (input->IsRegister()) {
Register input_reg = i.InputRegister(0);
__ push(input_reg);
__ fild_s(Operand(esp, 0));
__ pop(input_reg);
} else {
__ fild_s(i.InputOperand(0));
}
break;
}
case kX87Float32ToFloat64: {
InstructionOperand* input = instr->InputAt(0);
if (input->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(MemOperand(esp, 0));
__ fld_d(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
DCHECK(input->IsDoubleStackSlot());
__ fstp(0);
__ fld_s(i.InputOperand(0));
}
break;
}
case kX87Uint32ToFloat64: {
__ fstp(0);
__ LoadUint32NoSSE2(i.InputRegister(0));
break;
}
case kX87Float64ToInt32: {
if (!instr->InputAt(0)->IsDoubleRegister()) {
__ fld_d(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister(0));
if (!instr->InputAt(0)->IsDoubleRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ToFloat32: {
InstructionOperand* input = instr->InputAt(0);
if (input->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
DCHECK(input->IsDoubleStackSlot());
__ fstp(0);
__ fld_d(i.InputOperand(0));
__ sub(esp, Immediate(kDoubleSize));
__ fstp_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
break;
}
case kX87Float64ToUint32: {
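// x87 has no unsigned truncation: bias the input by -2^31 so the whole
// uint32 range fits a signed truncation, then add 0x80000000 back to the
// integer result to undo the bias.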
__ push_imm32(-2147483648);
if (!instr->InputAt(0)->IsDoubleRegister()) {
__ fld_d(i.InputOperand(0));
}
__ fild_s(Operand(esp, 0));
__ fadd(1);
__ fstp(0);
__ TruncateX87TOSToI(i.OutputRegister(0));
__ add(esp, Immediate(kInt32Size));
__ add(i.OutputRegister(), Immediate(0x80000000));
if (!instr->InputAt(0)->IsDoubleRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ExtractHighWord32: {
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
__ add(esp, Immediate(kDoubleSize));
} else {
InstructionOperand* input = instr->InputAt(0);
USE(input);
DCHECK(input->IsDoubleStackSlot());
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
}
break;
}
case kX87Float64ExtractLowWord32: {
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ mov(i.OutputRegister(), MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
InstructionOperand* input = instr->InputAt(0);
USE(input);
DCHECK(input->IsDoubleStackSlot());
__ mov(i.OutputRegister(), i.InputOperand(0));
}
break;
}
case kX87Float64InsertHighWord32: {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(MemOperand(esp, 0));
__ mov(MemOperand(esp, kDoubleSize / 2), i.InputRegister(1));
__ fld_d(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
break;
}
case kX87Float64InsertLowWord32: {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(MemOperand(esp, 0));
__ mov(MemOperand(esp, 0), i.InputRegister(1));
__ fld_d(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
break;
}
case kX87Float64Sqrt: {
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
__ fsqrt();
__ lea(esp, Operand(esp, kDoubleSize));
break;
}
case kX87Float64Round: {
RoundingMode mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
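// X87SetRC sets the FPU rounding-control bits: 0x0400 = round down (toward
// -infinity), 0x0c00 = round toward zero (truncate), and 0x0000 restores
// the round-to-nearest default.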
if (mode == kRoundDown) {
__ X87SetRC(0x0400);
} else {
__ X87SetRC(0x0c00);
}
if (!instr->InputAt(0)->IsDoubleRegister()) {
InstructionOperand* input = instr->InputAt(0);
USE(input);
DCHECK(input->IsDoubleStackSlot());
__ fstp(0);
__ fld_d(i.InputOperand(0));
}
__ frndint();
__ X87SetRC(0x0000);
break;
}
case kX87Float64Cmp: {
__ fld_d(MemOperand(esp, kDoubleSize));
__ fld_d(MemOperand(esp, 0));
__ FCmp();
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
case kX87Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
case kX87Movzxbl:
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
break;
case kX87Movb: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov_b(operand, i.InputInt8(index));
} else {
__ mov_b(operand, i.InputRegister(index));
}
break;
}
case kX87Movsxwl:
__ movsx_w(i.OutputRegister(), i.MemoryOperand());
break;
case kX87Movzxwl:
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
break;
case kX87Movw: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov_w(operand, i.InputInt16(index));
} else {
__ mov_w(operand, i.InputRegister(index));
}
break;
}
case kX87Movl:
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov(operand, i.InputImmediate(index));
} else {
__ mov(operand, i.InputRegister(index));
}
}
break;
case kX87Movsd: {
if (instr->HasOutput()) {
X87Register output = i.OutputDoubleRegister();
USE(output);
DCHECK(output.code() == 0);
__ fstp(0);
__ fld_d(i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ fst_d(operand);
}
break;
}
case kX87Movss: {
if (instr->HasOutput()) {
X87Register output = i.OutputDoubleRegister();
USE(output);
DCHECK(output.code() == 0);
__ fstp(0);
__ fld_s(i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ fst_s(operand);
}
break;
}
case kX87Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
// and addressing mode just happens to work out. The "addl"/"subl" forms
// in these cases are faster based on measurements.
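// For instance (illustrative): "lea eax, [eax + 4]" becomes "add eax, 4",
// and "lea eax, [eax*8]" becomes "shl eax, 3".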
if (mode == kMode_MI) {
__ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
} else if (i.InputRegister(0).is(i.OutputRegister())) {
if (mode == kMode_MRI) {
int32_t constant_summand = i.InputInt32(1);
if (constant_summand > 0) {
__ add(i.OutputRegister(), Immediate(constant_summand));
} else if (constant_summand < 0) {
__ sub(i.OutputRegister(), Immediate(-constant_summand));
}
} else if (mode == kMode_MR1) {
if (i.InputRegister(1).is(i.OutputRegister())) {
__ shl(i.OutputRegister(), 1);
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_M2) {
__ shl(i.OutputRegister(), 1);
} else if (mode == kMode_M4) {
__ shl(i.OutputRegister(), 2);
} else if (mode == kMode_M8) {
__ shl(i.OutputRegister(), 3);
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
break;
}
case kX87Push:
if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
} else {
__ push(i.InputOperand(0));
}
break;
case kX87PushFloat32:
__ lea(esp, Operand(esp, -kFloatSize));
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
} else if (instr->InputAt(0)->IsDoubleRegister()) {
__ fst_s(MemOperand(esp, 0));
} else {
UNREACHABLE();
}
break;
case kX87PushFloat64:
__ lea(esp, Operand(esp, -kDoubleSize));
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
} else if (instr->InputAt(0)->IsDoubleRegister()) {
__ fst_d(MemOperand(esp, 0));
} else {
UNREACHABLE();
}
break;
case kX87StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
SaveFPRegsMode mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (HasImmediateInput(instr, 1)) {
int index = i.InputInt32(1);
Register scratch = i.TempRegister(1);
__ mov(Operand(object, index), value);
__ RecordWriteContextSlot(object, index, value, scratch, mode);
} else {
Register index = i.InputRegister(1);
__ mov(Operand(object, index, times_1, 0), value);
__ lea(index, Operand(object, index, times_1, 0));
__ RecordWrite(object, index, value, mode);
}
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
break;
case kCheckedLoadInt16:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
break;
case kCheckedLoadUint16:
ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
break;
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
break;
case kCheckedStoreWord16:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
break;
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(mov);
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(fst_s);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(fst_d);
break;
case kX87StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
}
} // NOLINT(readability/fn_size)
// Assembles a branch after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
X87OperandConverter i(this, instr);
Label::Distance flabel_distance =
branch->fallthru ? Label::kNear : Label::kFar;
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
switch (branch->condition) {
case kUnorderedEqual:
__ j(parity_even, flabel, flabel_distance);
// Fall through.
case kEqual:
__ j(equal, tlabel);
break;
case kUnorderedNotEqual:
__ j(parity_even, tlabel);
// Fall through.
case kNotEqual:
__ j(not_equal, tlabel);
break;
case kSignedLessThan:
__ j(less, tlabel);
break;
case kSignedGreaterThanOrEqual:
__ j(greater_equal, tlabel);
break;
case kSignedLessThanOrEqual:
__ j(less_equal, tlabel);
break;
case kSignedGreaterThan:
__ j(greater, tlabel);
break;
case kUnsignedLessThan:
__ j(below, tlabel);
break;
case kUnsignedGreaterThanOrEqual:
__ j(above_equal, tlabel);
break;
case kUnsignedLessThanOrEqual:
__ j(below_equal, tlabel);
break;
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
case kOverflow:
__ j(overflow, tlabel);
break;
case kNotOverflow:
__ j(no_overflow, tlabel);
break;
}
// Add a jump if not falling through to the next block.
if (!branch->fallthru) __ jmp(flabel);
}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
X87OperandConverter i(this, instr);
Label done;
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
__ j(parity_odd, &check, Label::kNear);
__ Move(reg, Immediate(0));
__ jmp(&done, Label::kNear);
// Fall through.
case kEqual:
cc = equal;
break;
case kUnorderedNotEqual:
__ j(parity_odd, &check, Label::kNear);
__ mov(reg, Immediate(1));
__ jmp(&done, Label::kNear);
// Fall through.
case kNotEqual:
cc = not_equal;
break;
case kSignedLessThan:
cc = less;
break;
case kSignedGreaterThanOrEqual:
cc = greater_equal;
break;
case kSignedLessThanOrEqual:
cc = less_equal;
break;
case kSignedGreaterThan:
cc = greater;
break;
case kUnsignedLessThan:
cc = below;
break;
case kUnsignedGreaterThanOrEqual:
cc = above_equal;
break;
case kUnsignedLessThanOrEqual:
cc = below_equal;
break;
case kUnsignedGreaterThan:
cc = above;
break;
case kOverflow:
cc = overflow;
break;
case kNotOverflow:
cc = no_overflow;
break;
}
__ bind(&check);
if (reg.is_byte_register()) {
// setcc for byte registers (al, bl, cl, dl).
__ setcc(cc, reg);
__ movzx_b(reg, reg);
} else {
// Emit a branch to set a register to either 1 or 0.
Label set;
__ j(cc, &set, Label::kNear);
__ Move(reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&set);
__ mov(reg, Immediate(1));
}
__ bind(&done);
}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
X87OperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ cmp(input, Immediate(i.InputInt32(index + 0)));
__ j(equal, GetLabel(i.InputRpo(index + 1)));
}
AssembleArchJump(i.InputRpo(1));
}
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
X87OperandConverter i(this, instr);
Register input = i.InputRegister(0);
size_t const case_count = instr->InputCount() - 2;
Label** cases = zone()->NewArray<Label*>(case_count);
for (size_t index = 0; index < case_count; ++index) {
cases[index] = GetLabel(i.InputRpo(index + 2));
}
Label* const table = AddJumpTable(cases, case_count);
__ cmp(input, Immediate(case_count));
__ j(above_equal, GetLabel(i.InputRpo(1)));
__ jmp(Operand::JumpTable(input, times_4, table));
}
void CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
// The calling convention for JSFunctions on X87 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:
// --{ before the call instruction }--------------------------------------------
// | caller frame |
// ^ esp ^ ebp
// --{ push arguments and setup ESI, EDI }--------------------------------------
// | args + receiver | caller frame |
// ^ esp ^ ebp
// [edi = JSFunction, esi = context]
// --{ call [edi + kCodeEntryOffset] }------------------------------------------
// | RET | args + receiver | caller frame |
// ^ esp ^ ebp
// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ esp ^ ebp
// --{ mov ebp, esp }-----------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ ebp,esp
// --{ push esi }---------------------------------------------------------------
// | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp
// --{ push edi }---------------------------------------------------------------
// | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp
// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp
// =={ body of called function }================================================
// =={ epilogue of called function }============================================
// --{ mov esp, ebp }-----------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ esp,ebp
// --{ pop ebp }-----------------------------------------------------------
// | | RET | args + receiver | caller frame |
// ^ esp ^ ebp
// --{ ret #A+1 }-----------------------------------------------------------
// | | caller frame |
// ^ esp ^ ebp
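// Worked example (illustrative): a call f(x, y) has A == 2 arguments plus
// the receiver, so "ret #A+1" above pops 3 * kPointerSize == 12 bytes of
// arguments in addition to the return address.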
// Runtime function calls are accomplished by doing a stub call to the
// CEntryStub (a real code object). On X87 it passes arguments on the
// stack, the number of arguments in EAX, the address of the runtime function
// in EBX, and the context in ESI.
// --{ before the call instruction }--------------------------------------------
// | caller frame |
// ^ esp ^ ebp
// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
// | args + receiver | caller frame |
// ^ esp ^ ebp
// [eax = #args, ebx = runtime function, esi = context]
// --{ call #CEntryStub }-------------------------------------------------------
// | RET | args + receiver | caller frame |
// ^ esp ^ ebp
// =={ body of runtime function }===============================================
// --{ runtime returns }--------------------------------------------------------
// | caller frame |
// ^ esp ^ ebp
// Other custom linkages (e.g. for calling directly into and out of C++) may
// need to save callee-saved registers on the stack, which is done in the
// function prologue of generated code.
// --{ before the call instruction }--------------------------------------------
// | caller frame |
// ^ esp ^ ebp
// --{ set up arguments in registers and on the stack }------------------------
// | args | caller frame |
// ^ esp ^ ebp
// [r0 = arg0, r1 = arg1, ...]
// --{ call code }--------------------------------------------------------------
// | RET | args | caller frame |
// ^ esp ^ ebp
// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
// | FP | RET | args | caller frame |
// ^ esp ^ ebp
// --{ mov ebp, esp }-----------------------------------------------------------
// | FP | RET | args | caller frame |
// ^ ebp,esp
// --{ save registers }---------------------------------------------------------
// | regs | FP | RET | args | caller frame |
// ^ esp ^ ebp
// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | regs | FP | RET | args | caller frame |
// ^esp ^ ebp
// =={ body of called function }================================================
// =={ epilogue of called function }============================================
// --{ restore registers }------------------------------------------------------
// | regs | FP | RET | args | caller frame |
// ^ esp ^ ebp
// --{ mov esp, ebp }-----------------------------------------------------------
// | FP | RET | args | caller frame |
// ^ esp,ebp
// --{ pop ebp }----------------------------------------------------------------
// | RET | args | caller frame |
// ^ esp ^ ebp
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
// Assemble a prologue similar to the cdecl calling convention.
__ push(ebp);
__ mov(ebp, esp);
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
int register_save_area_size = 0;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
if (!((1 << i) & saves)) continue;
__ push(Register::from_code(i));
register_save_area_size += kPointerSize;
}
frame()->SetRegisterSaveAreaSize(register_save_area_size);
}
} else if (descriptor->IsJSFunctionCall()) {
// TODO(turbofan): this prologue is redundant with OSR, but needed for
// code aging.
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
stack_slots -= frame()->GetOsrStackSlotCount();
}
if (stack_slots > 0) {
// Allocate the stack slots used by this frame.
__ sub(esp, Immediate(stack_slots * kPointerSize));
}
// Initialize FPU state: fninit() resets it, fld1() establishes the
// backend's one-value x87 stack invariant.
__ fninit();
__ fld1();
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
const RegList saves = descriptor->CalleeSavedRegisters();
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
if (stack_slots > 0) {
__ add(esp, Immediate(stack_slots * kPointerSize));
}
// Restore registers.
if (saves != 0) {
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
__ pop(Register::from_code(i));
}
}
__ pop(ebp); // Pop caller's frame pointer.
__ ret(0);
} else {
// No saved registers.
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
__ ret(0);
}
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ Ret(pop_count * kPointerSize, ebx);
} else {
__ ret(0);
}
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
X87OperandConverter g(this, NULL);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = g.ToRegister(source);
Operand dst = g.ToOperand(destination);
__ mov(dst, src);
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Operand src = g.ToOperand(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ mov(dst, src);
} else {
Operand dst = g.ToOperand(destination);
__ push(src);
__ pop(dst);
}
} else if (source->IsConstant()) {
Constant src_constant = g.ToConstant(source);
if (src_constant.type() == Constant::kHeapObject) {
Handle<HeapObject> src = src_constant.ToHeapObject();
int offset;
if (IsMaterializableFromFrame(src, &offset)) {
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ mov(dst, Operand(ebp, offset));
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
__ push(Operand(ebp, offset));
__ pop(dst);
}
} else if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ LoadHeapObject(dst, src);
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
AllowDeferredHandleDereference embedding_raw_address;
if (isolate()->heap()->InNewSpace(*src)) {
__ PushHeapObject(src);
__ pop(dst);
} else {
__ mov(dst, src);
}
}
} else if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ Move(dst, g.ToImmediate(source));
} else if (destination->IsStackSlot()) {
Operand dst = g.ToOperand(destination);
__ Move(dst, g.ToImmediate(source));
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
if (destination->IsDoubleRegister()) {
__ sub(esp, Immediate(kInt32Size));
__ mov(MemOperand(esp, 0), Immediate(src));
// Keep only one value on the x87 stack: drop the old top before pushing.
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kInt32Size));
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
__ Move(dst, Immediate(src));
}
} else {
DCHECK_EQ(Constant::kFloat64, src_constant.type());
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ mov(MemOperand(esp, 0), Immediate(lower));
__ mov(MemOperand(esp, kInt32Size), Immediate(upper));
// Keep only one value on the x87 stack: drop the old top before pushing.
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst0 = g.ToOperand(destination);
Operand dst1 = g.HighOperand(destination);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
}
} else if (source->IsDoubleRegister()) {
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.machine_type()) {
case kRepFloat32:
__ fst_s(dst);
break;
case kRepFloat64:
__ fst_d(dst);
break;
default:
UNREACHABLE();
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
Operand src = g.ToOperand(source);
auto allocated = AllocatedOperand::cast(*source);
if (destination->IsDoubleRegister()) {
// Keep only one value on the x87 stack: drop the old top before pushing.
__ fstp(0);
switch (allocated.machine_type()) {
case kRepFloat32:
__ fld_s(src);
break;
case kRepFloat64:
__ fld_d(src);
break;
default:
UNREACHABLE();
}
} else {
Operand dst = g.ToOperand(destination);
switch (allocated.machine_type()) {
case kRepFloat32:
__ fld_s(src);
__ fstp_s(dst);
break;
case kRepFloat64:
__ fld_d(src);
__ fstp_d(dst);
break;
default:
UNREACHABLE();
}
}
} else {
UNREACHABLE();
}
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
X87OperandConverter g(this, NULL);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
Register src = g.ToRegister(source);
Register dst = g.ToRegister(destination);
__ xchg(dst, src);
} else if (source->IsRegister() && destination->IsStackSlot()) {
// Register-memory.
__ xchg(g.ToRegister(source), g.ToOperand(destination));
} else if (source->IsStackSlot() && destination->IsStackSlot()) {
// Memory-memory.
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
__ push(dst);
__ push(src);
__ pop(dst);
__ pop(src);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
UNREACHABLE();
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.machine_type()) {
case kRepFloat32:
__ fld_s(g.ToOperand(destination));
__ fxch();
__ fstp_s(g.ToOperand(destination));
break;
case kRepFloat64:
__ fld_d(g.ToOperand(destination));
__ fxch();
__ fstp_d(g.ToOperand(destination));
break;
default:
UNREACHABLE();
}
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.machine_type()) {
case kRepFloat32:
__ fld_s(g.ToOperand(source));
__ fld_s(g.ToOperand(destination));
__ fstp_s(g.ToOperand(source));
__ fstp_s(g.ToOperand(destination));
break;
case kRepFloat64:
__ fld_d(g.ToOperand(source));
__ fld_d(g.ToOperand(destination));
__ fstp_d(g.ToOperand(source));
__ fstp_d(g.ToOperand(destination));
break;
default:
UNREACHABLE();
}
} else {
// No other combinations are possible.
UNREACHABLE();
}
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ dd(targets[index]);
}
}
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
}
}
MarkLazyDeoptSite();
}
#undef __
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
#define V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-codes.h"
namespace v8 {
namespace internal {
namespace compiler {
// X87-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(X87Add) \
V(X87And) \
V(X87Cmp) \
V(X87Test) \
V(X87Or) \
V(X87Xor) \
V(X87Sub) \
V(X87Imul) \
V(X87ImulHigh) \
V(X87UmulHigh) \
V(X87Idiv) \
V(X87Udiv) \
V(X87Not) \
V(X87Neg) \
V(X87Shl) \
V(X87Shr) \
V(X87Sar) \
V(X87Ror) \
V(X87Lzcnt) \
V(X87Float32Cmp) \
V(X87Float32Add) \
V(X87Float32Sub) \
V(X87Float32Mul) \
V(X87Float32Div) \
V(X87Float32Max) \
V(X87Float32Min) \
V(X87Float32Abs) \
V(X87Float32Sqrt) \
V(X87LoadFloat64Constant) \
V(X87Float64Add) \
V(X87Float64Sub) \
V(X87Float64Mul) \
V(X87Float64Div) \
V(X87Float64Mod) \
V(X87Float64Max) \
V(X87Float64Min) \
V(X87Float64Abs) \
V(X87Int32ToFloat64) \
V(X87Float32ToFloat64) \
V(X87Uint32ToFloat64) \
V(X87Float64ToInt32) \
V(X87Float64ToFloat32) \
V(X87Float64ToUint32) \
V(X87Float64ExtractHighWord32) \
V(X87Float64ExtractLowWord32) \
V(X87Float64InsertHighWord32) \
V(X87Float64InsertLowWord32) \
V(X87Float64Sqrt) \
V(X87Float64Round) \
V(X87Float64Cmp) \
V(X87Movsxbl) \
V(X87Movzxbl) \
V(X87Movb) \
V(X87Movsxwl) \
V(X87Movzxwl) \
V(X87Movw) \
V(X87Movl) \
V(X87Movss) \
V(X87Movsd) \
V(X87Lea) \
V(X87Push) \
V(X87PushFloat64) \
V(X87PushFloat32) \
V(X87StoreWriteBarrier) \
V(X87StackCheck)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (int32_t)
#define TARGET_ADDRESSING_MODE_LIST(V) \
V(MR) /* [%r1 ] */ \
V(MRI) /* [%r1 + K] */ \
V(MR1) /* [%r1 + %r2*1 ] */ \
V(MR2) /* [%r1 + %r2*2 ] */ \
V(MR4) /* [%r1 + %r2*4 ] */ \
V(MR8) /* [%r1 + %r2*8 ] */ \
V(MR1I) /* [%r1 + %r2*1 + K] */ \
V(MR2I) /* [%r1 + %r2*2 + K] */ \
V(MR4I) /* [%r1 + %r2*4 + K] */ \
V(MR8I) /* [%r1 + %r2*8 + K] */ \
V(M1) /* [ %r2*1 ] */ \
V(M2) /* [ %r2*2 ] */ \
V(M4) /* [ %r2*4 ] */ \
V(M8) /* [ %r2*8 ] */ \
V(M1I) /* [ %r2*1 + K] */ \
V(M2I) /* [ %r2*2 + K] */ \
V(M4I) /* [ %r2*4 + K] */ \
V(M8I) /* [ %r2*8 + K] */ \
V(MI) /* [ K] */
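// Example (illustrative, not part of the original CL): the instruction
// selector packs one of these modes next to the opcode, and the code
// generator decodes it again:
//
//   InstructionCode code = kX87Movl | AddressingModeField::encode(kMode_MRI);
//   AddressingModeField::decode(code);  // => kMode_MRI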
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
namespace compiler {
// Adds X87-specific methods for generating operands.
class X87OperandGenerator final : public OperandGenerator {
public:
explicit X87OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
InstructionOperand UseByteRegister(Node* node) {
// TODO(titzer): encode byte register use constraints.
return UseFixed(node, edx);
}
InstructionOperand DefineAsByteRegister(Node* node) {
// TODO(titzer): encode byte register def constraints.
return DefineAsRegister(node);
}
InstructionOperand CreateImmediate(int imm) {
return sequence()->AddImmediate(Constant(imm));
}
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
case IrOpcode::kNumberConstant:
case IrOpcode::kExternalConstant:
return true;
case IrOpcode::kHeapConstant: {
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
Isolate* isolate = value.handle()->GetIsolate();
return !isolate->heap()->InNewSpace(*value.handle());
}
default:
return false;
}
}
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
Node* displacement_node,
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == NULL)
? 0
: OpParameter<int32_t>(displacement_node);
if (base != NULL) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
base = NULL;
}
}
if (base != NULL) {
inputs[(*input_count)++] = UseRegister(base);
if (index != NULL) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
kMode_MR4I, kMode_MR8I};
mode = kMRnI_modes[scale];
} else {
static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
kMode_MR4, kMode_MR8};
mode = kMRn_modes[scale];
}
} else {
if (displacement == 0) {
mode = kMode_MR;
} else {
inputs[(*input_count)++] = TempImmediate(displacement);
mode = kMode_MRI;
}
}
} else {
DCHECK(scale >= 0 && scale <= 3);
if (index != NULL) {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
mode = kMnI_modes[scale];
} else {
static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
kMode_M4, kMode_M8};
mode = kMn_modes[scale];
}
} else {
inputs[(*input_count)++] = TempImmediate(displacement);
return kMode_MI;
}
}
return mode;
}
AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
InstructionOperand inputs[],
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
inputs[(*input_count)++] = UseRegister(node->InputAt(0));
inputs[(*input_count)++] = UseRegister(node->InputAt(1));
return kMode_MR1;
}
}
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
};
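// Illustration (hypothetical node shape): for an address computed as
// base + index*4 + 8, GenerateMemoryOperandInputs() emits the inputs
// (UseRegister(base), UseRegister(index), TempImmediate(8)) and returns
// kMode_MR4I; with no base register it would return kMode_M4I instead.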
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
ArchOpcode opcode;
switch (rep) {
case kRepFloat32:
opcode = kX87Movss;
break;
case kRepFloat64:
opcode = kX87Movsd;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = typ == kTypeInt32 ? kX87Movsxbl : kX87Movzxbl;
break;
case kRepWord16:
opcode = typ == kTypeInt32 ? kX87Movsxwl : kX87Movzxwl;
break;
case kRepTagged: // Fall through.
case kRepWord32:
opcode = kX87Movl;
break;
default:
UNREACHABLE();
return;
}
X87OperandGenerator g(this);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
Emit(code, 1, outputs, input_count, inputs);
}
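// Note: an InstructionCode is a bit-packed word, so ORing in
// AddressingModeField::encode(mode) lets a single Emit() carry both the arch
// opcode and the addressing mode. The code generator recovers them with the
// matching decode calls, roughly:
//
//   ArchOpcode arch_opcode = ArchOpcodeField::decode(instr->opcode());
//   AddressingMode mode = AddressingModeField::decode(instr->opcode());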
void InstructionSelector::VisitStore(Node* node) {
X87OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
MachineType rep = RepresentationOf(store_rep.machine_type());
if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
DCHECK_EQ(kRepTagged, rep);
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
if (g.CanBeImmediate(index)) {
InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister()};
Emit(kX87StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
g.UseImmediate(index), g.UseFixed(value, ecx), arraysize(temps),
temps);
} else {
InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
Emit(kX87StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
temps);
}
return;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
ArchOpcode opcode;
switch (rep) {
case kRepFloat32:
opcode = kX87Movss;
break;
case kRepFloat64:
opcode = kX87Movsd;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = kX87Movb;
break;
case kRepWord16:
opcode = kX87Movw;
break;
case kRepTagged: // Fall through.
case kRepWord32:
opcode = kX87Movl;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
} else if (rep == kRepWord8 || rep == kRepBit) {
val = g.UseByteRegister(value);
} else {
val = g.UseRegister(value);
}
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
inputs[input_count++] = val;
Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
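// Note: byte-sized stores cannot take an arbitrary register because 32-bit
// x86 only exposes the low byte of eax/ebx/ecx/edx (al/bl/cl/dl); esi and edi
// have no 8-bit form. UseByteRegister() above conservatively pins edx (see
// the TODO in X87OperandGenerator) rather than expressing a real
// byte-register constraint.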
void InstructionSelector::VisitCheckedLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
X87OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
case kRepWord16:
opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
case kRepFloat64:
opcode = kCheckedLoadFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand offset_operand = g.UseRegister(offset);
InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
if (g.CanBeImmediate(buffer)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
offset_operand, g.UseImmediate(buffer));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MR1),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand);
}
}
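// Note (sketch): for a checked load the first two inputs feed the bounds
// check and the remaining inputs describe the memory operand, so the
// generated code is roughly
//
//   cmp offset, length
//   jae done                  ; out of bounds: skip the access
//   mov result, [buffer + offset]
//
// with the exact out-of-bounds result left to the code generator.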
void InstructionSelector::VisitCheckedStore(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
X87OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = kCheckedStoreWord8;
break;
case kRepWord16:
opcode = kCheckedStoreWord16;
break;
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
case kRepFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand value_operand =
g.CanBeImmediate(value)
? g.UseImmediate(value)
: ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
: g.UseRegister(value));
InstructionOperand offset_operand = g.UseRegister(offset);
InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
if (g.CanBeImmediate(buffer)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
offset_operand, length_operand, value_operand, offset_operand,
g.UseImmediate(buffer));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
offset_operand, length_operand, value_operand, g.UseRegister(buffer),
offset_operand);
}
}
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
X87OperandGenerator g(selector);
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
if (left == right) {
// If both inputs refer to the same operand, enforce allocating a register
// for both of them to ensure that we don't end up generating code like
// this:
//
// mov eax, [ebp-0x10]
// add eax, [ebp-0x10]
// jo label
InstructionOperand const input = g.UseRegister(left);
inputs[input_count++] = input;
inputs[input_count++] = input;
} else if (g.CanBeImmediate(right)) {
inputs[input_count++] = g.UseRegister(left);
inputs[input_count++] = g.UseImmediate(right);
} else {
if (node->op()->HasProperty(Operator::kCommutative) &&
g.CanBeBetterLeftOperand(right)) {
std::swap(left, right);
}
inputs[input_count++] = g.UseRegister(left);
inputs[input_count++] = g.Use(right);
}
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
outputs[output_count++] = g.DefineSameAsFirst(node);
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
DCHECK_NE(0u, input_count);
DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
inputs);
}
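// Note: the FlagsContinuation describes who consumes the flags this binop
// sets. For a branch the two block labels become extra inputs and
// cont->Encode() folds the condition into the opcode, so an
// Int32AddWithOverflow fused with its branch comes out roughly as
//
//   add eax, ebx
//   jo  true_block
//   jmp false_block
//
// while a 'set' continuation materializes the condition with setcc instead.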
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, &cont);
}
void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kX87And);
}
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kX87Or);
}
void InstructionSelector::VisitWord32Xor(Node* node) {
X87OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().Is(-1)) {
Emit(kX87Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
} else {
VisitBinop(this, node, kX87Xor);
}
}
// Shared routine for multiple shift operations.
static inline void VisitShift(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X87OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (g.CanBeImmediate(right)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseImmediate(right));
} else {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseFixed(right, ecx));
}
}
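// Note: a variable shift count must be placed in ecx because the x86
// shift/rotate instructions only accept cl as a register count operand.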
namespace {
void VisitMulHigh(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X87OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsFixed(node, edx),
g.UseFixed(node->InputAt(0), eax),
g.UseUniqueRegister(node->InputAt(1)));
}
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
X87OperandGenerator g(selector);
InstructionOperand temps[] = {g.TempRegister(edx)};
selector->Emit(opcode, g.DefineAsFixed(node, eax),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
}
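// Note: x86 div/idiv take the dividend in edx:eax and write the quotient to
// eax and the remainder to edx, which is why the result is fixed to eax here
// (and to edx in VisitMod below) and edx is reserved as a temp so the
// allocator keeps it free for the extension of the dividend.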
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
X87OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsFixed(node, edx),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)));
}
void EmitLea(InstructionSelector* selector, Node* result, Node* index,
int scale, Node* base, Node* displacement) {
X87OperandGenerator g(selector);
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
index, scale, base, displacement, inputs, &input_count);
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(result);
InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
selector->Emit(opcode, 1, outputs, input_count, inputs);
}
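// Note (illustrative): EmitLea is the strength-reduction helper used by the
// scale matchers below; small multiplies and shifts become a single lea,
// e.g. assuming the input lives in eax:
//
//   x * 5  ->  lea eax, [eax + eax*4]   (power_of_two_plus_one: base = index)
//   x << 3 ->  lea eax, [eax*8]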
} // namespace
void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : NULL;
EmitLea(this, node, index, m.scale(), base, NULL);
return;
}
VisitShift(this, node, kX87Shl);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
VisitShift(this, node, kX87Shr);
}
void InstructionSelector::VisitWord32Sar(Node* node) {
VisitShift(this, node, kX87Sar);
}
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kX87Ror);
}
void InstructionSelector::VisitWord32Clz(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitInt32Add(Node* node) {
X87OperandGenerator g(this);
// Try to match the Add to a lea pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
(m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
Emit(opcode, 1, outputs, input_count, inputs);
return;
}
// No lea pattern match, use add
VisitBinop(this, node, kX87Add);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
X87OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().Is(0)) {
Emit(kX87Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
} else {
VisitBinop(this, node, kX87Sub);
}
}
void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : NULL;
EmitLea(this, node, index, m.scale(), base, NULL);
return;
}
X87OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (g.CanBeImmediate(right)) {
Emit(kX87Imul, g.DefineAsRegister(node), g.Use(left),
g.UseImmediate(right));
} else {
if (g.CanBeBetterLeftOperand(right)) {
std::swap(left, right);
}
Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
g.Use(right));
}
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitMulHigh(this, node, kX87ImulHigh);
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
VisitMulHigh(this, node, kX87UmulHigh);
}
void InstructionSelector::VisitInt32Div(Node* node) {
VisitDiv(this, node, kX87Idiv);
}
void InstructionSelector::VisitUint32Div(Node* node) {
VisitDiv(this, node, kX87Udiv);
}
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitMod(this, node, kX87Idiv);
}
void InstructionSelector::VisitUint32Mod(Node* node) {
VisitMod(this, node, kX87Udiv);
}
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float32ToFloat64, g.DefineAsFixed(node, stX_0),
g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Int32ToFloat64, g.DefineAsFixed(node, stX_0),
g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Uint32ToFloat64, g.DefineAsFixed(node, stX_0),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64ToFloat32, g.DefineAsFixed(node, stX_0),
g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitFloat32Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, NULL);
}
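// Note: the float visitors here all follow the same x87 stack discipline:
// both inputs are first pushed onto the FPU register stack with
// kX87PushFloat32/kX87PushFloat64, the operation then consumes them and
// leaves its result on top, which is why every result is
// DefineAsFixed(node, stX_0) and the arithmetic Emit() takes no inputs.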
void InstructionSelector::VisitFloat64Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat32Div(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64Div(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
X87OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(eax)};
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Mod, g.DefineAsFixed(node, stX_0), 1, temps)->MarkAsCall();
}
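// Note (sketch): Float64Mod reserves eax because x87 fprem only computes a
// partial remainder; the status word's C2 bit has to be re-tested via
// fnstsw ax (which can only target ax) until it clears, roughly:
//
//   loop: fprem
//         fnstsw ax
//         sahf
//         jp loop              ; repeat while C2 (partial remainder) is set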
void InstructionSelector::VisitFloat32Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat32Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat32Abs(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
}
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64Round | MiscField::encode(kRoundDown),
g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64Round | MiscField::encode(kRoundToZero),
g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
UNREACHABLE();
}
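// Note: the x87 control word's RC field only offers round-to-nearest-even,
// down, up and toward-zero, so there is no ties-away rounding to select;
// the operator is simply never advertised for this backend.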
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
X87OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): handle pushing double parameters.
InstructionOperand value =
g.CanBeImmediate(node)
? g.UseImmediate(node)
: IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
Emit(kX87Push, g.NoOutput(), value);
}
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
if (handler) {
DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
if (hint == IfExceptionHint::kLocallyCaught) {
flags |= CallDescriptor::kHasLocalCatchHandler;
}
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
&buffer.instruction_args.front())->MarkAsCall();
}
void InstructionSelector::VisitTailCall(Node* node) {
X87OperandGenerator g(this);
CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
if (descriptor->UsesOnlyRegisters() &&
descriptor->HasSameReturnLocationsAs(
linkage()->GetIncomingDescriptor())) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
DCHECK_EQ(0u, buffer.pushed_nodes.size());
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
opcode = kArchTailCallCodeObject;
break;
case CallDescriptor::kCallJSFunction:
opcode = kArchTailCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
opcode |= MiscField::encode(descriptor->flags());
// Emit the tailcall instruction.
Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
&buffer.instruction_args.front());
} else {
FrameStateDescriptor* frame_state_descriptor =
descriptor->NeedsFrameState()
? GetFrameStateDescriptor(
node->InputAt(static_cast<int>(descriptor->InputCount())))
: nullptr;
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): Handle pushing double parameters.
InstructionOperand value =
g.CanBeImmediate(node)
? g.UseImmediate(node)
: IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
Emit(kX87Push, g.NoOutput(), value);
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
opcode = kArchCallCodeObject;
break;
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
size_t output_count = buffer.outputs.size();
auto* outputs = &buffer.outputs.front();
Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
&buffer.instruction_args.front())->MarkAsCall();
Emit(kArchRet, 0, nullptr, output_count, outputs);
}
}
namespace {
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
X87OperandGenerator g(selector);
if (cont->IsBranch()) {
selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
left, right);
}
}
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
Node* left, Node* right, FlagsContinuation* cont,
bool commutative) {
X87OperandGenerator g(selector);
if (commutative && g.CanBeBetterLeftOperand(right)) {
std::swap(left, right);
}
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
// Shared routine for multiple float32 compare operations (inputs commuted).
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
X87OperandGenerator g(selector);
selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
if (cont->IsBranch()) {
selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kX87Float32Cmp),
g.DefineAsByteRegister(cont->result()));
}
}
// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
X87OperandGenerator g(selector);
selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
if (cont->IsBranch()) {
selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kX87Float64Cmp),
g.DefineAsByteRegister(cont->result()));
}
}
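// Note on "(inputs commuted)": the x87 compare sets CF/ZF/PF as if for an
// unsigned integer comparison, so 'a < b' is selected as the commuted
// 'b > a' under kUnsignedGreaterThan (see the Float*LessThan visitors below),
// and kUnorderedEqual keeps NaN comparisons false via the parity flag.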
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
X87OperandGenerator g(selector);
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right)) {
VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
} else if (g.CanBeImmediate(left)) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
} else {
VisitCompare(selector, opcode, left, right, cont,
node->op()->HasProperty(Operator::kCommutative));
}
}
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
X87OperandGenerator g(selector);
Int32BinopMatcher m(node);
if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
ExternalReference js_stack_limit =
ExternalReference::address_of_stack_limit(selector->isolate());
if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
// Compare(Load(js_stack_limit), LoadStackPointer)
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode = cont->Encode(kX87StackCheck);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
}
return;
}
}
VisitWordCompare(selector, node, kX87Cmp, cont);
}
// Shared routine for word comparison with zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
// Try to combine the branch with a comparison.
while (selector->CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
// Try to combine with comparisons against 0 by simply inverting the
// continuation.
Int32BinopMatcher m(value);
if (m.right().Is(0)) {
user = value;
value = m.left().node();
cont->Negate();
continue;
}
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWordCompare(selector, value, cont);
}
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(selector, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWordCompare(selector, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
if (result == NULL || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kX87Add, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kX87Sub, cont);
default:
break;
}
}
}
break;
case IrOpcode::kInt32Sub:
return VisitWordCompare(selector, value, cont);
case IrOpcode::kWord32And:
return VisitWordCompare(selector, value, kX87Test, cont);
default:
break;
}
break;
}
// Continuation could not be combined with a compare, emit compare against 0.
X87OperandGenerator g(selector);
VisitCompare(selector, kX87Cmp, g.Use(value), g.TempImmediate(0), cont);
}
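// Note (illustrative): VisitWordCompareZero is what lets a branch consume the
// flags of the comparison feeding it, so Branch(Word32Equal(x, y)) comes out
// roughly as
//
//   cmp x, y
//   je  true_block
//   jmp false_block
//
// instead of materializing a 0/1 value first and then testing it.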
} // namespace
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X87OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kX87Lea | AddressingModeField::encode(kMode_MRI), index_operand,
value_operand, g.TempImmediate(-sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
return EmitLookupSwitch(sw, value_operand);
}
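// Note: the heuristic above weighs code size against (3x) dispatch time.
// E.g. 10 cases spanning a value range of 12 cost (4 + 12) + 3*3 = 25 as a
// table versus (3 + 2*10) + 3*10 = 53 as a lookup chain, so the jump table
// wins; sparse cases (a large value_range) tip it back to compare-and-branch.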
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont(kUnsignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kX87Add, &cont);
}
FlagsContinuation cont;
VisitBinop(this, node, kX87Add, &cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kX87Sub, &cont);
}
FlagsContinuation cont;
VisitBinop(this, node, kX87Sub, &cont);
}
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont(kUnsignedGreaterThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64ExtractLowWord32, g.DefineAsRegister(node),
g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64ExtractHighWord32, g.DefineAsRegister(node),
g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
X87OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Emit(kX87Float64InsertLowWord32, g.UseFixed(node, stX_0), g.UseRegister(left),
g.UseRegister(right));
}
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
X87OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Emit(kX87Float64InsertHighWord32, g.UseFixed(node, stX_0),
g.UseRegister(left), g.UseRegister(right));
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe;
return flags;
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
#include "src/compiler/linkage-impl.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
struct X87LinkageHelperTraits {
static Register ReturnValueReg() { return eax; }
static Register ReturnValue2Reg() { return edx; }
static Register JSCallFunctionReg() { return edi; }
static Register ContextReg() { return esi; }
static Register RuntimeCallFunctionReg() { return ebx; }
static Register RuntimeCallArgCountReg() { return eax; }
static RegList CCalleeSaveRegisters() {
return esi.bit() | edi.bit() | ebx.bit();
}
static Register CRegisterParameter(int i) { return no_reg; }
static int CRegisterParametersLength() { return 0; }
};
typedef LinkageHelper<X87LinkageHelperTraits> LH;
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
int parameter_count,
CallDescriptor::Flags flags) {
return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function, int parameter_count,
Operator::Properties properties) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties, MachineType return_type) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
stack_parameter_count, flags, properties,
return_type);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
const MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -32,7 +32,7 @@
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_X87
#define V8_TURBOFAN_BACKEND 1
#else
#define V8_TURBOFAN_BACKEND 0
......
......@@ -388,6 +388,14 @@ void Assembler::mov_b(Register dst, const Operand& src) {
}
void Assembler::mov_b(const Operand& dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0xC6);
emit_operand(eax, dst);
EMIT(static_cast<int8_t>(src.x_));
}
void Assembler::mov_b(const Operand& dst, int8_t imm8) {
EnsureSpace ensure_space(this);
EMIT(0xC6);
......@@ -430,6 +438,16 @@ void Assembler::mov_w(const Operand& dst, int16_t imm16) {
}
void Assembler::mov_w(const Operand& dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0xC7);
emit_operand(eax, dst);
EMIT(static_cast<int8_t>(src.x_ & 0xff));
EMIT(static_cast<int8_t>(src.x_ >> 8));
}
void Assembler::mov(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
......@@ -1698,6 +1716,20 @@ void Assembler::fsub_i(int i) {
}
void Assembler::fsubr_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDC);
emit_operand(ebp, adr);
}
void Assembler::fsub_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDC);
emit_operand(esp, adr);
}
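// Note: for these memory-form FPU instructions the register handed to
// emit_operand() is not a real operand; its register code becomes the /digit
// opcode extension in the ModRM byte. With escape byte 0xDC: ecx (1) encodes
// fmul_d, esp (4) fsub_d, ebp (5) fsubr_d, esi (6) fdiv_d and edi (7) fdivr_d.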
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDA);
......@@ -1717,12 +1749,33 @@ void Assembler::fmul(int i) {
}
void Assembler::fmul_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDC);
emit_operand(ecx, adr);
}
void Assembler::fdiv(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xF8, i);
}
void Assembler::fdiv_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDC);
emit_operand(esi, adr);
}
void Assembler::fdivr_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDC);
emit_operand(edi, adr);
}
void Assembler::fdiv_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xF0, i);
......
......@@ -273,6 +273,14 @@ inline Condition CommuteCondition(Condition cc) {
}
enum RoundingMode {
kRoundToNearest = 0x0,
kRoundDown = 0x1,
kRoundUp = 0x2,
kRoundToZero = 0x3
};
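// Note: these values match the x87 RC (rounding control) field, bits 11:10 of
// the FPU control word, so a RoundingMode can presumably be installed with
// something like (cw & ~0xC00) | (rc << 10) before reloading the control word.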
// -----------------------------------------------------------------------------
// Machine instruction Immediates
......@@ -620,11 +628,14 @@ class Assembler : public AssemblerBase {
void mov_b(Register dst, const Operand& src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8);
void mov_b(const Operand& dst, const Immediate& src);
void mov_b(const Operand& dst, Register src);
void mov_w(Register dst, const Operand& src);
void mov_w(const Operand& dst, Register src);
void mov_w(const Operand& dst, int16_t imm16);
void mov_w(const Operand& dst, const Immediate& src);
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
......@@ -886,15 +897,21 @@ class Assembler : public AssemblerBase {
void fadd_d(const Operand& adr);
void fsub(int i);
void fsub_i(int i);
void fsub_d(const Operand& adr);
void fsubr_d(const Operand& adr);
void fmul(int i);
void fmul_d(const Operand& adr);
void fmul_i(int i);
void fdiv(int i);
void fdiv_d(const Operand& adr);
void fdivr_d(const Operand& adr);
void fdiv_i(int i);
void fisub_s(const Operand& adr);
void faddp(int i = 1);
void fsubp(int i = 1);
void fsubr(int i = 1);
void fsubrp(int i = 1);
void fmulp(int i = 1);
void fdivp(int i = 1);
......
......@@ -726,6 +726,21 @@ int DisassemblerX87::MemoryFPUInstruction(int escape_opcode,
case 0:
mnem = "fadd_d";
break;
case 1:
mnem = "fmul_d";
break;
case 4:
mnem = "fsub_d";
break;
case 5:
mnem = "fsubr_d";
break;
case 6:
mnem = "fdiv_d";
break;
case 7:
mnem = "fdivr_d";
break;
default:
UnimplementedInstruction();
}
......
......@@ -744,9 +744,11 @@ void MacroAssembler::X87SetRC(int rc) {
void MacroAssembler::X87SetFPUCW(int cw) {
RecordComment("-- X87SetFPUCW start --");
push(Immediate(cw));
fldcw(MemOperand(esp, 0));
add(esp, Immediate(kPointerSize));
RecordComment("-- X87SetFPUCW end--");
}
......
......@@ -301,12 +301,7 @@
##############################################################################
['arch == x87', {
# Test requires turbofan:
'codegen-tester/CompareWrapper': [SKIP],
'codegen-tester/ParametersEqual': [SKIP],
'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
'test-serialize/SerializeInternalReference': [FAIL],
'test-run-machops/RunFloat64InsertLowWord32': [SKIP]
}], # 'arch == x87'
##############################################################################
......
......@@ -30,12 +30,4 @@
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
}], # ALWAYS
##############################################################################
['arch == x87', {
# Crankshaft compiler did not generate required source position for it:
'overwritten-builtins': [SKIP],
'strong-object-set-proto': [SKIP],
}], # 'arch == x87'
]
......@@ -639,12 +639,6 @@
'harmony/symbols': [SKIP],
}], # 'arch == nacl_ia32 or arch == nacl_x64'
##############################################################################
['arch == x87', {
# Currently Turbofan is not supported by x87.
'compiler/opt-next-call-turbo': [SKIP],
}], # 'arch == x87'
##############################################################################
['deopt_fuzzer == True', {
......
......@@ -1110,6 +1110,10 @@
'../../src/x87/macro-assembler-x87.h',
'../../src/x87/regexp-macro-assembler-x87.cc',
'../../src/x87/regexp-macro-assembler-x87.h',
'../../src/compiler/x87/code-generator-x87.cc',
'../../src/compiler/x87/instruction-codes-x87.h',
'../../src/compiler/x87/instruction-selector-x87.cc',
'../../src/compiler/x87/linkage-x87.cc',
'../../src/ic/x87/access-compiler-x87.cc',
'../../src/ic/x87/handler-compiler-x87.cc',
'../../src/ic/x87/ic-x87.cc',
......