Commit 07586353 authored by palfia@homejinni.com

MIPS: Convert UnaryOpStub to a HydrogenCodeStub.

Port r15506 (d0ea1f6)

BUG=

Review URL: https://codereview.chromium.org/18763003
Patch from Balazs Kilvady <kilvadyb@homejinni.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15524 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ba8119f9
@@ -227,8 +227,20 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
void UnaryOpStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(UnaryOpIC_Miss);
}
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc);
@@ -1586,294 +1598,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
stream->Add("UnaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
UnaryOpIC::GetName(operand_type_));
}
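PrintName() just splices three names into a fixed pattern, so a stub for unary minus on smis with no overwrite ends up with a name along the lines of "UnaryOpStub_SUB_Alloc_Smi". A tiny standalone sketch; the exact spellings come from Token::Name and UnaryOpIC::GetName and are only assumed here:

#include <cstdio>

int main() {
  // Assumed outputs of Token::Name(Token::SUB), the UNARY_NO_OVERWRITE case
  // above, and UnaryOpIC::GetName(UnaryOpIC::SMI) respectively.
  const char* op_name = "SUB";
  const char* overwrite_name = "Alloc";
  const char* type_name = "Smi";
  std::printf("UnaryOpStub_%s_%s_%s\n", op_name, overwrite_name, type_name);
  return 0;
}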
// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::Generate(MacroAssembler* masm) {
switch (operand_type_) {
case UnaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
case UnaryOpIC::SMI:
GenerateSmiStub(masm);
break;
case UnaryOpIC::NUMBER:
GenerateNumberStub(masm);
break;
case UnaryOpIC::GENERIC:
GenerateGenericStub(masm);
break;
}
}
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Argument is in a0 and v0 at this point, so we can overwrite a0.
__ li(a2, Operand(Smi::FromInt(op_)));
__ li(a1, Operand(Smi::FromInt(mode_)));
__ li(a0, Operand(Smi::FromInt(operand_type_)));
__ Push(v0, a2, a1, a0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
GenerateSmiStubSub(masm);
break;
case Token::BIT_NOT:
GenerateSmiStubBitNot(masm);
break;
default:
UNREACHABLE();
}
}
void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeSub(masm, &non_smi, &slow);
__ bind(&non_smi);
__ bind(&slow);
GenerateTypeTransition(masm);
}
void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
Label non_smi;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
GenerateTypeTransition(masm);
}
void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
Label* non_smi,
Label* slow) {
__ JumpIfNotSmi(a0, non_smi);
// The result of negating zero or the smallest negative smi is not a smi.
__ And(t0, a0, ~0x80000000);
__ Branch(slow, eq, t0, Operand(zero_reg));
// Return '0 - value'.
__ Ret(USE_DELAY_SLOT);
__ subu(v0, zero_reg, a0);
}
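The And/Branch pair above sends exactly two inputs to the slow path: smi 0 (whose negation is -0, which needs a heap number) and the smi-tagged most negative value (whose negation overflows). A small host-side sketch of that guard, assuming the 32-bit tagged representation used on this port:

#include <cassert>
#include <cstdint>

// Mirrors "And(t0, a0, ~0x80000000); Branch(slow, eq, t0, zero_reg)": the
// masked value is zero only for the tagged words 0x00000000 and 0x80000000,
// so everything else can be negated in place as a smi.
bool NegationStaysSmi(uint32_t tagged) {
  return (tagged & ~0x80000000u) != 0;
}

int main() {
  assert(!NegationStaysSmi(0x00000000u));  // smi 0: result would be -0
  assert(!NegationStaysSmi(0x80000000u));  // tagged kMinInt: negation overflows
  assert(NegationStaysSmi(0x00000002u));   // smi 1 -> smi -1, stays a smi
  return 0;
}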
void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
Label* non_smi) {
__ JumpIfNotSmi(a0, non_smi);
// Flip bits and revert inverted smi-tag.
__ Neg(v0, a0);
__ And(v0, v0, ~kSmiTagMask);
__ Ret();
}
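The flip-and-clear sequence above works because complementing a tagged smi sets the (previously zero) tag bit; clearing it again leaves exactly the tagged complement of the untagged value. A standalone check of that identity, assuming the 31-bit smi encoding used here:

#include <cassert>
#include <cstdint>

int32_t SmiTag(int32_t value) { return value * 2; }  // tag bit 0 stays clear

int main() {
  const int32_t values[] = {0, 1, -1, 42, -12345};
  for (int32_t v : values) {
    int32_t tagged = SmiTag(v);
    int32_t result = ~tagged & ~1;  // flip bits, clear the inverted tag bit
    assert(result == SmiTag(~v));   // same as tagging the complemented value
  }
  return 0;
}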
// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
GenerateNumberStubSub(masm);
break;
case Token::BIT_NOT:
GenerateNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
}
}
void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
Label non_smi, slow, call_builtin;
GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
__ bind(&non_smi);
GenerateHeapNumberCodeSub(masm, &slow);
__ bind(&slow);
GenerateTypeTransition(masm);
__ bind(&call_builtin);
GenerateGenericCodeFallback(masm);
}
void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
GenerateHeapNumberCodeBitNot(masm, &slow);
__ bind(&slow);
GenerateTypeTransition(masm);
}
void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
Label* slow) {
EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
// a0 is a heap number. Get a new heap number in a1.
if (mode_ == UNARY_OVERWRITE) {
__ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
__ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ Ret(USE_DELAY_SLOT);
__ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
} else {
Label slow_allocate_heapnumber, heapnumber_allocated;
__ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(a0);
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(a1, v0);
__ pop(a0);
}
__ bind(&heapnumber_allocated);
__ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
__ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
__ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
__ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a1);
}
}
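In both branches above, negation never touches the mantissa word: it only XORs HeapNumber::kSignMask into the exponent (high) word, which is an IEEE-754 sign-bit flip. A standalone illustration of the same trick on a host double:

#include <cassert>
#include <cstdint>
#include <cstring>

double FlipSign(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  bits ^= uint64_t{1} << 63;  // the sign bit lives in the upper (exponent) word
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

int main() {
  assert(FlipSign(1.5) == -1.5);
  assert(FlipSign(-0.25) == 0.25);
  return 0;
}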
void UnaryOpStub::GenerateHeapNumberCodeBitNot(
MacroAssembler* masm,
Label* slow) {
Label impossible;
EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
// Convert the heap number in a0 to an untagged integer in a1.
__ ConvertToInt32(a0, a1, a2, a3, f0, slow);
// Do the bitwise operation and check if the result fits in a smi.
Label try_float;
__ Neg(a1, a1);
__ Addu(a2, a1, Operand(0x40000000));
__ Branch(&try_float, lt, a2, Operand(zero_reg));
// Tag the result as a smi and we're done.
__ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
__ SmiTag(v0, a1);
// Try to store the result in a heap number.
__ bind(&try_float);
if (mode_ == UNARY_NO_OVERWRITE) {
Label slow_allocate_heapnumber, heapnumber_allocated;
// Allocate a new heap number without zapping v0, which we need if it fails.
__ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(v0); // Push the heap number, not the untagged int32.
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(a2, v0); // Move the new heap number into a2.
// Get the heap number into v0, now that the new heap number is in a2.
__ pop(v0);
}
// Convert the heap number in v0 to an untagged integer in a1.
// This can't go slow-case because it's the same number we already
// converted once.
__ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
// Negate the result.
__ Xor(a1, a1, -1);
__ bind(&heapnumber_allocated);
__ mov(v0, a2); // Move newly allocated heap number to v0.
}
// Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
__ mtc1(a1, f0);
__ cvt_d_w(f0, f0);
__ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
__ bind(&impossible);
if (FLAG_debug_code) {
__ stop("Incorrect assumption in bit-not stub");
}
}
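The Addu-with-0x40000000 test above decides whether the complemented int32 still fits in a 31-bit smi: adding 2^30 wraps to a negative 32-bit value exactly when the input lies outside [-2^30, 2^30 - 1]. A standalone version of that check:

#include <cassert>
#include <cstdint>

bool FitsInSmi(int32_t x) {
  // Matches "Addu(a2, a1, 0x40000000); Branch(&try_float, lt, a2, zero_reg)":
  // the sum is non-negative exactly for -2^30 <= x <= 2^30 - 1.
  return static_cast<int32_t>(static_cast<uint32_t>(x) + 0x40000000u) >= 0;
}

int main() {
  assert(FitsInSmi(0));
  assert(FitsInSmi((1 << 30) - 1));    // largest smi
  assert(FitsInSmi(-(1 << 30)));       // smallest smi
  assert(!FitsInSmi(1 << 30));         // one past the top -> heap number
  assert(!FitsInSmi(-(1 << 30) - 1));  // one below the bottom -> heap number
  return 0;
}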
// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
GenerateGenericStubSub(masm);
break;
case Token::BIT_NOT:
GenerateGenericStubBitNot(masm);
break;
default:
UNREACHABLE();
}
}
void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeSub(masm, &non_smi, &slow);
__ bind(&non_smi);
GenerateHeapNumberCodeSub(masm, &slow);
__ bind(&slow);
GenerateGenericCodeFallback(masm);
}
void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
GenerateHeapNumberCodeBitNot(masm, &slow);
__ bind(&slow);
GenerateGenericCodeFallback(masm);
}
void UnaryOpStub::GenerateGenericCodeFallback(
MacroAssembler* masm) {
// Handle the slow case by jumping to the JavaScript builtin.
__ push(a0);
switch (op_) {
case Token::SUB:
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
break;
case Token::BIT_NOT:
__ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
}
void BinaryOpStub::Initialize() {
platform_specific_bit_ = true; // FPU is a base requirement for V8.
}
@@ -81,71 +81,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(operand_type) {
}
private:
Token::Value op_;
UnaryOverwriteMode mode_;
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
Major MajorKey() { return UnaryOp; }
int MinorKey() {
return ModeBits::encode(mode_)
| OpBits::encode(op_)
| OperandTypeInfoBits::encode(operand_type_);
}
// Note: A lot of the helper functions below will vanish when we use virtual
// functions instead of switch more often.
void Generate(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateSmiStubSub(MacroAssembler* masm);
void GenerateSmiStubBitNot(MacroAssembler* masm);
void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
void GenerateNumberStub(MacroAssembler* masm);
void GenerateNumberStubSub(MacroAssembler* masm);
void GenerateNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateGenericStubSub(MacroAssembler* masm);
void GenerateGenericStubBitNot(MacroAssembler* masm);
void GenerateGenericCodeFallback(MacroAssembler* masm);
virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
return UnaryOpIC::ToState(operand_type_);
}
virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
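The minor key in the UnaryOpStub class above (removed by this commit in favor of the hydrogen stub) packs three fields with V8's BitField template: bit 0 for the overwrite mode, bits 1 to 7 for the token, bits 8 to 10 for the recorded operand type. A freestanding sketch of that encode/decode scheme; the enum values below are illustrative, not the real Token::Value or UnaryOpIC numbers:

#include <cassert>

// Minimal stand-in for v8::internal::BitField<T, shift, size>.
template <typename T, int shift, int size>
struct MiniBitField {
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int key) {
    return static_cast<T>((key >> shift) & ((1 << size) - 1));
  }
};

enum Mode { kAlloc = 0, kOverwrite = 1 };                          // 1 bit
enum Op { kSub = 5, kBitNot = 9 };                                 // 7 bits
enum TypeInfo { kUninitialized = 0, kSmi = 1, kNumber = 2, kGeneric = 3 };

typedef MiniBitField<Mode, 0, 1> ModeBits;
typedef MiniBitField<Op, 1, 7> OpBits;
typedef MiniBitField<TypeInfo, 8, 3> OperandTypeInfoBits;

int main() {
  int key = ModeBits::encode(kOverwrite)
          | OpBits::encode(kSub)
          | OperandTypeInfoBits::encode(kNumber);
  assert(ModeBits::decode(key) == kOverwrite);
  assert(OpBits::decode(key) == kSub);
  assert(OperandTypeInfoBits::decode(key) == kNumber);
  return 0;
}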
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -4400,10 +4400,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
const char* comment) {
// TODO(svenpanne): Allowing format strings in Comment would be nice here...
Comment cmt(masm_, comment);
bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
UnaryOpStub stub(expr->op(), overwrite);
UnaryOpStub stub(expr->op());
// GenericUnaryOpStub expects the argument to be in a0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
@@ -1971,11 +1971,11 @@ int LCodeGen::GetNextEmittedBlock() const {
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
Condition cc, Register src1, const Operand& src2) {
int left_block = instr->TrueDestination(chunk_);
int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
if (right_block == left_block) {
if (right_block == left_block || cc == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ Branch(chunk_->GetAssemblyLabel(right_block),
@@ -2015,6 +2015,25 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32() || r.IsDouble()) {
EmitBranch(instr, al, zero_reg, Operand(zero_reg));
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsTaggedNumber()) {
EmitBranch(instr, al, zero_reg, Operand(zero_reg));
}
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
__ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
EmitBranch(instr, eq, scratch0(), Operand(at));
}
}
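Restated at a high level, the new DoIsNumberAndBranch emits "always true" for untagged smi/int32/double representations (and for values statically typed as tagged numbers), otherwise a smi test followed by a heap-number map compare. A compact restatement with illustrative stand-in types, not V8's:

#include <cassert>

enum Representation { kSmiOrInteger32, kDouble, kTagged };
enum Map { kHeapNumberMap, kOtherMap };

struct TaggedValue {
  bool is_smi;
  Map map;  // only meaningful when is_smi is false
};

bool IsNumber(Representation rep, const TaggedValue& value) {
  if (rep == kSmiOrInteger32 || rep == kDouble) return true;  // EmitBranch(al)
  if (value.is_smi) return true;                              // JumpIfSmi
  return value.map == kHeapNumberMap;                         // map compare
}

int main() {
  TaggedValue heap_number = {false, kHeapNumberMap};
  TaggedValue smi = {true, kOtherMap};
  TaggedValue string = {false, kOtherMap};
  assert(IsNumber(kDouble, string));  // untagged doubles are always numbers
  assert(IsNumber(kTagged, smi));
  assert(IsNumber(kTagged, heap_number));
  assert(!IsNumber(kTagged, string));
  return 0;
}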
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -1919,6 +1919,18 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
return new(zone())
LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -118,6 +118,7 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
@@ -919,6 +920,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
class LIsNumberAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
};
class LIsStringAndBranch: public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {