Commit 99a5b9f7 authored by sgjesse@chromium.org's avatar sgjesse@chromium.org

ARM: Initial type recording binary operation stub

This implements the type recording binary operation stub for ARM. This first iteration only supports ADD. Handling of 32-bit integers is not yet implemented; that case currently just triggers a type transition. The generic case for now delegates to the generic binary operation stub.
Review URL: http://codereview.chromium.org/6342019

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6471 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1494beed
This diff is collapsed.
...@@ -218,6 +218,115 @@ class GenericBinaryOpStub : public CodeStub { ...@@ -218,6 +218,115 @@ class GenericBinaryOpStub : public CodeStub {
}; };
// ARM stub for binary operations that records type feedback.  The stub
// tracks the observed operand and result types (TRBinaryOpIC::TypeInfo):
// it starts out UNINITIALIZED and is progressively replaced by variants
// specialized for the types actually seen at runtime.
class TypeRecordingBinaryOpStub: public CodeStub {
 public:
  // Creates the initial stub for operation 'op' before any type feedback
  // has been collected.
  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
      : op_(op),
        mode_(mode),
        operands_type_(TRBinaryOpIC::UNINITIALIZED),
        result_type_(TRBinaryOpIC::UNINITIALIZED),
        name_(NULL) {
    use_vfp3_ = CpuFeatures::IsSupported(VFP3);
    // Every token value must fit in the 7-bit OpBits field of the minor key.
    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
  }

  // Re-creates a stub from a previously encoded minor key plus the
  // recorded operand/result type information.
  TypeRecordingBinaryOpStub(
      int key,
      TRBinaryOpIC::TypeInfo operands_type,
      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
      : op_(OpBits::decode(key)),
        mode_(ModeBits::decode(key)),
        use_vfp3_(VFP3Bits::decode(key)),
        operands_type_(operands_type),
        result_type_(result_type),
        name_(NULL) { }

 private:
  // Whether the smi fast path may allocate heap numbers for results that
  // do not fit in a smi, or must bail out instead.
  enum SmiCodeGenerateHeapNumberResults {
    ALLOW_HEAPNUMBER_RESULTS,
    NO_HEAPNUMBER_RESULTS
  };

  Token::Value op_;
  OverwriteMode mode_;
  bool use_vfp3_;  // True when VFP3 instructions are available on this CPU.

  // Operand type information determined at runtime.
  TRBinaryOpIC::TypeInfo operands_type_;
  TRBinaryOpIC::TypeInfo result_type_;

  // Starts out NULL; presumably filled in on demand by GetName() — confirm.
  char* name_;

  const char* GetName();

#ifdef DEBUG
  void Print() {
    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
           "(mode %d, runtime_type_info %s)\n",
           MinorKey(),
           Token::String(op_),
           static_cast<int>(mode_),
           TRBinaryOpIC::GetName(operands_type_));
  }
#endif

  // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 7> {};
  class VFP3Bits: public BitField<bool, 9, 1> {};
  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};

  Major MajorKey() { return TypeRecordingBinaryOp; }
  // Packs op, mode, VFP3 availability and the recorded type info into the
  // stub's minor key; see the bit layout comment above.
  int MinorKey() {
    return OpBits::encode(op_)
           | ModeBits::encode(mode_)
           | VFP3Bits::encode(use_vfp3_)
           | OperandTypeInfoBits::encode(operands_type_)
           | ResultTypeInfoBits::encode(result_type_);
  }

  // Main code generation entry point.
  void Generate(MacroAssembler* masm);
  void GenerateGeneric(MacroAssembler* masm);
  void GenerateSmiCode(MacroAssembler* masm,
                       Label* gc_required,
                       SmiCodeGenerateHeapNumberResults heapnumber_results);
  void GenerateLoadArguments(MacroAssembler* masm);
  void GenerateReturn(MacroAssembler* masm);

  // One generator per TRBinaryOpIC type state (uninitialized, smi, int32,
  // heap number, string, generic).
  void GenerateUninitializedStub(MacroAssembler* masm);
  void GenerateSmiStub(MacroAssembler* masm);
  void GenerateInt32Stub(MacroAssembler* masm);
  void GenerateHeapNumberStub(MacroAssembler* masm);
  void GenerateStringStub(MacroAssembler* masm);
  void GenerateGenericStub(MacroAssembler* masm);
  void GenerateAddStrings(MacroAssembler* masm);
  void GenerateCallRuntime(MacroAssembler* masm);

  void GenerateHeapResultAllocation(MacroAssembler* masm,
                                    Register result,
                                    Register heap_number_map,
                                    Register scratch1,
                                    Register scratch2,
                                    Label* gc_required);
  void GenerateRegisterArgsPush(MacroAssembler* masm);

  // Helpers used when the stub must transition to a variant specialized
  // for newly observed types.
  void GenerateTypeTransition(MacroAssembler* masm);
  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);

  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }

  virtual InlineCacheState GetICState() {
    return TRBinaryOpIC::ToState(operands_type_);
  }

  // Stores the recorded type info on the generated Code object so the IC
  // machinery can retrieve it later.
  virtual void FinishCode(Code* code) {
    code->set_type_recording_binary_op_type(operands_type_);
    code->set_type_recording_binary_op_result_type(result_type_);
  }

  friend class CodeGenerator;
};
// Flag that indicates how to generate code for the stub StringAddStub. // Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags { enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0, NO_STRING_ADD_FLAGS = 0,
......
...@@ -1548,8 +1548,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, ...@@ -1548,8 +1548,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
void FullCodeGenerator::EmitBinaryOp(Token::Value op, void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) { OverwriteMode mode) {
__ pop(r1); __ pop(r1);
GenericBinaryOpStub stub(op, mode, r1, r0); if (op == Token::ADD) {
__ CallStub(&stub); TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
} else {
GenericBinaryOpStub stub(op, mode, r1, r0);
__ CallStub(&stub);
}
context()->Plug(r0); context()->Plug(r0);
} }
......
...@@ -1704,7 +1704,7 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { ...@@ -1704,7 +1704,7 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
void PatchInlinedSmiCode(Address address) { void PatchInlinedSmiCode(Address address) {
UNIMPLEMENTED(); // Currently there is no smi inlining in the ARM full code generator.
} }
......
...@@ -804,6 +804,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, ...@@ -804,6 +804,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
return MarkAsCall(DefineFixed(result, r0), instr); return MarkAsCall(DefineFixed(result, r0), instr);
} }
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building()); ASSERT(is_building());
current_block_ = block; current_block_ = block;
...@@ -1114,7 +1115,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { ...@@ -1114,7 +1115,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathAbs: case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
case kMathFloor: case kMathFloor:
return AssignEnvironment(DefineAsRegister(result)); return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt: case kMathSqrt:
return DefineSameAsFirst(result); return DefineSameAsFirst(result);
case kMathRound: case kMathRound:
......
...@@ -1939,7 +1939,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, ...@@ -1939,7 +1939,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
void MacroAssembler::JumpIfNotBothSmi(Register reg1, void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2, Register reg2,
Label* on_not_both_smi) { Label* on_not_both_smi) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask)); tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), eq); tst(reg2, Operand(kSmiTagMask), eq);
b(ne, on_not_both_smi); b(ne, on_not_both_smi);
...@@ -1949,7 +1949,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1, ...@@ -1949,7 +1949,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
void MacroAssembler::JumpIfEitherSmi(Register reg1, void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2, Register reg2,
Label* on_either_smi) { Label* on_either_smi) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask)); tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), ne); tst(reg2, Operand(kSmiTagMask), ne);
b(eq, on_either_smi); b(eq, on_either_smi);
...@@ -1957,19 +1957,30 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1, ...@@ -1957,19 +1957,30 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
void MacroAssembler::AbortIfSmi(Register object) { void MacroAssembler::AbortIfSmi(Register object) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask)); tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is a smi"); Assert(ne, "Operand is a smi");
} }
void MacroAssembler::AbortIfNotSmi(Register object) { void MacroAssembler::AbortIfNotSmi(Register object) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask)); tst(object, Operand(kSmiTagMask));
Assert(eq, "Operand is not smi"); Assert(eq, "Operand is not smi");
} }
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number) {
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
cmp(scratch, heap_number_map);
b(ne, on_not_heap_number);
}
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first, Register first,
Register second, Register second,
...@@ -1996,7 +2007,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, ...@@ -1996,7 +2007,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
Register scratch2, Register scratch2,
Label* failure) { Label* failure) {
// Check that neither is a smi. // Check that neither is a smi.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second)); and_(scratch1, first, Operand(second));
tst(scratch1, Operand(kSmiTagMask)); tst(scratch1, Operand(kSmiTagMask));
b(eq, failure); b(eq, failure);
......
...@@ -719,6 +719,9 @@ class MacroAssembler: public Assembler { ...@@ -719,6 +719,9 @@ class MacroAssembler: public Assembler {
void SmiTag(Register reg, SBit s = LeaveCC) { void SmiTag(Register reg, SBit s = LeaveCC) {
add(reg, reg, Operand(reg), s); add(reg, reg, Operand(reg), s);
} }
void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
add(dst, src, Operand(src), s);
}
// Try to convert int32 to smi. If the value is to large, preserve // Try to convert int32 to smi. If the value is to large, preserve
// the original value and jump to not_a_smi. Destroys scratch and // the original value and jump to not_a_smi. Destroys scratch and
...@@ -733,6 +736,9 @@ class MacroAssembler: public Assembler { ...@@ -733,6 +736,9 @@ class MacroAssembler: public Assembler {
void SmiUntag(Register reg) { void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize)); mov(reg, Operand(reg, ASR, kSmiTagSize));
} }
void SmiUntag(Register dst, Register src) {
mov(dst, Operand(src, ASR, kSmiTagSize));
}
// Jump if either of the registers contain a non-smi. // Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
...@@ -743,6 +749,14 @@ class MacroAssembler: public Assembler { ...@@ -743,6 +749,14 @@ class MacroAssembler: public Assembler {
void AbortIfSmi(Register object); void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object); void AbortIfNotSmi(Register object);
// ---------------------------------------------------------------------------
// HeapNumber utilities
void JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number);
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// String utilities // String utilities
......
...@@ -2098,8 +2098,6 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) { ...@@ -2098,8 +2098,6 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type); Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
if (!code.is_null()) { if (!code.is_null()) {
TRBinaryOpIC ic;
ic.patch(*code);
if (FLAG_trace_ic) { if (FLAG_trace_ic) {
PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n", PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
TRBinaryOpIC::GetName(previous_type), TRBinaryOpIC::GetName(previous_type),
...@@ -2107,6 +2105,8 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) { ...@@ -2107,6 +2105,8 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
TRBinaryOpIC::GetName(result_type), TRBinaryOpIC::GetName(result_type),
Token::Name(op)); Token::Name(op));
} }
TRBinaryOpIC ic;
ic.patch(*code);
// Activate inlined smi code. // Activate inlined smi code.
if (previous_type == TRBinaryOpIC::UNINITIALIZED) { if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment