Commit 13e8360d authored by whesse@chromium.org's avatar whesse@chromium.org

X64 Crankshaft: Add TypeRecordingBinaryStub to X64

Review URL: http://codereview.chromium.org/6366028

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6622 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1853458a
......@@ -2061,6 +2061,8 @@ TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
}
if (left_type.IsInteger32() && right_type.IsInteger32()) {
// Platforms with 32-bit Smis have no distinct INT32 type.
if (kSmiValueSize == 32) return SMI;
return INT32;
}
......@@ -2104,9 +2106,11 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
}
if (type == TRBinaryOpIC::SMI &&
previous_type == TRBinaryOpIC::SMI) {
if (op == Token::DIV || op == Token::MUL) {
if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
// Arithmetic on two Smi inputs has yielded a heap number.
// That is the only way to get here from the Smi stub.
// With 32-bit Smis, all overflows give heap numbers, but with
// 31-bit Smis, most operations overflow to int32 results.
result_type = TRBinaryOpIC::HEAP_NUMBER;
} else {
// Other operations on SMIs that overflow yield int32s.
......
......@@ -120,9 +120,9 @@ class TypeInfo {
}
// Integer32 is an integer that can be represented as either a signed
// 32-bit integer or as an unsigned 32-bit integer. It has to be
// in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
// Integer32 is an integer that can be represented as a signed
// 32-bit integer. It has to be
// in the range [-2^31, 2^31 - 1]. We also have to check for negative 0
// as it is not an Integer32.
static inline bool IsInt32Double(double value) {
const DoubleRepresentation minus_zero(-0.0);
......
This diff is collapsed.
......@@ -270,6 +270,11 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateFloatingPointCode(MacroAssembler* masm,
Label* allocation_failure,
Label* non_numeric_failure);
void GenerateStringAddCode(MacroAssembler* masm);
void GenerateCallRuntimeCode(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
......
......@@ -1529,14 +1529,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
__ j(smi, &smi_case);
__ bind(&stub_call);
GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
if (stub.ArgsInRegistersSupported()) {
stub.GenerateCall(masm_, rdx, rcx);
} else {
__ push(rdx);
__ push(rcx);
__ CallStub(&stub);
}
TypeRecordingBinaryOpStub stub(op, mode);
__ movq(rax, rcx);
__ CallStub(&stub);
__ jmp(&done);
__ bind(&smi_case);
......@@ -1580,14 +1575,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
if (stub.ArgsInRegistersSupported()) {
__ pop(rdx);
stub.GenerateCall(masm_, rdx, rax);
} else {
__ push(result_register());
__ CallStub(&stub);
}
TypeRecordingBinaryOpStub stub(op, mode);
__ pop(rdx);
__ CallStub(&stub);
context()->Plug(rax);
}
......@@ -3217,6 +3207,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// the first smi check before calling ToNumber.
is_smi = masm_->CheckSmi(rax);
__ j(is_smi, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
......@@ -3230,12 +3221,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
GenericBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
stub.GenerateCall(masm_, rax, Smi::FromInt(1));
__ bind(&done);
TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
if (expr->op() == Token::INC) {
__ Move(rdx, Smi::FromInt(1));
} else {
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
__ CallStub(&stub);
__ bind(&done);
// Store the value returned in rax.
switch (assign_type) {
case VARIABLE:
......
......@@ -1698,7 +1698,8 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
}
void PatchInlinedSmiCode(Address address) {
UNIMPLEMENTED();
// Disabled: patching of inlined smi code is not implemented on X64,
// so we intentionally do nothing in this case.
}
......
......@@ -540,6 +540,14 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String macros.
// If object is a string, its map is loaded into object_map.
template <typename LabelType>
void JumpIfNotString(Register object,
Register object_map,
LabelType* not_string);
template <typename LabelType>
void JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
......@@ -1458,6 +1466,8 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
// dst and src1 can be the same, because the one case that bails out
// is a shift by 0, which leaves dst, and therefore src1, unchanged.
NearLabel result_ok;
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
......@@ -1591,6 +1601,17 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
}
// Jumps to |not_string| unless |object| holds a string.  If |object| is a
// heap object, its map is loaded into |object_map| as a side effect of the
// type check (so on the fall-through path the caller can reuse the map).
template <typename LabelType>
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
LabelType* not_string) {
// A Smi is an immediate integer, not a heap object, so it cannot be a
// string; bail out before touching any map.
Condition is_smi = CheckSmi(object);
// NOTE: the very next jump consumes the flags set by CheckSmi — no
// instruction may be emitted between these two calls.
j(is_smi, not_string);
// CmpObjectType loads the object's map into |object_map| and compares the
// map's instance type against FIRST_NONSTRING_TYPE.  String instance types
// sort below that boundary, so an unsigned >= comparison means non-string.
CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
j(above_equal, not_string);
}
template <typename LabelType>
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment