Commit c26f2c32 authored by lrn@chromium.org

Implemented FastAsciiStringJoin in X64 full codegen.

Review URL: http://codereview.chromium.org/6729016

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7345 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e6cbf659
......@@ -944,6 +944,12 @@ void Assembler::clc() {
emit(0xF8);
}
// Emit CLD: clear the direction flag (DF = 0) so subsequent string
// instructions (e.g. rep movs) advance rsi/rdi forward.
void Assembler::cld() {
  EnsureSpace ensure_space(this);
  // Record the start of this instruction before emitting its byte.
  last_pc_ = pc_;
  emit(0xFC);  // Single-byte CLD opcode.
}
void Assembler::cdq() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
......
......@@ -1137,6 +1137,7 @@ class Assembler : public AssemblerBase {
// Miscellaneous
void clc();
void cld();
void cpuid();
void hlt();
void int3();
......
This diff is collapsed.
......@@ -909,9 +909,9 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
Register src,
int constant) {
if (dst.is(src)) {
addq(dst, Immediate(constant));
addl(dst, Immediate(constant));
} else {
lea(dst, Operand(src, constant));
leal(dst, Operand(src, constant));
}
shl(dst, Immediate(kSmiShift));
}
......@@ -1245,12 +1245,10 @@ void MacroAssembler::SmiAdd(Register dst,
// No overflow checking. Use only when it's known that
// overflowing is impossible.
ASSERT(!dst.is(src2));
if (dst.is(src1)) {
addq(dst, src2);
} else {
if (!dst.is(src1)) {
movq(dst, src1);
addq(dst, src2);
}
addq(dst, src2);
Assert(no_overflow, "Smi addition overflow");
}
......@@ -1259,12 +1257,10 @@ void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
ASSERT(!dst.is(src2));
if (dst.is(src1)) {
subq(dst, src2);
} else {
if (!dst.is(src1)) {
movq(dst, src1);
subq(dst, src2);
}
subq(dst, src2);
Assert(no_overflow, "Smi subtraction overflow");
}
......@@ -1274,12 +1270,10 @@ void MacroAssembler::SmiSub(Register dst,
const Operand& src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (dst.is(src1)) {
subq(dst, src2);
} else {
if (!dst.is(src1)) {
movq(dst, src1);
subq(dst, src2);
}
subq(dst, src2);
Assert(no_overflow, "Smi subtraction overflow");
}
......@@ -1466,6 +1460,13 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
}
// Add the value of a smi in memory to an int32 register.
// Sets flags as a normal 32-bit add.
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  // The smi tag shift must be a whole number of bytes so the payload can
  // be addressed directly with a byte displacement off the smi's address.
  ASSERT_EQ(0, kSmiShift % kBitsPerByte);
  // Read the 32-bit payload located kSmiShift bits (kSmiShift / 8 bytes)
  // into the smi word and add it to dst.
  addl(dst, Operand(src, kSmiShift / kBitsPerByte));
}
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
movq(dst, src);
......@@ -2701,6 +2702,70 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
}
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Destination is incremented by length, source, length and scratch are
// clobbered.
// A simpler loop is faster on small copies, but slower on large ones.
// The cld() instruction must have been emitted before calling this
// function, so that the direction flag is cleared and rep movs copies
// forward.
void MacroAssembler::CopyBytes(Register destination,
                               Register source,
                               Register length,
                               int min_length,
                               Register scratch) {
  ASSERT(min_length >= 0);
  if (FLAG_debug_code) {
    // Verify the caller's promise that length >= min_length.
    cmpl(length, Immediate(min_length));
    Assert(greater_equal, "Invalid min_length");
  }
  // NOTE(review): the 'loop' label appears unused below — candidate for
  // removal.
  Label loop, done, short_string, short_loop;

  // Copies of at most kLongStringLimit bytes take the simple byte loop;
  // longer copies use rep movsq. If min_length already exceeds the limit,
  // the short path (and its runtime check) is omitted entirely.
  const int kLongStringLimit = 20;
  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(kLongStringLimit));
    j(less_equal, &short_string);
  }

  // rep movs requires its operands in these fixed registers.
  ASSERT(source.is(rsi));
  ASSERT(destination.is(rdi));
  ASSERT(length.is(rcx));

  // Because source is 8-byte aligned in our uses of this function,
  // we keep source aligned for the rep movs operation by copying the odd bytes
  // at the end of the ranges.
  movq(scratch, length);       // Preserve the full byte count.
  shrl(length, Immediate(3));  // rcx = number of whole quadwords.
  repmovsq();                  // Copies quadwords, advancing rsi and rdi.
  // Handle the remaining (length & 7) tail bytes by re-copying the final
  // eight bytes of the range; this overlaps bytes already copied above,
  // which is harmless since source and destination ranges are assumed
  // disjoint in the rep-movs direction. Requires total length >= 8,
  // guaranteed because this path is only taken when
  // length > kLongStringLimit.
  andl(scratch, Immediate(0x7));
  movq(length, Operand(source, scratch, times_1, -8));
  movq(Operand(destination, scratch, times_1, -8), length);
  // Advance destination past the tail so it ends one-past the copy.
  addq(destination, scratch);

  if (min_length <= kLongStringLimit) {
    jmp(&done);

    bind(&short_string);
    if (min_length == 0) {
      // Zero-length copies must skip the loop entirely.
      testl(length, length);
      j(zero, &done);
    }
    // scratch = end of destination range; used as the loop bound.
    lea(scratch, Operand(destination, length, times_1, 0));

    // Byte-at-a-time copy; 'length' is reused as a byte temporary here.
    bind(&short_loop);
    movb(length, Operand(source, 0));
    movb(Operand(destination, 0), length);
    incq(source);
    incq(destination);
    cmpq(destination, scratch);
    j(not_equal, &short_loop);

    bind(&done);
  }
}
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
......
......@@ -470,6 +470,11 @@ class MacroAssembler: public Assembler {
Register src1,
Register src2,
LabelType* on_not_smi_result);
template <typename LabelType>
void SmiAdd(Register dst,
Register src1,
const Operand& src2,
LabelType* on_not_smi_result);
void SmiAdd(Register dst,
Register src1,
......@@ -590,6 +595,10 @@ class MacroAssembler: public Assembler {
// Converts a positive smi to a negative index.
SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
// Add the value of a smi in memory to an int32 register.
// Sets flags as a normal add.
void AddSmiField(Register dst, const Operand& src);
// Basic Smi operations.
void Move(Register dst, Smi* source) {
LoadSmiConstant(dst, source);
......@@ -1022,6 +1031,18 @@ class MacroAssembler: public Assembler {
Handle<Object> CodeObject() { return code_object_; }
// Copy length bytes from source to destination.
// Uses scratch register internally (if you have a low-eight register
// free, do use it, otherwise kScratchRegister will be used).
// The min_length is a minimum limit on the value that length will have.
// The algorithm has some special cases that might be omitted if the string
// is known to always be long.
void CopyBytes(Register destination,
Register source,
Register length,
int min_length = 0,
Register scratch = kScratchRegister);
// ---------------------------------------------------------------------------
// StatsCounter support
......@@ -1272,6 +1293,26 @@ void MacroAssembler::SmiAdd(Register dst,
}
// Adds the smi in memory operand src2 to the smi in src1 and stores the
// smi result in dst. Jumps to on_not_smi_result if the addition overflows
// the smi range; in that case src1 is left unmodified (even when dst
// aliases src1), so the caller can retry on a slow path.
template <typename LabelType>
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            LabelType* on_not_smi_result) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    // dst aliases src1: do the add in the scratch register first so that
    // src1 still holds its original value if the overflow branch is taken.
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result);
    movq(dst, kScratchRegister);
  } else {
    // Writing dst before the add would corrupt src2 if the operand's
    // effective address were computed from dst.
    ASSERT(!src2.AddressUsesRegister(dst));
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result);
  }
}
template <typename LabelType>
void MacroAssembler::SmiSub(Register dst,
Register src1,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment