Commit 5e8b787a authored by Loren Merritt

simplified and slightly faster h264_chroma_mc8_mmx

Originally committed as revision 5214 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent a11a334d
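For context, h264_chroma_mc8 is H.264's 8-pixel-wide chroma motion compensation: a bilinear blend of four neighboring source pixels, with x and y the subpel offsets in [0,8). Below is a minimal scalar sketch of the arithmetic the MMX template implements; the function name and plain-C form are ours for illustration, not part of the commit (H264_CHROMA_OP in the template selects the put vs avg writeback).

#include <stdint.h>

/* Scalar model of H264_CHROMA_MC8_TMPL (hypothetical reference, put variant).
 * Weights: A=(8-x)*(8-y), B=x*(8-y), C=(8-x)*y, D=x*y; A+B+C+D = 64,
 * hence the +32 rounding and >>6 normalization seen in the asm. */
static void chroma_mc8_ref(uint8_t *dst, uint8_t *src, int stride,
                           int h, int x, int y)
{
    const int A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y;
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A*src[j]        + B*src[j+1] +
                      C*src[j+stride] + D*src[j+stride+1] + 32) >> 6;
        src += stride;
        dst += stride;
    }
}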
@@ -37,112 +37,56 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
     assert(x<8 && y<8 && x>=0 && y>=0);
 
-    if(y==0)
+    if(y==0 || x==0)
     {
-        /* horizontal filter only */
-        asm volatile("movd %0, %%mm5\n\t"
-                     "punpcklwd %%mm5, %%mm5\n\t"
-                     "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */
-                     "movq %1, %%mm4\n\t"
-                     "pxor %%mm7, %%mm7\n\t"
-                     "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-x */
-                     : : "rm" (x), "m" (ff_pw_8));
+        /* 1 dimensional filter only */
+        const int dxy = x ? 1 : stride;
+        asm volatile(
+            "movd %0, %%mm5\n\t"
+            "movq %1, %%mm4\n\t"
+            "punpcklwd %%mm5, %%mm5\n\t"
+            "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */
+            "movq %%mm4, %%mm6\n\t"
+            "pxor %%mm7, %%mm7\n\t"
+            "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-x */
+            "psrlw $1, %%mm6\n\t" /* mm6 = 4 */
+            :: "rm"(x+y), "m"(ff_pw_8));
 
         for(i=0; i<h; i++) {
             asm volatile(
                 /* mm0 = src[0..7], mm1 = src[1..8] */
                 "movq %0, %%mm0\n\t"
-                "movq %1, %%mm1\n\t"
-                : : "m" (src[0]), "m" (src[1]));
+                "movq %1, %%mm2\n\t"
+                :: "m"(src[0]), "m"(src[dxy]));
 
             asm volatile(
-                /* [mm2,mm3] = A * src[0..7] */
-                "movq %%mm0, %%mm2\n\t"
-                "punpcklbw %%mm7, %%mm2\n\t"
-                "pmullw %%mm4, %%mm2\n\t"
-                "movq %%mm0, %%mm3\n\t"
-                "punpckhbw %%mm7, %%mm3\n\t"
-                "pmullw %%mm4, %%mm3\n\t"
-
-                /* [mm2,mm3] += B * src[1..8] */
-                "movq %%mm1, %%mm0\n\t"
+                /* [mm0,mm1] = A * src[0..7] */
+                /* [mm2,mm3] = B * src[1..8] */
+                "movq %%mm0, %%mm1\n\t"
+                "movq %%mm2, %%mm3\n\t"
                 "punpcklbw %%mm7, %%mm0\n\t"
-                "pmullw %%mm5, %%mm0\n\t"
                 "punpckhbw %%mm7, %%mm1\n\t"
-                "pmullw %%mm5, %%mm1\n\t"
-                "paddw %%mm0, %%mm2\n\t"
-                "paddw %%mm1, %%mm3\n\t"
-
-                /* dst[0..7] = pack(([mm2,mm3] + 32) >> 6) */
-                "paddw %1, %%mm2\n\t"
-                "paddw %1, %%mm3\n\t"
-                "psrlw $3, %%mm2\n\t"
-                "psrlw $3, %%mm3\n\t"
-                "packuswb %%mm3, %%mm2\n\t"
-                H264_CHROMA_OP(%0, %%mm2)
-                "movq %%mm2, %0\n\t"
-                : "=m" (dst[0]) : "m" (ff_pw_4));
-
-            src += stride;
-            dst += stride;
-        }
-        return;
-    }
-
-    if(x==0)
-    {
-        /* vertical filter only */
-        asm volatile("movd %0, %%mm6\n\t"
-                     "punpcklwd %%mm6, %%mm6\n\t"
-                     "punpckldq %%mm6, %%mm6\n\t" /* mm6 = C = y */
-                     "movq %1, %%mm4\n\t"
-                     "pxor %%mm7, %%mm7\n\t"
-                     "psubw %%mm6, %%mm4\n\t" /* mm4 = A = 8-y */
-                     : : "rm" (y), "m" (ff_pw_8));
-
-        asm volatile(
-            /* mm0 = src[0..7] */
-            "movq %0, %%mm0\n\t"
-            : : "m" (src[0]));
-
-        for(i=0; i<h; i++) {
-            asm volatile(
-                /* [mm2,mm3] = A * src[0..7] */
-                "movq %mm0, %mm2\n\t"
-                "punpcklbw %mm7, %mm2\n\t"
-                "pmullw %mm4, %mm2\n\t"
-                "movq %mm0, %mm3\n\t"
-                "punpckhbw %mm7, %mm3\n\t"
-                "pmullw %mm4, %mm3\n\t");
+                "punpcklbw %%mm7, %%mm2\n\t"
+                "punpckhbw %%mm7, %%mm3\n\t"
+                "pmullw %%mm4, %%mm0\n\t"
+                "pmullw %%mm4, %%mm1\n\t"
+                "pmullw %%mm5, %%mm2\n\t"
+                "pmullw %%mm5, %%mm3\n\t"
+
+                /* dst[0..7] = (A * src[0..7] + B * src[1..8] + 4) >> 3 */
+                "paddw %%mm6, %%mm0\n\t"
+                "paddw %%mm6, %%mm1\n\t"
+                "paddw %%mm2, %%mm0\n\t"
+                "paddw %%mm3, %%mm1\n\t"
+                "psrlw $3, %%mm0\n\t"
+                "psrlw $3, %%mm1\n\t"
+                "packuswb %%mm1, %%mm0\n\t"
+                H264_CHROMA_OP(%0, %%mm0)
+                "movq %%mm0, %0\n\t"
+                : "=m" (dst[0]));
 
             src += stride;
-            asm volatile(
-                /* mm0 = src[0..7] */
-                "movq %0, %%mm0\n\t"
-                : : "m" (src[0]));
-
-            asm volatile(
-                /* [mm2,mm3] += C * src[0..7] */
-                "movq %mm0, %mm1\n\t"
-                "punpcklbw %mm7, %mm1\n\t"
-                "pmullw %mm6, %mm1\n\t"
-                "paddw %mm1, %mm2\n\t"
-                "movq %mm0, %mm5\n\t"
-                "punpckhbw %mm7, %mm5\n\t"
-                "pmullw %mm6, %mm5\n\t"
-                "paddw %mm5, %mm3\n\t");
-
-            asm volatile(
-                /* dst[0..7] = pack(([mm2,mm3] + 32) >> 6) */
-                "paddw %1, %%mm2\n\t"
-                "paddw %1, %%mm3\n\t"
-                "psrlw $3, %%mm2\n\t"
-                "psrlw $3, %%mm3\n\t"
-                "packuswb %%mm3, %%mm2\n\t"
-                H264_CHROMA_OP(%0, %%mm2)
-                "movq %%mm2, %0\n\t"
-                : "=m" (dst[0]) : "m" (ff_pw_4));
-
             dst += stride;
         }
         return;
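The hunk above is the heart of the simplification: the old code kept separate horizontal-only (y==0) and vertical-only (x==0) branches, while the new code observes that in either case the 2-D filter degenerates to two taps along a single axis, so one loop parameterized by the tap offset dxy covers both. A scalar sketch of the merged branch follows (hypothetical name, put variant; since one of x and y is zero, x+y is the weight of the second tap, and rounding is +4 with >>3 because the two weights sum to 8):

#include <stdint.h>

/* Scalar model of the merged 1-D branch. */
static void chroma_mc8_1d_ref(uint8_t *dst, uint8_t *src, int stride,
                              int h, int x, int y)
{
    const int dxy = x ? 1 : stride; /* second tap: next pixel or next row */
    const int B   = x + y;          /* whichever of x, y is nonzero */
    const int A   = 8 - B;
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A*src[j] + B*src[j+dxy] + 4) >> 3;
        src += stride;
        dst += stride;
    }
}

If x and y are both zero this degenerates further to a plain copy (B is 0), which the merged branch also handles correctly.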
@@ -177,57 +121,53 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
: : "m" (src[0]), "m" (src[1]));
for(i=0; i<h; i++) {
src += stride;
asm volatile(
/* [mm2,mm3] = A * src[0..7] */
/* mm2 = A * src[0..3] + B * src[1..4] */
/* mm3 = A * src[4..7] + B * src[5..8] */
"movq %%mm0, %%mm2\n\t"
"movq %%mm1, %%mm3\n\t"
"punpckhbw %%mm7, %%mm0\n\t"
"punpcklbw %%mm7, %%mm1\n\t"
"punpcklbw %%mm7, %%mm2\n\t"
"pmullw %0, %%mm2\n\t"
"movq %%mm0, %%mm3\n\t"
"punpckhbw %%mm7, %%mm3\n\t"
"pmullw %0, %%mm3\n\t"
"pmullw %0, %%mm0\n\t"
"pmullw %0, %%mm2\n\t"
"pmullw %%mm5, %%mm1\n\t"
"pmullw %%mm5, %%mm3\n\t"
"paddw %%mm1, %%mm2\n\t"
"paddw %%mm0, %%mm3\n\t"
: : "m" (AA));
/* [mm2,mm3] += B * src[1..8] */
"movq %%mm1, %%mm0\n\t"
asm volatile(
/* [mm2,mm3] += C * src[0..7] */
"movq %0, %%mm0\n\t"
"movq %%mm0, %%mm1\n\t"
"punpcklbw %%mm7, %%mm0\n\t"
"pmullw %%mm5, %%mm0\n\t"
"punpckhbw %%mm7, %%mm1\n\t"
"pmullw %%mm5, %%mm1\n\t"
"pmullw %%mm6, %%mm0\n\t"
"pmullw %%mm6, %%mm1\n\t"
"paddw %%mm0, %%mm2\n\t"
"paddw %%mm1, %%mm3\n\t"
: : "m" (AA));
: : "m" (src[0]));
src += stride;
asm volatile(
/* mm0 = src[0..7], mm1 = src[1..8] */
"movq %0, %%mm0\n\t"
/* [mm2,mm3] += D * src[1..8] */
"movq %1, %%mm1\n\t"
: : "m" (src[0]), "m" (src[1]));
asm volatile(
/* [mm2,mm3] += C * src[0..7] */
"movq %mm0, %mm4\n\t"
"punpcklbw %mm7, %mm4\n\t"
"pmullw %mm6, %mm4\n\t"
"paddw %mm4, %mm2\n\t"
"movq %mm0, %mm4\n\t"
"punpckhbw %mm7, %mm4\n\t"
"pmullw %mm6, %mm4\n\t"
"paddw %mm4, %mm3\n\t");
asm volatile(
/* [mm2,mm3] += D * src[1..8] */
"movq %%mm1, %%mm4\n\t"
"punpcklbw %%mm7, %%mm4\n\t"
"pmullw %0, %%mm4\n\t"
"paddw %%mm4, %%mm2\n\t"
"movq %%mm1, %%mm0\n\t"
"movq %%mm1, %%mm4\n\t"
"punpcklbw %%mm7, %%mm0\n\t"
"punpckhbw %%mm7, %%mm4\n\t"
"pmullw %0, %%mm4\n\t"
"pmullw %2, %%mm0\n\t"
"pmullw %2, %%mm4\n\t"
"paddw %%mm0, %%mm2\n\t"
"paddw %%mm4, %%mm3\n\t"
: : "m" (DD));
"movq %0, %%mm0\n\t"
: : "m" (src[0]), "m" (src[1]), "m" (DD));
asm volatile(
/* dst[0..7] = pack(([mm2,mm3] + 32) >> 6) */
/* dst[0..7] = ([mm2,mm3] + 32) >> 6 */
"paddw %1, %%mm2\n\t"
"paddw %1, %%mm3\n\t"
"psrlw $6, %%mm2\n\t"
......
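The second hunk reworks the general 2-D case around software pipelining: mm0/mm1 always hold src[0..7] and src[1..8] of the row fetched in the previous iteration, src += stride moves to the top of the loop, and the next row's loads are folded into the C- and D-term blocks, so every source row is read once. A scalar sketch of that structure (arrays stand in for the mm registers; the naming is ours, not FFmpeg's):

#include <stdint.h>

/* Scalar model of the pipelined 2-D loop: each row is loaded once, and the
 * row fetched for the C/D terms of one iteration supplies the A/B terms of
 * the next. */
static void chroma_mc8_2d_ref(uint8_t *dst, uint8_t *src, int stride,
                              int h, int x, int y)
{
    const int A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y;
    int top[9], bot[9], i, j;
    for (j = 0; j < 9; j++)
        top[j] = src[j];          /* preload, like mm0/mm1 before the loop */
    for (i = 0; i < h; i++) {
        src += stride;
        for (j = 0; j < 9; j++)
            bot[j] = src[j];      /* the only load of this row */
        for (j = 0; j < 8; j++)
            dst[j] = (A*top[j] + B*top[j+1] +
                      C*bot[j] + D*bot[j+1] + 32) >> 6;
        for (j = 0; j < 9; j++)
            top[j] = bot[j];      /* becomes the next iteration's top row */
        dst += stride;
    }
}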