Commit 84ecbbfb authored by Clément Bœsch

Merge commit 'f1a9eee4'

* commit 'f1a9eee4':
  x86: Add missing movsxd for the int stride parameter
Merged-by: Clément Bœsch <u@pkh.me>
parents 1ad3ffad f1a9eee4
...@@ -82,6 +82,7 @@ SECTION .text ...@@ -82,6 +82,7 @@ SECTION .text
INIT_MMX mmx INIT_MMX mmx
; void ff_h264_idct_add_8_mmx(uint8_t *dst, int16_t *block, int stride) ; void ff_h264_idct_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_add_8, 3, 3, 0 cglobal h264_idct_add_8, 3, 3, 0
movsxdifnidn r2, r2d
IDCT4_ADD r0, r1, r2 IDCT4_ADD r0, r1, r2
RET RET
...@@ -204,6 +205,7 @@ cglobal h264_idct_add_8, 3, 3, 0 ...@@ -204,6 +205,7 @@ cglobal h264_idct_add_8, 3, 3, 0
INIT_MMX mmx INIT_MMX mmx
; void ff_h264_idct8_add_8_mmx(uint8_t *dst, int16_t *block, int stride) ; void ff_h264_idct8_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 0 cglobal h264_idct8_add_8, 3, 4, 0
movsxdifnidn r2, r2d
%assign pad 128+4-(stack_offset&7) %assign pad 128+4-(stack_offset&7)
SUB rsp, pad SUB rsp, pad
...@@ -272,6 +274,7 @@ cglobal h264_idct8_add_8, 3, 4, 0 ...@@ -272,6 +274,7 @@ cglobal h264_idct8_add_8, 3, 4, 0
INIT_XMM sse2 INIT_XMM sse2
; void ff_h264_idct8_add_8_sse2(uint8_t *dst, int16_t *block, int stride) ; void ff_h264_idct8_add_8_sse2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 10 cglobal h264_idct8_add_8, 3, 4, 10
movsxdifnidn r2, r2d
IDCT8_ADD_SSE r0, r1, r2, r3 IDCT8_ADD_SSE r0, r1, r2, r3
RET RET
...@@ -310,6 +313,7 @@ INIT_MMX mmxext ...@@ -310,6 +313,7 @@ INIT_MMX mmxext
; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride) ; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
%if ARCH_X86_64 %if ARCH_X86_64
cglobal h264_idct_dc_add_8, 3, 4, 0 cglobal h264_idct_dc_add_8, 3, 4, 0
movsxd r2, r2d
movsx r3, word [r1] movsx r3, word [r1]
mov dword [r1], 0 mov dword [r1], 0
DC_ADD_MMXEXT_INIT r3, r2 DC_ADD_MMXEXT_INIT r3, r2
...@@ -318,6 +322,7 @@ cglobal h264_idct_dc_add_8, 3, 4, 0 ...@@ -318,6 +322,7 @@ cglobal h264_idct_dc_add_8, 3, 4, 0
; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride) ; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 3, 4, 0 cglobal h264_idct8_dc_add_8, 3, 4, 0
movsxd r2, r2d
movsx r3, word [r1] movsx r3, word [r1]
mov dword [r1], 0 mov dword [r1], 0
DC_ADD_MMXEXT_INIT r3, r2 DC_ADD_MMXEXT_INIT r3, r2
...@@ -352,6 +357,7 @@ INIT_MMX mmx ...@@ -352,6 +357,7 @@ INIT_MMX mmx
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
movsxdifnidn r3, r3d
xor r5, r5 xor r5, r5
%ifdef PIC %ifdef PIC
lea picregq, [scan8_mem] lea picregq, [scan8_mem]
...@@ -375,6 +381,7 @@ cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, ...@@ -375,6 +381,7 @@ cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride,
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
movsxdifnidn r3, r3d
%assign pad 128+4-(stack_offset&7) %assign pad 128+4-(stack_offset&7)
SUB rsp, pad SUB rsp, pad
...@@ -409,6 +416,7 @@ INIT_MMX mmxext ...@@ -409,6 +416,7 @@ INIT_MMX mmxext
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
movsxdifnidn r3, r3d
xor r5, r5 xor r5, r5
%ifdef PIC %ifdef PIC
lea picregq, [scan8_mem] lea picregq, [scan8_mem]
...@@ -456,6 +464,7 @@ INIT_MMX mmx ...@@ -456,6 +464,7 @@ INIT_MMX mmx
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
movsxdifnidn r3, r3d
xor r5, r5 xor r5, r5
%ifdef PIC %ifdef PIC
lea picregq, [scan8_mem] lea picregq, [scan8_mem]
...@@ -481,6 +490,7 @@ INIT_MMX mmxext ...@@ -481,6 +490,7 @@ INIT_MMX mmxext
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
movsxdifnidn r3, r3d
xor r5, r5 xor r5, r5
%ifdef PIC %ifdef PIC
lea picregq, [scan8_mem] lea picregq, [scan8_mem]
...@@ -525,6 +535,7 @@ cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, s ...@@ -525,6 +535,7 @@ cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, s
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
movsxdifnidn r3, r3d
%assign pad 128+4-(stack_offset&7) %assign pad 128+4-(stack_offset&7)
SUB rsp, pad SUB rsp, pad
...@@ -587,6 +598,7 @@ INIT_XMM sse2 ...@@ -587,6 +598,7 @@ INIT_XMM sse2
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
movsxdifnidn r3, r3d
xor r5, r5 xor r5, r5
%ifdef PIC %ifdef PIC
lea picregq, [scan8_mem] lea picregq, [scan8_mem]
...@@ -638,6 +650,7 @@ INIT_XMM cpuname ...@@ -638,6 +650,7 @@ INIT_XMM cpuname
INIT_MMX mmx INIT_MMX mmx
h264_idct_add8_mmx_plane: h264_idct_add8_mmx_plane:
movsxdifnidn r3, r3d
.nextblock: .nextblock:
movzx r6, byte [scan8+r5] movzx r6, byte [scan8+r5]
movzx r6, byte [r4+r6] movzx r6, byte [r4+r6]
...@@ -664,6 +677,7 @@ h264_idct_add8_mmx_plane: ...@@ -664,6 +677,7 @@ h264_idct_add8_mmx_plane:
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
movsxdifnidn r3, r3d
mov r5, 16 mov r5, 16
add r2, 512 add r2, 512
%ifdef PIC %ifdef PIC
...@@ -684,6 +698,7 @@ cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, ...@@ -684,6 +698,7 @@ cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride,
RET RET
h264_idct_add8_mmxext_plane: h264_idct_add8_mmxext_plane:
movsxdifnidn r3, r3d
.nextblock: .nextblock:
movzx r6, byte [scan8+r5] movzx r6, byte [scan8+r5]
movzx r6, byte [r4+r6] movzx r6, byte [r4+r6]
...@@ -730,6 +745,7 @@ INIT_MMX mmxext ...@@ -730,6 +745,7 @@ INIT_MMX mmxext
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
movsxdifnidn r3, r3d
mov r5, 16 mov r5, 16
add r2, 512 add r2, 512
%if ARCH_X86_64 %if ARCH_X86_64
...@@ -751,6 +767,7 @@ cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, ...@@ -751,6 +767,7 @@ cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride,
; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6=clobbered ; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6=clobbered
h264_idct_dc_add8_mmxext: h264_idct_dc_add8_mmxext:
movsxdifnidn r3, r3d
movd m0, [r2 ] ; 0 0 X D movd m0, [r2 ] ; 0 0 X D
mov word [r2+ 0], 0 mov word [r2+ 0], 0
punpcklwd m0, [r2+32] ; x X d D punpcklwd m0, [r2+32] ; x X d D
...@@ -771,6 +788,7 @@ ALIGN 16 ...@@ -771,6 +788,7 @@ ALIGN 16
INIT_XMM sse2 INIT_XMM sse2
; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride ; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
h264_add8x4_idct_sse2: h264_add8x4_idct_sse2:
movsxdifnidn r3, r3d
movq m0, [r2+ 0] movq m0, [r2+ 0]
movq m1, [r2+ 8] movq m1, [r2+ 8]
movq m2, [r2+16] movq m2, [r2+16]
...@@ -814,6 +832,7 @@ h264_add8x4_idct_sse2: ...@@ -814,6 +832,7 @@ h264_add8x4_idct_sse2:
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8 cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
movsxdifnidn r3, r3d
%if ARCH_X86_64 %if ARCH_X86_64
mov r5, r0 mov r5, r0
%endif %endif
...@@ -862,6 +881,7 @@ cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8 ...@@ -862,6 +881,7 @@ cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8 cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
movsxdifnidn r3, r3d
%if ARCH_X86_64 %if ARCH_X86_64
mov r7, r0 mov r7, r0
%endif %endif
...@@ -914,6 +934,7 @@ cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8 ...@@ -914,6 +934,7 @@ cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
; int16_t *block, int stride, ; int16_t *block, int stride,
; const uint8_t nnzc[6 * 8]) ; const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8 cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8
movsxdifnidn r3, r3d
add r2, 512 add r2, 512
%if ARCH_X86_64 %if ARCH_X86_64
mov r7, r0 mov r7, r0
......
...@@ -76,6 +76,7 @@ cextern pd_32 ...@@ -76,6 +76,7 @@ cextern pd_32
%macro IDCT_ADD_10 0 %macro IDCT_ADD_10 0
cglobal h264_idct_add_10, 3,3 cglobal h264_idct_add_10, 3,3
movsxdifnidn r2, r2d
IDCT4_ADD_10 r0, r1, r2 IDCT4_ADD_10 r0, r1, r2
RET RET
%endmacro %endmacro
...@@ -137,6 +138,7 @@ ADD4x4IDCT ...@@ -137,6 +138,7 @@ ADD4x4IDCT
%macro IDCT_ADD16_10 0 %macro IDCT_ADD16_10 0
cglobal h264_idct_add16_10, 5,6 cglobal h264_idct_add16_10, 5,6
movsxdifnidn r3, r3d
ADD16_OP 0, 4+1*8 ADD16_OP 0, 4+1*8
ADD16_OP 1, 5+1*8 ADD16_OP 1, 5+1*8
ADD16_OP 2, 4+2*8 ADD16_OP 2, 4+2*8
...@@ -195,6 +197,7 @@ IDCT_ADD16_10 ...@@ -195,6 +197,7 @@ IDCT_ADD16_10
INIT_MMX mmxext INIT_MMX mmxext
cglobal h264_idct_dc_add_10,3,3 cglobal h264_idct_dc_add_10,3,3
movsxdifnidn r2, r2d
movd m0, [r1] movd m0, [r1]
mov dword [r1], 0 mov dword [r1], 0
paddd m0, [pd_32] paddd m0, [pd_32]
...@@ -210,6 +213,7 @@ cglobal h264_idct_dc_add_10,3,3 ...@@ -210,6 +213,7 @@ cglobal h264_idct_dc_add_10,3,3
;----------------------------------------------------------------------------- ;-----------------------------------------------------------------------------
%macro IDCT8_DC_ADD 0 %macro IDCT8_DC_ADD 0
cglobal h264_idct8_dc_add_10,3,4,7 cglobal h264_idct8_dc_add_10,3,4,7
movsxdifnidn r2, r2d
movd m0, [r1] movd m0, [r1]
mov dword[r1], 0 mov dword[r1], 0
paddd m0, [pd_32] paddd m0, [pd_32]
...@@ -279,6 +283,7 @@ idct_dc_add %+ SUFFIX: ...@@ -279,6 +283,7 @@ idct_dc_add %+ SUFFIX:
ret ret
cglobal h264_idct_add16intra_10,5,7,8 cglobal h264_idct_add16intra_10,5,7,8
movsxdifnidn r3, r3d
ADD16_OP_INTRA 0, 4+1*8 ADD16_OP_INTRA 0, 4+1*8
ADD16_OP_INTRA 2, 4+2*8 ADD16_OP_INTRA 2, 4+2*8
ADD16_OP_INTRA 4, 6+1*8 ADD16_OP_INTRA 4, 6+1*8
...@@ -313,6 +318,7 @@ IDCT_ADD16INTRA_10 ...@@ -313,6 +318,7 @@ IDCT_ADD16INTRA_10
;----------------------------------------------------------------------------- ;-----------------------------------------------------------------------------
%macro IDCT_ADD8 0 %macro IDCT_ADD8 0
cglobal h264_idct_add8_10,5,8,7 cglobal h264_idct_add8_10,5,8,7
movsxdifnidn r3, r3d
%if ARCH_X86_64 %if ARCH_X86_64
mov r7, r0 mov r7, r0
%endif %endif
...@@ -449,6 +455,7 @@ IDCT_ADD8 ...@@ -449,6 +455,7 @@ IDCT_ADD8
%macro IDCT8_ADD 0 %macro IDCT8_ADD 0
cglobal h264_idct8_add_10, 3,4,16 cglobal h264_idct8_add_10, 3,4,16
movsxdifnidn r2, r2d
%if UNIX64 == 0 %if UNIX64 == 0
%assign pad 16-gprsize-(stack_offset&15) %assign pad 16-gprsize-(stack_offset&15)
sub rsp, pad sub rsp, pad
...@@ -573,6 +580,7 @@ IDCT8_ADD ...@@ -573,6 +580,7 @@ IDCT8_ADD
%macro IDCT8_ADD4 0 %macro IDCT8_ADD4 0
cglobal h264_idct8_add4_10, 0,7,16 cglobal h264_idct8_add4_10, 0,7,16
movsxdifnidn r3, r3d
%assign pad 16-gprsize-(stack_offset&15) %assign pad 16-gprsize-(stack_offset&15)
SUB rsp, pad SUB rsp, pad
mov r5, r0mp mov r5, r0mp
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment