Commit 8c433d8a authored by Ronald S. Bultje's avatar Ronald S. Bultje

swscale: rename "dstw" to "w" to prevent name collisions.

"dstw" can collide with the word-sized version of the "dst" argument, causing
all kinds of weird stuff down the pipe.
parent ef66a0ed
...@@ -273,17 +273,17 @@ yuv2planeX_fn 10, 7, 5 ...@@ -273,17 +273,17 @@ yuv2planeX_fn 10, 7, 5
%macro yuv2plane1_mainloop 2 %macro yuv2plane1_mainloop 2
.loop_%2: .loop_%2:
%if %1 == 8 %if %1 == 8
paddsw m0, m2, [srcq+dstwq*2+mmsize*0] paddsw m0, m2, [srcq+wq*2+mmsize*0]
paddsw m1, m3, [srcq+dstwq*2+mmsize*1] paddsw m1, m3, [srcq+wq*2+mmsize*1]
psraw m0, 7 psraw m0, 7
psraw m1, 7 psraw m1, 7
packuswb m0, m1 packuswb m0, m1
mov%2 [dstq+dstwq], m0 mov%2 [dstq+wq], m0
%elif %1 == 16 %elif %1 == 16
paddd m0, m4, [srcq+dstwq*4+mmsize*0] paddd m0, m4, [srcq+wq*4+mmsize*0]
paddd m1, m4, [srcq+dstwq*4+mmsize*1] paddd m1, m4, [srcq+wq*4+mmsize*1]
paddd m2, m4, [srcq+dstwq*4+mmsize*2] paddd m2, m4, [srcq+wq*4+mmsize*2]
paddd m3, m4, [srcq+dstwq*4+mmsize*3] paddd m3, m4, [srcq+wq*4+mmsize*3]
psrad m0, 3 psrad m0, 3
psrad m1, 3 psrad m1, 3
psrad m2, 3 psrad m2, 3
...@@ -297,40 +297,40 @@ yuv2planeX_fn 10, 7, 5 ...@@ -297,40 +297,40 @@ yuv2planeX_fn 10, 7, 5
paddw m0, m5 paddw m0, m5
paddw m2, m5 paddw m2, m5
%endif ; mmx/sse2/sse4/avx %endif ; mmx/sse2/sse4/avx
mov%2 [dstq+dstwq*2+mmsize*0], m0 mov%2 [dstq+wq*2+mmsize*0], m0
mov%2 [dstq+dstwq*2+mmsize*1], m2 mov%2 [dstq+wq*2+mmsize*1], m2
%else ; %1 == 9/10 %else ; %1 == 9/10
paddsw m0, m2, [srcq+dstwq*2+mmsize*0] paddsw m0, m2, [srcq+wq*2+mmsize*0]
paddsw m1, m2, [srcq+dstwq*2+mmsize*1] paddsw m1, m2, [srcq+wq*2+mmsize*1]
psraw m0, 15 - %1 psraw m0, 15 - %1
psraw m1, 15 - %1 psraw m1, 15 - %1
pmaxsw m0, m4 pmaxsw m0, m4
pmaxsw m1, m4 pmaxsw m1, m4
pminsw m0, m3 pminsw m0, m3
pminsw m1, m3 pminsw m1, m3
mov%2 [dstq+dstwq*2+mmsize*0], m0 mov%2 [dstq+wq*2+mmsize*0], m0
mov%2 [dstq+dstwq*2+mmsize*1], m1 mov%2 [dstq+wq*2+mmsize*1], m1
%endif %endif
add dstwq, mmsize add wq, mmsize
jl .loop_%2 jl .loop_%2
%endmacro %endmacro
%macro yuv2plane1_fn 3 %macro yuv2plane1_fn 3
cglobal yuv2plane1_%1, %3, %3, %2, src, dst, dstw, dither, offset cglobal yuv2plane1_%1, %3, %3, %2, src, dst, w, dither, offset
movsxdifnidn dstwq, dstwd movsxdifnidn wq, wd
add dstwq, mmsize - 1 add wq, mmsize - 1
and dstwq, ~(mmsize - 1) and wq, ~(mmsize - 1)
%if %1 == 8 %if %1 == 8
add dstq, dstwq add dstq, wq
%else ; %1 != 8 %else ; %1 != 8
lea dstq, [dstq+dstwq*2] lea dstq, [dstq+wq*2]
%endif ; %1 == 8 %endif ; %1 == 8
%if %1 == 16 %if %1 == 16
lea srcq, [srcq+dstwq*4] lea srcq, [srcq+wq*4]
%else ; %1 != 16 %else ; %1 != 16
lea srcq, [srcq+dstwq*2] lea srcq, [srcq+wq*2]
%endif ; %1 == 16 %endif ; %1 == 16
neg dstwq neg wq
%if %1 == 8 %if %1 == 8
pxor m4, m4 ; zero pxor m4, m4 ; zero
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment