Commit ea41e6d6 authored by Michael Niedermayer

Merge commit '9c12c6ff'

* commit '9c12c6ff':
  motion_est: convert stride to ptrdiff_t

Conflicts:
	libavcodec/me_cmp.c
	libavcodec/ppc/me_cmp.c
	libavcodec/x86/me_cmp_init.c

See: 9c669672
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents b4d8724a 9c12c6ff
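Background note (not part of the original commit message): the stride is added to pixel pointers once per row, and motion estimation may walk a reference picture with a negative stride, so ptrdiff_t, the standard type for pointer differences, is the natural parameter type. It also keeps the row-offset arithmetic at pointer width on 64-bit targets. A minimal illustrative sketch (not FFmpeg code, hypothetical helper names):

```c
#include <stddef.h>
#include <stdint.h>

/* Illustration only: with an int stride the product y * stride is
 * evaluated in 32-bit arithmetic and only then widened, so it can
 * overflow for large offsets; with ptrdiff_t the whole computation
 * happens at pointer width and negative strides behave naturally. */
static uint8_t *row_addr_int(uint8_t *base, int y, int stride)
{
    return base + y * stride;       /* 32-bit multiply, then widened */
}

static uint8_t *row_addr_ptrdiff(uint8_t *base, int y, ptrdiff_t stride)
{
    return base + y * stride;       /* multiply carried out as ptrdiff_t */
}
```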
@@ -26,17 +26,17 @@
 #include "libavcodec/mpegvideo.h"
 int ff_pix_abs16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                       int line_size, int h);
+                       ptrdiff_t stride, int h);
 int ff_pix_abs16_x2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                          int line_size, int h);
+                          ptrdiff_t stride, int h);
 int ff_pix_abs16_y2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                          int line_size, int h);
+                          ptrdiff_t stride, int h);
 int ff_pix_abs8_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                      int line_size, int h);
+                      ptrdiff_t stride, int h);
 int ff_sse16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                   int line_size, int h);
+                   ptrdiff_t stride, int h);
 av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)
 {
@@ -47,7 +47,8 @@ struct MpegEncContext;
  * width < 8 are neither used nor implemented. */
 typedef int (*me_cmp_func)(struct MpegEncContext *c,
                            uint8_t *blk1 /* align width (8 or 16) */,
-                           uint8_t *blk2 /* align 1 */, int line_size, int h);
+                           uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
+                           int h);
 typedef struct MECmpContext {
     int (*sum_abs_dctelem)(int16_t *block /* align 16 */);
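For orientation, a plain-C comparison function matching the updated me_cmp_func prototype could look like the sketch below; this is an illustrative SAD only, not one of the implementations touched by this diff:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct MpegEncContext;

/* Illustrative 8-wide SAD with the post-change signature:
 * int (*me_cmp_func)(struct MpegEncContext *c, uint8_t *blk1,
 *                    uint8_t *blk2, ptrdiff_t stride, int h); */
static int sad8_example(struct MpegEncContext *c, uint8_t *blk1,
                        uint8_t *blk2, ptrdiff_t stride, int h)
{
    int sum = 0;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            sum += abs(blk1[x] - blk2[x]);
        blk1 += stride;   /* ptrdiff_t: a negative stride walks the block upwards */
        blk2 += stride;
    }
    return sum;
}
```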
@@ -290,7 +290,7 @@ static int cmp_qpel(MpegEncContext *s, const int x, const int y, const int subx,
 #include "motion_est_template.c"
 static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
-                    int stride, int h)
+                    ptrdiff_t stride, int h)
 {
     return 0;
 }
@@ -55,7 +55,7 @@
 }
 #endif
 static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                            int line_size, int h)
+                            ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s = 0;
@@ -83,8 +83,8 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
     sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
@@ -95,7 +95,7 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                            int line_size, int h)
+                            ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s = 0;
@@ -105,9 +105,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
     vector signed int sumdiffs;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
-    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+    /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
      * iteration becomes pix2 in the next iteration. We can use this
      * fact to avoid a potentially expensive unaligned read, each
      * time around the loop.
@@ -132,9 +132,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
+        pix1 += stride;
         pix2v = pix3v;
-        pix3 += line_size;
+        pix3 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -145,11 +145,11 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                             int line_size, int h)
+                             ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s = 0;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
     const vector unsigned char zero =
         (const vector unsigned char) vec_splat_u8(0);
     const vector unsigned short two =
@@ -163,7 +163,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     vector unsigned char perm1, perm2, pix2v, pix2iv;
     GET_PERM(perm1, perm2, pix2);
-    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+    /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
      * iteration becomes pix2 in the next iteration. We can use this
      * fact to avoid a potentially expensive unaligned read, as well
      * as some splitting, and vector addition each time around the loop.
@@ -219,8 +219,8 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
-        pix3 += line_size;
+        pix1 += stride;
+        pix3 += stride;
         /* Transfer the calculated values for pix3 into pix2. */
         t1 = t3;
         t2 = t4;
@@ -234,7 +234,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h)
+                         ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s;
@@ -256,8 +256,8 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -269,7 +269,7 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                        int line_size, int h)
+                        ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s;
@@ -298,8 +298,8 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -313,7 +313,7 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 /* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
  * It's the sad8_altivec code above w/ squaring added. */
 static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                        int line_size, int h)
+                        ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s;
@@ -343,8 +343,8 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Square the values and add them to our sum. */
         sum = vec_msum(t5, t5, sum);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -358,7 +358,7 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 /* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
  * It's the sad16_altivec code above w/ squaring added. */
 static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h)
+                         ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s;
@@ -383,8 +383,8 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Square the values and add them to our sum. */
         sum = vec_msum(t5, t5, sum);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -396,7 +396,7 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
-                                     uint8_t *src, int stride, int h)
+                                     uint8_t *src, ptrdiff_t stride, int h)
 {
     int __attribute__((aligned(16))) sum;
     register const vector unsigned char vzero =
@@ -522,7 +522,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
  * but xlc goes to around 660 on the regular C code...
  */
 static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
-                                      uint8_t *src, int stride, int h)
+                                      uint8_t *src, ptrdiff_t stride, int h)
 {
     int __attribute__((aligned(16))) sum;
     register vector signed short
@@ -713,7 +713,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
 }
 static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst,
-                                    uint8_t *src, int stride, int h)
+                                    uint8_t *src, ptrdiff_t stride, int h)
 {
     int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
@@ -745,7 +745,7 @@ void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
                      decomposition_count, y);
 }
-static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size,
+static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size,
                       int w, int h, int type)
 {
     int s, i, j;
@@ -814,32 +814,32 @@ static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, in
     return s >> 9;
 }
-static int w53_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int w53_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 8, h, 1);
 }
-static int w97_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int w97_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 8, h, 0);
 }
-static int w53_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int w53_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 16, h, 1);
 }
-static int w97_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int w97_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 16, h, 0);
 }
-int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 32, h, 1);
 }
-int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 32, h, 0);
 }
@@ -105,8 +105,8 @@ void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
                               int src_y, int src_stride, slice_buffer *sb,
                               int add, uint8_t *dst8);
-int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
+int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
+int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
 void ff_spatial_dwt(int *buffer, int *temp, int width, int height, int stride,
                     int type, int decomposition_count);
@@ -215,7 +215,7 @@ hadamard8_16_wrapper %1, 3
 %elif cpuflag(mmx)
 ALIGN 16
 ; int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,
-;                               uint8_t *src2, int stride, int h)
+;                               uint8_t *src2, ptrdiff_t stride, int h)
 ; r0 = void *s = unused, int h = unused (always 8)
 ; note how r1, r2 and r3 are not clobbered in this function, so 16x16
 ; can simply call this 2x2x (and that's why we access rsp+gprsize
@@ -280,7 +280,7 @@ INIT_XMM ssse3
 HADAMARD8_DIFF 9
 ; int ff_sse*_*(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-;               int line_size, int h)
+;               ptrdiff_t line_size, int h)
 %macro SUM_SQUARED_ERRORS 1
 cglobal sse%1, 5,5,8, v, pix1, pix2, lsize, h
@@ -395,7 +395,7 @@ INIT_XMM ssse3
 SUM_ABS_DCTELEM 6, 2
 ;------------------------------------------------------------------------------
-; int ff_hf_noise*_mmx(uint8_t *pix1, int lsize, int h)
+; int ff_hf_noise*_mmx(uint8_t *pix1, ptrdiff_t lsize, int h)
 ;------------------------------------------------------------------------------
 ; %1 = 8/16. %2-5=m#
 %macro HF_NOISE_PART1 5
@@ -437,7 +437,6 @@ SUM_ABS_DCTELEM 6, 2
 ; %1 = 8/16
 %macro HF_NOISE 1
 cglobal hf_noise%1, 3,3,0, pix1, lsize, h
-    movsxdifnidn lsizeq, lsized
     sub hd, 2
     pxor m7, m7
     pxor m6, m6
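The dropped movsxdifnidn is presumably a direct consequence of the type change: as a 32-bit int, lsize had to be sign-extended to a full 64-bit register before it could be used in pointer arithmetic on x86_64, whereas a ptrdiff_t argument already arrives at register width, so no extension is needed.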
@@ -472,7 +471,7 @@ HF_NOISE 8
 HF_NOISE 16
 ;---------------------------------------------------------------------------------------
-;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;---------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD 1
@@ -527,7 +526,7 @@ INIT_XMM sse2
 SAD 16
 ;------------------------------------------------------------------------------------------
-;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_X2 1
@@ -604,7 +603,7 @@ INIT_XMM sse2
 SAD_X2 16
 ;------------------------------------------------------------------------------------------
-;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_Y2 1
@@ -674,7 +673,7 @@ INIT_XMM sse2
 SAD_Y2 16
 ;-------------------------------------------------------------------------------------------
-;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;-------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_APPROX_XY2 1
@@ -776,7 +775,7 @@ SAD_APPROX_XY2 16
 ;--------------------------------------------------------------------
 ;int ff_vsad_intra(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-;                  int line_size, int h);
+;                  ptrdiff_t line_size, int h);
 ;--------------------------------------------------------------------
 ; %1 = 8/16
 %macro VSAD_INTRA 1
@@ -837,7 +836,7 @@ VSAD_INTRA 16
 ;---------------------------------------------------------------------
 ;int ff_vsad_approx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-;                   int line_size, int h);
+;                   ptrdiff_t line_size, int h);
 ;---------------------------------------------------------------------
 ; %1 = 8/16
 %macro VSAD_APPROX 1