Commit 9c12c6ff authored by Vittorio Giovara

motion_est: convert stride to ptrdiff_t

CC: libav-stable@libav.org
Bug-Id: CID 700556 / CID 700557 / CID 700558
parent f856d9c2
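The change is mechanical: these comparison functions use stride only as a per-row offset added to the pixel pointers, and ptrdiff_t is the C type meant for pointer offsets, so it has the width of a pointer and stays well-defined when the stride is negative. As a rough sketch (the helper below is invented for illustration and is not part of the commit), a scalar routine with the new-style signature looks like this:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: a plain-C 8-wide sum of absolute differences over h rows
 * with the new-style parameter type. Name and shape are hypothetical. */
int sad8_example(const uint8_t *blk1, const uint8_t *blk2,
                 ptrdiff_t stride, int h)
{
    int sum = 0;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            sum += abs(blk1[x] - blk2[x]);
        blk1 += stride;   /* ptrdiff_t offset: fine even for negative strides */
        blk2 += stride;
    }
    return sum;
}

The real functions in the diff keep exactly this pointer-advance pattern (pix1 += stride; pix2 += stride;); only the declared type of the parameter changes. The hand-written assembly versions, such as the ARM routines declared below, also have to agree with the prototype on the parameter's width.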
@@ -26,17 +26,17 @@
 #include "libavcodec/mpegvideo.h"
 int ff_pix_abs16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                       int line_size, int h);
+                       ptrdiff_t stride, int h);
 int ff_pix_abs16_x2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                          int line_size, int h);
+                          ptrdiff_t stride, int h);
 int ff_pix_abs16_y2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                          int line_size, int h);
+                          ptrdiff_t stride, int h);
 int ff_pix_abs8_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                      int line_size, int h);
+                      ptrdiff_t stride, int h);
 int ff_sse16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                   int line_size, int h);
+                   ptrdiff_t stride, int h);
 av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)
 {
...
This diff is collapsed.
@@ -33,7 +33,8 @@ struct MpegEncContext;
  * width < 8 are neither used nor implemented. */
 typedef int (*me_cmp_func)(struct MpegEncContext *c,
                            uint8_t *blk1 /* align width (8 or 16) */,
-                           uint8_t *blk2 /* align 1 */, int line_size, int h);
+                           uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
+                           int h);
 typedef struct MECmpContext {
     int (*sum_abs_dctelem)(int16_t *block /* align 16 */);
...
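This typedef is what ties the commit together: the routines changed here are all installed into MECmpContext through the me_cmp_func pointer type (as ff_me_cmp_init_arm above does for the ARM versions), so an implementation whose parameter list still says int line_size no longer matches it. A minimal stand-alone sketch of that constraint (the names below are invented; only the ptrdiff_t-versus-int distinction mirrors the real typedef, which also takes a struct MpegEncContext *):

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for me_cmp_func. */
typedef int (*cmp_fn)(const uint8_t *blk1, const uint8_t *blk2,
                      ptrdiff_t stride, int h);

int new_style(const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h);
int old_style(const uint8_t *blk1, const uint8_t *blk2, int line_size, int h);

cmp_fn ok = new_style;        /* matches the updated prototype */
/* cmp_fn bad = old_style; */ /* would not match: int vs ptrdiff_t stride */

That is why the C, ARM and AltiVec implementations are all converted in the same commit: mixing old and new signatures behind one function-pointer type would be diagnosed (or silently wrong) at the assignment sites.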
@@ -36,7 +36,7 @@
 #if HAVE_ALTIVEC
 static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                            int line_size, int h)
+                            ptrdiff_t stride, int h)
 {
     int i, s = 0;
     const vector unsigned char zero =
@@ -66,8 +66,8 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
     sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
@@ -78,7 +78,7 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                            int line_size, int h)
+                            ptrdiff_t stride, int h)
 {
     int i, s = 0;
     const vector unsigned char zero =
@@ -87,9 +87,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     vector unsigned char pix1v, pix3v, avgv, t5;
     vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
     vector signed int sumdiffs;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
-    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+    /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
      * iteration becomes pix2 in the next iteration. We can use this
      * fact to avoid a potentially expensive unaligned read, each
      * time around the loop.
@@ -119,9 +119,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
+        pix1 += stride;
         pix2v = pix3v;
-        pix3 += line_size;
+        pix3 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -132,10 +132,10 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                             int line_size, int h)
+                             ptrdiff_t stride, int h)
 {
     int i, s = 0;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
     const vector unsigned char zero =
         (const vector unsigned char) vec_splat_u8(0);
     const vector unsigned short two =
@@ -149,7 +149,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
     vector signed int sumdiffs;
-    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+    /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
      * iteration becomes pix2 in the next iteration. We can use this
      * fact to avoid a potentially expensive unaligned read, as well
      * as some splitting, and vector addition each time around the loop.
@@ -212,8 +212,8 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
-        pix3 += line_size;
+        pix1 += stride;
+        pix3 += stride;
         /* Transfer the calculated values for pix3 into pix2. */
         t1 = t3;
         t2 = t4;
@@ -227,7 +227,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h)
+                         ptrdiff_t stride, int h)
 {
     int i, s;
     const vector unsigned int zero =
@@ -251,8 +251,8 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -264,7 +264,7 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                        int line_size, int h)
+                        ptrdiff_t stride, int h)
 {
     int i, s;
     const vector unsigned int zero =
@@ -298,8 +298,8 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -313,7 +313,7 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 /* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
  * It's the sad8_altivec code above w/ squaring added. */
 static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                        int line_size, int h)
+                        ptrdiff_t stride, int h)
 {
     int i, s;
     const vector unsigned int zero =
@@ -350,8 +350,8 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Square the values and add them to our sum. */
         sum = vec_msum(t5, t5, sum);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -365,7 +365,7 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 /* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
  * It's the sad16_altivec code above w/ squaring added. */
 static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h)
+                         ptrdiff_t stride, int h)
 {
     int i, s;
     const vector unsigned int zero =
@@ -392,8 +392,8 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Square the values and add them to our sum. */
         sum = vec_msum(t5, t5, sum);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
@@ -405,7 +405,7 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
-                                     uint8_t *src, int stride, int h)
+                                     uint8_t *src, ptrdiff_t stride, int h)
 {
     int sum;
     register const vector unsigned char vzero =
@@ -534,7 +534,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
  * but xlc goes to around 660 on the regular C code...
  */
 static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
-                                      uint8_t *src, int stride, int h)
+                                      uint8_t *src, ptrdiff_t stride, int h)
 {
     int sum;
     register vector signed short
@@ -731,7 +731,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
 }
 static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst,
-                                    uint8_t *src, int stride, int h)
+                                    uint8_t *src, ptrdiff_t stride, int h)
 {
     int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
...
This diff is collapsed.