Commit e3905ce0 authored by Diego Biurrun

cosmetics: Reformat PPC code in libavcodec according to style guidelines.

This includes indentation changes, comment reformatting, consistent brace
placement and some prettyprinting.

Originally committed as revision 14316 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 41f5c62f
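The change is purely cosmetic. As one concrete illustration (taken from one of the FFT hunks further down in this diff), a conditional with braces on their own lines such as

    if (s->inverse)
    {
        c2 = vcii(p,p,n,p);
    }
    else
    {
        c2 = vcii(p,p,p,n);
    }

becomes the compact brace placement called for by the style guidelines:

    if (s->inverse) {
        c2 = vcii(p,p,n,p);
    } else {
        c2 = vcii(p,p,p,n);
    }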
@@ -31,40 +31,40 @@ void powerpc_display_perf_report(void);
/* if you add to the enum below, also add to the perfname array
in dsputil_ppc.c */
enum powerpc_perf_index {
altivec_fft_num = 0,
altivec_gmc1_num,
altivec_dct_unquantize_h263_num,
altivec_fdct,
altivec_idct_add_num,
altivec_idct_put_num,
altivec_put_pixels16_num,
altivec_avg_pixels16_num,
altivec_avg_pixels8_num,
altivec_put_pixels8_xy2_num,
altivec_put_no_rnd_pixels8_xy2_num,
altivec_put_pixels16_xy2_num,
altivec_put_no_rnd_pixels16_xy2_num,
altivec_hadamard8_diff8x8_num,
altivec_hadamard8_diff16_num,
altivec_avg_pixels8_xy2_num,
powerpc_clear_blocks_dcbz32,
powerpc_clear_blocks_dcbz128,
altivec_put_h264_chroma_mc8_num,
altivec_avg_h264_chroma_mc8_num,
altivec_put_h264_qpel16_h_lowpass_num,
altivec_avg_h264_qpel16_h_lowpass_num,
altivec_put_h264_qpel16_v_lowpass_num,
altivec_avg_h264_qpel16_v_lowpass_num,
altivec_put_h264_qpel16_hv_lowpass_num,
altivec_avg_h264_qpel16_hv_lowpass_num,
powerpc_perf_total
altivec_fft_num = 0,
altivec_gmc1_num,
altivec_dct_unquantize_h263_num,
altivec_fdct,
altivec_idct_add_num,
altivec_idct_put_num,
altivec_put_pixels16_num,
altivec_avg_pixels16_num,
altivec_avg_pixels8_num,
altivec_put_pixels8_xy2_num,
altivec_put_no_rnd_pixels8_xy2_num,
altivec_put_pixels16_xy2_num,
altivec_put_no_rnd_pixels16_xy2_num,
altivec_hadamard8_diff8x8_num,
altivec_hadamard8_diff16_num,
altivec_avg_pixels8_xy2_num,
powerpc_clear_blocks_dcbz32,
powerpc_clear_blocks_dcbz128,
altivec_put_h264_chroma_mc8_num,
altivec_avg_h264_chroma_mc8_num,
altivec_put_h264_qpel16_h_lowpass_num,
altivec_avg_h264_qpel16_h_lowpass_num,
altivec_put_h264_qpel16_v_lowpass_num,
altivec_avg_h264_qpel16_v_lowpass_num,
altivec_put_h264_qpel16_hv_lowpass_num,
altivec_avg_h264_qpel16_hv_lowpass_num,
powerpc_perf_total
};
enum powerpc_data_index {
powerpc_data_min = 0,
powerpc_data_max,
powerpc_data_sum,
powerpc_data_num,
powerpc_data_total
powerpc_data_min = 0,
powerpc_data_max,
powerpc_data_sum,
powerpc_data_num,
powerpc_data_total
};
extern unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][powerpc_data_total];
@@ -105,45 +105,42 @@ extern unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][
#define POWERPC_GET_PMC6(a) do {} while (0)
#endif
#endif /* HAVE_PPC64 */
#define POWERPC_PERF_DECLARE(a, cond) \
POWERP_PMC_DATATYPE \
pmc_start[POWERPC_NUM_PMC_ENABLED], \
pmc_stop[POWERPC_NUM_PMC_ENABLED], \
pmc_loop_index;
#define POWERPC_PERF_DECLARE(a, cond) \
POWERP_PMC_DATATYPE \
pmc_start[POWERPC_NUM_PMC_ENABLED], \
pmc_stop[POWERPC_NUM_PMC_ENABLED], \
pmc_loop_index;
#define POWERPC_PERF_START_COUNT(a, cond) do { \
POWERPC_GET_PMC6(pmc_start[5]); \
POWERPC_GET_PMC5(pmc_start[4]); \
POWERPC_GET_PMC4(pmc_start[3]); \
POWERPC_GET_PMC3(pmc_start[2]); \
POWERPC_GET_PMC2(pmc_start[1]); \
POWERPC_GET_PMC1(pmc_start[0]); \
} while (0)
POWERPC_GET_PMC6(pmc_start[5]); \
POWERPC_GET_PMC5(pmc_start[4]); \
POWERPC_GET_PMC4(pmc_start[3]); \
POWERPC_GET_PMC3(pmc_start[2]); \
POWERPC_GET_PMC2(pmc_start[1]); \
POWERPC_GET_PMC1(pmc_start[0]); \
} while (0)
#define POWERPC_PERF_STOP_COUNT(a, cond) do { \
POWERPC_GET_PMC1(pmc_stop[0]); \
POWERPC_GET_PMC2(pmc_stop[1]); \
POWERPC_GET_PMC3(pmc_stop[2]); \
POWERPC_GET_PMC4(pmc_stop[3]); \
POWERPC_GET_PMC5(pmc_stop[4]); \
POWERPC_GET_PMC6(pmc_stop[5]); \
if (cond) \
{ \
for(pmc_loop_index = 0; \
pmc_loop_index < POWERPC_NUM_PMC_ENABLED; \
pmc_loop_index++) \
{ \
if (pmc_stop[pmc_loop_index] >= pmc_start[pmc_loop_index]) \
{ \
POWERP_PMC_DATATYPE diff = \
pmc_stop[pmc_loop_index] - pmc_start[pmc_loop_index]; \
if (diff < perfdata[pmc_loop_index][a][powerpc_data_min]) \
perfdata[pmc_loop_index][a][powerpc_data_min] = diff; \
if (diff > perfdata[pmc_loop_index][a][powerpc_data_max]) \
perfdata[pmc_loop_index][a][powerpc_data_max] = diff; \
perfdata[pmc_loop_index][a][powerpc_data_sum] += diff; \
perfdata[pmc_loop_index][a][powerpc_data_num] ++; \
} \
} \
} \
POWERPC_GET_PMC1(pmc_stop[0]); \
POWERPC_GET_PMC2(pmc_stop[1]); \
POWERPC_GET_PMC3(pmc_stop[2]); \
POWERPC_GET_PMC4(pmc_stop[3]); \
POWERPC_GET_PMC5(pmc_stop[4]); \
POWERPC_GET_PMC6(pmc_stop[5]); \
if (cond) { \
for(pmc_loop_index = 0; \
pmc_loop_index < POWERPC_NUM_PMC_ENABLED; \
pmc_loop_index++) { \
if (pmc_stop[pmc_loop_index] >= pmc_start[pmc_loop_index]) { \
POWERP_PMC_DATATYPE diff = \
pmc_stop[pmc_loop_index] - pmc_start[pmc_loop_index]; \
if (diff < perfdata[pmc_loop_index][a][powerpc_data_min]) \
perfdata[pmc_loop_index][a][powerpc_data_min] = diff; \
if (diff > perfdata[pmc_loop_index][a][powerpc_data_max]) \
perfdata[pmc_loop_index][a][powerpc_data_max] = diff; \
perfdata[pmc_loop_index][a][powerpc_data_sum] += diff; \
perfdata[pmc_loop_index][a][powerpc_data_num] ++; \
} \
} \
} \
} while (0)
#else /* CONFIG_POWERPC_PERF */
// those are needed to avoid empty statements.
......
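For context (not part of this diff): the three POWERPC_PERF_* macros above are meant to bracket a DSP routine so that the six performance-monitor counters are sampled immediately before and after it, with the min/max/sum/count statistics accumulated into perfdata. A minimal usage sketch, assuming this header is included, CONFIG_POWERPC_PERF is defined, and using a made-up function name:

    static void some_altivec_routine(void)   /* hypothetical, for illustration only */
    {
    POWERPC_PERF_DECLARE(altivec_fft_num, 1);          /* declares pmc_start[], pmc_stop[], pmc_loop_index */
        POWERPC_PERF_START_COUNT(altivec_fft_num, 1);  /* sample PMC1..PMC6 into pmc_start[] */
        /* ... the actual AltiVec work goes here ... */
        POWERPC_PERF_STOP_COUNT(altivec_fft_num, 1);   /* sample again; update min/max/sum/num in perfdata */
    }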
@@ -33,21 +33,21 @@
/* butterfly op */
#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \
{\
FFTSample ax, ay, bx, by;\
bx=pre1;\
by=pim1;\
ax=qre1;\
ay=qim1;\
pre = (bx + ax);\
pim = (by + ay);\
qre = (bx - ax);\
qim = (by - ay);\
FFTSample ax, ay, bx, by;\
bx=pre1;\
by=pim1;\
ax=qre1;\
ay=qim1;\
pre = (bx + ax);\
pim = (by + ay);\
qre = (bx - ax);\
qim = (by - ay);\
}
#define MUL16(a,b) ((a) * (b))
#define CMUL(pre, pim, are, aim, bre, bim) \
{\
pre = (MUL16(are, bre) - MUL16(aim, bim));\
pim = (MUL16(are, bim) + MUL16(bre, aim));\
pre = (MUL16(are, bre) - MUL16(aim, bim));\
pim = (MUL16(are, bim) + MUL16(bre, aim));\
}
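(Side note, not part of the change: BF is the radix-2 FFT butterfly, producing the sum p = b + a and difference q = b - a of two complex points, and CMUL is simply complex multiplication written out in real arithmetic,

    pre + i*pim = (are + i*aim) * (bre + i*bim)
                = (are*bre - aim*bim) + i*(are*bim + aim*bre),

which is exactly what the two MUL16 lines compute.)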
@@ -85,14 +85,11 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
c1 = vcii(p,p,n,n);
if (s->inverse)
{
c2 = vcii(p,p,n,p);
}
else
{
c2 = vcii(p,p,p,n);
}
if (s->inverse) {
c2 = vcii(p,p,n,p);
} else {
c2 = vcii(p,p,p,n);
}
j = (np >> 2);
do {
......
@@ -36,16 +36,16 @@ void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int str
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) =
{rounder, rounder, rounder, rounder,
rounder, rounder, rounder, rounder};
{rounder, rounder, rounder, rounder,
rounder, rounder, rounder, rounder};
const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) =
{
(16-x16)*(16-y16), /* A */
( x16)*(16-y16), /* B */
(16-x16)*( y16), /* C */
( x16)*( y16), /* D */
0, 0, 0, 0 /* padding */
};
{
(16-x16)*(16-y16), /* A */
( x16)*(16-y16), /* B */
(16-x16)*( y16), /* C */
( x16)*( y16), /* D */
0, 0, 0, 0 /* padding */
};
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
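(For reference: A, B, C and D above are the usual bilinear GMC interpolation weights, and they always sum to (16-x16)*(16-y16) + x16*(16-y16) + (16-x16)*y16 + x16*y16 = 16*16 = 256. That is why, after the rounder is added, the accumulated sum is renormalized with a right shift by 8 — the vec_sr(tempD, vcsr8) in the loop below.)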
@@ -74,73 +74,67 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
src_1 = vec_ld(16, src);
srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
if (src_really_odd != 0x0000000F)
{ // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
}
else
{
srcvB = src_1;
if (src_really_odd != 0x0000000F) {
// if src & 0xF == 0xF, then (src+1) is properly aligned
// on the second vector.
srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
} else {
srcvB = src_1;
}
srcvA = vec_mergeh(vczero, srcvA);
srcvB = vec_mergeh(vczero, srcvB);
for(i=0; i<h; i++)
{
dst_odd = (unsigned long)dst & 0x0000000F;
src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;
dstv = vec_ld(0, dst);
// we'll be able to pick up our 9 char elements
// at src + stride from those 32 bytes,
// then reuse the resulting 2 vectors srcvC and srcvD
// as the next srcvA and srcvB
src_0 = vec_ld(stride + 0, src);
src_1 = vec_ld(stride + 16, src);
srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
if (src_really_odd != 0x0000000F)
{ // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
}
else
{
srcvD = src_1;
}
srcvC = vec_mergeh(vczero, srcvC);
srcvD = vec_mergeh(vczero, srcvD);
// OK, now we (finally) do the math :-)
// those four instructions replace 32 int muls & 32 int adds.
// isn't AltiVec nice?
tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
srcvA = srcvC;
srcvB = srcvD;
tempD = vec_sr(tempD, vcsr8);
dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
if (dst_odd)
{
dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
}
else
{
dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
}
vec_st(dstv2, 0, dst);
dst += stride;
src += stride;
for(i=0; i<h; i++) {
dst_odd = (unsigned long)dst & 0x0000000F;
src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;
dstv = vec_ld(0, dst);
// we'll be able to pick up our 9 char elements
// at src + stride from those 32 bytes,
// then reuse the resulting 2 vectors srcvC and srcvD
// as the next srcvA and srcvB
src_0 = vec_ld(stride + 0, src);
src_1 = vec_ld(stride + 16, src);
srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
if (src_really_odd != 0x0000000F) {
// if src & 0xF == 0xF, then (src+1) is properly aligned
// on the second vector.
srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
} else {
srcvD = src_1;
}
srcvC = vec_mergeh(vczero, srcvC);
srcvD = vec_mergeh(vczero, srcvD);
// OK, now we (finally) do the math :-)
// those four instructions replace 32 int muls & 32 int adds.
// isn't AltiVec nice?
tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
srcvA = srcvC;
srcvB = srcvD;
tempD = vec_sr(tempD, vcsr8);
dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
if (dst_odd) {
dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
} else {
dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
}
vec_st(dstv2, 0, dst);
dst += stride;
src += stride;
}
POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
......
@@ -196,7 +196,7 @@ void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride
const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
LOAD_ZERO;
const vec_s16_t v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
const vec_u16_t v6us = vec_splat_u16(6);
const vec_u16_t v6us = vec_splat_u16(6);
register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
@@ -392,8 +392,8 @@ static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
*/
H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)
H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)
/****************************************************************************
@@ -685,9 +685,9 @@ static inline void write16x4(uint8_t *dst, int dst_stride,
r15 = vec_mergel(r3, r7); /*3,7,11,15 set 1*/ \
\
/*Third merge*/ \
r0 = vec_mergeh(r8, r12); /*0,2,4,6,8,10,12,14 set 0*/ \
r1 = vec_mergel(r8, r12); /*0,2,4,6,8,10,12,14 set 1*/ \
r2 = vec_mergeh(r9, r13); /*0,2,4,6,8,10,12,14 set 2*/ \
r0 = vec_mergeh(r8, r12); /*0,2,4,6,8,10,12,14 set 0*/ \
r1 = vec_mergel(r8, r12); /*0,2,4,6,8,10,12,14 set 1*/ \
r2 = vec_mergeh(r9, r13); /*0,2,4,6,8,10,12,14 set 2*/ \
r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/ \
r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/ \
r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/ \
......
@@ -22,7 +22,6 @@
* NOTE: This code is based on GPL code from the libmpeg2 project. The
* author, Michel Lespinasses, has given explicit permission to release
* under LGPL as part of ffmpeg.
*
*/
/*
......
@@ -46,8 +46,7 @@ void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
vector signed short zeros, sumhv, sumlv;
s = src;
for(i=0;i<4;i++)
{
for(i=0;i<4;i++) {
/*
The vec_madds later on does an implicit >>15 on the result.
Since FILTER_BITS is 8, and we have 15 bits of magnitude in
@@ -86,13 +85,11 @@ void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
/* Do our altivec resampling on 16 pixels at once. */
while(dst_width>=16) {
/*
Read 16 (potentially unaligned) bytes from each of
/* Read 16 (potentially unaligned) bytes from each of
4 lines into 4 vectors, and split them into shorts.
Interleave the multiply/accumulate for the resample
filter with the loads to hide the 3 cycle latency
the vec_madds have.
*/
the vec_madds have. */
tv = (vector unsigned char *) &s[0 * wrap];
tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[i * wrap]));
srchv[0].v = (vector signed short) vec_mergeh(zero, tmp);
@@ -121,10 +118,8 @@ void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
sumhv = vec_madds(srchv[3].v, fv[3].v, sumhv);
sumlv = vec_madds(srclv[3].v, fv[3].v, sumlv);
/*
Pack the results into our destination vector,
and do an aligned write of that back to memory.
*/
/* Pack the results into our destination vector,
and do an aligned write of that back to memory. */
dstv = vec_packsu(sumhv, sumlv) ;
vec_st(dstv, 0, (vector unsigned char *) dst);
@@ -133,10 +128,8 @@ void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
dst_width-=16;
}
/*
If there are any leftover pixels, resample them
with the slow scalar method.
*/
/* If there are any leftover pixels, resample them
with the slow scalar method. */
while(dst_width>0) {
sum = s[0 * wrap] * filter[0] +
s[1 * wrap] * filter[1] +
......
@@ -38,7 +38,7 @@ static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
vector signed short vpix2, vdiff, vpix1l,vpix1h;
union { vector signed int vscore;
int32_t score[4];
} u;
} u;
u.vscore = vec_splat_s32(0);
//
//XXX lazy way, fix it later
......
@@ -25,14 +25,14 @@
#if defined(ARCH_POWERPC_405)
/* signed 16x16 -> 32 multiply add accumulate */
# define MAC16(rt, ra, rb) \
asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
#define MAC16(rt, ra, rb) \
asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
/* signed 16x16 -> 32 multiply */
# define MUL16(ra, rb) \
({ int __rt; \
asm ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
__rt; })
#define MUL16(ra, rb) \
({ int __rt; \
asm ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
__rt; })
#endif
#endif /* FFMPEG_PPC_MATHOPS_H */
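For context (not part of this diff): MAC16 and MUL16 map directly onto the 405's maclhw and mullhw signed halfword multiply instructions. A rough usage sketch, assuming this header is included and the compile target is a PowerPC 405 (the values are made up):

    int     acc = 0;
    int16_t a   = 100, b = -3;
    MAC16(acc, a, b)            /* acc += a * b via maclhw; the macro itself supplies the trailing ';' */
    acc += MUL16(a, b);         /* acc += a * b again, this time via mullhw */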
@@ -379,8 +379,7 @@ void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
v4=(vector signed int *)b4;
v5=(vector signed int *)b5;
for (i=0; i< w4;i++)
{
for (i=0; i< w4;i++) {
#if 0
b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
@@ -782,8 +781,8 @@ void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
#if 0
c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
#endif
}