Commit 744ac4be authored by Michael Niedermayer

dct_unquantize_h263_altivec by Romain Dolbeau <dolbeaur at club-internet dot fr>

Originally committed as revision 1455 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 67309e49
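
For reference, the heart of what the new routine vectorizes is the standard H.263 inverse quantization, level' = level * (2*qscale) +/- ((qscale - 1) | 1), with zero coefficients left untouched. A minimal standalone scalar sketch is shown below; the helper name and the plain int16_t/first/last parameters are illustrative assumptions, not part of this commit (in FFmpeg the coefficients are DCTELEM values inside MpegEncContext, and the real routine also handles the intra DC and AIC cases visible in the diff):

#include <stdint.h>

/* scalar reference sketch: dequantize block[first..last] in place */
static void h263_unquantize_sketch(int16_t *block, int first, int last,
                                   int qscale)
{
    const int qmul = qscale << 1;       /* 2*qscale            */
    const int qadd = (qscale - 1) | 1;  /* odd rounding offset */
    int i;

    for (i = first; i <= last; i++) {
        int level = block[i];
        if (level)                      /* zeros must stay zero */
            block[i] = level < 0 ? level * qmul - qadd
                                 : level * qmul + qadd;
    }
}
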
/*
- * GMC (???)
+ * GMC (Global Motion Compensation)
* AltiVec-enabled
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
@@ -503,3 +503,124 @@ int dct_quantize_altivec(MpegEncContext* s,
return lastNonZero;
}
/*
AltiVec version of dct_unquantize_h263
this code assumes `block' is 16-byte aligned
*/
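// (note: the loops below still re-check the alignment of &block[j] and
// handle any leading unaligned coefficients with scalar code, so a
// misaligned block degrades gracefully rather than producing wrong results)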
void dct_unquantize_h263_altivec(MpegEncContext *s,
DCTELEM *block, int n, int qscale)
{
int i, level, qmul, qadd;
int nCoeffs;
assert(s->block_last_index[n]>=0);
qadd = (qscale - 1) | 1;
qmul = qscale << 1;
if (s->mb_intra) {
if (!s->h263_aic) {
if (n < 4)
block[0] = block[0] * s->y_dc_scale;
else
block[0] = block[0] * s->c_dc_scale;
}else
qadd = 0;
i = 1;
nCoeffs= 63; //does not always use zigzag table
} else {
i = 0;
nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
}
#if 0
for(;i<=nCoeffs;i++) {
level = block[i];
if (level) {
if (level < 0) {
level = level * qmul - qadd;
} else {
level = level * qmul + qadd;
}
block[i] = level;
}
}
#else
{
register const vector short vczero = (const vector short)(0);
short __attribute__ ((aligned(16))) qmul8[] =
{
qmul, qmul, qmul, qmul,
qmul, qmul, qmul, qmul
};
short __attribute__ ((aligned(16))) qadd8[] =
{
qadd, qadd, qadd, qadd,
qadd, qadd, qadd, qadd
};
short __attribute__ ((aligned(16))) nqadd8[] =
{
-qadd, -qadd, -qadd, -qadd,
-qadd, -qadd, -qadd, -qadd
};
register vector short blockv, qmulv, qaddv, nqaddv, temp1;
register vector bool short blockv_null, blockv_neg;
register short backup_0 = block[0];
register int j = 0;
qmulv = vec_ld(0, qmul8);
qaddv = vec_ld(0, qadd8);
nqaddv = vec_ld(0, nqadd8);
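// note: vec_ld simply ignores the low four bits of its effective address,
// so these loads rely on the aligned(16) attribute above for correctness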
// first make sure block[j] is 16-byte aligned
for(j = 0; (j <= nCoeffs) && ((((unsigned long)block) + (j << 1)) & 0x0000000F) ; j++) {
level = block[j];
if (level) {
if (level < 0) {
level = level * qmul - qadd;
} else {
level = level * qmul + qadd;
}
block[j] = level;
}
}
// vectorize all the 16-byte-aligned groups
// of 8 elements
for(; (j + 7) <= nCoeffs ; j+=8)
{
blockv = vec_ld(j << 1, block);
blockv_neg = vec_cmplt(blockv, vczero);
blockv_null = vec_cmpeq(blockv, vczero);
// choose between +qadd and -qadd as the third operand,
// depending on the sign of the coefficient
temp1 = vec_sel(qaddv, nqaddv, blockv_neg);
// multiply & add (block[i..i+7] * qmul [+-] qadd)
temp1 = vec_mladd(blockv, qmulv, temp1);
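// (vec_mladd keeps only the low 16 bits of each product-plus-addend,
// matching the 16-bit DCTELEM storage that the scalar path writes to)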
// put 0 back where block[i..i+7] used to have 0
blockv = vec_sel(temp1, blockv, blockv_null);
vec_st(blockv, j << 1, block);
}
// if nCoeffs isn't a multiple of 8, finish the job
// using good old scalar units.
// (we could do it using a truncated vector,
// but I'm not sure it's worth the hassle)
for(; j <= nCoeffs ; j++) {
level = block[j];
if (level) {
if (level < 0) {
level = level * qmul - qadd;
} else {
level = level * qmul + qadd;
}
block[j] = level;
}
}
if (i == 1)
{ // cheat: the loops above start at j = 0 and so also rescaled block[0],
  // which for intra blocks (i == 1) holds the already-scaled DC term;
  // restoring the saved value avoids special-casing the first iteration
block[0] = backup_0;
}
}
#endif
}
@@ -27,6 +27,8 @@
extern int dct_quantize_altivec(MpegEncContext *s,
DCTELEM *block, int n,
int qscale, int *overflow);
extern void dct_unquantize_h263_altivec(MpegEncContext *s,
DCTELEM *block, int n, int qscale);
extern void idct_put_altivec(UINT8 *dest, int line_size, INT16 *block);
extern void idct_add_altivec(UINT8 *dest, int line_size, INT16 *block);
@@ -66,6 +68,7 @@ void MPV_common_init_ppc(MpegEncContext *s)
(s->avctx->dct_algo == FF_DCT_ALTIVEC))
{
s->dct_quantize = dct_quantize_altivec;
s->dct_unquantize_h263 = dct_unquantize_h263_altivec;
}
} else
#endif