Commit 295f3737 authored by Aurelien Jacobs's avatar Aurelien Jacobs

various simplifications around recent av_clip_int16() usage

Originally committed as revision 10080 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent b0f33828
...@@ -208,7 +208,7 @@ static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, sho ...@@ -208,7 +208,7 @@ static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, sho
{ {
int delta = sample - c->prev_sample; int delta = sample - c->prev_sample;
int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8; int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
c->prev_sample = c->prev_sample + ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8); c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
c->prev_sample = av_clip_int16(c->prev_sample); c->prev_sample = av_clip_int16(c->prev_sample);
c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88); c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
return nibble; return nibble;
...@@ -228,10 +228,9 @@ static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, shor ...@@ -228,10 +228,9 @@ static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, shor
nibble= av_clip(nibble, -8, 7)&0x0F; nibble= av_clip(nibble, -8, 7)&0x0F;
predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
predictor = av_clip_int16(predictor);
c->sample2 = c->sample1; c->sample2 = c->sample1;
c->sample1 = predictor; c->sample1 = av_clip_int16(predictor);
c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8; c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
if (c->idelta < 16) c->idelta = 16; if (c->idelta < 16) c->idelta = 16;
...@@ -252,7 +251,7 @@ static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, ...@@ -252,7 +251,7 @@ static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8; nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;
c->predictor = c->predictor + ((c->step * yamaha_difflookup[nibble]) / 8); c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
c->predictor = av_clip_int16(c->predictor); c->predictor = av_clip_int16(c->predictor);
c->step = (c->step * yamaha_indexscale[nibble]) >> 8; c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
c->step = av_clip(c->step, 127, 24567); c->step = av_clip(c->step, 127, 24567);
...@@ -670,11 +669,10 @@ static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, ...@@ -670,11 +669,10 @@ static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble,
if (sign) predictor -= diff; if (sign) predictor -= diff;
else predictor += diff; else predictor += diff;
predictor = av_clip_int16(predictor); c->predictor = av_clip_int16(predictor);
c->predictor = predictor;
c->step_index = step_index; c->step_index = step_index;
return (short)predictor; return (short)c->predictor;
} }
static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble) static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
...@@ -683,14 +681,13 @@ static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble) ...@@ -683,14 +681,13 @@ static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256; predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256;
predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
predictor = av_clip_int16(predictor);
c->sample2 = c->sample1; c->sample2 = c->sample1;
c->sample1 = predictor; c->sample1 = av_clip_int16(predictor);
c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8; c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
if (c->idelta < 16) c->idelta = 16; if (c->idelta < 16) c->idelta = 16;
return (short)predictor; return c->sample1;
} }
static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble) static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
...@@ -719,9 +716,8 @@ static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble) ...@@ -719,9 +716,8 @@ static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
if(c->step > 32767) if(c->step > 32767)
c->step = 32767; c->step = 32767;
predictor = av_clip_int16(predictor); c->predictor = av_clip_int16(predictor);
c->predictor = predictor; return (short)c->predictor;
return (short)predictor;
} }
static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift) static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
...@@ -789,11 +785,10 @@ static void xa_decode(short *out, const unsigned char *in, ...@@ -789,11 +785,10 @@ static void xa_decode(short *out, const unsigned char *in,
t = (signed char)(d<<4)>>4; t = (signed char)(d<<4)>>4;
s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6); s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
s = av_clip_int16(s);
*out = s;
out += inc;
s_2 = s_1; s_2 = s_1;
s_1 = s; s_1 = av_clip_int16(s);
*out = s_1;
out += inc;
} }
if (inc==2) { /* stereo */ if (inc==2) { /* stereo */
...@@ -815,11 +810,10 @@ static void xa_decode(short *out, const unsigned char *in, ...@@ -815,11 +810,10 @@ static void xa_decode(short *out, const unsigned char *in,
t = (signed char)d >> 4; t = (signed char)d >> 4;
s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6); s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
s = av_clip_int16(s);
*out = s;
out += inc;
s_2 = s_1; s_2 = s_1;
s_1 = s; s_1 = av_clip_int16(s);
*out = s_1;
out += inc;
} }
if (inc==2) { /* stereo */ if (inc==2) { /* stereo */
...@@ -1181,13 +1175,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, ...@@ -1181,13 +1175,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
next_right_sample = (next_right_sample + next_right_sample = (next_right_sample +
(current_right_sample * coeff1r) + (current_right_sample * coeff1r) +
(previous_right_sample * coeff2r) + 0x80) >> 8; (previous_right_sample * coeff2r) + 0x80) >> 8;
next_left_sample = av_clip_int16(next_left_sample);
next_right_sample = av_clip_int16(next_right_sample);
previous_left_sample = current_left_sample; previous_left_sample = current_left_sample;
current_left_sample = next_left_sample; current_left_sample = av_clip_int16(next_left_sample);
previous_right_sample = current_right_sample; previous_right_sample = current_right_sample;
current_right_sample = next_right_sample; current_right_sample = av_clip_int16(next_right_sample);
*samples++ = (unsigned short)current_left_sample; *samples++ = (unsigned short)current_left_sample;
*samples++ = (unsigned short)current_right_sample; *samples++ = (unsigned short)current_right_sample;
} }
...@@ -1386,8 +1378,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, ...@@ -1386,8 +1378,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
sampledat = ((prev[ch][0]*factor1 sampledat = ((prev[ch][0]*factor1
+ prev[ch][1]*factor2) >> 11) + (sampledat>>exp); + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
sampledat = av_clip_int16(sampledat); *samples = av_clip_int16(sampledat);
*samples = sampledat;
prev[ch][1] = prev[ch][0]; prev[ch][1] = prev[ch][0];
prev[ch][0] = *samples++; prev[ch][0] = *samples++;
......
...@@ -108,19 +108,17 @@ static void adx_decode(short *out,const unsigned char *in,PREV *prev) ...@@ -108,19 +108,17 @@ static void adx_decode(short *out,const unsigned char *in,PREV *prev)
// d>>=4; if (d&8) d-=16; // d>>=4; if (d&8) d-=16;
d = ((signed char)d >> 4); d = ((signed char)d >> 4);
s0 = (BASEVOL*d*scale + SCALE1*s1 - SCALE2*s2)>>14; s0 = (BASEVOL*d*scale + SCALE1*s1 - SCALE2*s2)>>14;
s0 = av_clip_int16(s0);
*out++=s0;
s2 = s1; s2 = s1;
s1 = s0; s1 = av_clip_int16(s0);
*out++=s1;
d = in[i]; d = in[i];
//d&=15; if (d&8) d-=16; //d&=15; if (d&8) d-=16;
d = ((signed char)(d<<4) >> 4); d = ((signed char)(d<<4) >> 4);
s0 = (BASEVOL*d*scale + SCALE1*s1 - SCALE2*s2)>>14; s0 = (BASEVOL*d*scale + SCALE1*s1 - SCALE2*s2)>>14;
s0 = av_clip_int16(s0);
*out++=s0;
s2 = s1; s2 = s1;
s1 = s0; s1 = av_clip_int16(s0);
*out++=s1;
} }
prev->s1 = s1; prev->s1 = s1;
prev->s2 = s2; prev->s2 = s2;
......
...@@ -120,12 +120,7 @@ static int a52_decode_init(AVCodecContext *avctx) ...@@ -120,12 +120,7 @@ static int a52_decode_init(AVCodecContext *avctx)
return 0; return 0;
} }
/**** the following two functions comes from a52dec */ /**** the following function comes from a52dec */
static inline int blah (int32_t i)
{
return av_clip_int16(i - 0x43c00000);
}
static inline void float_to_int (float * _f, int16_t * s16, int nchannels) static inline void float_to_int (float * _f, int16_t * s16, int nchannels)
{ {
int i, j, c; int i, j, c;
...@@ -135,7 +130,7 @@ static inline void float_to_int (float * _f, int16_t * s16, int nchannels) ...@@ -135,7 +130,7 @@ static inline void float_to_int (float * _f, int16_t * s16, int nchannels)
nchannels *= 256; nchannels *= 256;
for (i = 0; i < 256; i++) { for (i = 0; i < 256; i++) {
for (c = 0; c < nchannels; c += 256) for (c = 0; c < nchannels; c += 256)
s16[j++] = blah (f[i + c]); s16[j++] = av_clip_int16(f[i + c] - 0x43c00000);
} }
} }
......
...@@ -295,7 +295,7 @@ static int oggvorbis_decode_init(AVCodecContext *avccontext) { ...@@ -295,7 +295,7 @@ static int oggvorbis_decode_init(AVCodecContext *avccontext) {
static inline int conv(int samples, float **pcm, char *buf, int channels) { static inline int conv(int samples, float **pcm, char *buf, int channels) {
int i, j, val ; int i, j;
ogg_int16_t *ptr, *data = (ogg_int16_t*)buf ; ogg_int16_t *ptr, *data = (ogg_int16_t*)buf ;
float *mono ; float *mono ;
...@@ -304,12 +304,7 @@ static inline int conv(int samples, float **pcm, char *buf, int channels) { ...@@ -304,12 +304,7 @@ static inline int conv(int samples, float **pcm, char *buf, int channels) {
mono = pcm[i] ; mono = pcm[i] ;
for(j = 0 ; j < samples ; j++) { for(j = 0 ; j < samples ; j++) {
*ptr = av_clip_int16(mono[j] * 32767.f);
val = mono[j] * 32767.f;
val = av_clip_int16(val);
*ptr = val ;
ptr += channels; ptr += channels;
} }
} }
......
...@@ -430,7 +430,6 @@ static int ra144_decode_frame(AVCodecContext * avctx, ...@@ -430,7 +430,6 @@ static int ra144_decode_frame(AVCodecContext * avctx,
uint8_t * buf, int buf_size) uint8_t * buf, int buf_size)
{ {
unsigned int a,b,c; unsigned int a,b,c;
long s;
signed short *shptr; signed short *shptr;
unsigned int *lptr,*temp; unsigned int *lptr,*temp;
const short **dptr; const short **dptr;
...@@ -484,11 +483,8 @@ static int ra144_decode_frame(AVCodecContext * avctx, ...@@ -484,11 +483,8 @@ static int ra144_decode_frame(AVCodecContext * avctx,
glob->resetflag=0; glob->resetflag=0;
shptr=glob->output_buffer; shptr=glob->output_buffer;
while (shptr<glob->output_buffer+BLOCKSIZE) { while (shptr<glob->output_buffer+BLOCKSIZE)
s=*(shptr++)<<2; *data++=av_clip_int16(*(shptr++)<<2);
*data=av_clip_int16(s);
data++;
}
b+=30; b+=30;
} }
......
...@@ -712,7 +712,7 @@ static int wma_decode_block(WMACodecContext *s) ...@@ -712,7 +712,7 @@ static int wma_decode_block(WMACodecContext *s)
/* decode a frame of frame_len samples */ /* decode a frame of frame_len samples */
static int wma_decode_frame(WMACodecContext *s, int16_t *samples) static int wma_decode_frame(WMACodecContext *s, int16_t *samples)
{ {
int ret, i, n, a, ch, incr; int ret, i, n, ch, incr;
int16_t *ptr; int16_t *ptr;
float *iptr; float *iptr;
...@@ -739,9 +739,7 @@ static int wma_decode_frame(WMACodecContext *s, int16_t *samples) ...@@ -739,9 +739,7 @@ static int wma_decode_frame(WMACodecContext *s, int16_t *samples)
iptr = s->frame_out[ch]; iptr = s->frame_out[ch];
for(i=0;i<n;i++) { for(i=0;i<n;i++) {
a = lrintf(*iptr++); *ptr = av_clip_int16(lrintf(*iptr++));
a = av_clip_int16(a);
*ptr = a;
ptr += incr; ptr += incr;
} }
/* prepare for next block */ /* prepare for next block */
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment