Commit ad1c8dd6 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  fate: add dxtory test
  adx_parser: rewrite.
  adxdec: Validate channel count to fix a division by zero.
  adxdec: Do not require extradata.
  cmdutils: K&R reformatting cosmetics
  alacdec: implement the 2-pass prediction type.
  alacenc: implement the 2-pass prediction type.
  alacenc: do not generate invalid multi-channel ALAC files
  alacdec: fill in missing or guessed info about the extradata format.
  utvideo: proper median prediction for interlaced videos
  lavu: bump lavu minor for av_popcount64
  dca: K&R formatting cosmetics
  dct: K&R formatting cosmetics
  lavf: flush decoders in avformat_find_stream_info().
  win32: detect number of CPUs using affinity
  Add av_popcount64
  snow: Restore three mistakenly removed casts.

Conflicts:
	cmdutils.c
	doc/APIchanges
	libavcodec/adx_parser.c
	libavcodec/adxdec.c
	libavcodec/alacenc.c
	libavutil/avutil.h
	tests/fate/screen.mak
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents d6da16dc fd16f567
...@@ -1175,9 +1175,9 @@ HAVE_LIST=" ...@@ -1175,9 +1175,9 @@ HAVE_LIST="
fork fork
getaddrinfo getaddrinfo
gethrtime gethrtime
GetProcessAffinityMask
GetProcessMemoryInfo GetProcessMemoryInfo
GetProcessTimes GetProcessTimes
GetSystemInfo
getrusage getrusage
gnu_as gnu_as
ibm_asm ibm_asm
...@@ -3016,8 +3016,8 @@ check_func_headers windows.h PeekNamedPipe ...@@ -3016,8 +3016,8 @@ check_func_headers windows.h PeekNamedPipe
check_func_headers io.h setmode check_func_headers io.h setmode
check_func_headers lzo/lzo1x.h lzo1x_999_compress check_func_headers lzo/lzo1x.h lzo1x_999_compress
check_lib2 "windows.h psapi.h" GetProcessMemoryInfo -lpsapi check_lib2 "windows.h psapi.h" GetProcessMemoryInfo -lpsapi
check_func_headers windows.h GetProcessAffinityMask
check_func_headers windows.h GetProcessTimes check_func_headers windows.h GetProcessTimes
check_func_headers windows.h GetSystemInfo
check_func_headers windows.h MapViewOfFile check_func_headers windows.h MapViewOfFile
check_func_headers windows.h VirtualAlloc check_func_headers windows.h VirtualAlloc
...@@ -31,21 +31,24 @@ API changes, most recent first: ...@@ -31,21 +31,24 @@ API changes, most recent first:
2011-10-20 - b35e9e1 - lavu 51.22.0 2011-10-20 - b35e9e1 - lavu 51.22.0
Add av_strtok() to avstring.h. Add av_strtok() to avstring.h.
2011-01-03 - b73ec05 - lavu 51.21.0
Add av_popcount64
2011-12-18 - 8400b12 - lavc 53.28.1 2011-12-18 - 8400b12 - lavc 53.28.1
Deprecate AVFrame.age. The field is unused. Deprecate AVFrame.age. The field is unused.
2011-xx-xx - xxxxxxx - lavf 53.17.0 2011-12-12 - 5266045 - lavf 53.17.0
Add avformat_open_input(). Add avformat_close_input().
Deprecate av_close_input_file() and av_close_input_stream(). Deprecate av_close_input_file() and av_close_input_stream().
2011-xx-xx - xxxxxxx - lavc 53.25.0 2011-12-02 - 0eea212 - lavc 53.25.0
Add nb_samples and extended_data fields to AVFrame. Add nb_samples and extended_data fields to AVFrame.
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE. Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4(). Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
avcodec_decode_audio4() writes output samples to an AVFrame, which allows avcodec_decode_audio4() writes output samples to an AVFrame, which allows
audio decoders to use get_buffer(). audio decoders to use get_buffer().
2011-xx-xx - xxxxxxx - lavc 53.24.0 2011-12-04 - 560f773 - lavc 53.24.0
Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump. Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
Change AVPicture.data[4]/linesize[4] to [8] at next major bump. Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
Change AVCodecContext.error[4] to [8] at next major bump. Change AVCodecContext.error[4] to [8] at next major bump.
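
As a rough illustration of the lavc 53.25.0 entry above, this is the calling pattern avcodec_decode_audio4() expects; the helper name and the surrounding demuxing setup are assumptions for the sketch, not part of the commit.

#include <libavcodec/avcodec.h>

/* Hypothetical helper showing the avcodec_decode_audio4() pattern: the
 * decoder now fills an AVFrame it obtained via get_buffer() instead of
 * writing into a caller-provided sample buffer. */
static int decode_audio_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame frame;
    int got_frame = 0;
    int len;

    avcodec_get_frame_defaults(&frame);
    len = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
    if (len < 0)
        return len;                 /* decoding error */
    if (got_frame) {
        /* frame.nb_samples samples per channel are now reachable through
         * frame.data[] / frame.extended_data[] */
    }
    return len;                     /* bytes of the packet consumed */
}
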
...@@ -58,7 +58,7 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf, ...@@ -58,7 +58,7 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf,
/* channels */ /* channels */
avctx->channels = buf[7]; avctx->channels = buf[7];
if (avctx->channels > 2) if (avctx->channels <= 0 || avctx->channels > 2)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
/* sample rate */ /* sample rate */
...@@ -45,27 +45,31 @@ static int adx_parse(AVCodecParserContext *s1, ...@@ -45,27 +45,31 @@ static int adx_parse(AVCodecParserContext *s1,
ParseContext *pc = &s->pc; ParseContext *pc = &s->pc;
int next = END_NOT_FOUND; int next = END_NOT_FOUND;
int i; int i;
uint64_t state= pc->state64; uint64_t state = pc->state64;
if(!s->header_size){ if (!s->header_size) {
for(i=0; i<buf_size; i++){ for (i = 0; i < buf_size; i++) {
state= (state<<8) | buf[i]; state = (state << 8) | buf[i];
if((state&0xFFFF0000FFFFFF00) == 0x8000000003120400ULL && (state&0xFF) && ((state>>32)&0xFFFF)>=4){ /* check for fixed fields in ADX header for possible match */
s->header_size= ((state>>32)&0xFFFF) + 4; if ((state & 0xFFFF0000FFFFFF00) == 0x8000000003120400ULL) {
s->block_size = BLOCK_SIZE * (state&0xFF); int channels = state & 0xFF;
s->remaining = i - 7 + s->header_size + s->block_size; int header_size = ((state >> 32) & 0xFFFF) + 4;
break; if (channels > 0 && header_size >= 8) {
s->header_size = header_size;
s->block_size = BLOCK_SIZE * channels;
s->remaining = i - 7 + s->header_size + s->block_size;
break;
}
} }
} }
pc->state64= state; pc->state64 = state;
} }
if (s->header_size) { if (s->header_size) {
if (!s->remaining) { if (!s->remaining)
s->remaining = s->block_size; s->remaining = s->block_size;
} if (s->remaining <= buf_size) {
if (s->remaining<=buf_size) { next = s->remaining;
next= s->remaining;
s->remaining = 0; s->remaining = 0;
} else } else
s->remaining -= buf_size; s->remaining -= buf_size;
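
For reference, the fixed fields the parser's 64-bit sliding window is matched against correspond to the commonly documented ADX header layout; the helper below is an illustrative restatement of that test over a plain byte buffer (function name and field interpretations are mine, not code from the commit).

#include <stdint.h>

/* Illustrative restatement of the parser's fixed-field test. */
static int looks_like_adx_header(const uint8_t *buf, int size)
{
    int offset, channels;

    if (size < 8)
        return 0;
    if (buf[0] != 0x80 || buf[1] != 0x00)  /* 0x8000 magic */
        return 0;
    if (buf[4] != 0x03)                    /* encoding type: standard ADX */
        return 0;
    if (buf[5] != 0x12 || buf[6] != 0x04)  /* 18-byte blocks, 4-bit samples */
        return 0;
    offset   = (buf[2] << 8) | buf[3];     /* header size = offset + 4 */
    channels = buf[7];
    return channels > 0 && offset + 4 >= 8;
}
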
...@@ -45,7 +45,8 @@ static av_cold int adx_decode_init(AVCodecContext *avctx) ...@@ -45,7 +45,8 @@ static av_cold int adx_decode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n"); av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
c->channels = avctx->channels; c->channels = avctx->channels;
c->header_parsed = 1;
} }
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
...@@ -106,21 +107,21 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data, ...@@ -106,21 +107,21 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
return buf_size; return buf_size;
} }
if(AV_RB16(buf) == 0x8000){ if (!c->header_parsed && buf_size >= 2 && AV_RB16(buf) == 0x8000) {
int header_size; int header_size;
if ((ret = avpriv_adx_decode_header(avctx, buf, if ((ret = avpriv_adx_decode_header(avctx, buf, buf_size, &header_size,
buf_size, &header_size,
c->coeff)) < 0) { c->coeff)) < 0) {
av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n"); av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
c->channels = avctx->channels; c->channels = avctx->channels;
if(buf_size < header_size) c->header_parsed = 1;
if (buf_size < header_size)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
buf += header_size; buf += header_size;
buf_size -= header_size; buf_size -= header_size;
} }
if(c->channels <= 0) if (!c->header_parsed)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
/* calculate number of blocks in the packet */ /* calculate number of blocks in the packet */
...@@ -25,27 +25,23 @@ ...@@ -25,27 +25,23 @@
* @author 2005 David Hammerton * @author 2005 David Hammerton
* @see http://crazney.net/programs/itunes/alac.html * @see http://crazney.net/programs/itunes/alac.html
* *
* Note: This decoder expects a 36- (0x24-)byte QuickTime atom to be * Note: This decoder expects a 36-byte QuickTime atom to be
* passed through the extradata[_size] fields. This atom is tacked onto * passed through the extradata[_size] fields. This atom is tacked onto
* the end of an 'alac' stsd atom and has the following format: * the end of an 'alac' stsd atom and has the following format:
* bytes 0-3 atom size (0x24), big-endian
* bytes 4-7 atom type ('alac', not the 'alac' tag from start of stsd)
* bytes 8-35 data bytes needed by decoder
* *
* Extradata: * 32bit atom size
* 32bit size * 32bit tag ("alac")
* 32bit tag (=alac) * 32bit tag version (0)
* 32bit zero? * 32bit samples per frame (used when not set explicitly in the frames)
* 32bit max sample per frame * 8bit compatible version (0)
* 8bit ?? (zero?)
* 8bit sample size * 8bit sample size
* 8bit history mult * 8bit history mult (40)
* 8bit initial history * 8bit initial history (14)
* 8bit kmodifier * 8bit kmodifier (10)
* 8bit channels? * 8bit channels
* 16bit ?? * 16bit maxRun (255)
* 32bit max coded frame size * 32bit max coded frame size (0 means unknown)
* 32bit bitrate? * 32bit average bitrate (0 means unknown)
* 32bit samplerate * 32bit samplerate
*/ */
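
As a reading aid for the layout documented above: the 36-byte extradata is a 12-byte size/'alac'/version prefix followed by the 24-byte config block, which matches what Apple's reference code calls ALACSpecificConfig. The struct below is illustrative only; all multi-byte fields are big-endian in the extradata, so parse them with explicit byte reads rather than memcpy().

#include <stdint.h>

typedef struct ALACSpecificConfig {
    uint32_t frame_length;        /* samples per frame                */
    uint8_t  compatible_version;  /* 0                                */
    uint8_t  bit_depth;           /* sample size                      */
    uint8_t  pb;                  /* rice history mult (40)           */
    uint8_t  mb;                  /* rice initial history (14)        */
    uint8_t  kb;                  /* rice k modifier (10)             */
    uint8_t  num_channels;
    uint16_t max_run;             /* 255                              */
    uint32_t max_frame_bytes;     /* 0 means unknown                  */
    uint32_t avg_bit_rate;        /* 0 means unknown                  */
    uint32_t sample_rate;
} ALACSpecificConfig;
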
...@@ -464,24 +460,29 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data, ...@@ -464,24 +460,29 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
if(ret<0) if(ret<0)
return ret; return ret;
if (prediction_type[ch] == 0) { /* adaptive FIR filter */
/* adaptive fir */ if (prediction_type[ch] == 15) {
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch], /* Prediction type 15 runs the adaptive FIR twice.
alac->outputsamples_buffer[ch], * The first pass uses the special-case coef_num = 31, while
outputsamples, * the second pass uses the coefs from the bitstream.
readsamplesize, *
predictor_coef_table[ch], * However, this prediction type is not currently used by the
predictor_coef_num[ch], * reference encoder.
prediction_quantitization[ch]);
} else {
av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type[ch]);
/* I think the only other prediction type (or perhaps this is
* just a boolean?) runs adaptive fir twice.. like:
* predictor_decompress_fir_adapt(predictor_error, tempout, ...)
* predictor_decompress_fir_adapt(predictor_error, outputsamples ...)
* little strange..
*/ */
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
alac->predicterror_buffer[ch],
outputsamples, readsamplesize,
NULL, 31, 0);
} else if (prediction_type[ch] > 0) {
av_log(avctx, AV_LOG_WARNING, "unknown prediction type: %i\n",
prediction_type[ch]);
} }
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
alac->outputsamples_buffer[ch],
outputsamples, readsamplesize,
predictor_coef_table[ch],
predictor_coef_num[ch],
prediction_quantitization[ch]);
} }
} else { } else {
/* not compressed, easy case */ /* not compressed, easy case */
...@@ -584,7 +585,7 @@ static int alac_set_info(ALACContext *alac) ...@@ -584,7 +585,7 @@ static int alac_set_info(ALACContext *alac)
ptr += 4; /* size */ ptr += 4; /* size */
ptr += 4; /* alac */ ptr += 4; /* alac */
ptr += 4; /* 0 ? */ ptr += 4; /* version */
if(AV_RB32(ptr) >= UINT_MAX/4){ if(AV_RB32(ptr) >= UINT_MAX/4){
av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n"); av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n");
...@@ -593,15 +594,15 @@ static int alac_set_info(ALACContext *alac) ...@@ -593,15 +594,15 @@ static int alac_set_info(ALACContext *alac)
/* buffer size / 2 ? */ /* buffer size / 2 ? */
alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr); alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr);
ptr++; /* ??? */ ptr++; /* compatible version */
alac->setinfo_sample_size = *ptr++; alac->setinfo_sample_size = *ptr++;
alac->setinfo_rice_historymult = *ptr++; alac->setinfo_rice_historymult = *ptr++;
alac->setinfo_rice_initialhistory = *ptr++; alac->setinfo_rice_initialhistory = *ptr++;
alac->setinfo_rice_kmodifier = *ptr++; alac->setinfo_rice_kmodifier = *ptr++;
alac->numchannels = *ptr++; alac->numchannels = *ptr++;
bytestream_get_be16(&ptr); /* ??? */ bytestream_get_be16(&ptr); /* maxRun */
bytestream_get_be32(&ptr); /* max coded frame size */ bytestream_get_be32(&ptr); /* max coded frame size */
bytestream_get_be32(&ptr); /* bitrate ? */ bytestream_get_be32(&ptr); /* average bitrate */
bytestream_get_be32(&ptr); /* samplerate */ bytestream_get_be32(&ptr); /* samplerate */
return 0; return 0;
...@@ -348,6 +348,7 @@ static void alac_entropy_coder(AlacEncodeContext *s) ...@@ -348,6 +348,7 @@ static void alac_entropy_coder(AlacEncodeContext *s)
static void write_compressed_frame(AlacEncodeContext *s) static void write_compressed_frame(AlacEncodeContext *s)
{ {
int i, j; int i, j;
int prediction_type = 0;
if (s->avctx->channels == 2) if (s->avctx->channels == 2)
alac_stereo_decorrelation(s); alac_stereo_decorrelation(s);
...@@ -358,7 +359,7 @@ static void write_compressed_frame(AlacEncodeContext *s) ...@@ -358,7 +359,7 @@ static void write_compressed_frame(AlacEncodeContext *s)
calc_predictor_params(s, i); calc_predictor_params(s, i);
put_bits(&s->pbctx, 4, 0); // prediction type : currently only type 0 has been RE'd put_bits(&s->pbctx, 4, prediction_type);
put_bits(&s->pbctx, 4, s->lpc[i].lpc_quant); put_bits(&s->pbctx, 4, s->lpc[i].lpc_quant);
put_bits(&s->pbctx, 3, s->rc.rice_modifier); put_bits(&s->pbctx, 3, s->rc.rice_modifier);
...@@ -373,6 +374,14 @@ static void write_compressed_frame(AlacEncodeContext *s) ...@@ -373,6 +374,14 @@ static void write_compressed_frame(AlacEncodeContext *s)
for (i = 0; i < s->avctx->channels; i++) { for (i = 0; i < s->avctx->channels; i++) {
alac_linear_predictor(s, i); alac_linear_predictor(s, i);
// TODO: determine when this will actually help. for now it's not used.
if (prediction_type == 15) {
// 2nd pass 1st order filter
for (j = s->avctx->frame_size - 1; j > 0; j--)
s->predictor_buf[j] -= s->predictor_buf[j - 1];
}
alac_entropy_coder(s); alac_entropy_coder(s);
} }
} }
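
A minimal sketch of the filter pair behind prediction type 15, pulled out of the codec for clarity (names are mine): the encoder's extra pass is a first-order backward difference applied after the LPC stage, and the decoder's coef_num == 31 pass is the matching running sum applied before the normal adaptive FIR. The real decoder additionally sign-extends to the sample width, which is omitted here.

#include <stdint.h>

static void type15_second_pass_encode(int32_t *buf, int n)
{
    int j;
    for (j = n - 1; j > 0; j--)
        buf[j] -= buf[j - 1];      /* backward difference */
}

static void type15_second_pass_decode(int32_t *buf, int n)
{
    int j;
    for (j = 1; j < n; j++)
        buf[j] += buf[j - 1];      /* running sum, the exact inverse */
}
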
...@@ -391,8 +400,11 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) ...@@ -391,8 +400,11 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
return -1; return -1;
} }
if(avctx->channels > 2) { /* TODO: Correctly implement multi-channel ALAC.
av_log(avctx, AV_LOG_ERROR, "channels > 2 not supported\n"); It is similar to multi-channel AAC, in that it has a series of
single-channel (SCE), channel-pair (CPE), and LFE elements. */
if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "only mono or stereo input is currently supported\n");
return AVERROR_PATCHWELCOME; return AVERROR_PATCHWELCOME;
} }
...@@ -28,15 +28,16 @@ ...@@ -28,15 +28,16 @@
*/ */
#include <math.h> #include <math.h>
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
#include "dct.h" #include "dct.h"
#include "dct32.h" #include "dct32.h"
/* sin((M_PI * x / (2*n)) */ /* sin((M_PI * x / (2 * n)) */
#define SIN(s,n,x) (s->costab[(n) - (x)]) #define SIN(s, n, x) (s->costab[(n) - (x)])
/* cos((M_PI * x / (2*n)) */ /* cos((M_PI * x / (2 * n)) */
#define COS(s,n,x) (s->costab[x]) #define COS(s, n, x) (s->costab[x])
static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data) static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data)
{ {
...@@ -44,28 +45,28 @@ static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data) ...@@ -44,28 +45,28 @@ static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data)
int i; int i;
data[0] = 0; data[0] = 0;
for(i = 1; i < n/2; i++) { for (i = 1; i < n / 2; i++) {
float tmp1 = data[i ]; float tmp1 = data[i ];
float tmp2 = data[n - i]; float tmp2 = data[n - i];
float s = SIN(ctx, n, 2*i); float s = SIN(ctx, n, 2 * i);
s *= tmp1 + tmp2; s *= tmp1 + tmp2;
tmp1 = (tmp1 - tmp2) * 0.5f; tmp1 = (tmp1 - tmp2) * 0.5f;
data[i ] = s + tmp1; data[i] = s + tmp1;
data[n - i] = s - tmp1; data[n - i] = s - tmp1;
} }
data[n/2] *= 2; data[n / 2] *= 2;
ctx->rdft.rdft_calc(&ctx->rdft, data); ctx->rdft.rdft_calc(&ctx->rdft, data);
data[0] *= 0.5f; data[0] *= 0.5f;
for(i = 1; i < n-2; i += 2) { for (i = 1; i < n - 2; i += 2) {
data[i + 1] += data[i - 1]; data[i + 1] += data[i - 1];
data[i ] = -data[i + 2]; data[i] = -data[i + 2];
} }
data[n-1] = 0; data[n - 1] = 0;
} }
static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data) static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data)
...@@ -74,19 +75,19 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data) ...@@ -74,19 +75,19 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data)
int i; int i;
float next = -0.5f * (data[0] - data[n]); float next = -0.5f * (data[0] - data[n]);
for(i = 0; i < n/2; i++) { for (i = 0; i < n / 2; i++) {
float tmp1 = data[i ]; float tmp1 = data[i];
float tmp2 = data[n - i]; float tmp2 = data[n - i];
float s = SIN(ctx, n, 2*i); float s = SIN(ctx, n, 2 * i);
float c = COS(ctx, n, 2*i); float c = COS(ctx, n, 2 * i);
c *= tmp1 - tmp2; c *= tmp1 - tmp2;
s *= tmp1 - tmp2; s *= tmp1 - tmp2;
next += c; next += c;
tmp1 = (tmp1 + tmp2) * 0.5f; tmp1 = (tmp1 + tmp2) * 0.5f;
data[i ] = tmp1 - s; data[i] = tmp1 - s;
data[n - i] = tmp1 + s; data[n - i] = tmp1 + s;
} }
...@@ -94,7 +95,7 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data) ...@@ -94,7 +95,7 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data)
data[n] = data[1]; data[n] = data[1];
data[1] = next; data[1] = next;
for(i = 3; i <= n; i += 2) for (i = 3; i <= n; i += 2)
data[i] = data[i - 2] - data[i]; data[i] = data[i - 2] - data[i];
} }
...@@ -103,16 +104,16 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data) ...@@ -103,16 +104,16 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data)
int n = 1 << ctx->nbits; int n = 1 << ctx->nbits;
int i; int i;
float next = data[n - 1]; float next = data[n - 1];
float inv_n = 1.0f / n; float inv_n = 1.0f / n;
for (i = n - 2; i >= 2; i -= 2) { for (i = n - 2; i >= 2; i -= 2) {
float val1 = data[i ]; float val1 = data[i];
float val2 = data[i - 1] - data[i + 1]; float val2 = data[i - 1] - data[i + 1];
float c = COS(ctx, n, i); float c = COS(ctx, n, i);
float s = SIN(ctx, n, i); float s = SIN(ctx, n, i);
data[i ] = c * val1 + s * val2; data[i] = c * val1 + s * val2;
data[i + 1] = s * val1 - c * val2; data[i + 1] = s * val1 - c * val2;
} }
...@@ -121,13 +122,13 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data) ...@@ -121,13 +122,13 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data)
ctx->rdft.rdft_calc(&ctx->rdft, data); ctx->rdft.rdft_calc(&ctx->rdft, data);
for (i = 0; i < n / 2; i++) { for (i = 0; i < n / 2; i++) {
float tmp1 = data[i ] * inv_n; float tmp1 = data[i] * inv_n;
float tmp2 = data[n - i - 1] * inv_n; float tmp2 = data[n - i - 1] * inv_n;
float csc = ctx->csc2[i] * (tmp1 - tmp2); float csc = ctx->csc2[i] * (tmp1 - tmp2);
tmp1 += tmp2; tmp1 += tmp2;
data[i ] = tmp1 + csc; data[i] = tmp1 + csc;
data[n - i - 1] = tmp1 - csc; data[n - i - 1] = tmp1 - csc;
} }
} }
...@@ -137,34 +138,33 @@ static void ff_dct_calc_II_c(DCTContext *ctx, FFTSample *data) ...@@ -137,34 +138,33 @@ static void ff_dct_calc_II_c(DCTContext *ctx, FFTSample *data)
int i; int i;
float next; float next;
for (i=0; i < n/2; i++) { for (i = 0; i < n / 2; i++) {
float tmp1 = data[i ]; float tmp1 = data[i];
float tmp2 = data[n - i - 1]; float tmp2 = data[n - i - 1];
float s = SIN(ctx, n, 2*i + 1); float s = SIN(ctx, n, 2 * i + 1);
s *= tmp1 - tmp2; s *= tmp1 - tmp2;
tmp1 = (tmp1 + tmp2) * 0.5f; tmp1 = (tmp1 + tmp2) * 0.5f;
data[i ] = tmp1 + s; data[i] = tmp1 + s;
data[n-i-1] = tmp1 - s; data[n-i-1] = tmp1 - s;
} }
ctx->rdft.rdft_calc(&ctx->rdft, data); ctx->rdft.rdft_calc(&ctx->rdft, data);
next = data[1] * 0.5; next = data[1] * 0.5;
data[1] *= -1; data[1] *= -1;
for (i = n - 2; i >= 0; i -= 2) { for (i = n - 2; i >= 0; i -= 2) {
float inr = data[i ]; float inr = data[i ];
float ini = data[i + 1]; float ini = data[i + 1];
float c = COS(ctx, n, i); float c = COS(ctx, n, i);
float s = SIN(ctx, n, i); float s = SIN(ctx, n, i);
data[i ] = c * inr + s * ini; data[i] = c * inr + s * ini;
data[i + 1] = next;
data[i+1] = next; next += s * inr - c * ini;
next += s * inr - c * ini;
} }
} }
...@@ -180,36 +180,36 @@ av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse) ...@@ -180,36 +180,36 @@ av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse)
memset(s, 0, sizeof(*s)); memset(s, 0, sizeof(*s));
s->nbits = nbits; s->nbits = nbits;
s->inverse = inverse; s->inverse = inverse;
if (inverse == DCT_II && nbits == 5) { if (inverse == DCT_II && nbits == 5) {
s->dct_calc = dct32_func; s->dct_calc = dct32_func;
} else { } else {
ff_init_ff_cos_tabs(nbits+2); ff_init_ff_cos_tabs(nbits + 2);
s->costab = ff_cos_tabs[nbits+2];
s->csc2 = av_malloc(n/2 * sizeof(FFTSample)); s->costab = ff_cos_tabs[nbits + 2];
s->csc2 = av_malloc(n / 2 * sizeof(FFTSample));
if (ff_rdft_init(&s->rdft, nbits, inverse == DCT_III) < 0) { if (ff_rdft_init(&s->rdft, nbits, inverse == DCT_III) < 0) {
av_free(s->csc2); av_free(s->csc2);
return -1; return -1;
} }
for (i = 0; i < n/2; i++) for (i = 0; i < n / 2; i++)
s->csc2[i] = 0.5 / sin((M_PI / (2*n) * (2*i + 1))); s->csc2[i] = 0.5 / sin((M_PI / (2 * n) * (2 * i + 1)));
switch(inverse) { switch (inverse) {
case DCT_I : s->dct_calc = ff_dct_calc_I_c; break; case DCT_I : s->dct_calc = ff_dct_calc_I_c; break;
case DCT_II : s->dct_calc = ff_dct_calc_II_c ; break; case DCT_II : s->dct_calc = ff_dct_calc_II_c; break;
case DCT_III: s->dct_calc = ff_dct_calc_III_c; break; case DCT_III: s->dct_calc = ff_dct_calc_III_c; break;
case DST_I : s->dct_calc = ff_dst_calc_I_c; break; case DST_I : s->dct_calc = ff_dst_calc_I_c; break;
} }
} }
s->dct32 = ff_dct32_float; s->dct32 = ff_dct32_float;
if (HAVE_MMX) ff_dct_init_mmx(s); if (HAVE_MMX)
ff_dct_init_mmx(s);
return 0; return 0;
} }
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#define _GNU_SOURCE #define _GNU_SOURCE
#include <sched.h> #include <sched.h>
#endif #endif
#if HAVE_GETSYSTEMINFO #if HAVE_GETPROCESSAFFINITYMASK
#include <windows.h> #include <windows.h>
#endif #endif
#if HAVE_SYSCTL #if HAVE_SYSCTL
...@@ -172,10 +172,11 @@ static int get_logical_cpus(AVCodecContext *avctx) ...@@ -172,10 +172,11 @@ static int get_logical_cpus(AVCodecContext *avctx)
if (!ret) { if (!ret) {
nb_cpus = CPU_COUNT(&cpuset); nb_cpus = CPU_COUNT(&cpuset);
} }
#elif HAVE_GETSYSTEMINFO #elif HAVE_GETPROCESSAFFINITYMASK
SYSTEM_INFO sysinfo; DWORD_PTR proc_aff, sys_aff;
GetSystemInfo(&sysinfo); ret = GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff);
nb_cpus = sysinfo.dwNumberOfProcessors; if (ret)
nb_cpus = av_popcount64(proc_aff);
#elif HAVE_SYSCTL && defined(HW_NCPU) #elif HAVE_SYSCTL && defined(HW_NCPU)
int mib[2] = { CTL_HW, HW_NCPU }; int mib[2] = { CTL_HW, HW_NCPU };
size_t len = sizeof(nb_cpus); size_t len = sizeof(nb_cpus);
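
A standalone sketch of the detection strategy adopted above: count the bits set in the process affinity mask so that CPUs the process may not run on are excluded, instead of taking dwNumberOfProcessors from GetSystemInfo(). The program is illustrative and uses a plain bit loop rather than av_popcount64().

#include <windows.h>
#include <stdio.h>

static int count_usable_cpus(void)
{
    DWORD_PTR proc_aff, sys_aff;
    int n = 0;

    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
        return 1;                  /* conservative fallback */
    while (proc_aff) {             /* portable popcount */
        n += (int)(proc_aff & 1);
        proc_aff >>= 1;
    }
    return n;
}

int main(void)
{
    printf("usable CPUs: %d\n", count_usable_cpus());
    return 0;
}
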
...@@ -516,9 +516,9 @@ static void halfpel_interpol(SnowContext *s, uint8_t *halfpel[4][4], AVFrame *fr ...@@ -516,9 +516,9 @@ static void halfpel_interpol(SnowContext *s, uint8_t *halfpel[4][4], AVFrame *fr
int ls= frame->linesize[p]; int ls= frame->linesize[p];
uint8_t *src= frame->data[p]; uint8_t *src= frame->data[p];
halfpel[1][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); halfpel[1][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[2][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); halfpel[2][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[3][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); halfpel[3][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[0][p]= src; halfpel[0][p]= src;
for(y=0; y<h; y++){ for(y=0; y<h; y++){
...@@ -282,6 +282,77 @@ static void restore_median(uint8_t *src, int step, int stride, ...@@ -282,6 +282,77 @@ static void restore_median(uint8_t *src, int step, int stride,
} }
} }
/* UtVideo interlaced mode treats every two lines as a single one,
* so restoring function should take care of possible padding between
* two parts of the same "line".
*/
static void restore_median_il(uint8_t *src, int step, int stride,
int width, int height, int slices, int rmode)
{
int i, j, slice;
int A, B, C;
uint8_t *bsrc;
int slice_start, slice_height;
const int cmask = ~(rmode ? 3 : 1);
const int stride2 = stride << 1;
for (slice = 0; slice < slices; slice++) {
slice_start = ((slice * height) / slices) & cmask;
slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start;
slice_height >>= 1;
bsrc = src + slice_start * stride;
// first line - left neighbour prediction
bsrc[0] += 0x80;
A = bsrc[0];
for (i = step; i < width * step; i += step) {
bsrc[i] += A;
A = bsrc[i];
}
for (i = 0; i < width * step; i += step) {
bsrc[stride + i] += A;
A = bsrc[stride + i];
}
bsrc += stride2;
if (slice_height == 1)
continue;
// second line - first element has top prediction, the rest uses median
C = bsrc[-stride2];
bsrc[0] += C;
A = bsrc[0];
for (i = step; i < width * step; i += step) {
B = bsrc[i - stride2];
bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
C = B;
A = bsrc[i];
}
for (i = 0; i < width * step; i += step) {
B = bsrc[i - stride];
bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
C = B;
A = bsrc[stride + i];
}
bsrc += stride2;
// the rest of lines use continuous median prediction
for (j = 2; j < slice_height; j++) {
for (i = 0; i < width * step; i += step) {
B = bsrc[i - stride2];
bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
C = B;
A = bsrc[i];
}
for (i = 0; i < width * step; i += step) {
B = bsrc[i - stride];
bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
C = B;
A = bsrc[i + stride];
}
bsrc += stride2;
}
}
}
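
For readers unfamiliar with mid_pred(), the median prediction used throughout restore_median_il() reduces to the classic median-of-three rule sketched below: the prediction is the median of the left neighbour A, the top neighbour B, and the gradient guess A + B - C, where C is the top-left neighbour. This is a plain restatement, not FFmpeg's optimised implementation.

#include <stdint.h>

static uint8_t median_predict(int A, int B, int C)
{
    int grad = (uint8_t)(A + B - C);
    int lo   = A < B ? A : B;
    int hi   = A < B ? B : A;

    if (grad < lo)
        return lo;
    if (grad > hi)
        return hi;
    return grad;
}
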
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
...@@ -381,10 +452,18 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac ...@@ -381,10 +452,18 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
c->frame_pred == PRED_LEFT); c->frame_pred == PRED_LEFT);
if (ret) if (ret)
return ret; return ret;
if (c->frame_pred == PRED_MEDIAN) if (c->frame_pred == PRED_MEDIAN) {
restore_median(c->pic.data[i], 1, c->pic.linesize[i], if (!c->interlaced) {
avctx->width >> !!i, avctx->height >> !!i, restore_median(c->pic.data[i], 1, c->pic.linesize[i],
c->slices, !i); avctx->width >> !!i, avctx->height >> !!i,
c->slices, !i);
} else {
restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
avctx->width >> !!i,
avctx->height >> !!i,
c->slices, !i);
}
}
} }
break; break;
case PIX_FMT_YUV422P: case PIX_FMT_YUV422P:
...@@ -395,9 +474,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac ...@@ -395,9 +474,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
c->frame_pred == PRED_LEFT); c->frame_pred == PRED_LEFT);
if (ret) if (ret)
return ret; return ret;
if (c->frame_pred == PRED_MEDIAN) if (c->frame_pred == PRED_MEDIAN) {
restore_median(c->pic.data[i], 1, c->pic.linesize[i], if (!c->interlaced) {
avctx->width >> !!i, avctx->height, c->slices, 0); restore_median(c->pic.data[i], 1, c->pic.linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
} else {
restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
}
}
} }
break; break;
} }
...@@ -2235,7 +2235,7 @@ static int has_decode_delay_been_guessed(AVStream *st) ...@@ -2235,7 +2235,7 @@ static int has_decode_delay_been_guessed(AVStream *st)
static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options) static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
{ {
AVCodec *codec; AVCodec *codec;
int got_picture, ret = 0; int got_picture = 1, ret = 0;
AVFrame picture; AVFrame picture;
AVPacket pkt = *avpkt; AVPacket pkt = *avpkt;
...@@ -2248,7 +2248,8 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option ...@@ -2248,7 +2248,8 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option
return ret; return ret;
} }
while (pkt.size > 0 && ret >= 0 && while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
ret >= 0 &&
(!has_codec_parameters(st->codec) || (!has_codec_parameters(st->codec) ||
!has_decode_delay_been_guessed(st) || !has_decode_delay_been_guessed(st) ||
(!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) { (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
...@@ -2377,14 +2378,9 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) ...@@ -2377,14 +2378,9 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
int i, count, ret, read_size, j; int i, count, ret, read_size, j;
AVStream *st; AVStream *st;
AVPacket pkt1, *pkt; AVPacket pkt1, *pkt;
AVDictionary *one_thread_opt = NULL;
int64_t old_offset = avio_tell(ic->pb); int64_t old_offset = avio_tell(ic->pb);
int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
/* this function doesn't flush the decoders, so force thread count
* to 1 to fix behavior when thread count > number of frames in the file */
av_dict_set(&one_thread_opt, "threads", "1", 0);
for(i=0;i<ic->nb_streams;i++) { for(i=0;i<ic->nb_streams;i++) {
AVCodec *codec; AVCodec *codec;
st = ic->streams[i]; st = ic->streams[i];
...@@ -2406,21 +2402,15 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) ...@@ -2406,21 +2402,15 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
assert(!st->codec->codec); assert(!st->codec->codec);
codec = avcodec_find_decoder(st->codec->codec_id); codec = avcodec_find_decoder(st->codec->codec_id);
/* this function doesn't flush the decoders, so force thread count
* to 1 to fix behavior when thread count > number of frames in the file */
if (options)
av_dict_set(&options[i], "threads", "1", 0);
/* Ensure that subtitle_header is properly set. */ /* Ensure that subtitle_header is properly set. */
if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
&& codec && !st->codec->codec) && codec && !st->codec->codec)
avcodec_open2(st->codec, codec, options ? &options[i] : &one_thread_opt); avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
//try to just open decoders, in case this is enough to get parameters //try to just open decoders, in case this is enough to get parameters
if(!has_codec_parameters(st->codec)){ if(!has_codec_parameters(st->codec)){
if (codec && !st->codec->codec) if (codec && !st->codec->codec)
avcodec_open2(st->codec, codec, options ? &options[i] avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
: &one_thread_opt);
} }
} }
...@@ -2486,10 +2476,22 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) ...@@ -2486,10 +2476,22 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
continue; continue;
if (ret < 0) { if (ret < 0) {
/* EOF or error */ /* EOF or error*/
AVPacket empty_pkt = { 0 };
int err;
av_init_packet(&empty_pkt);
ret = -1; /* we could not have all the codec parameters before EOF */ ret = -1; /* we could not have all the codec parameters before EOF */
for(i=0;i<ic->nb_streams;i++) { for(i=0;i<ic->nb_streams;i++) {
st = ic->streams[i]; st = ic->streams[i];
/* flush the decoders */
while ((err = try_decode_frame(st, &empty_pkt,
(options && i < orig_nb_streams) ?
&options[i] : NULL)) >= 0)
if (has_codec_parameters(st->codec))
break;
if (!has_codec_parameters(st->codec)){ if (!has_codec_parameters(st->codec)){
char buf[256]; char buf[256];
avcodec_string(buf, sizeof(buf), st->codec, 0); avcodec_string(buf, sizeof(buf), st->codec, 0);
...@@ -2562,8 +2564,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) ...@@ -2562,8 +2564,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
least one frame of codec data, this makes sure the codec initializes least one frame of codec data, this makes sure the codec initializes
the channel configuration and does not only trust the values from the container. the channel configuration and does not only trust the values from the container.
*/ */
try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
: &one_thread_opt);
st->codec_info_nb_frames++; st->codec_info_nb_frames++;
count++; count++;
...@@ -2689,7 +2690,6 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) ...@@ -2689,7 +2690,6 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
ic->streams[i]->codec->thread_count = 0; ic->streams[i]->codec->thread_count = 0;
av_freep(&ic->streams[i]->info); av_freep(&ic->streams[i]->info);
} }
av_dict_free(&one_thread_opt);
return ret; return ret;
} }
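
The flushing added above follows the usual drain pattern of this API generation: once the demuxer has no more data, keep feeding an empty packet to a decoder with CODEC_CAP_DELAY until it stops returning frames. A self-contained sketch (helper name is mine, error handling minimal):

#include <libavcodec/avcodec.h>

static void drain_video_decoder(AVCodecContext *avctx, AVFrame *frame)
{
    AVPacket empty_pkt = { 0 };
    int got_picture;

    av_init_packet(&empty_pkt);
    empty_pkt.data = NULL;
    empty_pkt.size = 0;

    do {
        got_picture = 0;
        if (avcodec_decode_video2(avctx, frame, &got_picture, &empty_pkt) < 0)
            break;
        /* each buffered frame arrives here with got_picture set */
    } while (got_picture);
}
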
...@@ -154,7 +154,7 @@ ...@@ -154,7 +154,7 @@
*/ */
#define LIBAVUTIL_VERSION_MAJOR 51 #define LIBAVUTIL_VERSION_MAJOR 51
#define LIBAVUTIL_VERSION_MINOR 33 #define LIBAVUTIL_VERSION_MINOR 34
#define LIBAVUTIL_VERSION_MICRO 100 #define LIBAVUTIL_VERSION_MICRO 100
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
...@@ -220,6 +220,16 @@ static av_always_inline av_const int av_popcount_c(uint32_t x) ...@@ -220,6 +220,16 @@ static av_always_inline av_const int av_popcount_c(uint32_t x)
return (x + (x >> 16)) & 0x3F; return (x + (x >> 16)) & 0x3F;
} }
/**
* Count number of bits set to one in x
* @param x value to count bits of
* @return the number of bits set to one in x
*/
static av_always_inline av_const int av_popcount64_c(uint64_t x)
{
return av_popcount(x) + av_popcount(x >> 32);
}
#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) #define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) #define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
...@@ -385,3 +395,6 @@ static av_always_inline av_const int av_popcount_c(uint32_t x) ...@@ -385,3 +395,6 @@ static av_always_inline av_const int av_popcount_c(uint32_t x)
#ifndef av_popcount #ifndef av_popcount
# define av_popcount av_popcount_c # define av_popcount av_popcount_c
#endif #endif
#ifndef av_popcount64
# define av_popcount64 av_popcount64_c
#endif
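
A tiny usage sketch for the new av_popcount64(): counting the CPUs allowed by a 64-bit affinity mask, which is what the Win32 threading change in this merge uses it for. The mask value is just an example.

#include <stdio.h>
#include <stdint.h>
#include <libavutil/avutil.h>

int main(void)
{
    uint64_t affinity = 0xF0F0;   /* 8 bits set */
    printf("schedulable CPUs: %d\n", av_popcount64(affinity));
    return 0;
}
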
FATE_SCREEN += fate-cscd FATE_SCREEN += fate-cscd
fate-cscd: CMD = framecrc -i $(SAMPLES)/CSCD/sample_video.avi -an -vsync 0 -pix_fmt rgb24 fate-cscd: CMD = framecrc -i $(SAMPLES)/CSCD/sample_video.avi -an -vsync 0 -pix_fmt rgb24
FATE_SCREEN += fate-dxtory
fate-dxtory: CMD = framecrc -i $(SAMPLES)/dxtory/dxtory_mic.avi
FATE_SCREEN += fate-fraps-v0 FATE_SCREEN += fate-fraps-v0
fate-fraps-v0: CMD = framecrc -i $(SAMPLES)/fraps/Griffin_Ragdoll01-partial.avi fate-fraps-v0: CMD = framecrc -i $(SAMPLES)/fraps/Griffin_Ragdoll01-partial.avi
0, 0, 1382400, 0x44373645