Commit 7636c8c6 authored by Aneesh Dogra, committed by Anton Khirnov

avconv: K&R cosmetics

Signed-off-by: Anton Khirnov <anton@khirnov.net>
parent cf4afe0b
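The change is purely cosmetic: it reflows avconv.c to the K&R conventions used in the rest of the tree without touching behaviour. As a rough illustration of the rules applied throughout the diff below (a made-up snippet, not code from avconv.c): spaces around '=' and binary operators, a space between if/for/while and the opening parenthesis, cuddled '} else {', and function braces on their own line.

    #include <stdio.h>

    /* before K&R cosmetics:            after:
     *   static int frames_done= 0;       static int frames_done = 0;
     *   if(frames_done>0){               if (frames_done > 0) {
     *   }else{                           } else {
     */
    static int frames_done = 0;

    static void report(int n)
    {
        if (n > 0) {
            printf("processed %d frames\n", n);
        } else {
            printf("nothing to do\n");
        }
    }

    int main(void)
    {
        for (frames_done = 0; frames_done < 3; frames_done++)
            report(frames_done);
        return 0;
    }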
@@ -111,10 +111,10 @@ static int do_hex_dump = 0;
static int do_pkt_dump = 0; static int do_pkt_dump = 0;
static int do_pass = 0; static int do_pass = 0;
static char *pass_logfilename_prefix = NULL; static char *pass_logfilename_prefix = NULL;
static int video_sync_method= -1; static int video_sync_method = -1;
static int audio_sync_method= 0; static int audio_sync_method = 0;
static float audio_drift_threshold= 0.1; static float audio_drift_threshold = 0.1;
static int copy_ts= 0; static int copy_ts = 0;
static int copy_tb = 1; static int copy_tb = 1;
static int opt_shortest = 0; static int opt_shortest = 0;
static char *vstats_filename; static char *vstats_filename;
@@ -197,9 +197,9 @@ typedef struct OutputStream {
int frame_number; int frame_number;
/* input pts and corresponding output pts /* input pts and corresponding output pts
for A/V sync */ for A/V sync */
//double sync_ipts; /* dts from the AVPacket of the demuxer in second units */ // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
struct InputStream *sync_ist; /* input stream to sync against */ struct InputStream *sync_ist; /* input stream to sync against */
int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
AVBitStreamFilterContext *bitstream_filters; AVBitStreamFilterContext *bitstream_filters;
AVCodec *enc; AVCodec *enc;
int64_t max_frames; int64_t max_frames;
@@ -552,9 +552,9 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
ost->graph = avfilter_graph_alloc(); ost->graph = avfilter_graph_alloc();
if (ist->st->sample_aspect_ratio.num){ if (ist->st->sample_aspect_ratio.num) {
sample_aspect_ratio = ist->st->sample_aspect_ratio; sample_aspect_ratio = ist->st->sample_aspect_ratio;
}else } else
sample_aspect_ratio = ist->st->codec->sample_aspect_ratio; sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width, snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
@@ -616,7 +616,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
codec->height = ost->output_video_filter->inputs[0]->h; codec->height = ost->output_video_filter->inputs[0]->h;
codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
ost->frame_aspect_ratio ? // overridden by the -aspect cli option ost->frame_aspect_ratio ? // overridden by the -aspect cli option
av_d2q(ost->frame_aspect_ratio*codec->height/codec->width, 255) : av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
ost->output_video_filter->inputs[0]->sample_aspect_ratio; ost->output_video_filter->inputs[0]->sample_aspect_ratio;
return 0; return 0;
@@ -660,14 +660,14 @@ void exit_program(int ret)
int i; int i;
/* close files */ /* close files */
for(i=0;i<nb_output_files;i++) { for (i = 0; i < nb_output_files; i++) {
AVFormatContext *s = output_files[i].ctx; AVFormatContext *s = output_files[i].ctx;
if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb) if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
avio_close(s->pb); avio_close(s->pb);
avformat_free_context(s); avformat_free_context(s);
av_dict_free(&output_files[i].opts); av_dict_free(&output_files[i].opts);
} }
for(i=0;i<nb_input_files;i++) { for (i = 0; i < nb_input_files; i++) {
avformat_close_input(&input_files[i].ctx); avformat_close_input(&input_files[i].ctx);
} }
for (i = 0; i < nb_input_streams; i++) { for (i = 0; i < nb_input_streams; i++) {
@@ -689,7 +689,7 @@ void exit_program(int ret)
uninit_opts(); uninit_opts();
av_free(audio_buf); av_free(audio_buf);
av_free(audio_out); av_free(audio_out);
allocated_audio_buf_size= allocated_audio_out_size= 0; allocated_audio_buf_size = allocated_audio_out_size = 0;
#if CONFIG_AVFILTER #if CONFIG_AVFILTER
avfilter_uninit(); avfilter_uninit();
@@ -733,10 +733,10 @@ static void assert_codec_experimental(AVCodecContext *c, int encoder)
static void choose_sample_fmt(AVStream *st, AVCodec *codec) static void choose_sample_fmt(AVStream *st, AVCodec *codec)
{ {
if(codec && codec->sample_fmts){ if (codec && codec->sample_fmts) {
const enum AVSampleFormat *p= codec->sample_fmts; const enum AVSampleFormat *p = codec->sample_fmts;
for(; *p!=-1; p++){ for (; *p != -1; p++) {
if(*p == st->codec->sample_fmt) if (*p == st->codec->sample_fmt)
break; break;
} }
if (*p == -1) { if (*p == -1) {
@@ -792,41 +792,42 @@ static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
static void choose_sample_rate(AVStream *st, AVCodec *codec) static void choose_sample_rate(AVStream *st, AVCodec *codec)
{ {
if(codec && codec->supported_samplerates){ if (codec && codec->supported_samplerates) {
const int *p= codec->supported_samplerates; const int *p = codec->supported_samplerates;
int best=0; int best = 0;
int best_dist=INT_MAX; int best_dist = INT_MAX;
for(; *p; p++){ for (; *p; p++) {
int dist= abs(st->codec->sample_rate - *p); int dist = abs(st->codec->sample_rate - *p);
if(dist < best_dist){ if (dist < best_dist) {
best_dist= dist; best_dist = dist;
best= *p; best = *p;
} }
} }
if(best_dist){ if (best_dist) {
av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best); av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
} }
st->codec->sample_rate= best; st->codec->sample_rate = best;
} }
} }
static void choose_pixel_fmt(AVStream *st, AVCodec *codec) static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
{ {
if(codec && codec->pix_fmts){ if (codec && codec->pix_fmts) {
const enum PixelFormat *p= codec->pix_fmts; const enum PixelFormat *p = codec->pix_fmts;
if(st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL){ if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
if(st->codec->codec_id==CODEC_ID_MJPEG){ if (st->codec->codec_id == CODEC_ID_MJPEG) {
p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE}; p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
}else if(st->codec->codec_id==CODEC_ID_LJPEG){ } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE}; p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
} }
} }
for (; *p != PIX_FMT_NONE; p++) { for (; *p != PIX_FMT_NONE; p++) {
if(*p == st->codec->pix_fmt) if (*p == st->codec->pix_fmt)
break; break;
} }
if (*p == PIX_FMT_NONE) { if (*p == PIX_FMT_NONE) {
if(st->codec->pix_fmt != PIX_FMT_NONE) if (st->codec->pix_fmt != PIX_FMT_NONE)
av_log(NULL, AV_LOG_WARNING, av_log(NULL, AV_LOG_WARNING,
"Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n", "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
av_pix_fmt_descriptors[st->codec->pix_fmt].name, av_pix_fmt_descriptors[st->codec->pix_fmt].name,
@@ -842,22 +843,23 @@ get_sync_ipts(const OutputStream *ost)
{ {
const InputStream *ist = ost->sync_ist; const InputStream *ist = ost->sync_ist;
OutputFile *of = &output_files[ost->file_index]; OutputFile *of = &output_files[ost->file_index];
return (double)(ist->pts - of->start_time)/AV_TIME_BASE; return (double)(ist->pts - of->start_time) / AV_TIME_BASE;
} }
static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc)
{
int ret; int ret;
while(bsfc){ while (bsfc) {
AVPacket new_pkt= *pkt; AVPacket new_pkt = *pkt;
int a= av_bitstream_filter_filter(bsfc, avctx, NULL, int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
&new_pkt.data, &new_pkt.size, &new_pkt.data, &new_pkt.size,
pkt->data, pkt->size, pkt->data, pkt->size,
pkt->flags & AV_PKT_FLAG_KEY); pkt->flags & AV_PKT_FLAG_KEY);
if(a>0){ if (a > 0) {
av_free_packet(pkt); av_free_packet(pkt);
new_pkt.destruct= av_destruct_packet; new_pkt.destruct = av_destruct_packet;
} else if(a<0){ } else if (a < 0) {
av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s", av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
bsfc->filter->name, pkt->stream_index, bsfc->filter->name, pkt->stream_index,
avctx->codec ? avctx->codec->name : "copy"); avctx->codec ? avctx->codec->name : "copy");
@@ -865,13 +867,13 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx
if (exit_on_error) if (exit_on_error)
exit_program(1); exit_program(1);
} }
*pkt= new_pkt; *pkt = new_pkt;
bsfc= bsfc->next; bsfc = bsfc->next;
} }
ret= av_interleaved_write_frame(s, pkt); ret = av_interleaved_write_frame(s, pkt);
if(ret < 0){ if (ret < 0) {
print_error("av_interleaved_write_frame()", ret); print_error("av_interleaved_write_frame()", ret);
exit_program(1); exit_program(1);
} }
@@ -892,8 +894,8 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
int64_t audio_out_size, audio_buf_size; int64_t audio_out_size, audio_buf_size;
int size_out, frame_bytes, ret, resample_changed; int size_out, frame_bytes, ret, resample_changed;
AVCodecContext *enc= ost->st->codec; AVCodecContext *enc = ost->st->codec;
AVCodecContext *dec= ist->st->codec; AVCodecContext *dec = ist->st->codec;
int osize = av_get_bytes_per_sample(enc->sample_fmt); int osize = av_get_bytes_per_sample(enc->sample_fmt);
int isize = av_get_bytes_per_sample(dec->sample_fmt); int isize = av_get_bytes_per_sample(dec->sample_fmt);
const int coded_bps = av_get_bits_per_sample(enc->codec->id); const int coded_bps = av_get_bits_per_sample(enc->codec->id);
@@ -902,25 +904,25 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
int64_t allocated_for_size = size; int64_t allocated_for_size = size;
need_realloc: need_realloc:
audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels); audio_buf_size = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate; audio_buf_size = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API audio_buf_size = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
audio_buf_size= FFMAX(audio_buf_size, enc->frame_size); audio_buf_size = FFMAX(audio_buf_size, enc->frame_size);
audio_buf_size*= osize*enc->channels; audio_buf_size *= osize * enc->channels;
audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels); audio_out_size = FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
if(coded_bps > 8*osize) if (coded_bps > 8 * osize)
audio_out_size= audio_out_size * coded_bps / (8*osize); audio_out_size = audio_out_size * coded_bps / (8*osize);
audio_out_size += FF_MIN_BUFFER_SIZE; audio_out_size += FF_MIN_BUFFER_SIZE;
if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){ if (audio_out_size > INT_MAX || audio_buf_size > INT_MAX) {
av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n"); av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
exit_program(1); exit_program(1);
} }
av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size); av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size); av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
if (!audio_buf || !audio_out){ if (!audio_buf || !audio_out) {
av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n"); av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
exit_program(1); exit_program(1);
} }
@@ -968,8 +970,8 @@ need_realloc:
} }
#define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b)) #define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b))
if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt && if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt &&
MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt)!=ost->reformat_pair) { MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt) != ost->reformat_pair) {
if (ost->reformat_ctx) if (ost->reformat_ctx)
av_audio_convert_free(ost->reformat_ctx); av_audio_convert_free(ost->reformat_ctx);
ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1, ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
@@ -980,45 +982,45 @@ need_realloc:
av_get_sample_fmt_name(enc->sample_fmt)); av_get_sample_fmt_name(enc->sample_fmt));
exit_program(1); exit_program(1);
} }
ost->reformat_pair=MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt); ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
} }
if(audio_sync_method){ if (audio_sync_method) {
double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts -
- av_fifo_size(ost->fifo)/(enc->channels * osize); av_fifo_size(ost->fifo) / (enc->channels * osize);
int idelta = delta * dec->sample_rate / enc->sample_rate; int idelta = delta * dec->sample_rate / enc->sample_rate;
int byte_delta = idelta * isize * dec->channels; int byte_delta = idelta * isize * dec->channels;
//FIXME resample delay // FIXME resample delay
if(fabs(delta) > 50){ if (fabs(delta) > 50) {
if(ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate){ if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) {
if(byte_delta < 0){ if (byte_delta < 0) {
byte_delta= FFMAX(byte_delta, -size); byte_delta = FFMAX(byte_delta, -size);
size += byte_delta; size += byte_delta;
buf -= byte_delta; buf -= byte_delta;
av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n", av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
-byte_delta / (isize * dec->channels)); -byte_delta / (isize * dec->channels));
if(!size) if (!size)
return; return;
ist->is_start=0; ist->is_start = 0;
}else{ } else {
static uint8_t *input_tmp= NULL; static uint8_t *input_tmp = NULL;
input_tmp= av_realloc(input_tmp, byte_delta + size); input_tmp = av_realloc(input_tmp, byte_delta + size);
if(byte_delta > allocated_for_size - size){ if (byte_delta > allocated_for_size - size) {
allocated_for_size= byte_delta + (int64_t)size; allocated_for_size = byte_delta + (int64_t)size;
goto need_realloc; goto need_realloc;
} }
ist->is_start=0; ist->is_start = 0;
generate_silence(input_tmp, dec->sample_fmt, byte_delta); generate_silence(input_tmp, dec->sample_fmt, byte_delta);
memcpy(input_tmp + byte_delta, buf, size); memcpy(input_tmp + byte_delta, buf, size);
buf= input_tmp; buf = input_tmp;
size += byte_delta; size += byte_delta;
av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta); av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
} }
}else if(audio_sync_method>1){ } else if (audio_sync_method > 1) {
int comp= av_clip(delta, -audio_sync_method, audio_sync_method); int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
av_assert0(ost->audio_resample); av_assert0(ost->audio_resample);
av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n", av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
delta, comp, enc->sample_rate); delta, comp, enc->sample_rate);
@@ -1026,9 +1028,9 @@ need_realloc:
av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate); av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
} }
} }
}else } else
ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate) ost->sync_opts = lrintf(get_sync_ipts(ost) * enc->sample_rate) -
- av_fifo_size(ost->fifo)/(enc->channels * osize); //FIXME wrong av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
if (ost->audio_resample) { if (ost->audio_resample) {
buftmp = audio_buf; buftmp = audio_buf;
@@ -1041,20 +1043,20 @@ need_realloc:
size_out = size; size_out = size;
} }
if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt) { if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt) {
const void *ibuf[6]= {buftmp}; const void *ibuf[6] = { buftmp };
void *obuf[6]= {audio_buf}; void *obuf[6] = { audio_buf };
int istride[6]= {isize}; int istride[6] = { isize };
int ostride[6]= {osize}; int ostride[6] = { osize };
int len= size_out/istride[0]; int len = size_out / istride[0];
if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) { if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
printf("av_audio_convert() failed\n"); printf("av_audio_convert() failed\n");
if (exit_on_error) if (exit_on_error)
exit_program(1); exit_program(1);
return; return;
} }
buftmp = audio_buf; buftmp = audio_buf;
size_out = len*osize; size_out = len * osize;
} }
/* now encode as many frames as possible */ /* now encode as many frames as possible */
@@ -1074,7 +1076,7 @@ need_realloc:
av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL); av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
//FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio() // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
ret = avcodec_encode_audio(enc, audio_out, audio_out_size, ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
(short *)audio_buf); (short *)audio_buf);
@@ -1083,11 +1085,11 @@ need_realloc:
exit_program(1); exit_program(1);
} }
audio_size += ret; audio_size += ret;
pkt.stream_index= ost->index; pkt.stream_index = ost->index;
pkt.data= audio_out; pkt.data = audio_out;
pkt.size= ret; pkt.size = ret;
if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY; pkt.flags |= AV_PKT_FLAG_KEY;
write_frame(s, &pkt, enc, ost->bitstream_filters); write_frame(s, &pkt, enc, ost->bitstream_filters);
@@ -1103,14 +1105,14 @@ need_realloc:
/* determine the size of the coded buffer */ /* determine the size of the coded buffer */
size_out /= osize; size_out /= osize;
if (coded_bps) if (coded_bps)
size_out = size_out*coded_bps/8; size_out = size_out * coded_bps / 8;
if(size_out > audio_out_size){ if (size_out > audio_out_size) {
av_log(NULL, AV_LOG_FATAL, "Internal error, buffer size too small\n"); av_log(NULL, AV_LOG_FATAL, "Internal error, buffer size too small\n");
exit_program(1); exit_program(1);
} }
//FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio() // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
ret = avcodec_encode_audio(enc, audio_out, size_out, ret = avcodec_encode_audio(enc, audio_out, size_out,
(short *)buftmp); (short *)buftmp);
if (ret < 0) { if (ret < 0) {
@@ -1118,11 +1120,11 @@ need_realloc:
exit_program(1); exit_program(1);
} }
audio_size += ret; audio_size += ret;
pkt.stream_index= ost->index; pkt.stream_index = ost->index;
pkt.data= audio_out; pkt.data = audio_out;
pkt.size= ret; pkt.size = ret;
if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY; pkt.flags |= AV_PKT_FLAG_KEY;
write_frame(s, &pkt, enc, ost->bitstream_filters); write_frame(s, &pkt, enc, ost->bitstream_filters);
} }
@@ -1150,7 +1152,7 @@ static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void *
picture2 = &picture_tmp; picture2 = &picture_tmp;
avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height); avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
if(avpicture_deinterlace(picture2, picture, if (avpicture_deinterlace(picture2, picture,
dec->pix_fmt, dec->width, dec->height) < 0) { dec->pix_fmt, dec->width, dec->height) < 0) {
/* if error, do not deinterlace */ /* if error, do not deinterlace */
av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n"); av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
@@ -1200,10 +1202,10 @@ static void do_subtitle_out(AVFormatContext *s,
else else
nb = 1; nb = 1;
for(i = 0; i < nb; i++) { for (i = 0; i < nb; i++) {
sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q); sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
// start_display_time is required to be 0 // start_display_time is required to be 0
sub->pts += av_rescale_q(sub->start_display_time, (AVRational){1, 1000}, AV_TIME_BASE_Q); sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
sub->end_display_time -= sub->start_display_time; sub->end_display_time -= sub->start_display_time;
sub->start_display_time = 0; sub->start_display_time = 0;
subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out, subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
@@ -1230,8 +1232,8 @@ static void do_subtitle_out(AVFormatContext *s,
} }
} }
static int bit_buffer_size= 1024*256; static int bit_buffer_size = 1024 * 256;
static uint8_t *bit_buffer= NULL; static uint8_t *bit_buffer = NULL;
static void do_video_resample(OutputStream *ost, static void do_video_resample(OutputStream *ost,
InputStream *ist, InputStream *ist,
@@ -1252,7 +1254,7 @@ static void do_video_resample(OutputStream *ost,
ist->file_index, ist->st->index, ist->file_index, ist->st->index,
ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt), ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt)); dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt));
if(!ost->video_resample) if (!ost->video_resample)
ost->video_resample = 1; ost->video_resample = 1;
} }
@@ -1322,26 +1324,26 @@ static void do_video_out(AVFormatContext *s,
if (format_video_sync) { if (format_video_sync) {
double vdelta = sync_ipts - ost->sync_opts; double vdelta = sync_ipts - ost->sync_opts;
//FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
if (vdelta < -1.1) if (vdelta < -1.1)
nb_frames = 0; nb_frames = 0;
else if (format_video_sync == 2) { else if (format_video_sync == 2) {
if(vdelta<=-0.6){ if (vdelta <= -0.6) {
nb_frames=0; nb_frames = 0;
}else if(vdelta>0.6) } else if (vdelta > 0.6)
ost->sync_opts= lrintf(sync_ipts); ost->sync_opts = lrintf(sync_ipts);
}else if (vdelta > 1.1) } else if (vdelta > 1.1)
nb_frames = lrintf(vdelta); nb_frames = lrintf(vdelta);
//fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames); //fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
if (nb_frames == 0){ if (nb_frames == 0) {
++nb_frames_drop; ++nb_frames_drop;
av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n"); av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
}else if (nb_frames > 1) { } else if (nb_frames > 1) {
nb_frames_dup += nb_frames - 1; nb_frames_dup += nb_frames - 1;
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames-1); av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
} }
}else } else
ost->sync_opts= lrintf(sync_ipts); ost->sync_opts = lrintf(sync_ipts);
nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number); nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
if (nb_frames <= 0) if (nb_frames <= 0)
@@ -1350,10 +1352,10 @@ static void do_video_out(AVFormatContext *s,
do_video_resample(ost, ist, in_picture, &final_picture); do_video_resample(ost, ist, in_picture, &final_picture);
/* duplicates frame if needed */ /* duplicates frame if needed */
for(i=0;i<nb_frames;i++) { for (i = 0; i < nb_frames; i++) {
AVPacket pkt; AVPacket pkt;
av_init_packet(&pkt); av_init_packet(&pkt);
pkt.stream_index= ost->index; pkt.stream_index = ost->index;
if (s->oformat->flags & AVFMT_RAWPICTURE && if (s->oformat->flags & AVFMT_RAWPICTURE &&
enc->codec->id == CODEC_ID_RAWVIDEO) { enc->codec->id == CODEC_ID_RAWVIDEO) {
@@ -1362,16 +1364,16 @@ static void do_video_out(AVFormatContext *s,
method. */ method. */
enc->coded_frame->interlaced_frame = in_picture->interlaced_frame; enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
enc->coded_frame->top_field_first = in_picture->top_field_first; enc->coded_frame->top_field_first = in_picture->top_field_first;
pkt.data= (uint8_t *)final_picture; pkt.data = (uint8_t *)final_picture;
pkt.size= sizeof(AVPicture); pkt.size = sizeof(AVPicture);
pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base); pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY; pkt.flags |= AV_PKT_FLAG_KEY;
write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters); write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
} else { } else {
AVFrame big_picture; AVFrame big_picture;
big_picture= *final_picture; big_picture = *final_picture;
/* better than nothing: use input picture interlaced /* better than nothing: use input picture interlaced
settings */ settings */
big_picture.interlaced_frame = in_picture->interlaced_frame; big_picture.interlaced_frame = in_picture->interlaced_frame;
@@ -1388,9 +1390,9 @@ static void do_video_out(AVFormatContext *s,
if (!enc->me_threshold) if (!enc->me_threshold)
big_picture.pict_type = 0; big_picture.pict_type = 0;
// big_picture.pts = AV_NOPTS_VALUE; // big_picture.pts = AV_NOPTS_VALUE;
big_picture.pts= ost->sync_opts; big_picture.pts = ost->sync_opts;
// big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den); // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts); // av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
if (ost->forced_kf_index < ost->forced_kf_count && if (ost->forced_kf_index < ost->forced_kf_count &&
big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) { big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
big_picture.pict_type = AV_PICTURE_TYPE_I; big_picture.pict_type = AV_PICTURE_TYPE_I;
@@ -1404,21 +1406,21 @@ static void do_video_out(AVFormatContext *s,
exit_program(1); exit_program(1);
} }
if(ret>0){ if (ret > 0) {
pkt.data= bit_buffer; pkt.data = bit_buffer;
pkt.size= ret; pkt.size = ret;
if(enc->coded_frame->pts != AV_NOPTS_VALUE) if (enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
/*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n", /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1, pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/ pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
if(enc->coded_frame->key_frame) if (enc->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY; pkt.flags |= AV_PKT_FLAG_KEY;
write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters); write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
*frame_size = ret; *frame_size = ret;
video_size += ret; video_size += ret;
//fprintf(stderr,"\nFrame: %3d size: %5d type: %d", // fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
// enc->frame_number-1, ret, enc->pict_type); // enc->frame_number-1, ret, enc->pict_type);
/* if two pass, output log */ /* if two pass, output log */
if (ost->logfile && enc->stats_out) { if (ost->logfile && enc->stats_out) {
@@ -1431,8 +1433,9 @@ static void do_video_out(AVFormatContext *s,
} }
} }
static double psnr(double d){ static double psnr(double d)
return -10.0*log(d)/log(10.0); {
return -10.0 * log(d) / log(10.0);
} }
static void do_video_stats(AVFormatContext *os, OutputStream *ost, static void do_video_stats(AVFormatContext *os, OutputStream *ost,
@@ -1454,9 +1457,9 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
enc = ost->st->codec; enc = ost->st->codec;
if (enc->codec_type == AVMEDIA_TYPE_VIDEO) { if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
frame_number = ost->frame_number; frame_number = ost->frame_number;
fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA); fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
if (enc->flags&CODEC_FLAG_PSNR) if (enc->flags&CODEC_FLAG_PSNR)
fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0))); fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
fprintf(vstats_file,"f_size= %6d ", frame_size); fprintf(vstats_file,"f_size= %6d ", frame_size);
/* compute pts value */ /* compute pts value */
@@ -1506,57 +1509,58 @@ static void print_report(OutputFile *output_files,
oc = output_files[0].ctx; oc = output_files[0].ctx;
total_size = avio_size(oc->pb); total_size = avio_size(oc->pb);
if(total_size<0) // FIXME improve avio_size() so it works with non seekable output too if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
total_size= avio_tell(oc->pb); total_size = avio_tell(oc->pb);
buf[0] = '\0'; buf[0] = '\0';
ti1 = 1e10; ti1 = 1e10;
vid = 0; vid = 0;
for(i=0;i<nb_ostreams;i++) { for (i = 0; i < nb_ostreams; i++) {
float q = -1; float q = -1;
ost = &ost_table[i]; ost = &ost_table[i];
enc = ost->st->codec; enc = ost->st->codec;
if (!ost->stream_copy && enc->coded_frame) if (!ost->stream_copy && enc->coded_frame)
q = enc->coded_frame->quality/(float)FF_QP2LAMBDA; q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
} }
if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
float t = (av_gettime()-timer_start) / 1000000.0; float t = (av_gettime() - timer_start) / 1000000.0;
frame_number = ost->frame_number; frame_number = ost->frame_number;
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ", snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
frame_number, (t>1)?(int)(frame_number/t+0.5) : 0, q); frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
if(is_last_report) if (is_last_report)
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L"); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
if(qp_hist){ if (qp_hist) {
int j; int j;
int qp = lrintf(q); int qp = lrintf(q);
if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram)) if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
qp_histogram[qp]++; qp_histogram[qp]++;
for(j=0; j<32; j++) for (j = 0; j < 32; j++)
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2))); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
} }
if (enc->flags&CODEC_FLAG_PSNR){ if (enc->flags&CODEC_FLAG_PSNR) {
int j; int j;
double error, error_sum=0; double error, error_sum = 0;
double scale, scale_sum=0; double scale, scale_sum = 0;
char type[3]= {'Y','U','V'}; char type[3] = { 'Y','U','V' };
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR="); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
for(j=0; j<3; j++){ for (j = 0; j < 3; j++) {
if(is_last_report){ if (is_last_report) {
error= enc->error[j]; error = enc->error[j];
scale= enc->width*enc->height*255.0*255.0*frame_number; scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
}else{ } else {
error= enc->coded_frame->error[j]; error = enc->coded_frame->error[j];
scale= enc->width*enc->height*255.0*255.0; scale = enc->width * enc->height * 255.0 * 255.0;
} }
if(j) scale/=4; if (j)
scale /= 4;
error_sum += error; error_sum += error;
scale_sum += scale; scale_sum += scale;
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale)); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
} }
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum)); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
} }
vid = 1; vid = 1;
} }
@@ -1586,10 +1590,10 @@ static void print_report(OutputFile *output_files,
int64_t raw= audio_size + video_size + extra_size; int64_t raw= audio_size + video_size + extra_size;
av_log(NULL, AV_LOG_INFO, "\n"); av_log(NULL, AV_LOG_INFO, "\n");
av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n", av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
video_size/1024.0, video_size / 1024.0,
audio_size/1024.0, audio_size / 1024.0,
extra_size/1024.0, extra_size / 1024.0,
100.0*(total_size - raw)/raw 100.0 * (total_size - raw) / raw
); );
} }
} }
@@ -1606,16 +1610,16 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
if (!ost->encoding_needed) if (!ost->encoding_needed)
continue; continue;
if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1) if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
continue; continue;
if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO) if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
continue; continue;
for(;;) { for (;;) {
AVPacket pkt; AVPacket pkt;
int fifo_bytes; int fifo_bytes;
av_init_packet(&pkt); av_init_packet(&pkt);
pkt.stream_index= ost->index; pkt.stream_index = ost->index;
switch (ost->st->codec->codec_type) { switch (ost->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
@@ -1658,14 +1662,14 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
exit_program(1); exit_program(1);
} }
video_size += ret; video_size += ret;
if(enc->coded_frame && enc->coded_frame->key_frame) if (enc->coded_frame && enc->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY; pkt.flags |= AV_PKT_FLAG_KEY;
if (ost->logfile && enc->stats_out) { if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out); fprintf(ost->logfile, "%s", enc->stats_out);
} }
break; break;
default: default:
ret=-1; ret = -1;
} }
if (ret <= 0) if (ret <= 0)
@@ -1673,7 +1677,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
pkt.data = bit_buffer; pkt.data = bit_buffer;
pkt.size = ret; pkt.size = ret;
if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters); write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
} }
} }
@@ -1695,7 +1699,7 @@ static int check_output_constraints(InputStream *ist, OutputStream *ost)
if (of->recording_time != INT64_MAX && if (of->recording_time != INT64_MAX &&
av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time, av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
(AVRational){1, 1000000}) >= 0) { (AVRational){ 1, 1000000 }) >= 0) {
ost->is_past_recording_time = 1; ost->is_past_recording_time = 1;
return 0; return 0;
} }
@@ -1738,8 +1742,8 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base); opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
opkt.flags = pkt->flags; opkt.flags = pkt->flags;
//FIXME remove the following 2 lines they shall be replaced by the bitstream filters // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
if( ost->st->codec->codec_id != CODEC_ID_H264 if ( ost->st->codec->codec_id != CODEC_ID_H264
&& ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
&& ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
) { ) {
@@ -2028,12 +2032,12 @@ static int output_packet(InputStream *ist,
avpkt = *pkt; avpkt = *pkt;
} }
if(pkt->dts != AV_NOPTS_VALUE) if (pkt->dts != AV_NOPTS_VALUE)
ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
if(pkt->pts != AV_NOPTS_VALUE) if (pkt->pts != AV_NOPTS_VALUE)
pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
//while we have more to decode or while the decoder did output something on EOF // while we have more to decode or while the decoder did output something on EOF
while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) { while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
int ret = 0; int ret = 0;
handle_eof: handle_eof:
@@ -2046,7 +2050,7 @@ static int output_packet(InputStream *ist,
ist->showed_multi_packet_warning = 1; ist->showed_multi_packet_warning = 1;
} }
switch(ist->st->codec->codec_type) { switch (ist->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
ret = transcode_audio (ist, &avpkt, &got_output); ret = transcode_audio (ist, &avpkt, &got_output);
break; break;
@@ -2083,7 +2087,7 @@ static int output_packet(InputStream *ist,
break; break;
case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_VIDEO:
if (ist->st->codec->time_base.num != 0) { if (ist->st->codec->time_base.num != 0) {
int ticks = ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame; int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
ist->next_pts += ((int64_t)AV_TIME_BASE * ist->next_pts += ((int64_t)AV_TIME_BASE *
ist->st->codec->time_base.num * ticks) / ist->st->codec->time_base.num * ticks) /
ist->st->codec->time_base.den; ist->st->codec->time_base.den;
@@ -2107,7 +2111,7 @@ static void print_sdp(OutputFile *output_files, int n)
{ {
char sdp[2048]; char sdp[2048];
int i; int i;
AVFormatContext **avc = av_malloc(sizeof(*avc)*n); AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
if (!avc) if (!avc)
exit_program(1); exit_program(1);
@@ -2158,7 +2162,7 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb
assert_avoptions(ist->opts); assert_avoptions(ist->opts);
} }
ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames*AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
ist->next_pts = AV_NOPTS_VALUE; ist->next_pts = AV_NOPTS_VALUE;
init_pts_correction(&ist->pts_ctx); init_pts_correction(&ist->pts_ctx);
ist->is_start = 1; ist->is_start = 1;
@@ -2249,9 +2253,9 @@ static int transcode_init(OutputFile *output_files,
} else } else
codec->time_base = ist->st->time_base; codec->time_base = ist->st->time_base;
switch(codec->codec_type) { switch (codec->codec_type) {
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
if(audio_volume != 256) { if (audio_volume != 256) {
av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n"); av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
exit_program(1); exit_program(1);
} }
@@ -2292,7 +2296,7 @@ static int transcode_init(OutputFile *output_files,
ist->decoding_needed = 1; ist->decoding_needed = 1;
ost->encoding_needed = 1; ost->encoding_needed = 1;
switch(codec->codec_type) { switch (codec->codec_type) {
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
ost->fifo = av_fifo_alloc(1024); ost->fifo = av_fifo_alloc(1024);
if (!ost->fifo) { if (!ost->fifo) {
@@ -2303,7 +2307,7 @@ static int transcode_init(OutputFile *output_files,
if (!codec->sample_rate) if (!codec->sample_rate)
codec->sample_rate = icodec->sample_rate; codec->sample_rate = icodec->sample_rate;
choose_sample_rate(ost->st, ost->enc); choose_sample_rate(ost->st, ost->enc);
codec->time_base = (AVRational){1, codec->sample_rate}; codec->time_base = (AVRational){ 1, codec->sample_rate };
if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
codec->sample_fmt = icodec->sample_fmt; codec->sample_fmt = icodec->sample_fmt;
@@ -2342,7 +2346,7 @@ static int transcode_init(OutputFile *output_files,
if (ost->video_resample) { if (ost->video_resample) {
#if !CONFIG_AVFILTER #if !CONFIG_AVFILTER
avcodec_get_frame_defaults(&ost->pict_tmp); avcodec_get_frame_defaults(&ost->pict_tmp);
if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt, if (avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
codec->width, codec->height)) { codec->width, codec->height)) {
av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n"); av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n");
exit_program(1); exit_program(1);
@@ -2360,7 +2364,7 @@ static int transcode_init(OutputFile *output_files,
exit_program(1); exit_program(1);
} }
#endif #endif
codec->bits_per_raw_sample= 0; codec->bits_per_raw_sample = 0;
} }
ost->resample_height = icodec->height; ost->resample_height = icodec->height;
@@ -2368,7 +2372,7 @@ static int transcode_init(OutputFile *output_files,
ost->resample_pix_fmt = icodec->pix_fmt; ost->resample_pix_fmt = icodec->pix_fmt;
if (!ost->frame_rate.num) if (!ost->frame_rate.num)
ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25,1}; ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational) { 25, 1 };
if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) { if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
ost->frame_rate = ost->enc->supported_framerates[idx]; ost->frame_rate = ost->enc->supported_framerates[idx];
@@ -2416,9 +2420,9 @@ static int transcode_init(OutputFile *output_files,
} }
} }
} }
if(codec->codec_type == AVMEDIA_TYPE_VIDEO){ if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
int size = codec->width * codec->height; int size = codec->width * codec->height;
bit_buffer_size = FFMAX(bit_buffer_size, 6*size + 200); bit_buffer_size = FFMAX(bit_buffer_size, 6 * size + 200);
} }
} }
@@ -2566,7 +2570,7 @@ static int transcode(OutputFile *output_files,
OutputStream *ost; OutputStream *ost;
InputStream *ist; InputStream *ist;
uint8_t *no_packet; uint8_t *no_packet;
int no_packet_count=0; int no_packet_count = 0;
int64_t timer_start; int64_t timer_start;
if (!(no_packet = av_mallocz(nb_input_files))) if (!(no_packet = av_mallocz(nb_input_files)))
@@ -2581,14 +2585,14 @@ static int transcode(OutputFile *output_files,
timer_start = av_gettime(); timer_start = av_gettime();
for(; received_sigterm == 0;) { for (; received_sigterm == 0;) {
int file_index, ist_index; int file_index, ist_index;
AVPacket pkt; AVPacket pkt;
int64_t ipts_min; int64_t ipts_min;
double opts_min; double opts_min;
ipts_min = INT64_MAX; ipts_min = INT64_MAX;
opts_min= 1e100; opts_min = 1e100;
/* select the stream that we must read now by looking at the /* select the stream that we must read now by looking at the
smallest output pts */ smallest output pts */
@@ -2606,14 +2610,15 @@ static int transcode(OutputFile *output_files,
continue; continue;
opts = ost->st->pts.val * av_q2d(ost->st->time_base); opts = ost->st->pts.val * av_q2d(ost->st->time_base);
ipts = ist->pts; ipts = ist->pts;
if (!input_files[ist->file_index].eof_reached){ if (!input_files[ist->file_index].eof_reached) {
if(ipts < ipts_min) { if (ipts < ipts_min) {
ipts_min = ipts; ipts_min = ipts;
if(input_sync ) file_index = ist->file_index; if (input_sync)
file_index = ist->file_index;
} }
if(opts < opts_min) { if (opts < opts_min) {
opts_min = opts; opts_min = opts;
if(!input_sync) file_index = ist->file_index; if (!input_sync) file_index = ist->file_index;
} }
} }
if (ost->frame_number >= ost->max_frames) { if (ost->frame_number >= ost->max_frames) {
@@ -2625,8 +2630,8 @@ static int transcode(OutputFile *output_files,
} }
/* if none, if is finished */ /* if none, if is finished */
if (file_index < 0) { if (file_index < 0) {
if(no_packet_count){ if (no_packet_count) {
no_packet_count=0; no_packet_count = 0;
memset(no_packet, 0, nb_input_files); memset(no_packet, 0, nb_input_files);
usleep(10000); usleep(10000);
continue; continue;
@@ -2636,9 +2641,9 @@ static int transcode(OutputFile *output_files,
/* read a frame from it and output it in the fifo */ /* read a frame from it and output it in the fifo */
is = input_files[file_index].ctx; is = input_files[file_index].ctx;
ret= av_read_frame(is, &pkt); ret = av_read_frame(is, &pkt);
if(ret == AVERROR(EAGAIN)){ if (ret == AVERROR(EAGAIN)) {
no_packet[file_index]=1; no_packet[file_index] = 1;
no_packet_count++; no_packet_count++;
continue; continue;
} }
@@ -2650,7 +2655,7 @@ static int transcode(OutputFile *output_files,
continue; continue;
} }
no_packet_count=0; no_packet_count = 0;
memset(no_packet, 0, nb_input_files); memset(no_packet, 0, nb_input_files);
if (do_pkt_dump) { if (do_pkt_dump) {
@@ -2671,27 +2676,31 @@ static int transcode(OutputFile *output_files,
if (pkt.pts != AV_NOPTS_VALUE) if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if(pkt.pts != AV_NOPTS_VALUE) if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts *= ist->ts_scale; pkt.pts *= ist->ts_scale;
if(pkt.dts != AV_NOPTS_VALUE) if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts *= ist->ts_scale; pkt.dts *= ist->ts_scale;
// fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type); //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
// ist->next_pts,
// pkt.dts, input_files[ist->file_index].ts_offset,
// ist->st->codec->codec_type);
if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
&& (is->iformat->flags & AVFMT_TS_DISCONT)) { && (is->iformat->flags & AVFMT_TS_DISCONT)) {
int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
int64_t delta= pkt_dts - ist->next_pts; int64_t delta = pkt_dts - ist->next_pts;
if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){ if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->pts) && !copy_ts) {
input_files[ist->file_index].ts_offset -= delta; input_files[ist->file_index].ts_offset -= delta;
av_log(NULL, AV_LOG_DEBUG, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", av_log(NULL, AV_LOG_DEBUG,
"timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
delta, input_files[ist->file_index].ts_offset); delta, input_files[ist->file_index].ts_offset);
pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if(pkt.pts != AV_NOPTS_VALUE) if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
} }
} }
//fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size); // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) { if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n", av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
@@ -2721,7 +2730,7 @@ static int transcode(OutputFile *output_files,
term_exit(); term_exit();
/* write the trailer if needed and close file */ /* write the trailer if needed and close file */
for(i=0;i<nb_output_files;i++) { for (i = 0; i < nb_output_files; i++) {
os = output_files[i].ctx; os = output_files[i].ctx;
av_write_trailer(os); av_write_trailer(os);
} }
@@ -2795,7 +2804,7 @@ static double parse_frame_aspect_ratio(const char *arg)
if (p) { if (p) {
x = strtol(arg, &end, 10); x = strtol(arg, &end, 10);
if (end == p) if (end == p)
y = strtol(end+1, &end, 10); y = strtol(end + 1, &end, 10);
if (x > 0 && y > 0) if (x > 0 && y > 0)
ar = (double)x / (double)y; ar = (double)x / (double)y;
} else } else
@@ -3033,11 +3042,11 @@ static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int e
codec = encoder ? codec = encoder ?
avcodec_find_encoder_by_name(name) : avcodec_find_encoder_by_name(name) :
avcodec_find_decoder_by_name(name); avcodec_find_decoder_by_name(name);
if(!codec) { if (!codec) {
av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name); av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
exit_program(1); exit_program(1);
} }
if(codec->type != type) { if (codec->type != type) {
av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name); av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
exit_program(1); exit_program(1);
} }
@@ -3085,7 +3094,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
switch (dec->codec_type) { switch (dec->codec_type) {
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
if (o->audio_disable) if (o->audio_disable)
st->discard= AVDISCARD_ALL; st->discard = AVDISCARD_ALL;
break; break;
case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_VIDEO:
rfps = ic->streams[i]->r_frame_rate.num; rfps = ic->streams[i]->r_frame_rate.num;
@@ -3096,7 +3105,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
dec->width >>= dec->lowres; dec->width >>= dec->lowres;
} }
if (dec->time_base.den != rfps*dec->ticks_per_frame || dec->time_base.num != rfps_base) { if (dec->time_base.den != rfps * dec->ticks_per_frame || dec->time_base.num != rfps_base) {
av_log(NULL, AV_LOG_INFO,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n", av_log(NULL, AV_LOG_INFO,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num, i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num,
...@@ -3104,9 +3113,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic) ...@@ -3104,9 +3113,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
} }
if (o->video_disable) if (o->video_disable)
st->discard= AVDISCARD_ALL; st->discard = AVDISCARD_ALL;
else if(video_discard) else if (video_discard)
st->discard= video_discard; st->discard = video_discard;
break; break;
case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_DATA:
break; break;
...@@ -3486,12 +3495,12 @@ static void parse_matrix_coeffs(uint16_t *dest, const char *str) ...@@ -3486,12 +3495,12 @@ static void parse_matrix_coeffs(uint16_t *dest, const char *str)
{ {
int i; int i;
const char *p = str; const char *p = str;
for(i = 0;; i++) { for (i = 0;; i++) {
dest[i] = atoi(p); dest[i] = atoi(p);
if(i == 63) if (i == 63)
break; break;
p = strchr(p, ','); p = strchr(p, ',');
if(!p) { if (!p) {
av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i); av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
exit_program(1); exit_program(1);
} }
...@@ -3557,33 +3566,33 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc) ...@@ -3557,33 +3566,33 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
} }
MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st); MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
for(i=0; p; i++){ for (i = 0; p; i++) {
int start, end, q; int start, end, q;
int e=sscanf(p, "%d,%d,%d", &start, &end, &q); int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
if(e!=3){ if (e != 3) {
av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n"); av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
exit_program(1); exit_program(1);
} }
video_enc->rc_override= video_enc->rc_override =
av_realloc(video_enc->rc_override, av_realloc(video_enc->rc_override,
sizeof(RcOverride)*(i+1)); sizeof(RcOverride) * (i + 1));
video_enc->rc_override[i].start_frame= start; video_enc->rc_override[i].start_frame = start;
video_enc->rc_override[i].end_frame = end; video_enc->rc_override[i].end_frame = end;
if(q>0){ if (q > 0) {
video_enc->rc_override[i].qscale= q; video_enc->rc_override[i].qscale = q;
video_enc->rc_override[i].quality_factor= 1.0; video_enc->rc_override[i].quality_factor = 1.0;
} }
else{ else {
video_enc->rc_override[i].qscale= 0; video_enc->rc_override[i].qscale = 0;
video_enc->rc_override[i].quality_factor= -q/100.0; video_enc->rc_override[i].quality_factor = -q/100.0;
} }
p= strchr(p, '/'); p = strchr(p, '/');
if(p) p++; if (p) p++;
} }
video_enc->rc_override_count=i; video_enc->rc_override_count = i;
if (!video_enc->rc_initial_buffer_occupancy) if (!video_enc->rc_initial_buffer_occupancy)
video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4; video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
video_enc->intra_dc_precision= intra_dc_precision - 8; video_enc->intra_dc_precision = intra_dc_precision - 8;
/* two pass mode */ /* two pass mode */
if (do_pass) { if (do_pass) {
...@@ -3734,7 +3743,7 @@ static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata) ...@@ -3734,7 +3743,7 @@ static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
av_dict_copy(&out_ch->metadata, in_ch->metadata, 0); av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
os->nb_chapters++; os->nb_chapters++;
os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters); os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
if (!os->chapters) if (!os->chapters)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
os->chapters[os->nb_chapters - 1] = out_ch; os->chapters[os->nb_chapters - 1] = out_ch;
...@@ -4113,36 +4122,36 @@ static void show_help(void) ...@@ -4113,36 +4122,36 @@ static void show_help(void)
static int opt_target(OptionsContext *o, const char *opt, const char *arg) static int opt_target(OptionsContext *o, const char *opt, const char *arg)
{ {
enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN; enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
static const char *const frame_rates[] = {"25", "30000/1001", "24000/1001"}; static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
if(!strncmp(arg, "pal-", 4)) { if (!strncmp(arg, "pal-", 4)) {
norm = PAL; norm = PAL;
arg += 4; arg += 4;
} else if(!strncmp(arg, "ntsc-", 5)) { } else if (!strncmp(arg, "ntsc-", 5)) {
norm = NTSC; norm = NTSC;
arg += 5; arg += 5;
} else if(!strncmp(arg, "film-", 5)) { } else if (!strncmp(arg, "film-", 5)) {
norm = FILM; norm = FILM;
arg += 5; arg += 5;
} else { } else {
/* Try to determine PAL/NTSC by peeking in the input files */ /* Try to determine PAL/NTSC by peeking in the input files */
if(nb_input_files) { if (nb_input_files) {
int i, j, fr; int i, j, fr;
for (j = 0; j < nb_input_files; j++) { for (j = 0; j < nb_input_files; j++) {
for (i = 0; i < input_files[j].nb_streams; i++) { for (i = 0; i < input_files[j].nb_streams; i++) {
AVCodecContext *c = input_files[j].ctx->streams[i]->codec; AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
if(c->codec_type != AVMEDIA_TYPE_VIDEO) if (c->codec_type != AVMEDIA_TYPE_VIDEO)
continue; continue;
fr = c->time_base.den * 1000 / c->time_base.num; fr = c->time_base.den * 1000 / c->time_base.num;
if(fr == 25000) { if (fr == 25000) {
norm = PAL; norm = PAL;
break; break;
} else if((fr == 29970) || (fr == 23976)) { } else if ((fr == 29970) || (fr == 23976)) {
norm = NTSC; norm = NTSC;
break; break;
} }
} }
if(norm != UNKNOWN) if (norm != UNKNOWN)
break; break;
} }
} }
...@@ -4150,14 +4159,14 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg) ...@@ -4150,14 +4159,14 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC"); av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
} }
if(norm == UNKNOWN) { if (norm == UNKNOWN) {
av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n"); av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n"); av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n"); av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
exit_program(1); exit_program(1);
} }
if(!strcmp(arg, "vcd")) { if (!strcmp(arg, "vcd")) {
opt_video_codec(o, "c:v", "mpeg1video"); opt_video_codec(o, "c:v", "mpeg1video");
opt_audio_codec(o, "c:a", "mp2"); opt_audio_codec(o, "c:a", "mp2");
parse_option(o, "f", "vcd", options); parse_option(o, "f", "vcd", options);
...@@ -4183,8 +4192,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg) ...@@ -4183,8 +4192,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
and the first pack from the other stream, respectively, may also have and the first pack from the other stream, respectively, may also have
been written before. been written before.
So the real data starts at SCR 36000+3*1200. */ So the real data starts at SCR 36000+3*1200. */
o->mux_preload = (36000+3*1200) / 90000.0; //0.44 o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
} else if(!strcmp(arg, "svcd")) { } else if (!strcmp(arg, "svcd")) {
opt_video_codec(o, "c:v", "mpeg2video"); opt_video_codec(o, "c:v", "mpeg2video");
opt_audio_codec(o, "c:a", "mp2"); opt_audio_codec(o, "c:a", "mp2");
...@@ -4196,8 +4205,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg) ...@@ -4196,8 +4205,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
opt_default("b", "2040000"); opt_default("b", "2040000");
opt_default("maxrate", "2516000"); opt_default("maxrate", "2516000");
opt_default("minrate", "0"); //1145000; opt_default("minrate", "0"); // 1145000;
opt_default("bufsize", "1835008"); //224*1024*8; opt_default("bufsize", "1835008"); // 224*1024*8;
opt_default("flags", "+scan_offset"); opt_default("flags", "+scan_offset");
...@@ -4206,7 +4215,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg) ...@@ -4206,7 +4215,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
opt_default("packetsize", "2324"); opt_default("packetsize", "2324");
} else if(!strcmp(arg, "dvd")) { } else if (!strcmp(arg, "dvd")) {
opt_video_codec(o, "c:v", "mpeg2video"); opt_video_codec(o, "c:v", "mpeg2video");
opt_audio_codec(o, "c:a", "ac3"); opt_audio_codec(o, "c:a", "ac3");
...@@ -4218,8 +4227,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg) ...@@ -4218,8 +4227,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
opt_default("b", "6000000"); opt_default("b", "6000000");
opt_default("maxrate", "9000000"); opt_default("maxrate", "9000000");
opt_default("minrate", "0"); //1500000; opt_default("minrate", "0"); // 1500000;
opt_default("bufsize", "1835008"); //224*1024*8; opt_default("bufsize", "1835008"); // 224*1024*8;
opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack. opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8 opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
...@@ -4227,7 +4236,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg) ...@@ -4227,7 +4236,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
opt_default("b:a", "448000"); opt_default("b:a", "448000");
parse_option(o, "ar", "48000", options); parse_option(o, "ar", "48000", options);
} else if(!strncmp(arg, "dv", 2)) { } else if (!strncmp(arg, "dv", 2)) {
parse_option(o, "f", "dv", options); parse_option(o, "f", "dv", options);
...@@ -4249,7 +4258,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg) ...@@ -4249,7 +4258,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
static int opt_vstats_file(const char *opt, const char *arg) static int opt_vstats_file(const char *opt, const char *arg)
{ {
av_free (vstats_filename); av_free (vstats_filename);
vstats_filename=av_strdup (arg); vstats_filename = av_strdup (arg);
return 0; return 0;
} }
...@@ -4437,7 +4446,7 @@ int main(int argc, char **argv) ...@@ -4437,7 +4446,7 @@ int main(int argc, char **argv)
/* parse options */ /* parse options */
parse_options(&o, argc, argv, options, opt_output_file); parse_options(&o, argc, argv, options, opt_output_file);
if(nb_output_files <= 0 && nb_input_files == 0) { if (nb_output_files <= 0 && nb_input_files == 0) {
show_usage(); show_usage();
av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name); av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
exit_program(1); exit_program(1);