/*
 * avconv main
 * Copyright (c) 2000-2011 The libav developers.
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <ctype.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <limits.h>
#include <stdint.h>

#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavresample/avresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/fifo.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavutil/time.h"
#include "libavformat/os_support.h"

# include "libavfilter/avfilter.h"
# include "libavfilter/buffersrc.h"
# include "libavfilter/buffersink.h"

#if HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#elif HAVE_GETPROCESSTIMES
#include <windows.h>
#endif
#if HAVE_GETPROCESSMEMORYINFO
#include <windows.h>
#include <psapi.h>
#endif

#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#if HAVE_PTHREADS
#include <pthread.h>
#endif

#include <time.h>

#include "avconv.h"
#include "cmdutils.h"

#include "libavutil/avassert.h"

const char program_name[] = "avconv";
const int program_birth_year = 2000;

static FILE *vstats_file;

static int nb_frames_drop = 0;



#if HAVE_PTHREADS
/* signal to input threads that they should exit; set by the main thread */
static int transcoding_finished;
#endif

InputStream **input_streams = NULL;
int        nb_input_streams = 0;
InputFile   **input_files   = NULL;
int        nb_input_files   = 0;

OutputStream **output_streams = NULL;
int         nb_output_streams = 0;
OutputFile   **output_files   = NULL;
int         nb_output_files   = 0;

FilterGraph **filtergraphs;
int        nb_filtergraphs;

static void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "");
}

static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;

static void
sigterm_handler(int sig)
{
    received_sigterm = sig;
    received_nb_signals++;
    term_exit();
}

static void term_init(void)
{
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
}

static int decode_interrupt_cb(void *ctx)
{
    return received_nb_signals > 1;
}

const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };

static void avconv_cleanup(int ret)
{
    int i, j;

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
            avio_close(s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
        while (bsfc) {
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
            bsfc = next;
        }
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        avcodec_free_context(&ost->enc_ctx);

        av_freep(&output_streams[i]);
    }
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file)
        fclose(vstats_file);
    av_free(vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
               (int) received_sigterm);
        exit (255);
    }
}

void assert_avoptions(AVDictionary *m)
{
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}

static void abort_codec_experimental(AVCodec *c, int encoder)
{
    const char *codec_string = encoder ? "encoder" : "decoder";
    AVCodec *codec;
    av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
            "results.\nAdd '-strict experimental' if you want to use it.\n",
            codec_string, c->name);
    codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
    if (!(codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
        av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
               codec_string, codec->name);
    exit_program(1);
}

static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
{
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    AVCodecContext          *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
    int ret;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_free_packet(pkt);
            return;
        }
        ost->frame_number++;
    }
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
                                              NULL);
        ost->quality = sd ? *(int *)sd : -1;
    }

    while (bsfc) {
        AVPacket new_pkt = *pkt;
        int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
        if (a > 0) {
            av_free_packet(pkt);
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                           av_buffer_default_free, NULL, 0);
            if (!new_pkt.buf)
                exit_program(1);
        } else if (a < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");
            print_error("", a);
            if (exit_on_error)
                exit_program(1);
        }
        *pkt = new_pkt;

        bsfc = bsfc->next;
    }

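    /* A note on the check below (added comment, not from the original source):
     * the muxer needs monotonically increasing DTS (strictly increasing unless
     * AVFMT_TS_NONSTRICT is set), so the DTS is clamped here with a warning
     * rather than letting av_interleaved_write_frame() fail later. */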
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
        ost->last_mux_dts != AV_NOPTS_VALUE &&
        pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
        av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
               "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
               ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
        if (exit_on_error) {
            av_log(NULL, AV_LOG_FATAL, "aborting.\n");
            exit_program(1);
        }
        av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
               "in incorrect timestamps in the output file.\n",
               ost->last_mux_dts + 1);
        pkt->dts = ost->last_mux_dts + 1;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts = FFMAX(pkt->pts, pkt->dts);
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;
    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        exit_program(1);
    }
}

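/* Added comment: returns 0 and marks the stream finished once the stream's
 * elapsed output duration (in the encoder time base) reaches the requested
 * recording time (kept in AV_TIME_BASE units, i.e. microseconds); returns 1
 * while encoding should continue. */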
static int check_recording_time(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        ost->finished = 1;
        return 0;
    }
    return 1;
}

static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int got_packet = 0;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;

    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
        exit_program(1);
    }

    if (got_packet) {
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
        write_frame(s, &pkt, ost);
    }
}

static void do_subtitle_out(AVFormatContext *s,
                            OutputStream *ost,
                            InputStream *ist,
                            AVSubtitle *sub,
                            int64_t pts)
{
    static uint8_t *subtitle_out = NULL;
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;

    if (pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
    }

    /* Note: DVB subtitles need one packet to draw them and another
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    for (i = 0; i < nb; i++) {
        ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
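            /* Added comment: display times are in milliseconds, while pkt.pts
             * is in the output stream time base; the factor of 90 below assumes
             * a 90 kHz time base (1/90000), as used for DVB subtitles in MPEG-TS. */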
            if (i == 0)
                pkt.pts += 90 * sub->start_display_time;
            else
                pkt.pts += 90 * sub->end_display_time;
        }
        write_frame(s, &pkt, ost);
    }
}

static void do_video_out(AVFormatContext *s,
                         OutputStream *ost,
                         AVFrame *in_picture,
                         int *frame_size)
{
    int ret, format_video_sync;
    AVPacket pkt;
    AVCodecContext *enc = ost->enc_ctx;

    *frame_size = 0;

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO)
        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
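    /* Added comment: unless timestamps are passed through untouched, drop any
     * frame whose pts would step backwards relative to what was already encoded. */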
    if (format_video_sync != VSYNC_PASSTHROUGH &&
        ost->frame_number &&
        in_picture->pts != AV_NOPTS_VALUE &&
        in_picture->pts < ost->sync_opts) {
        nb_frames_drop++;
        av_log(NULL, AV_LOG_WARNING,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, in_picture->pts);
        return;
    }

    if (in_picture->pts == AV_NOPTS_VALUE)
        in_picture->pts = ost->sync_opts;
    ost->sync_opts = in_picture->pts;


    if (!ost->frame_number)
        ost->first_pts = in_picture->pts;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (ost->frame_number >= ost->max_frames)
        return;

    if (s->oformat->flags & AVFMT_RAWPICTURE &&
        enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
        /* raw pictures are written as an AVPicture structure to
           avoid any copies; the older method is supported
           temporarily. */
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
        enc->coded_frame->top_field_first  = in_picture->top_field_first;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        pkt.data   = (uint8_t *)in_picture;
        pkt.size   =  sizeof(AVPicture);
        pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
        pkt.flags |= AV_PKT_FLAG_KEY;

        write_frame(s, &pkt, ost);
    } else {
        int got_packet;

        if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
            ost->top_field_first >= 0)
            in_picture->top_field_first = !!ost->top_field_first;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = 0;
        if (ost->forced_kf_index < ost->forced_kf_count &&
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            ost->forced_kf_index++;
        }

        ost->frames_encoded++;

        ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
            exit_program(1);
        }

        if (got_packet) {
            av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
            write_frame(s, &pkt, ost);
            *frame_size = pkt.size;

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
        }
    }
    ost->sync_opts++;
    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;
}

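/* Added comment: convert a normalized mean squared error into PSNR in dB,
 * i.e. psnr = -10 * log10(mse). */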
static double psnr(double d)
{
    return -10.0 * log(d) / log(10.0);
}

static void do_video_stats(OutputStream *ost, int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->frame_number;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        if (enc->flags & AV_CODEC_FLAG_PSNR)
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = ost->sync_opts * av_q2d(enc->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01;

        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    }
}

/*
 * Read one frame for lavfi output for ost and encode it.
 */
static int poll_filter(OutputStream *ost)
{
    OutputFile    *of = output_files[ost->file_index];
    AVFrame *filtered_frame = NULL;
    int frame_size, ret;

    if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
        return AVERROR(ENOMEM);
    }
    filtered_frame = ost->filtered_frame;

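    /* Added comment: encoders without AV_CODEC_CAP_VARIABLE_FRAME_SIZE expect
     * exactly frame_size samples per call, so pull fixed-size chunks from the
     * buffersink; otherwise take frames as the filtergraph produces them. */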
    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
        !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
        ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
                                         ost->enc_ctx->frame_size);
    else
        ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);

    if (ret < 0)
        return ret;

    if (filtered_frame->pts != AV_NOPTS_VALUE) {
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        filtered_frame->pts = av_rescale_q(filtered_frame->pts,
                                           ost->filter->filter->inputs[0]->time_base,
                                           ost->enc_ctx->time_base) -
                              av_rescale_q(start_time,
                                           AV_TIME_BASE_Q,
                                           ost->enc_ctx->time_base);
    }

    switch (ost->filter->filter->inputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        if (!ost->frame_aspect_ratio)
            ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

        do_video_out(of->ctx, ost, filtered_frame, &frame_size);
        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
        break;
    case AVMEDIA_TYPE_AUDIO:
        do_audio_out(of->ctx, ost, filtered_frame);
        break;
    default:
        // TODO support subtitle filters
        av_assert0(0);
    }

    av_frame_unref(filtered_frame);

    return 0;
}

static void finish_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    ost->finished = 1;

    if (of->shortest) {
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = 1;
    }
}

/*
 * Read as many frames as possible from lavfi and encode them.
 *
 * Always read from the active stream with the lowest timestamp. If no frames
 * are available for it then return EAGAIN and wait for more input. This way we
 * can use lavfi sources that generate an unlimited number of frames without memory
 * usage exploding.
 */
static int poll_filters(void)
{
    int i, ret = 0;

    while (ret >= 0 && !received_sigterm) {
        OutputStream *ost = NULL;
        int64_t min_pts = INT64_MAX;

        /* choose output stream with the lowest timestamp */
        for (i = 0; i < nb_output_streams; i++) {
            int64_t pts = output_streams[i]->sync_opts;

            if (!output_streams[i]->filter || output_streams[i]->finished)
                continue;

            pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
                               AV_TIME_BASE_Q);
            if (pts < min_pts) {
                min_pts = pts;
                ost = output_streams[i];
            }
        }

        if (!ost)
            break;

        ret = poll_filter(ost);

        if (ret == AVERROR_EOF) {
            finish_output_stream(ost);
            ret = 0;
        } else if (ret == AVERROR(EAGAIN))
            return 0;
    }

    return ret;
}

static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
    }

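    /* Added comment: muxing overhead is whatever the container adds on top of
     * the coded streams, reported as a percentage of the coded data size. */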
    if (data_size && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "\n");
    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
}

static void print_report(int is_last_report, int64_t timer_start)
{
    char buf[1024];
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate, ti1, pts;
    static int64_t last_time = -1;
    static int qp_histogram[52];

    if (!print_stats && !is_last_report)
        return;

    if (!is_last_report) {
        int64_t cur_time;
        /* display the report every 0.5 seconds */
        cur_time = av_gettime_relative();
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);
    if (total_size < 0) {
        char errbuf[128];
        av_strerror(total_size, errbuf, sizeof(errbuf));
        av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
               "avio_tell() failed: %s\n", errbuf);
        total_size = 0;
    }

    buf[0] = '\0';
    ti1 = 1e10;
    vid = 0;
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float t = (av_gettime_relative() - timer_start) / 1000000.0;

            frame_number = ost->frame_number;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
                     frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
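                /* Added comment: render the histogram as one hex digit per QP
                 * value, log2-compressed so large counts remain readable. */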
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
            }

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
            if (enc->flags & AV_CODEC_FLAG_PSNR) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = enc->coded_frame->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
                }
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
            }
FF_ENABLE_DEPRECATION_WARNINGS
#endif
            vid = 1;
        }
        /* compute min output value */
        pts = (double)ost->last_mux_dts * av_q2d(ost->st->time_base);
        if ((pts < ti1) && (pts > 0))
            ti1 = pts;
    }
    if (ti1 < 0.01)
        ti1 = 0.01;

    bitrate = (double)(total_size * 8) / ti1 / 1000.0;

    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
            "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
            (double)total_size / 1024, ti1, bitrate);

    if (nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
                 nb_frames_drop);

    av_log(NULL, AV_LOG_INFO, "%s    \r", buf);

    fflush(stderr);

    if (is_last_report)
        print_final_stats(total_size);

}

static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)
            continue;

        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;

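        /* Added comment: feed the encoder NULL frames until it stops returning
         * packets, draining anything it has buffered (B-frame delay, lookahead). */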
        for (;;) {
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
            const char *desc;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                encode = avcodec_encode_audio2;
                desc   = "Audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                encode = avcodec_encode_video2;
                desc   = "Video";
                break;
            default:
                stop_encoding = 1;
            }

            if (encode) {
                AVPacket pkt;
                int got_packet;
                av_init_packet(&pkt);
                pkt.data = NULL;
                pkt.size = 0;

                ret = encode(enc, &pkt, NULL, &got_packet);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
                    exit_program(1);
                }
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                if (!got_packet) {
                    stop_encoding = 1;
                    break;
                }
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                write_frame(os, &pkt, ost);
            }

            if (stop_encoding)
                break;
        }
    }
}

/*
 * Check whether a packet from ist should be written into ost at this time
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    if (ost->source_index != ist_index)
        return 0;

    if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
        return 0;

    return 1;
}

static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    AVPacket opkt;

    av_init_packet(&opkt);

    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    if (of->recording_time != INT64_MAX &&
        ist->last_dts >= of->recording_time + start_time) {
        ost->finished = 1;
        return;
    }

    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE)
            start_time += f->start_time;
        if (ist->last_dts >= f->recording_time + start_time) {
            ost->finished = 1;
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;

    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->enc_ctx->codec_id != AV_CODEC_ID_H264
       && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
       ) {
        if (av_parser_change(ost->parser, ost->st->codec,
                             &opkt.data, &opkt.size,
                             pkt->data, pkt->size,
                             pkt->flags & AV_PKT_FLAG_KEY)) {
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }

    write_frame(of->ctx, &opkt, ost);
}

int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}

static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        ist->next_dts = decoded_frame->pts;
    else if (pkt->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = pkt->pts;
    pkt->pts           = AV_NOPTS_VALUE;

    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                       ist->resample_channels       != avctx->channels               ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];

        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
                   ist->st->index);
            exit_program(1);
        }
        decoded_frame->channel_layout = avctx->channel_layout;

        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                     ist->resample_channel_layout);
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);

        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);

        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;

        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist) &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                exit_program(1);
            }
    }

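    /* Added comment: rescale the frame pts from the demuxer time base to a
     * 1/sample_rate time base, which is what the audio buffersrc is set up
     * with here. */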
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_q(decoded_frame->pts,
                                          ist->st->time_base,
                                          (AVRational){1, avctx->sample_rate});
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;

        err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
        if (err < 0)
            break;
    }

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}

static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    int i, ret = 0, err = 0, resample_changed;

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    ret = avcodec_decode_video2(ist->dec_ctx,
                                decoded_frame, got_output, pkt);
    if (!*got_output || ret < 0)
        return ret;

    ist->frames_decoded++;

    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
                                           decoded_frame->pkt_dts);
    pkt->size = 0;

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    resample_changed = ist->resample_width   != decoded_frame->width  ||
                       ist->resample_height  != decoded_frame->height ||
                       ist->resample_pix_fmt != decoded_frame->format;
    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

        ret = poll_filters();
        if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN))) {
            char errbuf[128];
            av_strerror(ret, errbuf, sizeof(errbuf));

            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
        }

        ist->resample_width   = decoded_frame->width;
        ist->resample_height  = decoded_frame->height;
        ist->resample_pix_fmt = decoded_frame->format;

        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist) &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                exit_program(1);
            }
    }

    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;

        err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
        if (err < 0)
            break;
    }

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}

static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);
    if (ret < 0)
        return ret;
    if (!*got_output)
        return ret;

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
            continue;

        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
    }

    avsubtitle_free(&subtitle);
    return ret;
}

static int send_filter_eof(InputStream *ist)
{
    int i, ret;
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
        if (ret < 0)
            return ret;
    }
    return 0;
}

/* pkt = NULL means EOF (needed to flush decoder buffers) */
static void process_input_packet(InputStream *ist, const AVPacket *pkt)
{
    int i;
    int got_output;
    AVPacket avpkt;

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->last_dts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int ret = 0;
    handle_eof:

        ist->last_dts = ist->next_dts;

        if (avpkt.size && avpkt.size != pkt->size &&
            !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, &avpkt, &got_output);
            if (avpkt.duration)
                ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            else if (ist->st->avg_frame_rate.num)
                ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
                                              AV_TIME_BASE_Q);
            else if (ist->dec_ctx->framerate.num != 0) {
                int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
                                                   ist->dec_ctx->ticks_per_frame;
                ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->framerate, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return;
        }

        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
                   ist->file_index, ist->st->index);
            if (exit_on_error)
                exit_program(1);
            break;
        }

        // touch data and size only if not EOF
        if (pkt) {
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    if (!pkt && ist->decoding_needed) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed) {
        ist->last_dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                             ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
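            /* Added comment: advance next_dts by one picture duration; ticks
             * covers repeated fields/frames signalled by the parser
             * (repeat_pict) or the codec's ticks_per_frame. */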
            if (ist->dec_ctx->framerate.num != 0) {
                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num;
            }
            break;
        }
    }
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return;
}

static void print_sdp(void)
{
    char sdp[16384];
    int i;
    AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);

    if (!avc)
        exit_program(1);
    for (i = 0; i < nb_output_files; i++)
        avc[i] = output_files[i]->ctx;

    av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
    printf("SDP:\n%s\n", sdp);
    fflush(stdout);
    av_freep(&avc);
}

static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
{
    int i;
    for (i = 0; hwaccels[i].name; i++)
        if (hwaccels[i].pix_fmt == pix_fmt)
            return &hwaccels[i];
    return NULL;
}

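/* AVCodecContext.get_format callback: walk the pixel formats offered by the
 * decoder and pick the first hardware format whose hwaccel can be initialized
 * for this input stream; otherwise fall back to the first software format. */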
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != -1; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const HWAccel *hwaccel;

        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        hwaccel = get_hwaccel(*p);
        if (!hwaccel ||
            (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
            (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
            continue;

        ret = hwaccel->init(s);
        if (ret < 0) {
            if (ist->hwaccel_id == hwaccel->id) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
            continue;
        }
        ist->active_hwaccel_id = hwaccel->id;
        ist->hwaccel_pix_fmt   = *p;
        break;
    }

    return *p;
}

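/* AVCodecContext.get_buffer2 callback: let the active hwaccel allocate
 * hardware frames and use the default allocator for everything else. */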
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;

    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
        return ist->hwaccel_get_buffer(s, frame, flags);

    return avcodec_default_get_buffer2(s, frame, flags);
}

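/* Open the decoder for one input stream (when decoding is needed) and
 * initialize its timestamp bookkeeping. */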
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];
    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
                    ist->dec_ctx->codec_id, ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            char errbuf[128];
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            av_strerror(ret, errbuf, sizeof(errbuf));

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, errbuf);
            return ret;
        }
        assert_avoptions(ist->decoder_opts);
    }

    ist->last_dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
    ist->next_dts = AV_NOPTS_VALUE;
    init_pts_correction(&ist->pts_ctx);

    return 0;
}

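/* Return the input stream feeding this output stream: either its direct
 * source or, for filtergraph outputs, the first filtergraph input of the same
 * media type. */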
static InputStream *get_input_stream(OutputStream *ost)
{
    if (ost->source_index >= 0)
        return input_streams[ost->source_index];

    if (ost->filter) {
        FilterGraph *fg = ost->filter->graph;
        int i;

        for (i = 0; i < fg->nb_inputs; i++)
            if (fg->inputs[i]->ist->dec_ctx->codec_type == ost->enc_ctx->codec_type)
                return fg->inputs[i]->ist;
    }

    return NULL;
}

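/* Open the encoder for one output stream (or just apply the user options in
 * the streamcopy case) and propagate the resulting parameters to the
 * muxer-facing stream. */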
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            ost->enc_ctx->subtitle_header = av_malloc(dec->subtitle_header_size);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                    ost->file_index, ost->index);
            return ret;
        }
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         "It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        ost->st->time_base = ost->enc_ctx->time_base;
    } else {
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
        if (ret < 0)
            return ret;
        ost->st->time_base = ost->st->codec->time_base;
    }

    return ret;
}

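/* Parse the comma-separated list of times passed to -force_key_frames (for
 * example something like "0,10,20") into an array of PTS values expressed in
 * the encoder time base. */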
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i;
    int64_t t;

    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    ost->forced_kf_count = n;
    ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
    if (!ost->forced_kf_pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        t = parse_time_or_die("force_key_frames", p, 1);
        ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        p = next;
    }
}

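/* Record the encoder name, prefixed with the libavcodec ident unless bitexact
 * muxing was requested via -fflags, in the output stream's "encoder" metadata
 * tag. */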
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;

    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    if (!(format_flags & AVFMT_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}

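/* Prepare everything needed for transcoding: fill in the encoding parameters
 * of every output stream (copying them from the decoder in the streamcopy
 * case), open all encoders and decoders, write the output file headers and
 * print the stream mapping. */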
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024];
    int want_sdp = 1;

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* for each output stream, we compute the right encoding parameters */
    for (i = 0; i < nb_output_streams; i++) {
        AVCodecContext *enc_ctx;
        AVCodecContext *dec_ctx = NULL;
        ost = output_streams[i];
        oc  = output_files[ost->file_index]->ctx;
        ist = get_input_stream(ost);

        if (ost->attachment_filename)
            continue;

        enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;

        if (ist) {
            dec_ctx = ist->dec_ctx;

            ost->st->disposition          = ist->st->disposition;
            enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
            enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
        }

        if (ost->stream_copy) {
            AVRational sar;
            uint64_t extra_size;

            av_assert0(ist && !ost->filter);

            extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;

            if (extra_size > INT_MAX) {
                return AVERROR(EINVAL);
            }

            /* if stream_copy is selected, no need to decode or encode */
            enc_ctx->codec_id   = dec_ctx->codec_id;
            enc_ctx->codec_type = dec_ctx->codec_type;

            if (!enc_ctx->codec_tag) {
                if (!oc->oformat->codec_tag ||
                     av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
                     av_codec_get_tag(oc->oformat->codec_tag, dec_ctx->codec_id) <= 0)
                    enc_ctx->codec_tag = dec_ctx->codec_tag;
            }

            enc_ctx->bit_rate       = dec_ctx->bit_rate;
            enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
            enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
            enc_ctx->field_order    = dec_ctx->field_order;
            enc_ctx->extradata      = av_mallocz(extra_size);
            if (!enc_ctx->extradata) {
                return AVERROR(ENOMEM);
            }
            memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
            enc_ctx->extradata_size = dec_ctx->extradata_size;
            if (!copy_tb) {
                enc_ctx->time_base      = dec_ctx->time_base;
                enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
                av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
                          enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
            } else
                enc_ctx->time_base = ist->st->time_base;

            if (ist->st->nb_side_data) {
                ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
                                                      sizeof(*ist->st->side_data));
                if (!ost->st->side_data)
                    return AVERROR(ENOMEM);

                for (j = 0; j < ist->st->nb_side_data; j++) {
                    const AVPacketSideData *sd_src = &ist->st->side_data[j];
                    AVPacketSideData *sd_dst = &ost->st->side_data[j];

                    sd_dst->data = av_malloc(sd_src->size);
                    if (!sd_dst->data)
                        return AVERROR(ENOMEM);
                    memcpy(sd_dst->data, sd_src->data, sd_src->size);
                    sd_dst->size = sd_src->size;
                    sd_dst->type = sd_src->type;
                    ost->st->nb_side_data++;
                }
            }

            ost->parser = av_parser_init(enc_ctx->codec_id);

            switch (enc_ctx->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (audio_volume != 256) {
                    av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                    exit_program(1);
                }
                enc_ctx->channel_layout     = dec_ctx->channel_layout;
                enc_ctx->sample_rate        = dec_ctx->sample_rate;
                enc_ctx->channels           = dec_ctx->channels;
                enc_ctx->frame_size         = dec_ctx->frame_size;
                enc_ctx->audio_service_type = dec_ctx->audio_service_type;
                enc_ctx->block_align        = dec_ctx->block_align;
                break;
            case AVMEDIA_TYPE_VIDEO:
                enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
                enc_ctx->width              = dec_ctx->width;
                enc_ctx->height             = dec_ctx->height;
                enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
                if (ost->frame_aspect_ratio)
                    sar = av_d2q(ost->frame_aspect_ratio * enc_ctx->height / enc_ctx->width, 255);
                else if (ist->st->sample_aspect_ratio.num)
                    sar = ist->st->sample_aspect_ratio;
                else
                    sar = dec_ctx->sample_aspect_ratio;
                ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                enc_ctx->width  = dec_ctx->width;
                enc_ctx->height = dec_ctx->height;
                break;
            case AVMEDIA_TYPE_DATA:
            case AVMEDIA_TYPE_ATTACHMENT:
                break;
            default:
                abort();
            }
        } else {
            if (!ost->enc) {
                /* should only happen when a default codec is not present. */
                snprintf(error, sizeof(error), "Automatic encoder selection "
                         "failed for output stream #%d:%d. Default encoder for "
                         "format %s is probably disabled. Please choose an "
                         "encoder manually.\n", ost->file_index, ost->index,
                         oc->oformat->name);
                ret = AVERROR(EINVAL);
                goto dump_format;
            }

            set_encoder_id(output_files[ost->file_index], ost);

            /*
             * We want CFR output if and only if one of the following is true:
             * 1) the user specified an output framerate with -r
             * 2) the user specified -vsync cfr
             * 3) the output format is CFR and the user did not force vsync
             *    to something other than CFR
             *
             * In any of these cases, set ost->frame_rate.
             */
            if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO &&
                !ost->frame_rate.num && ist &&
                (video_sync_method ==  VSYNC_CFR ||
                 (video_sync_method ==  VSYNC_AUTO &&
                  !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
                if (ist->framerate.num)
                    ost->frame_rate = ist->framerate;
                else if (ist->st->avg_frame_rate.num)
                    ost->frame_rate = ist->st->avg_frame_rate;
                else {
                    av_log(NULL, AV_LOG_WARNING, "Constant framerate requested "
                           "for the output stream #%d:%d, but no information "
                           "about the input framerate is available. Falling "
                           "back to a default value of 25fps. Use the -r option "
                           "if you want a different framerate.\n",
                           ost->file_index, ost->index);
                    ost->frame_rate = (AVRational){ 25, 1 };
                }

                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                    ost->frame_rate = ost->enc->supported_framerates[idx];
                }
            }

            if (!ost->filter &&
                (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
                    FilterGraph *fg;
                    fg = init_simple_filtergraph(ist, ost);
                    if (configure_filtergraph(fg)) {
                        av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
                        exit_program(1);
                    }
            }

            switch (enc_ctx->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
                enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
                enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
                enc_ctx->channels       = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
                break;
            case AVMEDIA_TYPE_VIDEO:
                enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;

                enc_ctx->width  = ost->filter->filter->inputs[0]->w;
                enc_ctx->height = ost->filter->filter->inputs[0]->h;
                enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
                    ost->frame_aspect_ratio ? // overridden by the -aspect cli option
                    av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
                    ost->filter->filter->inputs[0]->sample_aspect_ratio;
                enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;

                ost->st->avg_frame_rate = ost->frame_rate;

                if (dec_ctx &&
                    (enc_ctx->width   != dec_ctx->width  ||
                     enc_ctx->height  != dec_ctx->height ||
                     enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
                    enc_ctx->bits_per_raw_sample = 0;
                }

                if (ost->forced_keyframes)
                    parse_forced_key_frames(ost->forced_keyframes, ost,
                                            ost->enc_ctx);
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                enc_ctx->time_base = (AVRational){1, 1000};
                break;
            default:
                abort();
                break;
            }
        }
    }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
            goto dump_format;

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* open files and write file headers */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        oc->interrupt_callback = int_cb;
        if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
            char errbuf[128];
            av_strerror(ret, errbuf, sizeof(errbuf));
            snprintf(error, sizeof(error),
                     "Could not write header for output file #%d "
                     "(incorrect codec parameters ?): %s",
                     i, errbuf);
            ret = AVERROR(EINVAL);
            goto dump_format;
        }
        assert_avoptions(output_files[i]->opts);
        if (strcmp(oc->oformat->name, "rtp")) {
            want_sdp = 0;
        }
    }

 dump_format:
    /* dump the file output parameters - cannot be done before in case
       of stream copy */
    for (i = 0; i < nb_output_files; i++) {
        av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
    }

    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (ist->filters[j]->graph->graph_desc) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && ost->filter->graph->graph_desc) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    if (want_sdp) {
        print_sdp();
    }

    return 0;
}

/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
    int i;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost    = output_streams[i];
        OutputFile *of       = output_files[ost->file_index];
        AVFormatContext *os  = output_files[ost->file_index]->ctx;

        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
        if (ost->frame_number >= ost->max_frames) {
            int j;
            for (j = 0; j < of->ctx->nb_streams; j++)
                output_streams[of->ost_index + j]->finished = 1;
            continue;
        }

        return 1;
    }

    return 0;
}

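/* Pick the next input file to read a packet from: the one feeding the stream
 * with the lowest DTS seen so far, skipping files that are in EAGAIN state or
 * already at EOF. */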
static InputFile *select_input_file(void)
{
    InputFile *ifile = NULL;
    int64_t ipts_min = INT64_MAX;
    int i;

    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];
        int64_t ipts     = ist->last_dts;

        if (ist->discard || input_files[ist->file_index]->eagain)
            continue;
        if (!input_files[ist->file_index]->eof_reached) {
            if (ipts < ipts_min) {
                ipts_min = ipts;
                ifile    = input_files[ist->file_index];
            }
        }
    }

    return ifile;
}

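/* When more than one input file is used, each file gets a reader thread that
 * demuxes packets into a FIFO; the main thread then drains those FIFOs
 * through get_input_packet_mt(). */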
#if HAVE_PTHREADS
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    int ret = 0;

    while (!transcoding_finished && ret >= 0) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            ret = 0;
            continue;
        } else if (ret < 0)
            break;

        pthread_mutex_lock(&f->fifo_lock);
        while (!av_fifo_space(f->fifo))
            pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);

        av_dup_packet(&pkt);
        av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);

        pthread_mutex_unlock(&f->fifo_lock);
    }

    f->finished = 1;
    return NULL;
}

static void free_input_threads(void)
{
    int i;

    if (nb_input_files == 1)
        return;

    transcoding_finished = 1;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        AVPacket pkt;

        if (!f->fifo || f->joined)
            continue;

        pthread_mutex_lock(&f->fifo_lock);
        while (av_fifo_size(f->fifo)) {
            av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
            av_free_packet(&pkt);
        }
        pthread_cond_signal(&f->fifo_cond);
        pthread_mutex_unlock(&f->fifo_lock);

        pthread_join(f->thread, NULL);
        f->joined = 1;

        while (av_fifo_size(f->fifo)) {
            av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
            av_free_packet(&pkt);
        }
        av_fifo_free(f->fifo);
    }
}

static int init_input_threads(void)
{
    int i, ret;

    if (nb_input_files == 1)
        return 0;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];

        if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
            return AVERROR(ENOMEM);

        pthread_mutex_init(&f->fifo_lock, NULL);
        pthread_cond_init (&f->fifo_cond, NULL);

        if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
            return AVERROR(ret);
    }
    return 0;
}

static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    int ret = 0;

    pthread_mutex_lock(&f->fifo_lock);

    if (av_fifo_size(f->fifo)) {
        av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
        pthread_cond_signal(&f->fifo_cond);
    } else {
        if (f->finished)
            ret = AVERROR_EOF;
        else
            ret = AVERROR(EAGAIN);
    }

    pthread_mutex_unlock(&f->fifo_lock);

    return ret;
}
#endif

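/* Fetch the next packet from an input file, honouring rate emulation (-re)
 * and using the threaded readers when several input files are open. */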
static int get_input_packet(InputFile *f, AVPacket *pkt)
{
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_PTHREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}

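/* Track the per-input-file EAGAIN state: got_eagain() reports whether any
 * input temporarily had no packet available, reset_eagain() clears the
 * flags. */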
static int got_eagain(void)
{
    int i;
    for (i = 0; i < nb_input_files; i++)
        if (input_files[i]->eagain)
            return 1;
    return 0;
}

static void reset_eagain(void)
{
    int i;
    for (i = 0; i < nb_input_files; i++)
        input_files[i]->eagain = 0;
}

/*
 * Read one packet from an input file and send it for
 * - decoding -> lavfi (audio/video)
 * - decoding -> encoding -> muxing (subtitles)
 * - muxing (streamcopy)
 *
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(void)
{
    InputFile *ifile;
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;

    /* select the stream that we must read now */
    ifile = select_input_file();
    /* if none, the input is finished (or everything returned EAGAIN) */
    if (!ifile) {
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return AVERROR(EAGAIN);
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
        return AVERROR_EOF;
    }

    is  = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }
        ifile->eof_reached = 1;

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed)
                process_input_packet(ist, NULL);

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in the stream; we ignore them */
    if (pkt.stream_index >= ifile->nb_streams)
        goto discard_packet;

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1)
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;
            if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT)) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ist->next_dts;

        if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    process_input_packet(ist, &pkt);

discard_packet:
    av_free_packet(&pkt);

    return 0;
}

/*
 * The following code is the main loop of the file converter
 */
static int transcode(void)
{
    int ret, i, need_input = 1;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
    term_init();

    timer_start = av_gettime_relative();

#if HAVE_PTHREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    while (!received_sigterm) {
        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        /* read and process one input packet if needed */
        if (need_input) {
            ret = process_input();
            if (ret == AVERROR_EOF)
                need_input = 0;
        }

        ret = poll_filters();
        if (ret < 0) {
            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
                continue;
            } else {
                char errbuf[128];
                av_strerror(ret, errbuf, sizeof(errbuf));

                av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
                break;
            }
        }

        /* dump report by using the first output video and audio streams */
        print_report(0, timer_start);
    }
#if HAVE_PTHREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
            process_input_packet(ist, NULL);
        }
    }
    poll_filters();
    flush_encoders();

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        av_write_trailer(os);
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start);

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_PTHREADS
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    fclose(ost->logfile);
                    ost->logfile = NULL;
                }
                av_free(ost->forced_kf_pts);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }
    return ret;
}

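/* Return the user CPU time consumed by this process, in microseconds,
 * falling back to wall-clock time when no suitable API is available. */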
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}

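/* Return the peak memory usage of this process in bytes, or 0 if it cannot
 * be determined. */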
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}

int main(int argc, char **argv)
{
    int ret;
    int64_t ti;

    register_exit(avconv_cleanup);

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avfilter_register_all();
    av_register_all();
    avformat_network_init();

    show_banner();

    /* parse options and open all input/output files */
    ret = avconv_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        fprintf(stderr, "At least one output file must be specified\n");
        exit_program(1);
    }

    ti = getutime();
    if (transcode() < 0)
        exit_program(1);
    ti = getutime() - ti;
    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
    }

    exit_program(0);
    return 0;
}