ffmpeg.c 145 KB
Newer Older
Fabrice Bellard's avatar
Fabrice Bellard committed
1
/*
Fabrice Bellard's avatar
Fabrice Bellard committed
2
 * Copyright (c) 2000-2003 Fabrice Bellard
Fabrice Bellard's avatar
Fabrice Bellard committed
3
 *
4 5 6
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
7 8
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
9
 * version 2.1 of the License, or (at your option) any later version.
Fabrice Bellard's avatar
Fabrice Bellard committed
10
 *
11
 * FFmpeg is distributed in the hope that it will be useful,
Fabrice Bellard's avatar
Fabrice Bellard committed
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
Fabrice Bellard's avatar
Fabrice Bellard committed
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with FFmpeg; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Fabrice Bellard's avatar
Fabrice Bellard committed
19
 */
20

21 22 23 24 25
/**
 * @file
 * multimedia converter based on the FFmpeg libraries
 */

26 27 28 29 30 31
#include "config.h"
#include <ctype.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <errno.h>
32
#include <limits.h>
33 34
#include <stdint.h>

35
#if HAVE_ISATTY
36 37 38 39
#if HAVE_IO_H
#include <io.h>
#endif
#if HAVE_UNISTD_H
40
#include <unistd.h>
41
#endif
42
#endif
43

44 45
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
46
#include "libswresample/swresample.h"
47
#include "libavutil/opt.h"
48
#include "libavutil/channel_layout.h"
49 50
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
51
#include "libavutil/fifo.h"
52
#include "libavutil/intreadwrite.h"
53
#include "libavutil/dict.h"
54
#include "libavutil/mathematics.h"
55
#include "libavutil/pixdesc.h"
56
#include "libavutil/avstring.h"
57
#include "libavutil/libm.h"
58
#include "libavutil/imgutils.h"
59
#include "libavutil/timestamp.h"
60
#include "libavutil/bprint.h"
61
#include "libavutil/time.h"
62
#include "libavutil/threadmessage.h"
63
#include "libavformat/os_support.h"
64

65
# include "libavfilter/avcodec.h"
66
# include "libavfilter/avfilter.h"
67
# include "libavfilter/buffersrc.h"
68
# include "libavfilter/buffersink.h"
69

70
#if HAVE_SYS_RESOURCE_H
71
#include <sys/time.h>
72
#include <sys/types.h>
73
#include <sys/resource.h>
74
#elif HAVE_GETPROCESSTIMES
75 76
#include <windows.h>
#endif
77 78 79 80
#if HAVE_GETPROCESSMEMORYINFO
#include <windows.h>
#include <psapi.h>
#endif
81

82
#if HAVE_SYS_SELECT_H
83 84 85
#include <sys/select.h>
#endif

86 87 88 89 90 91
#if HAVE_TERMIOS_H
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <termios.h>
#elif HAVE_KBHIT
92
#include <conio.h>
Fabrice Bellard's avatar
Fabrice Bellard committed
93
#endif
94

95 96 97 98
#if HAVE_PTHREADS
#include <pthread.h>
#endif

99
#include <time.h>
Fabrice Bellard's avatar
Fabrice Bellard committed
100

101
#include "ffmpeg.h"
Fabrice Bellard's avatar
Fabrice Bellard committed
102 103
#include "cmdutils.h"

104
#include "libavutil/avassert.h"
105

106
const char program_name[] = "ffmpeg";
107
const int program_birth_year = 2000;
108

109
static FILE *vstats_file;
110

111 112 113 114 115 116 117 118 119
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};

120
static void do_video_stats(OutputStream *ost, int frame_size);
121
static int64_t getutime(void);
122
static int64_t getmaxrss(void);
123

124
static int run_as_daemon  = 0;
125 126
static int nb_frames_dup = 0;
static int nb_frames_drop = 0;
127
static int64_t decode_error_stat[2];
128

129
static int current_time;
130
AVIOContext *progress_avio = NULL;
131

132 133
static uint8_t *subtitle_out;

Stefano Sabatini's avatar
Stefano Sabatini committed
134
#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
Fabrice Bellard's avatar
Fabrice Bellard committed
135

136 137 138 139
InputStream **input_streams = NULL;
int        nb_input_streams = 0;
InputFile   **input_files   = NULL;
int        nb_input_files   = 0;
140

141 142 143 144
OutputStream **output_streams = NULL;
int         nb_output_streams = 0;
OutputFile   **output_files   = NULL;
int         nb_output_files   = 0;
Fabrice Bellard's avatar
Fabrice Bellard committed
145

146 147
FilterGraph **filtergraphs;
int        nb_filtergraphs;
Fabrice Bellard's avatar
Fabrice Bellard committed
148

149 150 151 152
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;
153
static int restore_tty;
154 155
#endif

156
#if HAVE_PTHREADS
157
static void free_input_threads(void);
158
#endif
159

160 161 162 163 164
/* sub2video hack:
   Convert subtitles to video with alpha to insert them in filter graphs.
   This is a temporary solution until libavfilter gets real subtitles support.
 */

165 166 167 168 169 170 171 172 173 174 175 176 177 178
static int sub2video_get_blank_frame(InputStream *ist)
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    ist->sub2video.frame->width  = ist->sub2video.w;
    ist->sub2video.frame->height = ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
        return ret;
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210

static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                AVSubtitleRect *r)
{
    uint32_t *pal, *dst2;
    uint8_t *src, *src2;
    int x, y;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
        return;
    }
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
        return;
    }

    dst += r->y * dst_linesize + r->x * 4;
    src = r->pict.data[0];
    pal = (uint32_t *)r->pict.data[1];
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        src2 = src;
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        dst += dst_linesize;
        src += r->pict.linesize[0];
    }
}

static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
211
    AVFrame *frame = ist->sub2video.frame;
212 213
    int i;

214 215
    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
216
    for (i = 0; i < ist->nb_filters; i++)
217 218 219
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
220 221
}

222
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
223 224
{
    int w = ist->sub2video.w, h = ist->sub2video.h;
225
    AVFrame *frame = ist->sub2video.frame;
226 227
    int8_t *dst;
    int     dst_linesize;
228 229
    int num_rects, i;
    int64_t pts, end_pts;
230

231
    if (!frame)
232
        return;
233
    if (sub) {
234
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
235
                                 AV_TIME_BASE_Q, ist->st->time_base);
236
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL,
237 238 239 240 241 242 243
                                 AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        pts       = ist->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
244
    if (sub2video_get_blank_frame(ist) < 0) {
245
        av_log(ist->dec_ctx, AV_LOG_ERROR,
246 247 248 249 250
               "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
251
    for (i = 0; i < num_rects; i++)
252 253
        sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
    sub2video_push_ref(ist, pts);
254
    ist->sub2video.end_pts = end_pts;
255 256 257 258 259 260 261 262 263 264 265 266 267 268
}

static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
269
        if (!ist2->sub2video.frame)
270 271
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
Lou Logan's avatar
Lou Logan committed
272
           if not, subtracting a larger time here is necessary */
273 274 275 276
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
277
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
278
            sub2video_update(ist2, NULL);
279 280 281 282 283 284 285 286 287 288 289
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}

static void sub2video_flush(InputStream *ist)
{
    int i;

290 291
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
292 293 294 295 296 297
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
}

/* end of sub2video hack */

298
static void term_exit_sigsafe(void)
299
{
300 301 302 303
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
304
}
305

306 307 308 309 310 311
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    term_exit_sigsafe();
}

312
static volatile int received_sigterm = 0;
313
static volatile int received_nb_signals = 0;
314
static volatile int transcode_init_done = 0;
315
static int main_return_code = 0;
316

317 318
static void
sigterm_handler(int sig)
319 320 321
{
    received_sigterm = sig;
    received_nb_signals++;
322
    term_exit_sigsafe();
323
    if(received_nb_signals > 3)
324
        exit(123);
325 326
}

327
void term_init(void)
328
{
329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
#if HAVE_TERMIOS_H
    if(!run_as_daemon){
        struct termios tty;
        int istty = 1;
#if HAVE_ISATTY
        istty = isatty(0) && isatty(2);
#endif
        if (istty && tcgetattr (0, &tty) == 0) {
            oldtty = tty;
            restore_tty = 1;

            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                             |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
350
        }
351
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
352
    }
353 354 355
#endif
    avformat_network_deinit();

Aneesh Dogra's avatar
Aneesh Dogra committed
356
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
357 358 359 360
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
361 362
}

363 364
/* read a key without blocking */
static int read_key(void)
365
{
366 367 368 369 370
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;
371

372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }
393

394 395 396 397 398 399
    if (stdin->_cnt > 0) {
        read(0, &ch, 1);
        return ch;
    }
    if (is_pipe) {
        /* When running under a GUI, you will end here. */
rogerdpack's avatar
rogerdpack committed
400 401
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
402
            return -1;
rogerdpack's avatar
rogerdpack committed
403
        }
404 405 406 407 408 409
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
410
        }
411 412 413 414 415 416
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
417 418
}

419
static int decode_interrupt_cb(void *ctx)
420
{
421
    return received_nb_signals > transcode_init_done;
422 423
}

424
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
425

426
static void ffmpeg_cleanup(int ret)
427
{
428
    int i, j;
429

430 431 432 433 434
    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        printf("bench: maxrss=%ikB\n", maxrss);
    }

435
    for (i = 0; i < nb_filtergraphs; i++) {
436 437 438 439 440
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
441
        }
442 443 444 445
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
446
        }
447 448 449
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

450 451 452
        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);
453

454
    av_freep(&subtitle_out);
455

456
    /* close files */
Aneesh Dogra's avatar
Aneesh Dogra committed
457
    for (i = 0; i < nb_output_files; i++) {
458 459
        OutputFile *of = output_files[i];
        AVFormatContext *s = of->ctx;
460
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
461
            avio_closep(&s->pb);
462
        avformat_free_context(s);
463 464
        av_dict_free(&of->opts);

465
        av_freep(&output_files[i]);
466
    }
467
    for (i = 0; i < nb_output_streams; i++) {
468 469
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
470 471 472 473 474
        while (bsfc) {
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
            bsfc = next;
        }
475 476
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
477
        av_frame_free(&ost->last_frame);
478 479

        av_parser_close(ost->parser);
480

481
        av_freep(&ost->forced_keyframes);
482
        av_expr_free(ost->forced_keyframes_pexpr);
483 484
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);
485

486 487 488
        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

489 490
        avcodec_free_context(&ost->enc_ctx);

491
        av_freep(&output_streams[i]);
492
    }
493 494 495
#if HAVE_PTHREADS
    free_input_threads();
#endif
Aneesh Dogra's avatar
Aneesh Dogra committed
496
    for (i = 0; i < nb_input_files; i++) {
497 498
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
499
    }
500
    for (i = 0; i < nb_input_streams; i++) {
501 502 503 504
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
505
        av_dict_free(&ist->decoder_opts);
506 507
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
508 509 510
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

511 512
        avcodec_free_context(&ist->dec_ctx);

513
        av_freep(&input_streams[i]);
514
    }
515

516 517
    if (vstats_file)
        fclose(vstats_file);
518
    av_freep(&vstats_filename);
519

520 521
    av_freep(&input_streams);
    av_freep(&input_files);
522
    av_freep(&output_streams);
523
    av_freep(&output_files);
524

525
    uninit_opts();
526

527
    avformat_network_deinit();
528

529
    if (received_sigterm) {
530 531
        av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
               (int) received_sigterm);
532
    } else if (ret && transcode_init_done) {
533
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
534
    }
535
    term_exit();
536 537
}

538 539 540 541 542 543 544 545 546
void remove_avoptions(AVDictionary **a, AVDictionary *b)
{
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
    }
}

547
void assert_avoptions(AVDictionary *m)
548
{
549 550
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
551
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
552
        exit_program(1);
553
    }
554
}
555

556
static void abort_codec_experimental(AVCodec *c, int encoder)
557
{
558
    exit_program(1);
559
}
560

561
static void update_benchmark(const char *fmt, ...)
562
{
563 564 565 566
    if (do_benchmark_all) {
        int64_t t = getutime();
        va_list va;
        char buf[1024];
567

568 569 570 571 572
        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
573
        }
574
        current_time = t;
575 576 577
    }
}

578
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
579 580 581 582 583 584 585 586
{
    int i;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
    }
}

587
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
588
{
589
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
590
    AVCodecContext          *avctx = ost->st->codec;
591 592
    int ret;

593 594 595 596 597 598 599 600
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
        }
    }

601 602 603
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
604

605 606 607 608 609 610 611 612
    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
613 614
        if (ost->frame_number >= ost->max_frames) {
            av_free_packet(pkt);
615
            return;
616
        }
617
        ost->frame_number++;
618 619
    }

620 621 622
    if (bsfc)
        av_packet_split_side_data(pkt);

Aneesh Dogra's avatar
Aneesh Dogra committed
623 624
    while (bsfc) {
        AVPacket new_pkt = *pkt;
625 626 627 628 629
        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
                                                 bsfc->filter->name,
                                                 NULL, 0);
        int a = av_bitstream_filter_filter(bsfc, avctx,
                                           bsf_arg ? bsf_arg->value : NULL,
Aneesh Dogra's avatar
Aneesh Dogra committed
630 631 632
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
633 634 635 636 637 638
        if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
            uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
            if(t) {
                memcpy(t, new_pkt.data, new_pkt.size);
                memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
                new_pkt.data = t;
639
                new_pkt.buf = NULL;
640 641 642 643
                a = 1;
            } else
                a = AVERROR(ENOMEM);
        }
Aneesh Dogra's avatar
Aneesh Dogra committed
644
        if (a > 0) {
645 646
            pkt->side_data = NULL;
            pkt->side_data_elems = 0;
647
            av_free_packet(pkt);
648 649 650
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                           av_buffer_default_free, NULL, 0);
            if (!new_pkt.buf)
651
                exit_program(1);
Aneesh Dogra's avatar
Aneesh Dogra committed
652
        } else if (a < 0) {
653
            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
654 655
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");
656 657
            print_error("", a);
            if (exit_on_error)
658
                exit_program(1);
659
        }
Aneesh Dogra's avatar
Aneesh Dogra committed
660
        *pkt = new_pkt;
661

Aneesh Dogra's avatar
Aneesh Dogra committed
662
        bsfc = bsfc->next;
663 664
    }

665
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
666 667 668 669 670 671 672 673 674 675 676
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
677
     if(
678
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
679
        pkt->dts != AV_NOPTS_VALUE &&
680 681 682
        ost->last_mux_dts != AV_NOPTS_VALUE) {
      int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
      if (pkt->dts < max) {
683
        int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
684
        av_log(s, loglevel, "Non-monotonous DTS in output stream "
685 686 687 688
               "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
               ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
        if (exit_on_error) {
            av_log(NULL, AV_LOG_FATAL, "aborting.\n");
689
            exit_program(1);
690
        }
691
        av_log(s, loglevel, "changing to %"PRId64". This may result "
692
               "in incorrect timestamps in the output file.\n",
693
               max);
694
        if(pkt->pts >= pkt->dts)
695
            pkt->pts = FFMAX(pkt->pts, max);
696
        pkt->dts = max;
697
      }
698
     }
699 700 701
    }
    ost->last_mux_dts = pkt->dts;

702
    ost->data_size += pkt->size;
703
    ost->packets_written++;
704

705
    pkt->stream_index = ost->index;
706 707 708

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
709
                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
710
                av_get_media_type_string(ost->enc_ctx->codec_type),
711
                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
712 713
                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
                pkt->size
714 715 716
              );
    }

Aneesh Dogra's avatar
Aneesh Dogra committed
717 718
    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
719
        print_error("av_interleaved_write_frame()", ret);
720
        main_return_code = 1;
721
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
722
    }
723
    av_free_packet(pkt);
724 725
}

726 727 728 729
static void close_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

730
    ost->finished |= ENCODER_FINISHED;
731
    if (of->shortest) {
732
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
733
        of->recording_time = FFMIN(of->recording_time, end);
734 735 736
    }
}

737
static int check_recording_time(OutputStream *ost)
738
{
739
    OutputFile *of = output_files[ost->file_index];
740

741
    if (of->recording_time != INT64_MAX &&
742
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
743
                      AV_TIME_BASE_Q) >= 0) {
744
        close_output_stream(ost);
745
        return 0;
746
    }
747 748
    return 1;
}
749

750 751
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                         AVFrame *frame)
752
{
753
    AVCodecContext *enc = ost->enc_ctx;
754
    AVPacket pkt;
755
    int got_packet = 0;
756

757 758 759
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
760

761 762
    if (!check_recording_time(ost))
        return;
763

764
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
765
        frame->pts = ost->sync_opts;
766
    ost->sync_opts = frame->pts + frame->nb_samples;
767 768
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;
769

770 771
    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
772 773 774 775 776 777
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }
778

779
    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
780
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
781
        exit_program(1);
782
    }
783
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
784

785
    if (got_packet) {
786
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
787

788 789 790 791 792 793
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
        }
794

795
        write_frame(s, &pkt, ost);
796 797 798
    }
}

799 800 801
static void do_subtitle_out(AVFormatContext *s,
                            OutputStream *ost,
                            InputStream *ist,
802
                            AVSubtitle *sub)
803
{
804 805 806 807
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
808
    int64_t pts;
809

810
    if (sub->pts == AV_NOPTS_VALUE) {
811
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
812
        if (exit_on_error)
813
            exit_program(1);
814
        return;
815
    }
816

817
    enc = ost->enc_ctx;
818

819 820
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
821 822 823 824
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
825 826
    }

827 828 829
    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
830
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
831 832 833
        nb = 2;
    else
        nb = 1;
834

835
    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
836 837 838
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
Aneesh Dogra's avatar
Aneesh Dogra committed
839
    for (i = 0; i < nb; i++) {
840 841
        unsigned save_num_rects = sub->num_rects;

842
        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
843 844
        if (!check_recording_time(ost))
            return;
845

846
        sub->pts = pts;
847
        // start_display_time is required to be 0
Aneesh Dogra's avatar
Aneesh Dogra committed
848 849
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
850
        sub->start_display_time = 0;
851 852
        if (i == 1)
            sub->num_rects = 0;
853 854 855

        ost->frames_encoded++;

856 857
        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
858 859
        if (i == 1)
            sub->num_rects = save_num_rects;
860
        if (subtitle_out_size < 0) {
861
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
862
            exit_program(1);
863 864 865 866 867
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
Aneesh Dogra's avatar
Aneesh Dogra committed
868
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
869
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
870
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
871 872 873 874 875 876 877
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += 90 * sub->start_display_time;
            else
                pkt.pts += 90 * sub->end_display_time;
        }
878
        pkt.dts = pkt.pts;
879
        write_frame(s, &pkt, ost);
880
    }
881
}
882

883 884
static void do_video_out(AVFormatContext *s,
                         OutputStream *ost,
885 886
                         AVFrame *next_picture,
                         double sync_ipts)
887
{
888 889
    int ret, format_video_sync;
    AVPacket pkt;
890
    AVCodecContext *enc = ost->enc_ctx;
891
    AVCodecContext *mux_enc = ost->st->codec;
892
    int nb_frames, nb0_frames, i;
893
    double delta, delta0;
894 895 896
    double duration = 0;
    int frame_size = 0;
    InputStream *ist = NULL;
897
    AVFilterContext *filter = ost->filter->filter;
898

899 900
    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];
901

902 903
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
904 905 906 907
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
908

909 910 911 912 913 914 915 916
    if (!ost->filters_script &&
        !ost->filters &&
        next_picture &&
        ist &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

917 918
    delta0 = sync_ipts - ost->sync_opts;
    delta  = delta0 + duration;
919

920
    /* by default, we output a single frame */
921
    nb0_frames = 0;
922
    nb_frames = 1;
923

924
    format_video_sync = video_sync_method;
925 926 927 928 929
    if (format_video_sync == VSYNC_AUTO) {
        if(!strcmp(s->oformat->name, "avi")) {
            format_video_sync = VSYNC_VFR;
        } else
            format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
930 931 932 933 934 935
        if (   ist
            && format_video_sync == VSYNC_CFR
            && input_files[ist->file_index]->ctx->nb_streams == 1
            && input_files[ist->file_index]->input_ts_offset == 0) {
            format_video_sync = VSYNC_VSCFR;
        }
936 937 938
        if (format_video_sync == VSYNC_CFR && copy_ts) {
            format_video_sync = VSYNC_VSCFR;
        }
939
    }
940

941 942 943 944 945
    if (delta0 < 0 &&
        delta > 0 &&
        format_video_sync != VSYNC_PASSTHROUGH &&
        format_video_sync != VSYNC_DROP) {
        double cor = FFMIN(-delta0, duration);
946 947 948 949
        if (delta0 < -0.6) {
            av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
        } else
            av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
950 951 952 953 954
        sync_ipts += cor;
        duration -= cor;
        delta0 += cor;
    }

955
    switch (format_video_sync) {
956 957 958 959
    case VSYNC_VSCFR:
        if (ost->frame_number == 0 && delta - duration >= 0.5) {
            av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
            delta = duration;
960
            delta0 = 0;
961 962
            ost->sync_opts = lrint(sync_ipts);
        }
963 964
    case VSYNC_CFR:
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
965 966 967
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
            nb_frames = 0;
        } else if (delta < -1.1)
968
            nb_frames = 0;
969
        else if (delta > 1.1) {
970
            nb_frames = lrintf(delta);
971 972 973
            if (delta0 > 1.1)
                nb0_frames = lrintf(delta0 - 0.6);
        }
974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989
        break;
    case VSYNC_VFR:
        if (delta <= -0.6)
            nb_frames = 0;
        else if (delta > 0.6)
            ost->sync_opts = lrint(sync_ipts);
        break;
    case VSYNC_DROP:
    case VSYNC_PASSTHROUGH:
        ost->sync_opts = lrint(sync_ipts);
        break;
    default:
        av_assert0(0);
    }

    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
990 991
    nb0_frames = FFMIN(nb0_frames, nb_frames);
    if (nb0_frames == 0 && ost->last_droped) {
992
        nb_frames_drop++;
993
        av_log(NULL, AV_LOG_VERBOSE,
994
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
995
               ost->frame_number, ost->st->index, ost->last_frame->pts);
996 997
    }
    if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
998
        if (nb_frames > dts_error_threshold * 30) {
999
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1000 1001
            nb_frames_drop++;
            return;
1002
        }
1003
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1004
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1005
    }
1006
    ost->last_droped = nb_frames == nb0_frames;
1007

1008 1009
  /* duplicates frame if needed */
  for (i = 0; i < nb_frames; i++) {
1010
    AVFrame *in_picture;
Anton Khirnov's avatar
Anton Khirnov committed
1011 1012 1013
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
1014

1015 1016 1017 1018 1019
    if (i < nb0_frames && ost->last_frame) {
        in_picture = ost->last_frame;
    } else
        in_picture = next_picture;

1020
    in_picture->pts = ost->sync_opts;
1021

1022
#if 1
1023
    if (!check_recording_time(ost))
1024
#else
1025
    if (ost->frame_number >= ost->max_frames)
1026
#endif
Anton Khirnov's avatar
Anton Khirnov committed
1027
        return;
1028

Anton Khirnov's avatar
Anton Khirnov committed
1029
    if (s->oformat->flags & AVFMT_RAWPICTURE &&
1030
        enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
Anton Khirnov's avatar
Anton Khirnov committed
1031 1032 1033
        /* raw pictures are written as AVPicture structure to
           avoid any copies. We support temporarily the older
           method. */
1034 1035
        if (in_picture->interlaced_frame)
            mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1036
        else
1037
            mux_enc->field_order = AV_FIELD_PROGRESSIVE;
Anton Khirnov's avatar
Anton Khirnov committed
1038 1039 1040 1041
        pkt.data   = (uint8_t *)in_picture;
        pkt.size   =  sizeof(AVPicture);
        pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
        pkt.flags |= AV_PKT_FLAG_KEY;
1042

1043
        write_frame(s, &pkt, ost);
Anton Khirnov's avatar
Anton Khirnov committed
1044
    } else {
1045 1046
        int got_packet, forced_keyframe = 0;
        double pts_time;
Fabrice Bellard's avatar
Fabrice Bellard committed
1047

1048
        if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
1049 1050
            ost->top_field_first >= 0)
            in_picture->top_field_first = !!ost->top_field_first;
1051

1052
        if (in_picture->interlaced_frame) {
1053
            if (enc->codec->id == AV_CODEC_ID_MJPEG)
1054
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1055
            else
1056
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1057
        } else
1058
            mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1059

1060
        in_picture->quality = enc->global_quality;
1061
        in_picture->pict_type = 0;
1062

1063 1064
        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
            in_picture->pts * av_q2d(enc->time_base) : NAN;
Anton Khirnov's avatar
Anton Khirnov committed
1065
        if (ost->forced_kf_index < ost->forced_kf_count &&
1066
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
Anton Khirnov's avatar
Anton Khirnov committed
1067
            ost->forced_kf_index++;
1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090
            forced_keyframe = 1;
        } else if (ost->forced_keyframes_pexpr) {
            double res;
            ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
            res = av_expr_eval(ost->forced_keyframes_pexpr,
                               ost->forced_keyframes_expr_const_values, NULL);
            av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                    ost->forced_keyframes_expr_const_values[FKF_N],
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                    ost->forced_keyframes_expr_const_values[FKF_T],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    res);
            if (res) {
                forced_keyframe = 1;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                    ost->forced_keyframes_expr_const_values[FKF_N];
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                    ost->forced_keyframes_expr_const_values[FKF_T];
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
            }

            ost->forced_keyframes_expr_const_values[FKF_N] += 1;
Anton Khirnov's avatar
Anton Khirnov committed
1091
        }
1092

1093
        if (forced_keyframe) {
1094
            in_picture->pict_type = AV_PICTURE_TYPE_I;
1095 1096 1097
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
        }

1098
        update_benchmark(NULL);
1099 1100 1101 1102 1103 1104 1105
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }

1106 1107
        ost->frames_encoded++;

1108
        ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1109
        update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
Anton Khirnov's avatar
Anton Khirnov committed
1110 1111
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1112
            exit_program(1);
Anton Khirnov's avatar
Anton Khirnov committed
1113
        }
1114

Anton Khirnov's avatar
Anton Khirnov committed
1115
        if (got_packet) {
1116 1117 1118 1119 1120 1121 1122
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
            }

1123 1124
            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
                pkt.pts = ost->sync_opts;
1125

1126
            av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1127

1128 1129 1130 1131 1132 1133 1134 1135
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
            }

            frame_size = pkt.size;
1136
            write_frame(s, &pkt, ost);
1137

Anton Khirnov's avatar
Anton Khirnov committed
1138 1139 1140
            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
1141
            }
1142
        }
1143
    }
Anton Khirnov's avatar
Anton Khirnov committed
1144 1145 1146 1147 1148 1149 1150
    ost->sync_opts++;
    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;
1151

1152
    if (vstats_filename && frame_size)
1153
        do_video_stats(ost, frame_size);
1154
  }
1155 1156 1157 1158 1159

    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    av_frame_ref(ost->last_frame, next_picture);
Fabrice Bellard's avatar
Fabrice Bellard committed
1160 1161
}

Aneesh Dogra's avatar
Aneesh Dogra committed
1162
static double psnr(double d)
Fabrice Bellard's avatar
Fabrice Bellard committed
1163
{
Aneesh Dogra's avatar
Aneesh Dogra committed
1164
    return -10.0 * log(d) / log(10.0);
1165
}
1166

1167
static void do_video_stats(OutputStream *ost, int frame_size)
1168
{
1169 1170 1171
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;
1172

1173 1174 1175 1176 1177
    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
1178
            exit_program(1);
1179
        }
1180
    }
1181

1182
    enc = ost->enc_ctx;
1183
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1184
        frame_number = ost->st->nb_frames;
1185 1186
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
        if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
Aneesh Dogra's avatar
Aneesh Dogra committed
1187
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1188

1189 1190
        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
1191
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1192 1193
        if (ti1 < 0.01)
            ti1 = 0.01;
1194

Aneesh Dogra's avatar
Aneesh Dogra committed
1195
        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1196
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1197
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1198
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1199
        fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
1200
    }
1201
}
1202

1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215
static void finish_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    if (of->shortest) {
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}

1216
/**
1217 1218 1219 1220 1221 1222
 * Get and encode new output from any of the filtergraphs, without causing
 * activity.
 *
 * @return  0 for success, <0 for severe errors
 */
static int reap_filters(void)
1223 1224
{
    AVFrame *filtered_frame = NULL;
1225
    int i;
1226

1227 1228 1229 1230
    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile    *of = output_files[ost->file_index];
1231
        AVFilterContext *filter;
1232
        AVCodecContext *enc = ost->enc_ctx;
1233 1234 1235 1236
        int ret = 0;

        if (!ost->filter)
            continue;
1237
        filter = ost->filter->filter;
1238

1239
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1240
            return AVERROR(ENOMEM);
1241
        }
1242 1243 1244
        filtered_frame = ost->filtered_frame;

        while (1) {
1245
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1246
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1247 1248 1249 1250
                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
1251
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1252
                }
1253 1254
                break;
            }
1255 1256 1257 1258
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }
1259
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
1260
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271
                AVRational tb = enc->time_base;
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

1272
                filtered_frame->pts =
1273 1274
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1275
            }
1276 1277 1278
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

1279
            switch (filter->inputs[0]->type) {
1280
            case AVMEDIA_TYPE_VIDEO:
1281
                if (!ost->frame_aspect_ratio.num)
1282
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1283

1284
                if (debug_ts) {
1285
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1286
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1287
                            float_pts,
1288 1289 1290
                            enc->time_base.num, enc->time_base.den);
                }

1291
                do_video_out(of->ctx, ost, filtered_frame, float_pts);
1292 1293
                break;
            case AVMEDIA_TYPE_AUDIO:
1294 1295
                if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != av_frame_get_channels(filtered_frame)) {
1296 1297 1298 1299
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
1300 1301 1302 1303 1304 1305 1306
                do_audio_out(of->ctx, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

1307
            av_frame_unref(filtered_frame);
1308
        }
1309
    }
1310 1311

    return 0;
1312 1313
}

1314 1315 1316 1317 1318 1319
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
1320
    int i, j;
1321 1322 1323

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
1324
        switch (ost->enc_ctx->codec_type) {
1325 1326 1327 1328 1329
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
1330
        extra_size += ost->enc_ctx->extradata_size;
1331 1332 1333
        data_size  += ost->data_size;
    }

1334
    if (data_size && total_size>0 && total_size >= data_size)
1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");
1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
1359
            enum AVMediaType type = ist->dec_ctx->codec_type;
1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
1393
            enum AVMediaType type = ost->enc_ctx->codec_type;
1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
1417 1418 1419 1420 1421
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
    }
}

static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    char buf[1024];
    AVBPrint buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    int64_t pts = INT64_MIN;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    vid = 0;
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy && enc->coded_frame)
            q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps, t = (cur_time-timer_start) / 1000000.0;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
            }
            if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = enc->coded_frame->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_droped;
    }

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=N/A time=");
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
    if (pts < 0)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

    if (bitrate < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

    fflush(stderr);
    }

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            avio_closep(&progress_avio);
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}

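/* Drain every encoder at end of stream: keep feeding NULL frames until the
 * encoder stops returning packets, muxing each flushed packet as usual. */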
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)
            continue;

        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;

        for (;;) {
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
            const char *desc;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                encode = avcodec_encode_audio2;
                desc   = "Audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                encode = avcodec_encode_video2;
                desc   = "Video";
                break;
            default:
                stop_encoding = 1;
            }

            if (encode) {
                AVPacket pkt;
                int pkt_size;
                int got_packet;
                av_init_packet(&pkt);
                pkt.data = NULL;
                pkt.size = 0;

                update_benchmark(NULL);
                ret = encode(enc, &pkt, NULL, &got_packet);
                update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
                    exit_program(1);
                }
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                if (!got_packet) {
                    stop_encoding = 1;
                    break;
                }
                if (ost->finished & MUXER_FINISHED) {
                    av_free_packet(&pkt);
                    continue;
                }
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                pkt_size = pkt.size;
                write_frame(os, &pkt, ost);
                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                    do_video_stats(ost, pkt_size);
                }
            }

            if (stop_encoding)
                break;
        }
    }
}

/*
 * Check whether a packet from ist should be written into ost at this time
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}

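/* Forward one input packet to a stream-copied output: rescale its timestamps
 * into the output stream time base, honour start/recording time limits and
 * hand the resulting packet to write_frame(). */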
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
    AVPicture pict;
    AVPacket opkt;

    av_init_packet(&opkt);

    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    if (pkt->pts == AV_NOPTS_VALUE) {
        if (!ost->frame_number && ist->pts < start_time &&
            !ost->copy_prior_start)
            return;
    } else {
        if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
            !ost->copy_prior_start)
            return;
    }

    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->st->time_base) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;

    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->enc_ctx->codec_id != AV_CODEC_ID_H264
       && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
       ) {
        if (av_parser_change(ost->parser, ost->st->codec,
                             &opkt.data, &opkt.size,
                             pkt->data, pkt->size,
                             pkt->flags & AV_PKT_FLAG_KEY)) {
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }
    av_copy_packet_side_data(&opkt, pkt);

    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    }

    write_frame(of->ctx, &opkt, ost);
}

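/* If the decoder did not report a channel layout, fall back to the default
 * layout for the channel count (up to guess_layout_max channels) and warn. */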
int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        if (dec->channels > ist->guess_layout_max)
            return 0;
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for  Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}

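/* Decode one audio packet: advance the stream clock by the decoded samples,
 * reconfigure the filtergraphs if the sample rate/format/layout changed and
 * push the decoded frame into every attached filter input. */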
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;
    AVRational decoded_frame_tb;

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (*got_output || ret<0 || pkt->size)
        decode_error_stat[ret<0] ++;

    if (!*got_output || ret < 0) {
        if (!pkt->size) {
            for (i = 0; i < ist->nb_filters; i++)
#if 1
                av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
#else
                av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
#endif
        }
        return ret;
    }

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                       ist->resample_channels       != avctx->channels               ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];

        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
                   ist->st->index);
            exit_program(1);
        }
        decoded_frame->channel_layout = avctx->channel_layout;

        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                     ist->resample_channel_layout);
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);

        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);

        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;

        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
                FilterGraph *fg = filtergraphs[i];
                if (configure_filtergraph(fg) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);
                }
            }
    }

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
        decoded_frame_tb   = avctx->time_base;
    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = decoded_frame->pkt_pts;
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    }
    pkt->pts           = AV_NOPTS_VALUE;
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;
        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
                                     AV_BUFFERSRC_FLAG_PUSH);
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
        if (err < 0)
            break;
    }
    decoded_frame->pts = AV_NOPTS_VALUE;

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}

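/* Decode one video packet: take the frame timestamp from the best-effort
 * timestamp, reconfigure the filtergraphs on size/format changes and push
 * the decoded frame into every attached filter input. */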
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    int i, ret = 0, err = 0, resample_changed;
    int64_t best_effort_timestamp;
    AVRational *frame_sample_aspect;

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);

    update_benchmark(NULL);
    ret = avcodec_decode_video2(ist->dec_ctx,
                                decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);

    // The following line may be required in some cases where there is no parser
    // or the parser does not set has_b_frames correctly
    if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
        } else
            av_log_ask_for_sample(
                ist->dec_ctx,
                "has_b_frames is larger in decoder than demuxer %d > %d ",
                ist->dec_ctx->has_b_frames,
                ist->st->codec->has_b_frames
            );
    }

    if (*got_output || ret<0 || pkt->size)
        decode_error_stat[ret<0] ++;

    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                decoded_frame->width,
                decoded_frame->height,
                decoded_frame->format,
                ist->dec_ctx->width,
                ist->dec_ctx->height,
                ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0) {
        if (!pkt->size) {
            for (i = 0; i < ist->nb_filters; i++)
#if 1
                av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
#else
                av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
#endif
        }
        return ret;
    }

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
    if(best_effort_timestamp != AV_NOPTS_VALUE)
        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    pkt->size = 0;

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    resample_changed = ist->resample_width   != decoded_frame->width  ||
                       ist->resample_height  != decoded_frame->height ||
                       ist->resample_pix_fmt != decoded_frame->format;
    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

        ist->resample_width   = decoded_frame->width;
        ist->resample_height  = decoded_frame->height;
        ist->resample_pix_fmt = decoded_frame->format;

        for (i = 0; i < nb_filtergraphs; i++) {
            if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                exit_program(1);
            }
        }
    }

    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
    for (i = 0; i < ist->nb_filters; i++) {
        if (!frame_sample_aspect->num)
            *frame_sample_aspect = ist->st->sample_aspect_ratio;

        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
        if (ret == AVERROR_EOF) {
            ret = 0; /* ignore */
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            exit_program(1);
        }
    }

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}

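/* Decode one subtitle packet, optionally clipping the previous subtitle's
 * duration (fix_sub_duration), update the sub2video state and send the
 * result to every matching subtitle encoder. */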
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    if (*got_output || ret<0 || pkt->size)
        decode_error_stat[ret<0] ++;

    if (ret < 0 || !*got_output) {
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %d to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    sub2video_update(ist, &subtitle);

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
    }

out:
    avsubtitle_free(&subtitle);
    return ret;
}

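/* Feed one packet to the decoders of ist (or just advance the timestamps for
 * pure stream copy), run do_streamcopy() for the outputs that take this
 * stream unencoded, and return whether a decoder produced output. */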
/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
{
    int ret = 0, i;
    int got_output = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    if (pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int duration;
    handle_eof:

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        if (avpkt.size && avpkt.size != pkt->size &&
            !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, &avpkt, &got_output);
            if (avpkt.duration) {
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                duration = ((int64_t)AV_TIME_BASE *
                                ist->dec_ctx->framerate.den * ticks) /
                                ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            } else
                duration = 0;

            if(ist->dts != AV_NOPTS_VALUE && duration) {
                ist->next_dts += duration;
            }else
                ist->next_dts = AV_NOPTS_VALUE;

            if (got_output)
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return -1;
        }

        if (ret < 0)
            return ret;

        avpkt.dts=
        avpkt.pts= AV_NOPTS_VALUE;

        // touch data and size only if not EOF
        if (pkt) {
            if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
                ret = avpkt.size;
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
        if (got_output && !pkt)
            break;
    }

    /* handle stream copy */
    if (!ist->decoding_needed) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                             ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return got_output;
}

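/* Build an SDP description covering all RTP output files and print it to
 * stdout, or write it to the file named by sdp_filename when one was given. */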
static void print_sdp(void)
{
    char sdp[16384];
    int i;
    int j;
    AVIOContext *sdp_pb;
    AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));

    if (!avc)
        exit_program(1);
    for (i = 0, j = 0; i < nb_output_files; i++) {
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
            avc[j] = output_files[i]->ctx;
            j++;
        }
    }

    av_sdp_create(avc, j, sdp, sizeof(sdp));

    if (!sdp_filename) {
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
    } else {
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
        } else {
            avio_printf(sdp_pb, "SDP:\n%s", sdp);
            avio_closep(&sdp_pb);
            av_freep(&sdp_filename);
        }
    }

    av_freep(&avc);
}

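/* Decoder callbacks used for hardware acceleration: get_format() picks the
 * first offered hwaccel pixel format that matches the requested hwaccel and
 * initializes it; get_buffer() defers to the hwaccel's frame allocator when
 * one is active. */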
static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
{
    int i;
    for (i = 0; hwaccels[i].name; i++)
        if (hwaccels[i].pix_fmt == pix_fmt)
            return &hwaccels[i];
    return NULL;
}

static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != -1; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const HWAccel *hwaccel;

        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        hwaccel = get_hwaccel(*p);
        if (!hwaccel ||
            (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
            (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
            continue;

        ret = hwaccel->init(s);
        if (ret < 0) {
            if (ist->hwaccel_id == hwaccel->id) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
            continue;
        }
        ist->active_hwaccel_id = hwaccel->id;
        ist->hwaccel_pix_fmt   = *p;
        break;
    }

    return *p;
}

static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;

    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
        return ist->hwaccel_get_buffer(s, frame, flags);

    return avcodec_default_get_buffer2(s, frame, flags);
}

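/* Open the decoder for one input stream (when decoding is needed), wiring up
 * the hwaccel callbacks, refcounted frames and per-stream decoder options. */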
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}

static InputStream *get_input_stream(OutputStream *ost)
{
    if (ost->source_index >= 0)
        return input_streams[ost->source_index];
    return NULL;
}

static int compare_int64(const void *a, const void *b)
{
    int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
    return va < vb ? -1 : va > vb ? +1 : 0;
}

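/* Parse a -force_key_frames style argument into a sorted array of timestamps
 * in the encoder time base.  The value is a comma-separated list of times;
 * an entry starting with "chapters" expands to the output file's chapter
 * start times plus an optional offset (for example "chapters-0.1"). */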
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        if (!memcmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                av_log(NULL, AV_LOG_FATAL,
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts   = pts;
}

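/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared only after the input file was opened. */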
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           av_get_media_type_string(st->codec->codec_type),
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}

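/* Tag the output stream with an "encoder" metadata entry (LIBAVCODEC_IDENT
 * plus the encoder name) unless one is already present or bitexact output
 * was requested. */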
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = 0;

    if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
        return;

    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}

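/* One-time setup before transcoding starts: bind complex filtergraph outputs
 * to output streams, configure the filtergraphs and derive encoding or
 * stream-copy parameters for every output stream. */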
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};
    int want_sdp = 1;

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* output stream init */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
            av_dump_format(oc, i, oc->filename, 1);
            av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
            return AVERROR(EINVAL);
        }
    }

    /* init complex filtergraphs */
    for (i = 0; i < nb_filtergraphs; i++)
        if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
            return ret;

    /* for each output stream, we compute the right encoding parameters */
    for (i = 0; i < nb_output_streams; i++) {
        AVCodecContext *enc_ctx;
        AVCodecContext *dec_ctx = NULL;
        ost = output_streams[i];
        oc  = output_files[ost->file_index]->ctx;
        ist = get_input_stream(ost);

        if (ost->attachment_filename)
            continue;

        enc_ctx = ost->enc_ctx;

        if (ist) {
            dec_ctx = ist->dec_ctx;

            ost->st->disposition          = ist->st->disposition;
            enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
            enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
        } else {
            for (j=0; j<oc->nb_streams; j++) {
                AVStream *st = oc->streams[j];
                if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
                    break;
            }
            if (j == oc->nb_streams)
                if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                    ost->st->disposition = AV_DISPOSITION_DEFAULT;
        }

        if (ost->stream_copy) {
            AVRational sar;
            uint64_t extra_size;

            av_assert0(ist && !ost->filter);

            extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;

            if (extra_size > INT_MAX) {
                return AVERROR(EINVAL);
            }

            /* if stream_copy is selected, no need to decode or encode */
            enc_ctx->codec_id   = dec_ctx->codec_id;
            enc_ctx->codec_type = dec_ctx->codec_type;

            if (!enc_ctx->codec_tag) {
                unsigned int codec_tag;
                if (!oc->oformat->codec_tag ||
                     av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
                     !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
                    enc_ctx->codec_tag = dec_ctx->codec_tag;
            }

            enc_ctx->bit_rate       = dec_ctx->bit_rate;
            enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
            enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
            enc_ctx->field_order    = dec_ctx->field_order;
            enc_ctx->extradata      = av_mallocz(extra_size);
            if (!enc_ctx->extradata) {
                return AVERROR(ENOMEM);
            }
            memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
            enc_ctx->extradata_size= dec_ctx->extradata_size;
            enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;

2677
            enc_ctx->time_base = ist->st->time_base;
2678 2679 2680 2681 2682 2683 2684 2685
            /*
             * Avi is a special case here because it supports variable fps but
             * having the fps and timebase differe significantly adds quite some
             * overhead
             */
            if(!strcmp(oc->oformat->name, "avi")) {
                if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2686 2687
                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
                               && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2688
                     || copy_tb==2){
2689 2690 2691
                    enc_ctx->time_base.num = ist->st->r_frame_rate.den;
                    enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
                    enc_ctx->ticks_per_frame = 2;
2692
                } else if (   copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2693 2694
                                 && av_q2d(ist->st->time_base) < 1.0/500
                    || copy_tb==0){
2695 2696 2697 2698
                    enc_ctx->time_base = dec_ctx->time_base;
                    enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
                    enc_ctx->time_base.den *= 2;
                    enc_ctx->ticks_per_frame = 2;
2699 2700 2701 2702
                }
            } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
                      && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
                      && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2703
                      && strcmp(oc->oformat->name, "f4v")
2704
            ) {
2705 2706
                if(   copy_tb<0 && dec_ctx->time_base.den
                                && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2707 2708
                                && av_q2d(ist->st->time_base) < 1.0/500
                   || copy_tb==0){
2709 2710
                    enc_ctx->time_base = dec_ctx->time_base;
                    enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2711 2712
                }
            }
2713
            if (   enc_ctx->codec_tag == AV_RL32("tmcd")
2714 2715 2716
                && dec_ctx->time_base.num < dec_ctx->time_base.den
                && dec_ctx->time_base.num > 0
                && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2717
                enc_ctx->time_base = dec_ctx->time_base;
2718
            }
2719

2720 2721
            if (ist && !ost->frame_rate.num)
                ost->frame_rate = ist->framerate;
            if(ost->frame_rate.num)
                enc_ctx->time_base = av_inv_q(ost->frame_rate);

            av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
                        enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);

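            /* duplicate the stream-level side data of the input stream onto the output stream */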
            if (ist->st->nb_side_data) {
                ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
                                                      sizeof(*ist->st->side_data));
                if (!ost->st->side_data)
                    return AVERROR(ENOMEM);

                for (j = 0; j < ist->st->nb_side_data; j++) {
                    const AVPacketSideData *sd_src = &ist->st->side_data[j];
                    AVPacketSideData *sd_dst = &ost->st->side_data[j];

                    sd_dst->data = av_malloc(sd_src->size);
                    if (!sd_dst->data)
                        return AVERROR(ENOMEM);
                    memcpy(sd_dst->data, sd_src->data, sd_src->size);
                    sd_dst->size = sd_src->size;
                    sd_dst->type = sd_src->type;
                    ost->st->nb_side_data++;
                }
            }

            ost->parser = av_parser_init(enc_ctx->codec_id);

            switch (enc_ctx->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (audio_volume != 256) {
                    av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                    exit_program(1);
                }
                enc_ctx->channel_layout     = dec_ctx->channel_layout;
                enc_ctx->sample_rate        = dec_ctx->sample_rate;
                enc_ctx->channels           = dec_ctx->channels;
                enc_ctx->frame_size         = dec_ctx->frame_size;
                enc_ctx->audio_service_type = dec_ctx->audio_service_type;
                enc_ctx->block_align        = dec_ctx->block_align;
                enc_ctx->initial_padding    = dec_ctx->delay;
#if FF_API_AUDIOENC_DELAY
                enc_ctx->delay              = dec_ctx->delay;
#endif
                if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
                    enc_ctx->block_align= 0;
                if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
                    enc_ctx->block_align= 0;
                break;
            case AVMEDIA_TYPE_VIDEO:
                enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
                enc_ctx->width              = dec_ctx->width;
                enc_ctx->height             = dec_ctx->height;
                enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
                if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
                    sar =
                        av_mul_q(ost->frame_aspect_ratio,
                                 (AVRational){ enc_ctx->height, enc_ctx->width });
                    av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                           "with stream copy may produce invalid files\n");
                }
                else if (ist->st->sample_aspect_ratio.num)
                    sar = ist->st->sample_aspect_ratio;
                else
                    sar = dec_ctx->sample_aspect_ratio;
                ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
                ost->st->avg_frame_rate = ist->st->avg_frame_rate;
                ost->st->r_frame_rate = ist->st->r_frame_rate;
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                enc_ctx->width  = dec_ctx->width;
                enc_ctx->height = dec_ctx->height;
                break;
            case AVMEDIA_TYPE_DATA:
            case AVMEDIA_TYPE_ATTACHMENT:
                break;
            default:
                abort();
            }
        } else {
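            /* transcoding path: a decoder and an encoder are set up for this stream */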
            if (!ost->enc)
                ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
            if (!ost->enc) {
                /* should only happen when a default codec is not present. */
                snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
                         avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
                ret = AVERROR(EINVAL);
                goto dump_format;
            }

            if (ist)
                ist->decoding_needed |= DECODING_FOR_OST;
            ost->encoding_needed = 1;

            set_encoder_id(output_files[ost->file_index], ost);

            if (!ost->filter &&
                (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
                    FilterGraph *fg;
                    fg = init_simple_filtergraph(ist, ost);
                    if (configure_filtergraph(fg)) {
                        av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
                        exit_program(1);
                    }
            }

            if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
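                /* pick the output frame rate: filter graph, input -r/framerate, r_frame_rate, then a 25 fps fallback */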
                if (ost->filter && !ost->frame_rate.num)
                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
                if (ist && !ost->frame_rate.num)
                    ost->frame_rate = ist->framerate;
                if (ist && !ost->frame_rate.num)
                    ost->frame_rate = ist->st->r_frame_rate;
                if (ist && !ost->frame_rate.num) {
                    ost->frame_rate = (AVRational){25, 1};
                    av_log(NULL, AV_LOG_WARNING,
                           "No information "
                           "about the input framerate is available. Falling "
                           "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                           "if you want a different framerate.\n",
                           ost->file_index, ost->index);
                }
//                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                    ost->frame_rate = ost->enc->supported_framerates[idx];
                }
                // reduce frame rate for mpeg4 to be within the spec limits
                if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
                    av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                              ost->frame_rate.num, ost->frame_rate.den, 65535);
                }
            }

            switch (enc_ctx->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
                enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
                enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
                enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
                enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
                break;
            case AVMEDIA_TYPE_VIDEO:
                enc_ctx->time_base = av_inv_q(ost->frame_rate);
                if (ost->filter && !(enc_ctx->time_base.num && enc_ctx->time_base.den))
                    enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
                if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
                   && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
                    av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                               "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
                }
                for (j = 0; j < ost->forced_kf_count; j++)
                    ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                         AV_TIME_BASE_Q,
                                                         enc_ctx->time_base);

                enc_ctx->width  = ost->filter->filter->inputs[0]->w;
                enc_ctx->height = ost->filter->filter->inputs[0]->h;
                enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
                    ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
                    av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
                    ost->filter->filter->inputs[0]->sample_aspect_ratio;
                if (!strncmp(ost->enc->name, "libx264", 7) &&
                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
                    av_log(NULL, AV_LOG_WARNING,
                           "No pixel format specified, %s for H.264 encoding chosen.\n"
                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
                if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
                    av_log(NULL, AV_LOG_WARNING,
                           "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
                enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;

                ost->st->avg_frame_rate = ost->frame_rate;

                if (!dec_ctx ||
                    enc_ctx->width   != dec_ctx->width  ||
                    enc_ctx->height  != dec_ctx->height ||
                    enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
                    enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
                }

                if (ost->forced_keyframes) {
                    if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                        ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                            forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                        if (ret < 0) {
                            av_log(NULL, AV_LOG_ERROR,
                                   "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                            return ret;
                        }
                        ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
                    } else {
                        parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                enc_ctx->time_base = (AVRational){1, 1000};
                if (!enc_ctx->width) {
                    enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
                    enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
                }
                break;
            case AVMEDIA_TYPE_DATA:
                break;
            default:
                abort();
                break;
            }
            /* two pass mode */
            if (enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
                char logfilename[1024];
                FILE *f;

                snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
                         ost->logfile_prefix ? ost->logfile_prefix :
                                               DEFAULT_PASS_LOGFILENAME_PREFIX,
                         i);
                if (!strcmp(ost->enc->name, "libx264")) {
                    av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
                } else {
                    if (enc_ctx->flags & CODEC_FLAG_PASS2) {
                        char  *logbuffer;
                        size_t logbuffer_size;
                        if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
                            av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
                                   logfilename);
                            exit_program(1);
                        }
                        enc_ctx->stats_in = logbuffer;
                    }
                    if (enc_ctx->flags & CODEC_FLAG_PASS1) {
                        f = av_fopen_utf8(logfilename, "wb");
                        if (!f) {
                            av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
                                logfilename, strerror(errno));
                            exit_program(1);
                        }
                        ost->logfile = f;
                    }
                }
            }
        }

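        /* map the user-supplied -disposition string onto AV_DISPOSITION_* flags */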
        if (ost->disposition) {
            static const AVOption opts[] = {
                { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
                { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
                { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
                { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
                { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
                { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
                { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
                { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
                { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
                { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
                { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
                { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
                { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
                { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
                { NULL },
            };
            static const AVClass class = {
                .class_name = "",
                .item_name  = av_default_item_name,
                .option     = opts,
                .version    = LIBAVUTIL_VERSION_INT,
            };
            const AVClass *pclass = &class;

            ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
            if (ret < 0)
                goto dump_format;
        }
    }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            AVCodec      *codec = ost->enc;
            AVCodecContext *dec = NULL;

            if ((ist = get_input_stream(ost)))
                dec = ist->dec_ctx;
            if (dec && dec->subtitle_header) {
                /* ASS code assumes this buffer is null terminated so add extra byte. */
                ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
                if (!ost->enc_ctx->subtitle_header) {
                    ret = AVERROR(ENOMEM);
                    goto dump_format;
                }
                memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
                ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
            }
            if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
                av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
            av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);

            if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
                if (ret == AVERROR_EXPERIMENTAL)
                    abort_codec_experimental(codec, 1);
                snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
                        ost->file_index, ost->index);
                goto dump_format;
            }
            if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
                !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
                av_buffersink_set_frame_size(ost->filter->filter,
                                             ost->enc_ctx->frame_size);
            assert_avoptions(ost->encoder_opts);
            if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
                av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                             " It takes bits/s as argument, not kbits/s\n");
        } else {
            ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                    "Error setting up codec context options.\n");
                return ret;
            }
        }

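        /* mirror the final encoder/stream-copy parameters into the stream codec context seen by the muxer */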
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        ost->st->codec->codec= ost->enc_ctx->codec;

        // copy timebase while removing common factors
        ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* open files and write file headers */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        oc->interrupt_callback = int_cb;
        if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
            snprintf(error, sizeof(error),
                     "Could not write header for output file #%d "
                     "(incorrect codec parameters ?): %s",
                     i, av_err2str(ret));
            ret = AVERROR(EINVAL);
            goto dump_format;
        }
//         assert_avoptions(output_files[i]->opts);
        if (strcmp(oc->oformat->name, "rtp")) {
            want_sdp = 0;
        }
    }

 dump_format:
    /* dump the file output parameters - cannot be done before in case
       of stream copy */
    for (i = 0; i < nb_output_files; i++) {
        av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
    }

    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (ist->filters[j]->graph->graph_desc) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && ost->filter->graph->graph_desc) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";

            if (in_codec) {
                decoder_name  = in_codec->name;
                in_codec_name = avcodec_descriptor_get(in_codec->id)->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                out_codec_name = avcodec_descriptor_get(out_codec->id)->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    if (sdp_filename || want_sdp) {
        print_sdp();
    }

    transcode_init_done = 1;

    return 0;
}

/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
    int i;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost    = output_streams[i];
        OutputFile *of       = output_files[ost->file_index];
        AVFormatContext *os  = output_files[ost->file_index]->ctx;
        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
        if (ost->frame_number >= ost->max_frames) {
            int j;
            for (j = 0; j < of->ctx->nb_streams; j++)
                close_output_stream(output_streams[of->ost_index + j]);
            continue;
        }

        return 1;
    }

    return 0;
}

/**
 * Select the output stream to process.
 *
 * @return  selected output stream, or NULL if none available
 */
static OutputStream *choose_output(void)
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min  = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}

static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key =  read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist     ^= 1;
    if (key == 'h'){
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queueing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
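    /* 'D' cycles the codec debug flags, 'd' reads a debug value from stdin; either way it is applied to all streams */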
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                debug += debug;
        }else
            if(scanf("%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Queue command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}

#if HAVE_PTHREADS
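/* per-input-file demux thread: reads packets and hands them to the main thread through a message queue */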
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        av_dup_packet(&pkt);
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_free_packet(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}

static void free_input_threads(void)
{
    int i;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        AVPacket pkt;

        if (!f->in_thread_queue)
            continue;
        av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
            av_free_packet(&pkt);

        pthread_join(f->thread, NULL);
        f->joined = 1;
        av_thread_message_queue_free(&f->in_thread_queue);
    }
}

static int init_input_threads(void)
{
    int i, ret;

    if (nb_input_files == 1)
        return 0;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];

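        /* for non-seekable (typically live) inputs, receive packets from the queue in non-blocking mode */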
        if (f->ctx->pb ? !f->ctx->pb->seekable :
            strcmp(f->ctx->iformat->name, "lavfi"))
            f->non_blocking = 1;
        ret = av_thread_message_queue_alloc(&f->in_thread_queue,
                                            f->thread_queue_size, sizeof(AVPacket));
        if (ret < 0)
            return ret;

        if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
            av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
            av_thread_message_queue_free(&f->in_thread_queue);
            return AVERROR(ret);
        }
    }
    return 0;
}

static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
                                        AV_THREAD_MESSAGE_NONBLOCK : 0);
}
#endif
static int get_input_packet(InputFile *f, AVPacket *pkt)
{
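    /* with -re (rate_emu), do not return a packet before its timestamp has elapsed in real time */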
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_PTHREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}

static int got_eagain(void)
{
    int i;
    for (i = 0; i < nb_output_streams; i++)
        if (output_streams[i]->unavailable)
            return 1;
    return 0;
}

static void reset_eagain(void)
{
    int i;
    for (i = 0; i < nb_input_files; i++)
        input_files[i]->eagain = 0;
    for (i = 0; i < nb_output_streams; i++)
        output_streams[i]->unavailable = 0;
}

/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;

    is  = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL);
                if (ret>0)
                    return 0;
            }
            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];
                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in the stream: we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];
    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

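    /* undo timestamp wrap-arounds, relative to the file start time, for streams with fewer than 64 pts bits */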
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        if (ist->st->nb_side_data)
            av_packet_split_side_data(&pkt);
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

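    /* detect large timestamp jumps; for discontinuous formats adjust the per-file ts_offset, otherwise drop the invalid timestamps */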
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
         pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !copy_ts) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta   = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    ret = process_input_packet(ist, &pkt);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
               ist->file_index, ist->st->index, av_err2str(ret));
        if (exit_on_error)
            exit_program(1);
    }

discard_packet:
    av_free_packet(&pkt);

    return 0;
}

/**
 * Perform a step of transcoding for the specified filter graph.
 *
 * @param[in]  graph     filter graph to consider
 * @param[out] best_ist  input stream where a frame would allow to continue
 * @return  0 for success, <0 for error
 */
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    InputStream *ist;

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        return reap_filters();

    if (ret == AVERROR_EOF) {
        ret = reap_filters();
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
            input_files[ist->file_index]->eof_reached)
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}

/**
 * Run a single step of transcoding.
 *
 * @return  0 for success, <0 for error
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream  *ist;
    int ret;

    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    if (ost->filter) {
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else {
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }
    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters();
}

/*
 * The following code is the main loop of the file converter
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_PTHREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0) {
            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
                continue;
            } else {
                char errbuf[128];
                av_strerror(ret, errbuf, sizeof(errbuf));
                av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
                break;
            }
        }

        /* dump report by using the first output video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_PTHREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
            process_input_packet(ist, NULL);
        }
    }
    flush_encoders();

    term_exit();
    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        av_write_trailer(os);
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_PTHREADS
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    fclose(ost->logfile);
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
                av_dict_free(&ost->bsf_args);
            }
        }
    }
    return ret;
}

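/* user CPU time consumed by this process, in microseconds */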
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}

static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}

static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}

int main(int argc, char **argv)
{
    int ret;
    int64_t ti;

    register_exit(ffmpeg_cleanup);

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    if(argc>1 && !strcmp(argv[1], "-d")){
        run_as_daemon=1;
        av_log_set_callback(log_callback_null);
        argc--;
        argv++;
    }

    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avfilter_register_all();
    av_register_all();
    avformat_network_init();

    show_banner(argc, argv, options);

    term_init();

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

//     if (nb_input_files == 0) {
//         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
//         exit_program(1);
//     }

    current_time = ti = getutime();
    if (transcode() < 0)
        exit_program(1);
    ti = getutime() - ti;
    if (do_benchmark) {
        printf("bench: utime=%0.3fs\n", ti / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    exit_program(received_nb_signals ? 255 : main_return_code);
    return main_return_code;
}