/*
 * muxing functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "libavcodec/internal.h"
#include "libavcodec/bytestream.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "metadata.h"
#include "id3v2.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include "riff.h"
#include "audiointerleave.h"
#include "url.h"
#include <stdarg.h>
#if CONFIG_NETWORK
#include "network.h"
#endif

/**
 * @file
 * muxing functions for use within libavformat
 */

/* fraction handling */

/**
 * f = val + (num / den) + 0.5.
 *
 * 'num' is normalized so that 0 <= num < den.
 *
 * @param f fractional number
 * @param val integer value
 * @param num must be >= 0
 * @param den must be >= 1
 */
static void frac_init(FFFrac *f, int64_t val, int64_t num, int64_t den)
{
    num += (den >> 1);
    if (num >= den) {
        val += num / den;
        num  = num % den;
    }
    f->val = val;
    f->num = num;
    f->den = den;
}

/**
 * Fractional addition to f: f = f + (incr / f->den).
 *
 * @param f fractional number
 * @param incr increment, can be positive or negative
 */
static void frac_add(FFFrac *f, int64_t incr)
{
    int64_t num, den;

    num = f->num + incr;
    den = f->den;
    if (num < 0) {
        f->val += num / den;
        num     = num % den;
        if (num < 0) {
            num += den;
            f->val--;
        }
    } else if (num >= den) {
        f->val += num / den;
        num     = num % den;
    }
    f->num = num;
}
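
/* Usage sketch (mirrors init_pts() and compute_muxer_pkt_fields() below):
 * for an audio stream in a 1/sample_rate time base, init_pts() calls
 * frac_init(priv_pts, 0, 0, time_base.num * sample_rate), and each packet of
 * frame_size samples then calls frac_add(priv_pts, time_base.den * frame_size),
 * so priv_pts->val advances by frame_size ticks while any rounding remainder
 * is carried in priv_pts->num for the following packets. */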

AVRational ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precision)
{
    AVRational q;
    int j;

    q = st->time_base;

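    /* Strip small factors (2, 3, 5, 7, 9, 11, 13) from the numerator, then
     * double the denominator, until the time base resolves at least
     * min_precision units per second. */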
    for (j=2; j<14; j+= 1+(j>2))
        while (q.den / q.num < min_precision && q.num % j == 0)
            q.num /= j;
    while (q.den / q.num < min_precision && q.den < (1<<24))
        q.den <<= 1;

    return q;
}

enum AVChromaLocation ff_choose_chroma_location(AVFormatContext *s, AVStream *st)
{
    AVCodecParameters *par = st->codecpar;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(par->format);

    if (par->chroma_location != AVCHROMA_LOC_UNSPECIFIED)
        return par->chroma_location;

    if (pix_desc) {
        if (pix_desc->log2_chroma_h == 0) {
            return AVCHROMA_LOC_TOPLEFT;
        } else if (pix_desc->log2_chroma_w == 1 && pix_desc->log2_chroma_h == 1) {
            if (par->field_order == AV_FIELD_UNKNOWN || par->field_order == AV_FIELD_PROGRESSIVE) {
                switch (par->codec_id) {
                case AV_CODEC_ID_MJPEG:
                case AV_CODEC_ID_MPEG1VIDEO: return AVCHROMA_LOC_CENTER;
                }
            }
            if (par->field_order == AV_FIELD_UNKNOWN || par->field_order != AV_FIELD_PROGRESSIVE) {
                switch (par->codec_id) {
                case AV_CODEC_ID_MPEG2VIDEO: return AVCHROMA_LOC_LEFT;
                }
            }
        }
    }

    return AVCHROMA_LOC_UNSPECIFIED;

}

int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
                                   const char *format, const char *filename)
{
    AVFormatContext *s = avformat_alloc_context();
    int ret = 0;

    *avctx = NULL;
    if (!s)
        goto nomem;

    if (!oformat) {
        if (format) {
            oformat = av_guess_format(format, NULL, NULL);
            if (!oformat) {
                av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
                ret = AVERROR(EINVAL);
                goto error;
            }
        } else {
            oformat = av_guess_format(NULL, filename, NULL);
            if (!oformat) {
                ret = AVERROR(EINVAL);
                av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
                       filename);
                goto error;
            }
        }
    }

    s->oformat = oformat;
    if (s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            goto nomem;
        if (s->oformat->priv_class) {
            *(const AVClass**)s->priv_data= s->oformat->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    } else
        s->priv_data = NULL;

    if (filename) {
#if FF_API_FORMAT_FILENAME
FF_DISABLE_DEPRECATION_WARNINGS
        av_strlcpy(s->filename, filename, sizeof(s->filename));
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        if (!(s->url = av_strdup(filename)))
            goto nomem;

    }
    *avctx = s;
    return 0;
nomem:
    av_log(s, AV_LOG_ERROR, "Out of memory\n");
    ret = AVERROR(ENOMEM);
error:
    avformat_free_context(s);
    return ret;
}

static int validate_codec_tag(AVFormatContext *s, AVStream *st)
{
    const AVCodecTag *avctag;
    int n;
    enum AVCodecID id = AV_CODEC_ID_NONE;
    int64_t tag  = -1;

    /**
     * Check that tag + id is in the table
     * If neither is in the table -> OK
     * If tag is in the table with another id -> FAIL
     * If id is in the table with another tag -> FAIL unless strict < normal
     */
    for (n = 0; s->oformat->codec_tag[n]; n++) {
        avctag = s->oformat->codec_tag[n];
        while (avctag->id != AV_CODEC_ID_NONE) {
            if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codecpar->codec_tag)) {
                id = avctag->id;
                if (id == st->codecpar->codec_id)
                    return 1;
            }
            if (avctag->id == st->codecpar->codec_id)
                tag = avctag->tag;
            avctag++;
        }
    }
    if (id != AV_CODEC_ID_NONE)
        return 0;
    if (tag >= 0 && (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
        return 0;
    return 1;
}


static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;
    AVCodecParameters *par = NULL;
    AVOutputFormat *of = s->oformat;
    const AVCodecDescriptor *desc;
    AVDictionaryEntry *e;

    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;
    if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
        (ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;

#if FF_API_FORMAT_FILENAME
FF_DISABLE_DEPRECATION_WARNINGS
    if (!s->url && !(s->url = av_strdup(s->filename))) {
FF_ENABLE_DEPRECATION_WARNINGS
#else
    if (!s->url && !(s->url = av_strdup(""))) {
#endif
        ret = AVERROR(ENOMEM);
        goto fail;
    }

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
    if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT) {
        if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
            av_log(s, AV_LOG_WARNING,
                   "The AVFormatContext is not in set to bitexact mode, only "
                   "the AVCodecContext. If this is not intended, set "
                   "AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\n");
        }
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    // some sanity checks
    if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "No streams to mux were specified\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st  = s->streams[i];
        par = st->codecpar;

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
        if (st->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN &&
            st->codec->codec_type    != AVMEDIA_TYPE_UNKNOWN) {
            av_log(s, AV_LOG_WARNING, "Using AVStream.codec to pass codec "
                   "parameters to muxers is deprecated, use AVStream.codecpar "
                   "instead.\n");
            ret = avcodec_parameters_from_context(st->codecpar, st->codec);
            if (ret < 0)
                goto fail;
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        if (!st->time_base.num) {
            /* fall back on the default timebase values */
            if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate)
                avpriv_set_pts_info(st, 64, 1, par->sample_rate);
            else
                avpriv_set_pts_info(st, 33, 1, 90000);
        }

        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (par->sample_rate <= 0) {
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
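            /* av_get_bits_per_sample() returns the bits per coded sample for
             * fixed-size-sample codecs (e.g. PCM) and 0 otherwise, so
             * block_align is only derived here for codecs where it is
             * meaningful; for the rest it simply stays unset. */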
            if (!par->block_align)
                par->block_align = par->channels *
                                   av_get_bits_per_sample(par->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if ((par->width <= 0 || par->height <= 0) &&
                !(of->flags & AVFMT_NODIMENSIONS)) {
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio)
                && fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
            ) {
                if (st->sample_aspect_ratio.num != 0 &&
                    st->sample_aspect_ratio.den != 0 &&
                    par->sample_aspect_ratio.num != 0 &&
                    par->sample_aspect_ratio.den != 0) {
                    av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
                           "(%d/%d) and encoder layer (%d/%d)\n",
                           st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                           par->sample_aspect_ratio.num,
                           par->sample_aspect_ratio.den);
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
            }
            break;
        }

        desc = avcodec_descriptor_get(par->codec_id);
        if (desc && desc->props & AV_CODEC_PROP_REORDER)
            st->internal->reorder = 1;

        if (of->codec_tag) {
            if (   par->codec_tag
                && par->codec_id == AV_CODEC_ID_RAWVIDEO
                && (   av_codec_get_tag(of->codec_tag, par->codec_id) == 0
                    || av_codec_get_tag(of->codec_tag, par->codec_id) == MKTAG('r', 'a', 'w', ' '))
                && !validate_codec_tag(s, st)) {
                // the current rawvideo encoding system ends up setting
                // the wrong codec_tag for avi/mov, we override it here
                par->codec_tag = 0;
            }
            if (par->codec_tag) {
                if (!validate_codec_tag(s, st)) {
                    const uint32_t otag = av_codec_get_tag(s->oformat->codec_tag, par->codec_id);
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s incompatible with output codec id '%d' (%s)\n",
                           av_fourcc2str(par->codec_tag), par->codec_id, av_fourcc2str(otag));
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            } else
                par->codec_tag = av_codec_get_tag(of->codec_tag, par->codec_id);
        }

        if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT)
            s->internal->nb_interleaved_streams++;
    }

    if (!s->priv_data && of->priv_data_size > 0) {
        s->priv_data = av_mallocz(of->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (of->priv_class) {
            *(const AVClass **)s->priv_data = of->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    } else {
        av_dict_set(&s->metadata, "encoder", NULL, 0);
    }

    for (e = NULL; e = av_dict_get(s->metadata, "encoder-", e, AV_DICT_IGNORE_SUFFIX); ) {
        av_dict_set(&s->metadata, e->key, NULL, 0);
    }

    if (options) {
         av_dict_free(options);
         *options = tmp;
    }

    if (s->oformat->init) {
        if ((ret = s->oformat->init(s)) < 0) {
            if (s->oformat->deinit)
                s->oformat->deinit(s);
            return ret;
        }
        return ret == 0;
    }

    return 0;

fail:
    av_dict_free(&tmp);
    return ret;
}

static int init_pts(AVFormatContext *s)
{
    int i;
    AVStream *st;

    /* init PTS generation */
    for (i = 0; i < s->nb_streams; i++) {
        int64_t den = AV_NOPTS_VALUE;
        st = s->streams[i];

        switch (st->codecpar->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            den = (int64_t)st->time_base.num * st->codecpar->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            den = (int64_t)st->time_base.num * st->time_base.den;
            break;
        default:
            break;
        }

        if (!st->internal->priv_pts)
            st->internal->priv_pts = av_mallocz(sizeof(*st->internal->priv_pts));
        if (!st->internal->priv_pts)
            return AVERROR(ENOMEM);

        if (den != AV_NOPTS_VALUE) {
            if (den <= 0)
                return AVERROR_INVALIDDATA;

            frac_init(st->internal->priv_pts, 0, 0, den);
        }
    }

    if (s->avoid_negative_ts < 0) {
        av_assert2(s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO);
        if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
            s->avoid_negative_ts = 0;
        } else
            s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
    }

    return 0;
}

static void flush_if_needed(AVFormatContext *s)
{
    if (s->pb && s->pb->error >= 0) {
        if (s->flush_packets == 1 || s->flags & AVFMT_FLAG_FLUSH_PACKETS)
            avio_flush(s->pb);
        else if (s->flush_packets && !(s->oformat->flags & AVFMT_NOFILE))
            avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT);
    }
}

int avformat_init_output(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0;

    if ((ret = init_muxer(s, options)) < 0)
        return ret;

    s->internal->initialized = 1;
    s->internal->streams_initialized = ret;

    if (s->oformat->init && ret) {
        if ((ret = init_pts(s)) < 0)
            return ret;

        return AVSTREAM_INIT_IN_INIT_OUTPUT;
    }

    return AVSTREAM_INIT_IN_WRITE_HEADER;
}

int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0;
    int already_initialized = s->internal->initialized;
    int streams_already_initialized = s->internal->streams_initialized;

    if (!already_initialized)
        if ((ret = avformat_init_output(s, options)) < 0)
            return ret;

    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER);
    if (s->oformat->write_header) {
        ret = s->oformat->write_header(s);
        if (ret >= 0 && s->pb && s->pb->error < 0)
            ret = s->pb->error;
        if (ret < 0)
            goto fail;
        flush_if_needed(s);
    }
    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_UNKNOWN);

    if (!s->internal->streams_initialized) {
        if ((ret = init_pts(s)) < 0)
            goto fail;
    }

    return streams_already_initialized;

fail:
    if (s->oformat->deinit)
        s->oformat->deinit(s);
    return ret;
}

#define AV_PKT_FLAG_UNCODED_FRAME 0x2000

/* Note: using sizeof(AVFrame) from outside lavu is unsafe in general, but
   it is only being used internally to this file as a consistency check.
   The value is chosen to be very unlikely to appear on its own and to cause
   immediate failure if used anywhere as a real size. */
#define UNCODED_FRAME_PACKET_SIZE (INT_MIN / 3 * 2 + (int)sizeof(AVFrame))


#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
//FIXME merge with compute_pkt_fields
static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
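    /* Number of frames by which output may be reordered (B-frame delay);
       used below to bound how many buffered pts values are considered when
       deriving dts from pts. */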
    int delay = FFMAX(st->codecpar->video_delay, st->internal->avctx->max_b_frames > 0);
    int num, den, i;
    int frame_size;

    if (!s->internal->missing_ts_warning &&
        !(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
        (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC) || (st->disposition & AV_DISPOSITION_TIMED_THUMBNAILS)) &&
        (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE)) {
        av_log(s, AV_LOG_WARNING,
               "Timestamps are unset in a packet for stream %d. "
               "This is deprecated and will stop working in the future. "
               "Fix your code to set the timestamps properly\n", st->index);
        s->internal->missing_ts_warning = 1;
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "compute_muxer_pkt_fields: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
            av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);

    if (pkt->duration < 0 && st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
        av_log(s, AV_LOG_WARNING, "Packet with invalid duration %"PRId64" in stream %d\n",
               pkt->duration, pkt->stream_index);
        pkt->duration = 0;
    }

    /* duration field */
    if (pkt->duration == 0) {
        ff_compute_frame_duration(s, &num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
        }
    }

    if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
        pkt->pts = pkt->dts;

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) {
        static int warned;
        if (!warned) {
            av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
            warned = 1;
        }
        pkt->dts =
//        pkt->pts= st->cur_dts;
            pkt->pts = st->internal->priv_pts->val;
    }

    //calculate dts from pts
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        st->pts_buffer[0] = pkt->pts;
        for (i = 1; i < delay + 1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
        for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);

        pkt->dts = st->pts_buffer[0];
    }

    if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
        ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
          st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE &&
          st->codecpar->codec_type != AVMEDIA_TYPE_DATA &&
          st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
        av_log(s, AV_LOG_ERROR,
               "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
               st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
        return AVERROR(EINVAL);
    }
    if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
        av_log(s, AV_LOG_ERROR,
               "pts (%s) < dts (%s) in stream %d\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts),
               st->index);
        return AVERROR(EINVAL);
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n",
            av_ts2str(pkt->pts), av_ts2str(pkt->dts));

    st->cur_dts = pkt->dts;
    st->internal->priv_pts->val = pkt->dts;

    /* update pts */
    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        frame_size = (pkt->flags & AV_PKT_FLAG_UNCODED_FRAME) ?
                     ((AVFrame *)pkt->data)->nb_samples :
                     av_get_audio_frame_duration(st->codec, pkt->size);

        /* HACK/FIXME, we skip the initial 0 size packets as they are most
         * likely equal to the encoder delay, but it would be better if we
         * had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || st->internal->priv_pts->num != st->internal->priv_pts->den >> 1 || st->internal->priv_pts->val)) {
            frac_add(st->internal->priv_pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        frac_add(st->internal->priv_pts, (int64_t)st->time_base.den * st->time_base.num);
        break;
    }
    return 0;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif

/**
 * Make timestamps non-negative, move side data from payload to internal struct,
 * call the muxer, and restore the side data.
 *
 * FIXME: this function should NEVER get undefined pts/dts except when
 * AVFMT_NOTIMESTAMPS is set.
 * Those additional safety checks should be dropped once the correct checks
 * are set in the callers.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    int64_t pts_backup, dts_backup;

    pts_backup = pkt->pts;
    dts_backup = pkt->dts;

    // If the timestamp offsetting below is adjusted, adjust
    // ff_interleaved_peek similarly.
    if (s->output_ts_offset) {
        AVStream *st = s->streams[pkt->stream_index];
        int64_t offset = av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);

        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
    }

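    /* With avoid_negative_ts enabled, the first timestamped packet that is
     * negative (or the first one at all, for AVFMT_AVOID_NEG_TS_MAKE_ZERO)
     * establishes a global offset in its stream's time base; every stream then
     * caches that offset rescaled into its own time base in mux_ts_offset and
     * applies it to all of its packets. */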
    if (s->avoid_negative_ts > 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int64_t offset = st->mux_ts_offset;
        int64_t ts = s->internal->avoid_negative_ts_use_pts ? pkt->pts : pkt->dts;

        if (s->internal->offset == AV_NOPTS_VALUE && ts != AV_NOPTS_VALUE &&
            (ts < 0 || s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO)) {
            s->internal->offset = -ts;
            s->internal->offset_timebase = st->time_base;
        }

        if (s->internal->offset != AV_NOPTS_VALUE && !offset) {
            offset = st->mux_ts_offset =
                av_rescale_q_rnd(s->internal->offset,
                                 s->internal->offset_timebase,
                                 st->time_base,
                                 AV_ROUND_UP);
        }

        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;

        if (s->internal->avoid_negative_ts_use_pts) {
            if (pkt->pts != AV_NOPTS_VALUE && pkt->pts < 0) {
                av_log(s, AV_LOG_WARNING, "failed to avoid negative "
                    "pts %s in stream %d.\n"
                    "Try -avoid_negative_ts 1 as a possible workaround.\n",
                    av_ts2str(pkt->pts),
                    pkt->stream_index
                );
            }
        } else {
            av_assert2(pkt->dts == AV_NOPTS_VALUE || pkt->dts >= 0 || s->max_interleave_delta > 0);
            if (pkt->dts != AV_NOPTS_VALUE && pkt->dts < 0) {
                av_log(s, AV_LOG_WARNING,
                    "Packets poorly interleaved, failed to avoid negative "
                    "timestamp %s in stream %d.\n"
                    "Try -max_interleave_delta 0 as a possible workaround.\n",
                    av_ts2str(pkt->dts),
                    pkt->stream_index
                );
            }
        }
    }

    if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
        AVFrame *frame = (AVFrame *)pkt->data;
        av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE);
        ret = s->oformat->write_uncoded_frame(s, pkt->stream_index, &frame, 0);
        av_frame_free(&frame);
    } else {
        ret = s->oformat->write_packet(s, pkt);
    }

    if (s->pb && ret >= 0) {
        flush_if_needed(s);
        if (s->pb->error < 0)
            ret = s->pb->error;
    }

    if (ret < 0) {
        pkt->pts = pts_backup;
        pkt->dts = dts_backup;
    }

    return ret;
}

static int check_packet(AVFormatContext *s, AVPacket *pkt)
{
    if (!pkt)
        return 0;

    if (pkt->stream_index < 0 || pkt->stream_index >= s->nb_streams) {
        av_log(s, AV_LOG_ERROR, "Invalid packet stream index: %d\n",
               pkt->stream_index);
        return AVERROR(EINVAL);
    }

    if (s->streams[pkt->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
        av_log(s, AV_LOG_ERROR, "Received a packet for an attachment stream.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static int prepare_input_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret = check_packet(s, pkt);
    if (ret < 0)
        return ret;

#if !FF_API_COMPUTE_PKT_FIELDS2 || !FF_API_LAVF_AVCTX
    /* sanitize the timestamps */
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        AVStream *st = s->streams[pkt->stream_index];

        /* when there is no reordering (so dts is equal to pts), but
         * only one of them is set, set the other as well */
        if (!st->internal->reorder) {
            if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            if (pkt->dts == AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE)
                pkt->dts = pkt->pts;
        }

        /* check that the timestamps are set */
        if (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE) {
            av_log(s, AV_LOG_ERROR,
                   "Timestamps are unset in a packet for stream %d\n", st->index);
            return AVERROR(EINVAL);
        }

        /* check that the dts are increasing (or at least non-decreasing,
         * if the format allows it) */
        if (st->cur_dts != AV_NOPTS_VALUE &&
            ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) ||
             st->cur_dts > pkt->dts)) {
            av_log(s, AV_LOG_ERROR,
                   "Application provided invalid, non monotonically increasing "
                   "dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
                   st->index, st->cur_dts, pkt->dts);
            return AVERROR(EINVAL);
        }

        if (pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_ERROR, "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
                   pkt->pts, pkt->dts, st->index);
            return AVERROR(EINVAL);
        }
    }
#endif

    return 0;
}

static int do_packet_auto_bsf(AVFormatContext *s, AVPacket *pkt) {
    AVStream *st = s->streams[pkt->stream_index];
    int i, ret;

    if (!(s->flags & AVFMT_FLAG_AUTO_BSF))
        return 1;

    if (s->oformat->check_bitstream) {
        if (!st->internal->bitstream_checked) {
            if ((ret = s->oformat->check_bitstream(s, pkt)) < 0)
                return ret;
            else if (ret == 1)
                st->internal->bitstream_checked = 1;
        }
    }

    for (i = 0; i < st->internal->nb_bsfcs; i++) {
        AVBSFContext *ctx = st->internal->bsfcs[i];
        // TODO: when any bitstream filter requires flushing at EOF, we'll need to
        // flush each stream's BSF chain on write_trailer.
        if ((ret = av_bsf_send_packet(ctx, pkt)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                    "Failed to send packet to filter %s for stream %d\n",
                    ctx->filter->name, pkt->stream_index);
            return ret;
        }
        // TODO: when any automatically-added bitstream filter is generating multiple
        // output packets for a single input one, we'll need to call this in a loop
        // and write each output packet.
        if ((ret = av_bsf_receive_packet(ctx, pkt)) < 0) {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;
            av_log(ctx, AV_LOG_ERROR,
                    "Failed to receive packet from filter %s for stream %d\n",
                    ctx->filter->name, pkt->stream_index);
            if (s->error_recognition & AV_EF_EXPLODE)
                return ret;
            return 0;
        }
    }
    return 1;
}

int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret = prepare_input_packet(s, pkt);
    if (ret < 0)
        return ret;

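    /* A NULL packet asks the muxer to flush: muxers that declare
     * AVFMT_ALLOW_FLUSH get a NULL write_packet() call, otherwise 1 is
     * returned to indicate that nothing remains to be flushed. */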
    if (!pkt) {
        if (s->oformat->flags & AVFMT_ALLOW_FLUSH) {
            ret = s->oformat->write_packet(s, NULL);
            flush_if_needed(s);
            if (ret >= 0 && s->pb && s->pb->error < 0)
                ret = s->pb->error;
            return ret;
        }
        return 1;
    }

    ret = do_packet_auto_bsf(s, pkt);
    if (ret <= 0)
        return ret;

#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
    ret = compute_muxer_pkt_fields(s, s->streams[pkt->stream_index], pkt);

    if (ret < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return ret;
#endif

    ret = write_packet(s, pkt);
    if (ret >= 0 && s->pb && s->pb->error < 0)
        ret = s->pb->error;

    if (ret >= 0)
        s->streams[pkt->stream_index]->nb_frames++;
    return ret;
}

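/* Internal flag set on the queued copy of a packet to mark the start of a new
 * interleaving chunk; the insertion logic below never moves a packet across a
 * chunk boundary. */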
#define CHUNK_START 0x1000

int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                             int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
{
    int ret;
    AVPacketList **next_point, *this_pktl;
    AVStream *st   = s->streams[pkt->stream_index];
    int chunked    = s->max_chunk_size || s->max_chunk_duration;

    this_pktl      = av_mallocz(sizeof(AVPacketList));
    if (!this_pktl)
        return AVERROR(ENOMEM);
    if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
        av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE);
        av_assert0(((AVFrame *)pkt->data)->buf);
        this_pktl->pkt = *pkt;
        pkt->buf = NULL;
        pkt->side_data = NULL;
        pkt->side_data_elems = 0;
    } else {
        if ((ret = av_packet_ref(&this_pktl->pkt, pkt)) < 0) {
            av_free(this_pktl);
            return ret;
        }
    }

    if (s->streams[pkt->stream_index]->last_in_packet_buffer) {
        next_point = &(st->last_in_packet_buffer->next);
    } else {
        next_point = &s->internal->packet_buffer;
    }

    if (chunked) {
        uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
        st->interleaver_chunk_size     += pkt->size;
        st->interleaver_chunk_duration += pkt->duration;
        if (   (s->max_chunk_size && st->interleaver_chunk_size > s->max_chunk_size)
            || (max && st->interleaver_chunk_duration           > max)) {
            st->interleaver_chunk_size      = 0;
            this_pktl->pkt.flags |= CHUNK_START;
            if (max && st->interleaver_chunk_duration > max) {
                int64_t syncoffset = (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)*max/2;
                int64_t syncto = av_rescale(pkt->dts + syncoffset, 1, max)*max - syncoffset;

                st->interleaver_chunk_duration += (pkt->dts - syncto)/8 - max;
            } else
                st->interleaver_chunk_duration = 0;
        }
    }
    if (*next_point) {
        if (chunked && !(this_pktl->pkt.flags & CHUNK_START))
            goto next_non_null;

        if (compare(s, &s->internal->packet_buffer_end->pkt, pkt)) {
            while (   *next_point
                   && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
                       || !compare(s, &(*next_point)->pkt, pkt)))
                next_point = &(*next_point)->next;
            if (*next_point)
                goto next_non_null;
        } else {
            next_point = &(s->internal->packet_buffer_end->next);
        }
    }
    av_assert1(!*next_point);

    s->internal->packet_buffer_end = this_pktl;
next_non_null:

    this_pktl->next = *next_point;

    s->streams[pkt->stream_index]->last_in_packet_buffer =
        *next_point                                      = this_pktl;

    av_packet_unref(pkt);

    return 0;
}

static int interleave_compare_dts(AVFormatContext *s, AVPacket *next,
                                  AVPacket *pkt)
{
    AVStream *st  = s->streams[pkt->stream_index];
    AVStream *st2 = s->streams[next->stream_index];
    int comp      = av_compare_ts(next->dts, st2->time_base, pkt->dts,
                                  st->time_base);
    if (s->audio_preload && ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))) {
        int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO);
        int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO);
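        /* If the preload-adjusted timestamps collide after rescaling to
         * AV_TIME_BASE, redo the comparison exactly with cross-multiplied
         * integer arithmetic so rounding cannot decide the ordering. */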
        if (ts == ts2) {
            ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
               -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
            ts2=0;
        }
        comp= (ts>ts2) - (ts<ts2);
    }

    if (comp == 0)
        return pkt->stream_index < next->stream_index;
    return comp > 0;
}

int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
                                 AVPacket *pkt, int flush)
{
    AVPacketList *pktl;
    int stream_count = 0;
    int noninterleaved_count = 0;
    int i, ret;
    int eof = flush;

    if (pkt) {
        if ((ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts)) < 0)
            return ret;
    }

    for (i = 0; i < s->nb_streams; i++) {
        if (s->streams[i]->last_in_packet_buffer) {
            ++stream_count;
        } else if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_ATTACHMENT &&
                   s->streams[i]->codecpar->codec_id != AV_CODEC_ID_VP8 &&
                   s->streams[i]->codecpar->codec_id != AV_CODEC_ID_VP9) {
            ++noninterleaved_count;
        }
    }

    if (s->internal->nb_interleaved_streams == stream_count)
        flush = 1;

    if (s->max_interleave_delta > 0 &&
        s->internal->packet_buffer &&
        !flush &&
        s->internal->nb_interleaved_streams == stream_count+noninterleaved_count
    ) {
        AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
        int64_t delta_dts = INT64_MIN;
        int64_t top_dts = av_rescale_q(top_pkt->dts,
                                       s->streams[top_pkt->stream_index]->time_base,
                                       AV_TIME_BASE_Q);

        for (i = 0; i < s->nb_streams; i++) {
            int64_t last_dts;
            const AVPacketList *last = s->streams[i]->last_in_packet_buffer;

            if (!last)
                continue;

            last_dts = av_rescale_q(last->pkt.dts,
                                    s->streams[i]->time_base,
                                    AV_TIME_BASE_Q);
            delta_dts = FFMAX(delta_dts, last_dts - top_dts);
        }

        if (delta_dts > s->max_interleave_delta) {
            av_log(s, AV_LOG_DEBUG,
                   "Delay between the first packet and last packet in the "
                   "muxing queue is %"PRId64" > %"PRId64": forcing output\n",
                   delta_dts, s->max_interleave_delta);
            flush = 1;
        }
    }

    if (s->internal->packet_buffer &&
        eof &&
        (s->flags & AVFMT_FLAG_SHORTEST) &&
        s->internal->shortest_end == AV_NOPTS_VALUE) {
        AVPacket *top_pkt = &s->internal->packet_buffer->pkt;

        s->internal->shortest_end = av_rescale_q(top_pkt->dts,
                                       s->streams[top_pkt->stream_index]->time_base,
                                       AV_TIME_BASE_Q);
    }

    if (s->internal->shortest_end != AV_NOPTS_VALUE) {
        while (s->internal->packet_buffer) {
            AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
            AVStream *st;
            int64_t top_dts = av_rescale_q(top_pkt->dts,
                                        s->streams[top_pkt->stream_index]->time_base,
                                        AV_TIME_BASE_Q);

            if (s->internal->shortest_end + 1 >= top_dts)
                break;

            pktl = s->internal->packet_buffer;
            st   = s->streams[pktl->pkt.stream_index];

            s->internal->packet_buffer = pktl->next;
            if (!s->internal->packet_buffer)
                s->internal->packet_buffer_end = NULL;

            if (st->last_in_packet_buffer == pktl)
                st->last_in_packet_buffer = NULL;

            av_packet_unref(&pktl->pkt);
            av_freep(&pktl);
            flush = 0;
        }
    }

    if (stream_count && flush) {
        AVStream *st;
        pktl = s->internal->packet_buffer;
        *out = pktl->pkt;
        st   = s->streams[out->stream_index];

        s->internal->packet_buffer = pktl->next;
        if (!s->internal->packet_buffer)
            s->internal->packet_buffer_end = NULL;

        if (st->last_in_packet_buffer == pktl)
            st->last_in_packet_buffer = NULL;
        av_freep(&pktl);

        return 1;
    } else {
        av_init_packet(out);
        return 0;
    }
}

int ff_interleaved_peek(AVFormatContext *s, int stream,
                        AVPacket *pkt, int add_offset)
{
    AVPacketList *pktl = s->internal->packet_buffer;
    while (pktl) {
        if (pktl->pkt.stream_index == stream) {
            *pkt = pktl->pkt;
            if (add_offset) {
                AVStream *st = s->streams[pkt->stream_index];
                int64_t offset = st->mux_ts_offset;

                if (s->output_ts_offset)
                    offset += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);

                if (pkt->dts != AV_NOPTS_VALUE)
                    pkt->dts += offset;
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts += offset;
            }
            return 0;
        }
        pktl = pktl->next;
    }
    return AVERROR(ENOENT);
}

/**
 * Interleave an AVPacket correctly so it can be muxed.
 * @param out the interleaved packet will be output here
 * @param in the input packet
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
 */
static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush)
{
    if (s->oformat->interleave_packet) {
        int ret = s->oformat->interleave_packet(s, out, in, flush);
        if (in)
            av_packet_unref(in);
        return ret;
    } else
        return ff_interleave_packet_per_dts(s, out, in, flush);
}

int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret, flush = 0;

    ret = prepare_input_packet(s, pkt);
    if (ret < 0)
        goto fail;

    if (pkt) {
        AVStream *st = s->streams[pkt->stream_index];

        ret = do_packet_auto_bsf(s, pkt);
        if (ret == 0)
            return 0;
        else if (ret < 0)
            goto fail;

        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
                pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));

#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
        if ((ret = compute_muxer_pkt_fields(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
            goto fail;
#endif

        if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
    } else {
        av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame FLUSH\n");
        flush = 1;
    }

    for (;; ) {
        AVPacket opkt;
        int ret = interleave_packet(s, &opkt, pkt, flush);
        if (pkt) {
            memset(pkt, 0, sizeof(*pkt));
            av_init_packet(pkt);
            pkt = NULL;
        }
        if (ret <= 0) //FIXME cleanup needed for ret<0 ?
            return ret;

        ret = write_packet(s, &opkt);
        if (ret >= 0)
            s->streams[opkt.stream_index]->nb_frames++;

        av_packet_unref(&opkt);

        if (ret < 0)
            return ret;
        if (s->pb && s->pb->error)
            return s->pb->error;
    }
fail:
    av_packet_unref(pkt);
    return ret;
}

int av_write_trailer(AVFormatContext *s)
{
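    /* Drain whatever is still queued in the interleaving buffer, then write
     * the trailer and release the muxer's private state. */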
    int ret, i;

    for (;; ) {
        AVPacket pkt;
        ret = interleave_packet(s, &pkt, NULL, 1);
        if (ret < 0)
            goto fail;
        if (!ret)
            break;

        ret = write_packet(s, &pkt);
        if (ret >= 0)
            s->streams[pkt.stream_index]->nb_frames++;

        av_packet_unref(&pkt);

        if (ret < 0)
            goto fail;
        if (s->pb && s->pb->error)
            goto fail;
    }

fail:
    if (s->oformat->write_trailer) {
        if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
            avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_TRAILER);
        if (ret >= 0) {
            ret = s->oformat->write_trailer(s);
        } else {
            s->oformat->write_trailer(s);
        }
    }

    if (s->oformat->deinit)
        s->oformat->deinit(s);

    s->internal->initialized =
    s->internal->streams_initialized = 0;

    if (s->pb)
       avio_flush(s->pb);
    if (ret == 0)
       ret = s->pb ? s->pb->error : 0;
    for (i = 0; i < s->nb_streams; i++) {
        av_freep(&s->streams[i]->priv_data);
        av_freep(&s->streams[i]->index_entries);
    }
    if (s->oformat->priv_class)
        av_opt_free(s->priv_data);
    av_freep(&s->priv_data);
    return ret;
}

int av_get_output_timestamp(struct AVFormatContext *s, int stream,
                            int64_t *dts, int64_t *wall)
{
    if (!s->oformat || !s->oformat->get_output_timestamp)
        return AVERROR(ENOSYS);
    s->oformat->get_output_timestamp(s, stream, dts, wall);
    return 0;
}

int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src, int interleave)
{
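    /* Forward a packet from one muxing context to another: rescale
     * pts/dts/duration from the source stream's time base into the
     * destination stream's, then hand the packet to the destination muxer. */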
    AVPacket local_pkt;
    int ret;

    local_pkt = *pkt;
    local_pkt.stream_index = dst_stream;
    if (pkt->pts != AV_NOPTS_VALUE)
        local_pkt.pts = av_rescale_q(pkt->pts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->dts != AV_NOPTS_VALUE)
        local_pkt.dts = av_rescale_q(pkt->dts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->duration)
        local_pkt.duration = av_rescale_q(pkt->duration,
                                          src->streams[pkt->stream_index]->time_base,
                                          dst->streams[dst_stream]->time_base);

    if (interleave) ret = av_interleaved_write_frame(dst, &local_pkt);
    else            ret = av_write_frame(dst, &local_pkt);
    pkt->buf = local_pkt.buf;
    pkt->side_data       = local_pkt.side_data;
    pkt->side_data_elems = local_pkt.side_data_elems;
    return ret;
}

static int av_write_uncoded_frame_internal(AVFormatContext *s, int stream_index,
                                           AVFrame *frame, int interleaved)
{
    AVPacket pkt, *pktp;

    av_assert0(s->oformat);
    if (!s->oformat->write_uncoded_frame)
        return AVERROR(ENOSYS);

    if (!frame) {
        pktp = NULL;
    } else {
        pktp = &pkt;
        av_init_packet(&pkt);
        pkt.data = (void *)frame;
        pkt.size         = UNCODED_FRAME_PACKET_SIZE;
        pkt.pts          =
        pkt.dts          = frame->pts;
        pkt.duration     = frame->pkt_duration;
        pkt.stream_index = stream_index;
        pkt.flags |= AV_PKT_FLAG_UNCODED_FRAME;
    }

    return interleaved ? av_interleaved_write_frame(s, pktp) :
                         av_write_frame(s, pktp);
}

int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
                           AVFrame *frame)
{
    return av_write_uncoded_frame_internal(s, stream_index, frame, 0);
}

int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
                                       AVFrame *frame)
{
    return av_write_uncoded_frame_internal(s, stream_index, frame, 1);
}

int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index)
{
    av_assert0(s->oformat);
    if (!s->oformat->write_uncoded_frame)
        return AVERROR(ENOSYS);
    return s->oformat->write_uncoded_frame(s, stream_index, NULL,
                                           AV_WRITE_UNCODED_FRAME_QUERY);
}