/*
 * Libavformat API example: Output a media file in any supported
 * libavformat format. The default codecs are used.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.1415926535897931
#endif

#include "avformat.h"
#include "swscale.h"

/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

/* scaler quality/speed tradeoff passed to sws_getContext() */
static int sws_flags = SWS_BICUBIC;

/**************************************************************/
/* audio output */

/* sine-sweep generator state: current phase, per-sample phase
   increment, and per-sample increment of that increment (the sweep) */
float t, tincr, tincr2;
int16_t *samples;           /* raw PCM input buffer fed to the encoder */
uint8_t *audio_outbuf;      /* encoded audio output buffer */
int audio_outbuf_size;      /* size of audio_outbuf in bytes */
int audio_input_frame_size; /* samples per channel per encoder frame */

54
/*
55 56
 * add an audio output stream
 */
57
static AVStream *add_audio_stream(AVFormatContext *oc, int codec_id)
58 59 60 61 62 63 64 65 66 67
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 1);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

68
    c = st->codec;
69
    c->codec_id = codec_id;
70 71 72 73 74 75
    c->codec_type = CODEC_TYPE_AUDIO;

    /* put sample parameters */
    c->bit_rate = 64000;
    c->sample_rate = 44100;
    c->channels = 2;
76 77 78
    return st;
}

79
static void open_audio(AVFormatContext *oc, AVStream *st)
80 81 82 83
{
    AVCodecContext *c;
    AVCodec *codec;

84
    c = st->codec;
85 86 87 88 89 90 91

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }
92 93 94 95 96 97 98 99 100

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
101 102 103
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
104 105

    audio_outbuf_size = 10000;
106
    audio_outbuf = av_malloc(audio_outbuf_size);
107 108 109 110 111

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
       support to compute the input frame size in samples */
    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
112
        switch(st->codec->codec_id) {
113 114 115 116 117 118 119 120 121 122 123 124
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
125
    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
126 127
}

128 129
/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
   'nb_channels' channels */
130
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
131 132 133 134 135 136 137 138 139 140 141 142 143 144
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for(j=0;j<frame_size;j++) {
        v = (int)(sin(t) * 10000);
        for(i = 0; i < nb_channels; i++)
            *q++ = v;
        t += tincr;
        tincr += tincr2;
    }
}

145
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
146 147
{
    AVCodecContext *c;
148 149
    AVPacket pkt;
    av_init_packet(&pkt);
150

151
    c = st->codec;
152

153 154
    get_audio_frame(samples, audio_input_frame_size, c->channels);

155 156
    pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);

157
    pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
158 159 160
    pkt.flags |= PKT_FLAG_KEY;
    pkt.stream_index= st->index;
    pkt.data= audio_outbuf;
161 162

    /* write the compressed frame in the media file */
163
    if (av_write_frame(oc, &pkt) != 0) {
164 165 166 167 168
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}

169
static void close_audio(AVFormatContext *oc, AVStream *st)
170
{
171
    avcodec_close(st->codec);
172

173 174 175 176
    av_free(samples);
    av_free(audio_outbuf);
}

177 178 179
/**************************************************************/
/* video output */

180
AVFrame *picture, *tmp_picture;
181 182 183 184
uint8_t *video_outbuf;
int frame_count, video_outbuf_size;

/* add a video output stream */
185
static AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
186 187 188 189 190 191 192 193 194
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 0);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }
195

196
    c = st->codec;
197
    c->codec_id = codec_id;
198 199 200 201 202
    c->codec_type = CODEC_TYPE_VIDEO;

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
203
    c->width = 352;
204
    c->height = 288;
205 206 207 208
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
209
    c->time_base.den = STREAM_FRAME_RATE;
210
    c->time_base.num = 1;
211
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
212
    c->pix_fmt = STREAM_PIX_FMT;
213
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
214 215 216
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
217
    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
218 219
        /* needed to avoid using macroblocks in which some coeffs overflow
           this doesnt happen with normal video, it just happens here as the
220 221 222
           motion of the chroma plane doesnt match the luma plane */
        c->mb_decision=2;
    }
223 224 225
    // some formats want stream headers to be seperate
    if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
226

227 228 229
    return st;
}

230
static AVFrame *alloc_picture(int pix_fmt, int width, int height)
231 232 233 234
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;
235

236 237 238 239
    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
240
    picture_buf = av_malloc(size);
241 242 243 244
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
245
    avpicture_fill((AVPicture *)picture, picture_buf,
246 247 248
                   pix_fmt, width, height);
    return picture;
}
249

250
static void open_video(AVFormatContext *oc, AVStream *st)
251 252 253 254
{
    AVCodec *codec;
    AVCodecContext *c;

255
    c = st->codec;
256 257 258 259 260 261 262 263 264

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
265 266 267 268 269
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

270 271 272 273
    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
274 275 276 277
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
278
        video_outbuf_size = 200000;
279
        video_outbuf = av_malloc(video_outbuf_size);
280
    }
281

282 283 284 285 286 287
    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }
288

289 290 291 292 293 294 295 296 297 298 299 300
    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
301

302
/* prepare a dummy image */
303
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
304
{
305 306 307
    int x, y, i;

    i = frame_index;
308 309

    /* Y */
310 311 312
    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
313 314
        }
    }
315

316
    /* Cb and Cr */
317 318 319 320
    for(y=0;y<height/2;y++) {
        for(x=0;x<width/2;x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
321 322
        }
    }
323
}
324

325
static void write_video_frame(AVFormatContext *oc, AVStream *st)
326 327 328
{
    int out_size, ret;
    AVCodecContext *c;
329
    static struct SwsContext *img_convert_ctx;
330

331
    c = st->codec;
332

333 334 335
    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
336
           passing the same picture again */
337
    } else {
338 339 340
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
341 342 343 344 345 346 347 348 349 350 351
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
352
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
353 354
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
355 356 357
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
358 359
    }

360

361 362 363
    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           futur for that */
364 365
        AVPacket pkt;
        av_init_packet(&pkt);
366

367 368
        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index= st->index;
369
        pkt.data= (uint8_t *)picture;
370
        pkt.size= sizeof(AVPicture);
371

372
        ret = av_write_frame(oc, &pkt);
373 374
    } else {
        /* encode the image */
375
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
376
        /* if zero size, it means the image was buffered */
Michael Niedermayer's avatar
Michael Niedermayer committed
377
        if (out_size > 0) {
378 379
            AVPacket pkt;
            av_init_packet(&pkt);
380

381
            pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
382 383 384 385 386
            if(c->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;
387

388
            /* write the compressed frame in the media file */
389
            ret = av_write_frame(oc, &pkt);
390 391 392
        } else {
            ret = 0;
        }
393 394
    }
    if (ret != 0) {
395 396 397
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
398 399 400
    frame_count++;
}

401
static void close_video(AVFormatContext *oc, AVStream *st)
402
{
403
    avcodec_close(st->codec);
404 405 406 407 408 409 410
    av_free(picture->data[0]);
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
411 412 413 414 415 416 417 418 419 420
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
421
    AVStream *audio_st, *video_st;
422
    double audio_pts, video_pts;
423 424
    int i;

425 426
    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();
427

428 429
    if (argc != 2) {
        printf("usage: %s output_file\n"
430 431 432
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
433 434 435
               "\n", argv[0]);
        exit(1);
    }
436

437 438 439 440 441 442 443 444 445 446 447 448 449
    filename = argv[1];

    /* auto detect the output format from the name. default is
       mpeg. */
    fmt = guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }
450

451
    /* allocate the output media context */
452
    oc = av_alloc_format_context();
453 454 455 456 457
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
458
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
459 460 461 462 463 464 465 466 467 468 469 470

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

471 472 473 474 475 476 477
    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

478 479
    dump_format(oc, 0, filename, 1);

480 481 482 483 484 485 486
    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

487 488 489 490 491 492 493
    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }
494

495 496
    /* write the stream header, if any */
    av_write_header(oc);
497

498 499 500
    for(;;) {
        /* compute current audio and video time */
        if (audio_st)
Michael Niedermayer's avatar
Michael Niedermayer committed
501
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
502 503
        else
            audio_pts = 0.0;
504

505
        if (video_st)
Michael Niedermayer's avatar
Michael Niedermayer committed
506
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
507 508 509
        else
            video_pts = 0.0;

510
        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
511 512
            (!video_st || video_pts >= STREAM_DURATION))
            break;
513

514
        /* write interleaved audio and video frames */
515
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
516 517 518 519 520 521 522
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* close each codec */
523 524 525 526
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);
527 528 529

    /* write the trailer, if any */
    av_write_trailer(oc);
530

531 532
    /* free the streams */
    for(i = 0; i < oc->nb_streams; i++) {
533
        av_freep(&oc->streams[i]->codec);
534 535 536
        av_freep(&oc->streams[i]);
    }

537 538 539 540 541 542 543 544 545 546
    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(&oc->pb);
    }

    /* free the stream */
    av_free(oc);

    return 0;
}