Commit d1afb2f9 authored by Justin Ruggles

libfaac: use AVCodec.encode2()

Encoder output is delayed by several frames, so we keep a queue of input
frame timing info to match up with corresponding output packets.
parent 59041fd0
...@@ -581,7 +581,7 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ...@@ -581,7 +581,7 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
# external codec libraries # external codec libraries
OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o audio_frame_queue.o
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
......
...@@ -24,11 +24,19 @@ ...@@ -24,11 +24,19 @@
* Interface to libfaac for aac encoding. * Interface to libfaac for aac encoding.
*/ */
#include "avcodec.h"
#include <faac.h> #include <faac.h>
#include "avcodec.h"
#include "audio_frame_queue.h"
#include "internal.h"
/* libfaac has an encoder delay of 1024 samples */
#define FAAC_DELAY_SAMPLES 1024

/* Private encoder state stored in AVCodecContext.priv_data. */
typedef struct FaacAudioContext {
    faacEncHandle faac_handle; /* opaque libfaac encoder handle; released with faacEncClose() */
    AudioFrameQueue afq;       /* queue of input frame timing info, matched to delayed output packets */
} FaacAudioContext;
...@@ -36,11 +44,15 @@ static av_cold int Faac_encode_close(AVCodecContext *avctx) ...@@ -36,11 +44,15 @@ static av_cold int Faac_encode_close(AVCodecContext *avctx)
{ {
FaacAudioContext *s = avctx->priv_data; FaacAudioContext *s = avctx->priv_data;
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame); av_freep(&avctx->coded_frame);
#endif
av_freep(&avctx->extradata); av_freep(&avctx->extradata);
ff_af_queue_close(&s->afq);
if (s->faac_handle) if (s->faac_handle)
faacEncClose(s->faac_handle); faacEncClose(s->faac_handle);
return 0; return 0;
} }
...@@ -109,11 +121,13 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx) ...@@ -109,11 +121,13 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
avctx->frame_size = samples_input / avctx->channels; avctx->frame_size = samples_input / avctx->channels;
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame= avcodec_alloc_frame();
if (!avctx->coded_frame) { if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM); ret = AVERROR(ENOMEM);
goto error; goto error;
} }
#endif
/* Set decoder specific info */ /* Set decoder specific info */
avctx->extradata_size = 0; avctx->extradata_size = 0;
...@@ -144,26 +158,52 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx) ...@@ -144,26 +158,52 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
goto error; goto error;
} }
avctx->delay = FAAC_DELAY_SAMPLES;
ff_af_queue_init(avctx, &s->afq);
return 0; return 0;
error: error:
Faac_encode_close(avctx); Faac_encode_close(avctx);
return ret; return ret;
} }
static int Faac_encode_frame(AVCodecContext *avctx, static int Faac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
unsigned char *frame, int buf_size, void *data) const AVFrame *frame, int *got_packet_ptr)
{ {
FaacAudioContext *s = avctx->priv_data; FaacAudioContext *s = avctx->priv_data;
int bytes_written; int bytes_written, ret;
int num_samples = data ? avctx->frame_size : 0; int num_samples = frame ? frame->nb_samples : 0;
void *samples = frame ? frame->data[0] : NULL;
bytes_written = faacEncEncode(s->faac_handle, if ((ret = ff_alloc_packet(avpkt, (7 + 768) * avctx->channels))) {
data, av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
return ret;
}
bytes_written = faacEncEncode(s->faac_handle, samples,
num_samples * avctx->channels, num_samples * avctx->channels,
frame, avpkt->data, avpkt->size);
buf_size); if (bytes_written < 0) {
av_log(avctx, AV_LOG_ERROR, "faacEncEncode() error\n");
return bytes_written;
}
/* add current frame to the queue */
if (frame) {
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
return ret;
}
return bytes_written; if (!bytes_written)
return 0;
/* Get the next frame pts/duration */
ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
&avpkt->duration);
avpkt->size = bytes_written;
*got_packet_ptr = 1;
return 0;
} }
static const AVProfile profiles[] = { static const AVProfile profiles[] = {
...@@ -180,7 +220,7 @@ AVCodec ff_libfaac_encoder = { ...@@ -180,7 +220,7 @@ AVCodec ff_libfaac_encoder = {
.id = CODEC_ID_AAC, .id = CODEC_ID_AAC,
.priv_data_size = sizeof(FaacAudioContext), .priv_data_size = sizeof(FaacAudioContext),
.init = Faac_encode_init, .init = Faac_encode_init,
.encode = Faac_encode_frame, .encode2 = Faac_encode_frame,
.close = Faac_encode_close, .close = Faac_encode_close,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY, .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment