Commit 26509173 authored by NzSN's avatar NzSN

update

parent cd3b010e
#include <stdio.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#define INPUT_FMT AV_SAMPLE_FMT_FLTP
#define INPUT_SAMPLE_RATE 44100
#define INPUT_TB_DEN 14112000
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_STEREO
uint8_t errstr[1024];
/* Per-file bundle of the FFmpeg objects needed to decode or encode one
 * audio stream. Filled in by prepare_audio_decoder()/prepare_audio_encoder(). */
typedef struct AudioStreamingContext {
    AVFormatContext *fmt;   /* container (demuxer or muxer) context */
    AVStream *s;            /* the single stream this context works on */
    AVCodec *c;             /* codec implementation */
    AVCodecContext *cc;     /* opened codec context */
    char *fileName;         /* path of the media file (not owned here) */
} AudioStreamingContext;
/* Handles into the configured mixing graph: one buffer source per input
 * file and the single buffer sink the mixed frames are pulled from. */
typedef struct FilteringContext {
    AVFilterGraph *graph;   /* the filter graph that owns the contexts below */
    AVFilterContext *src;   /* abuffer source for input 1 */
    AVFilterContext *src_;  /* abuffer source for input 2 */
    AVFilterContext *sink;  /* abuffersink the mixed output is read from */
} FilteringContext;
int prepare_audio_encoder(AudioStreamingContext *encoder, AudioStreamingContext *decoder);
int prepare_audio_decoder(AudioStreamingContext *decoder);
int add_to_src(AudioStreamingContext *decoder, AVFilterContext *src, AVPacket *input_packet);
int process_filtered_frames(AudioStreamingContext *encoder, AVFrame *frame);
/*
 * Build the mixing filter graph: abuffer("SRC") + abuffer("src_2") -> amix -> abuffersink.
 *
 * On success the graph and the three filter contexts are returned through the
 * out-parameters and 0 is returned. On failure the partially-built graph is
 * freed and a non-zero value (1 or a negative AVERROR) is returned.
 */
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
                             AVFilterContext **src_,
                             AVFilterContext **sink,
                             AudioStreamingContext *decoder_1,
                             AudioStreamingContext *decoder_2) {
    AVFilterContext *abuffer_first_ctx = NULL;
    AVFilterContext *abuffer_second_ctx = NULL;
    AVFilterContext *amix_ctx = NULL;
    AVFilterContext *abuffersink_ctx = NULL;
    const AVFilter *abuffer;
    const AVFilter *amix;
    const AVFilter *abuffersink;
    uint8_t ch_layout[64];
    int err = 0;
    AVCodecContext *dec_ctx = decoder_1->cc;
    AVCodecContext *dec_ctx_ = decoder_2->cc;
    /* Each source feeds the graph in its own stream's time base. */
    AVRational time_base = decoder_1->fmt->streams[0]->time_base;
    AVRational time_base_ = decoder_2->fmt->streams[0]->time_base;

    AVFilterGraph *filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        printf("Unable to create filter graph.\n");
        return 1;
    }

    /* Both sources use the same "abuffer" filter definition. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer) {
        printf("Unable to find the abuffer filter\n");
        err = 1;
        goto fail;
    }

    /* ---- first source ---- */
    abuffer_first_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "SRC");
    if (!abuffer_first_ctx) {
        printf("Could not allocate the abuffer instance.\n");
        err = 1;
        goto fail;
    }
    av_get_channel_layout_string((char *)ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
    av_opt_set    (abuffer_first_ctx, "channel_layout", (char *)ch_layout, AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_first_ctx, "sample_fmt", av_get_sample_fmt_name(dec_ctx->sample_fmt), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_first_ctx, "time_base", time_base, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_first_ctx, "sample_rate", dec_ctx->sample_rate, AV_OPT_SEARCH_CHILDREN);
    err = avfilter_init_str(abuffer_first_ctx, NULL);
    if (err < 0) {
        printf("Could not initialize the abuffer filter.\n");
        goto fail;
    }

    /* ---- second source ---- */
    abuffer_second_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src_2");
    if (!abuffer_second_ctx) {
        printf("Could not allocate the abuffer instance.\n");
        err = 1;
        goto fail;
    }
    av_get_channel_layout_string((char *)ch_layout, sizeof(ch_layout), 0, AV_CH_LAYOUT_STEREO);
    av_opt_set    (abuffer_second_ctx, "channel_layout", (char *)ch_layout, AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_second_ctx, "sample_fmt", av_get_sample_fmt_name(dec_ctx_->sample_fmt), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_second_ctx, "time_base", time_base_, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_second_ctx, "sample_rate", dec_ctx_->sample_rate, AV_OPT_SEARCH_CHILDREN);
    err = avfilter_init_str(abuffer_second_ctx, NULL);
    if (err < 0) {
        printf("Could not initialize the abuffer filter.\n");
        goto fail;
    }

    /* ---- amix ---- */
    amix = avfilter_get_by_name("amix");
    if (!amix) {  /* BUG FIX: was testing the then-uninitialized amix_ctx */
        printf("Unable to find the amix filter\n");
        err = 1;
        goto fail;
    }
    amix_ctx = avfilter_graph_alloc_filter(filter_graph, amix, "amix");
    if (!amix_ctx) {
        printf("Could not allocate the amix instance.\n");
        err = 1;
        goto fail;
    }
    /* BUG FIX: the amix option is named "inputs", not "input". */
    av_opt_set_int(amix_ctx, "inputs", 2, AV_OPT_SEARCH_CHILDREN);
    err = avfilter_init_str(amix_ctx, NULL);
    if (err < 0) {
        printf("Could not initialize the amix filter.\n");
        goto fail;
    }

    /* ---- sink ---- */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        printf("Unable to find the abuffersink.\n");
        err = 1;
        goto fail;
    }
    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "abuffersink");
    if (!abuffersink_ctx) {
        printf("Could not allocate the abuffersink.\n");
        err = 1;
        goto fail;
    }
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        printf("Could not initialize the abuffersink instance.\n");
        goto fail;
    }

    /* ---- wire the filters together ----
     * BUG FIX: the second and third link results were silently dropped. */
    err = avfilter_link(abuffer_first_ctx, 0, amix_ctx, 0);
    if (err >= 0)
        err = avfilter_link(abuffer_second_ctx, 0, amix_ctx, 1);
    if (err >= 0)
        err = avfilter_link(amix_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        printf("Failed to connecting filters\n");
        goto fail;
    }

    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        printf("Failed to configure filter graph\n");
        goto fail;
    }

    {
        /* avfilter_graph_dump() allocates the string; free it after logging. */
        char *dump = avfilter_graph_dump(filter_graph, NULL);
        av_log(NULL, AV_LOG_ERROR, "Graph :\n%s\n", dump);
        av_free(dump);
    }

    *graph = filter_graph;
    *src = abuffer_first_ctx;
    *src_ = abuffer_second_ctx;
    *sink = abuffersink_ctx;
    return 0;

fail:
    avfilter_graph_free(&filter_graph);  /* also frees all filter contexts in it */
    return err ? err : 1;
}
/*
 * Mix two audio files into one output file.
 * argv[1] and argv[2] are the input paths, argv[3] the output path
 * (argv[0] is unused). Returns 0 on success, 1 on failure.
 *
 * BUG FIXES vs. original:
 *  - after EOF on an input, the stale/unset packet was still fed to the
 *    decoder (now the input is flushed once and skipped);
 *  - helper and filter-graph return codes were ignored;
 *  - the encoder was never flushed before writing the trailer;
 *  - packet, frame, the output AVIO context and o_encoder.cc leaked.
 */
int audioMix(char *argv[]) {
    int ret = 0;
    FilteringContext filter_ctx;
    AudioStreamingContext s1_decoder, s2_decoder, o_encoder;

    memset(&filter_ctx, 0, sizeof(filter_ctx));
    memset(&s1_decoder, 0, sizeof(s1_decoder));
    memset(&s2_decoder, 0, sizeof(s2_decoder));
    memset(&o_encoder, 0, sizeof(o_encoder));

    s1_decoder.fileName = argv[1];
    s2_decoder.fileName = argv[2];
    o_encoder.fileName = argv[3];

    ret = avformat_open_input(&s1_decoder.fmt, s1_decoder.fileName, NULL, NULL);
    if (ret < 0) {
        printf("Failed to open input file 1\n");
        return 1;
    }
    ret = avformat_find_stream_info(s1_decoder.fmt, NULL);
    if (ret < 0) {
        printf("Failed to find input stream infors\n");
        return 1;
    }
    ret = avformat_open_input(&s2_decoder.fmt, s2_decoder.fileName, NULL, NULL);
    if (ret < 0) {
        printf("Failed to open input file: %s\n", s2_decoder.fileName);
        return 1;
    }
    ret = avformat_find_stream_info(s2_decoder.fmt, NULL);
    if (ret < 0) {
        printf("Failed to find input stream infors\n");
        return 1;
    }
    avformat_alloc_output_context2(&o_encoder.fmt, NULL, NULL, o_encoder.fileName);
    if (!o_encoder.fmt) {
        printf("Could not create output context\n");
        return 1;
    }

    /* Prepare both decoders and the encoder; check the results. */
    if (prepare_audio_decoder(&s1_decoder) != 0 ||
        prepare_audio_decoder(&s2_decoder) != 0 ||
        prepare_audio_encoder(&o_encoder, &s1_decoder) != 0) {
        return 1;
    }

    if (avio_open(&o_encoder.fmt->pb, o_encoder.fileName, AVIO_FLAG_WRITE) < 0) {
        printf("Could not open the output file\n");
        return 1;
    }
    AVDictionary *opts = NULL;
    if (avformat_write_header(o_encoder.fmt, &opts) < 0) {
        printf("Failed to write header to output file.\n");
        return 1;
    }

    if (init_filter_graph(&filter_ctx.graph, &filter_ctx.src,
                          &filter_ctx.src_, &filter_ctx.sink,
                          &s1_decoder, &s2_decoder) != 0) {
        printf("Failed to initialize filter graph\n");
        return 1;
    }

    AVPacket *packet = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();
    if (!packet || !frame) {
        printf("Failed to allocate packet/frame\n");
        return 1;
    }

    AudioStreamingContext *audioContexts[2] = { &s1_decoder, &s2_decoder };
    AVFilterContext *srcs[2] = { filter_ctx.src, filter_ctx.src_ };
    int fin[2] = { 0, 0 };
    const int nb_inputs = 2;

    while (1) {
        /* Feed one packet per still-active input into its source filter. */
        for (int i = 0; i < nb_inputs; ++i) {
            if (fin[i])
                continue;
            ret = av_read_frame(audioContexts[i]->fmt, packet);
            if (ret < 0) {
                if (ret != AVERROR_EOF)
                    printf("Failed to read frame\n");
                /* EOF (or read error): flush this input exactly once. */
                fin[i] = 1;
                add_to_src(audioContexts[i], srcs[i], NULL);
                continue;
            }
            add_to_src(audioContexts[i], srcs[i], packet);
            av_packet_unref(packet);
        }
        /* Drain whatever the graph has mixed so far. */
        while ((ret = av_buffersink_get_frame(filter_ctx.sink, frame)) >= 0) {
            process_filtered_frames(&o_encoder, frame);
            av_frame_unref(frame);
        }
        if (fin[0] && fin[1])
            break;
    }

    /* Flush the encoder before finalizing the container. */
    process_filtered_frames(&o_encoder, NULL);
    av_write_trailer(o_encoder.fmt);

    av_packet_free(&packet);
    av_frame_free(&frame);
    avfilter_graph_free(&filter_ctx.graph);
    avcodec_free_context(&s1_decoder.cc);
    avcodec_free_context(&s2_decoder.cc);
    avcodec_free_context(&o_encoder.cc);
    avformat_close_input(&s1_decoder.fmt);
    avformat_close_input(&s2_decoder.fmt);
    avio_closep(&o_encoder.fmt->pb);
    avformat_free_context(o_encoder.fmt);
    o_encoder.fmt = NULL;
    return 0;
}
/*
 * Encode one (mixed) frame and write the resulting packets to the output.
 * `frame` may be NULL: avcodec_send_frame(NULL) enters flush mode and the
 * loop drains the encoder's remaining packets.
 * Returns 0 on success, 1 on encode/write failure.
 *
 * BUG FIX: the original leaked `output_packet` on every error return and
 * did not check the allocation.
 */
int process_filtered_frames(AudioStreamingContext *encoder, AVFrame *frame) {
    int ret = 0;
    AVPacket *output_packet = av_packet_alloc();
    if (!output_packet) {
        printf("Failed to allocate packet\n");
        return 1;
    }
    int response = avcodec_send_frame(encoder->cc, frame);
    while (response >= 0) {
        response = avcodec_receive_packet(encoder->cc, output_packet);
        if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
            break;
        } else if (response < 0) {
            printf("Error while receiving packet from encoder: %s", av_err2str(response));
            ret = 1;
            break;
        }
        /* The output container has exactly one (audio) stream. */
        output_packet->stream_index = 0;
        response = av_interleaved_write_frame(encoder->fmt, output_packet);
        if (response != 0) {
            printf("Error %d while receiving packet from decoder: %s",
                   response, av_err2str(response));
            ret = 1;
            break;
        }
        av_packet_unref(output_packet);
    }
    av_packet_free(&output_packet);  /* frees on success and on error alike */
    return ret;
}
/*
 * Decode one packet and push the resulting frames into a buffer source.
 * A NULL `input_packet` flushes the source filter (signals EOF to the graph).
 * Returns 0 on success, 1 on failure.
 *
 * BUG FIX: the original allocated the AVFrame before the NULL-packet check
 * and only ever unref'd it, leaking the frame object on the flush path and
 * on every error return.
 */
int add_to_src(AudioStreamingContext *decoder, AVFilterContext *src, AVPacket *input_packet) {
    int err;

    /* Flush request: no frame is needed at all. */
    if (input_packet == NULL) {
        err = av_buffersrc_add_frame(src, NULL);
        if (err < 0) {
            printf("Failed to write NULL frame\n");
            return 1;
        }
        return 0;
    }

    AVFrame *input_frame = av_frame_alloc();
    if (!input_frame) {
        printf(" Failed to allocaated memory for AVFrame\n");
        return 1;
    }

    int response = avcodec_send_packet(decoder->cc, input_packet);
    if (response < 0) {
        printf("Failed to decode packet\n");
        av_frame_free(&input_frame);
        return 1;
    }

    /* Drain every frame the decoder can produce from this packet. */
    while (response >= 0) {
        response = avcodec_receive_frame(decoder->cc, input_frame);
        if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
            break;
        } else if (response < 0) {
            printf("Error while receiving frame from decoder: %s",
                   av_err2str(response));
            av_frame_free(&input_frame);
            return 1;
        }
        err = av_buffersrc_add_frame(src, input_frame);
        if (err < 0) {
            printf("Failed to submitting the frame to the filtergraph: %s\n",
                   av_err2str(err));
            av_frame_free(&input_frame);
            return 1;
        }
    }
    av_frame_free(&input_frame);
    return 0;
}
/*
 * Create the output audio stream and open an encoder for it, mirroring the
 * decoder's parameters so mixed frames can be re-encoded in the same format.
 * Returns 0 on success, 1 on any failure.
 */
int prepare_audio_encoder(AudioStreamingContext *encoder, AudioStreamingContext *decoder) {
    AVStream *src_stream = decoder->s;

    encoder->s = avformat_new_stream(encoder->fmt, NULL);
    if (encoder->s == NULL)
        return 1;

    /* Re-encode with the same codec the input was encoded with. */
    encoder->c = avcodec_find_encoder(src_stream->codecpar->codec_id);
    if (encoder->c == NULL) {
        printf("Could not find the proper codec");
        return 1;
    }

    encoder->cc = avcodec_alloc_context3(encoder->c);
    if (encoder->cc == NULL) {
        printf("Could not allocated memory for codec context\n");
        return 1;
    }

    /* Mirror the decoder's audio parameters one-for-one. */
    AVCodecContext *dst = encoder->cc;
    const AVCodecContext *ref = decoder->cc;
    dst->channels       = ref->channels;
    dst->channel_layout = ref->channel_layout;
    dst->sample_rate    = ref->sample_rate;
    dst->sample_fmt     = ref->sample_fmt;
    dst->bit_rate       = ref->bit_rate;
    dst->time_base      = ref->time_base;
    dst->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    encoder->s->time_base = dst->time_base;

    if (avcodec_open2(dst, encoder->c, NULL) < 0) {
        printf("Could not open the codec");
        return 1;
    }
    avcodec_parameters_from_context(encoder->s->codecpar, dst);
    return 0;
}
/*
 * Open a decoder for the first stream of an already-opened input file and
 * store the stream/codec handles back into the context.
 * Returns 0 on success, 1 on any failure.
 */
int prepare_audio_decoder(AudioStreamingContext *decoder) {
    AVStream *stream = decoder->fmt->streams[0];

    AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
    if (codec == NULL) {
        printf("Unable to find decoder for input1");
        return 1;
    }

    AVCodecContext *codec_ctx = avcodec_alloc_context3(codec);
    if (codec_ctx == NULL) {
        printf("Unable to alloc memory for codec context\n");
        return 1;
    }

    if (avcodec_parameters_to_context(codec_ctx, stream->codecpar) < 0) {
        printf("Failed to fill parameters to context\n");
        return 1;
    }
    if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
        printf("Failed to open codec\n");
        return 1;
    }

    decoder->s = stream;
    decoder->c = codec;
    decoder->cc = codec_ctx;
    return 0;
}
/*
 * Remux one packet: rescale its timing fields from the input stream's time
 * base to the output stream's, then interleave-write it into the output
 * container. Returns 0 on success, 1 on a muxing error.
 */
static int add_frame_to_fmt(AVFormatContext *oFmt, AVStream *in_stream, AVStream *out_stream, AVPacket *packet) {
    const AVRational src_tb = in_stream->time_base;
    const AVRational dst_tb = out_stream->time_base;

    packet->pts = av_rescale_q_rnd(packet->pts, src_tb, dst_tb,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    packet->dts = av_rescale_q_rnd(packet->dts, src_tb, dst_tb,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    packet->duration = av_rescale_q(packet->duration, src_tb, dst_tb);
    packet->pos = -1;  /* byte position is unknown after remuxing */

    if (av_interleaved_write_frame(oFmt, packet) < 0) {
        fprintf(stderr, "Error muxing packet\n");
        return 1;
    }
    return 0;
}
/*
 * Mux the video stream from videoPath and the audio stream from audioPath
 * into outputPath by stream copy (no re-encoding).
 * Returns outputPath on success, NULL on failure.
 *
 * BUG FIXES vs. original:
 *  - av_find_best_stream() results were used unchecked as array indices
 *    (negative error code -> out-of-bounds access);
 *  - the avio_open error message printed audioPath instead of outputPath;
 *  - the input/output contexts leaked on every error path.
 */
char *do_mergeAV(char *videoPath, char *audioPath, char *outputPath) {
    int err = 0;
    AVFormatContext *vFmtCtx = NULL,
                    *aFmtCtx = NULL,
                    *outputCtx = NULL;

    /* Open Video media */
    err = avformat_open_input(&vFmtCtx, videoPath, NULL, NULL);
    if (err < 0) {
        printf("Error occured during opening video media.\n");
        return NULL;
    }
    err = avformat_find_stream_info(vFmtCtx, NULL);
    if (err < 0) {
        printf("Failed to find stream infor from video media.\n");
        goto fail;
    }
    /* Open Mixed Audio media */
    err = avformat_open_input(&aFmtCtx, audioPath, NULL, NULL);
    if (err < 0) {
        printf("Error occured during opening mixed audio media.\n");
        goto fail;
    }
    err = avformat_find_stream_info(aFmtCtx, NULL);
    if (err < 0) {
        printf("Failed to find stream infor from mixed audio media.\n");
        goto fail;
    }
    avformat_alloc_output_context2(&outputCtx, NULL, NULL, outputPath);
    if (!outputCtx) {
        printf("Failed to create output context.\n");
        goto fail;
    }

    enum {
        FIRST_STREAM = 0,
        V_STREAM = 0,
        A_STREAM = 1,
        END_STREAM = 2,
        NB_STREAMS = 2,
    };
    AVStream *i_streams[2] = { NULL, NULL };

    /* Create Video Stream for output */
    AVStream *o_vstream = avformat_new_stream(outputCtx, NULL);
    if (!o_vstream) {
        printf("Failed to create new stream for output.\n");
        goto fail;
    }
    /* BUG FIX: check the best-stream lookup before indexing streams[]. */
    int video_stream_idx = av_find_best_stream(
        vFmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_stream_idx < 0) {
        printf("No video stream found in %s\n", videoPath);
        goto fail;
    }
    i_streams[V_STREAM] = vFmtCtx->streams[video_stream_idx];
    err = avcodec_parameters_copy(o_vstream->codecpar, i_streams[V_STREAM]->codecpar);
    if (err < 0) {
        printf("Failed to copy codec parameters.\n");
        goto fail;
    }

    /* Create Audio Stream for output */
    AVStream *o_astream = avformat_new_stream(outputCtx, NULL);
    if (!o_astream) {
        printf("Failed to create new stream for output.\n");
        goto fail;
    }
    int audio_stream_idx = av_find_best_stream(
        aFmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (audio_stream_idx < 0) {
        printf("No audio stream found in %s\n", audioPath);
        goto fail;
    }
    i_streams[A_STREAM] = aFmtCtx->streams[audio_stream_idx];
    err = avcodec_parameters_copy(o_astream->codecpar, i_streams[A_STREAM]->codecpar);
    if (err < 0) {
        printf("Failed to copy codec parameters.\n");
        goto fail;
    }

    err = avio_open(&outputCtx->pb, outputPath, AVIO_FLAG_WRITE);
    if (err < 0) {
        /* BUG FIX: message used to name audioPath for an outputPath failure. */
        printf("Failed to open output file %s\n", outputPath);
        goto fail;
    }
    AVDictionary *opts = NULL;
    err = avformat_write_header(outputCtx, &opts);
    if (err < 0) {
        printf("Failed to write header to output file.\n");
        goto fail;
    }

    /* Round-robin over the two inputs, remuxing until both reach EOF. */
    int idx = FIRST_STREAM;
    int fin[NB_STREAMS] = { 0, 0 };
    AVPacket packet;
    AVFormatContext *contexts[2] = { vFmtCtx, aFmtCtx };
    while (1) {
        AVFormatContext *context = contexts[idx];
        if (fin[idx] == 0) {
            err = av_read_frame(context, &packet);
            if (err < 0) {
                fin[idx] = 1;  /* EOF or read error: stop pulling this input */
                goto NEXT;
            }
            packet.stream_index = idx;
            add_frame_to_fmt(outputCtx, i_streams[idx], outputCtx->streams[idx], &packet);
            av_packet_unref(&packet);
        }
NEXT:
        if (fin[V_STREAM] == 1 && fin[A_STREAM] == 1) {
            break;
        }
        idx = (idx + 1) % NB_STREAMS;
    }

    av_write_trailer(outputCtx);
    avformat_close_input(&vFmtCtx);
    avformat_close_input(&aFmtCtx);
    avio_closep(&outputCtx->pb);
    avformat_free_context(outputCtx);
    return outputPath;

fail:
    avformat_close_input(&vFmtCtx);
    avformat_close_input(&aFmtCtx);
    if (outputCtx) {
        avio_closep(&outputCtx->pb);  /* safe if pb was never opened */
        avformat_free_context(outputCtx);
    }
    return NULL;
}
/*
 * High-level entry: mix the two audio files into a temporary MP3, then mux
 * it with the video into "./mergeAV.mp4".
 * Returns the output video path on success, NULL on failure.
 */
char* mergeAV(char *videoPath, char *audio_1_path, char *audio_2_path) {
    char *mixed_audio = "./mixed_audio.mp3";
    char *merged_video = "./mergeAV.mp4";

    /* audioMix() reads paths from argv[1..3]; argv[0] is unused. */
    char *mix_args[4] = {
        "",
        audio_1_path,
        audio_2_path,
        mixed_audio,
    };
    if (audioMix(mix_args) != 0)
        return NULL;

    return do_mergeAV(videoPath, mixed_audio, merged_video);
}
/*
 * Usage: prog <video> <audio_1> <audio_2>
 * BUG FIX: the original indexed argv[1..3] without checking argc (undefined
 * behavior when fewer arguments are passed) and ignored mergeAV's result.
 */
int main(int argc, char *argv[]) {
    if (argc < 4) {
        fprintf(stderr, "Usage: %s <video> <audio_1> <audio_2>\n", argv[0]);
        return 1;
    }
    return mergeAV(argv[1], argv[2], argv[3]) == NULL ? 1 : 0;
}
#include <stdio.h>
#import "libavcodec/avcodec.h"
#include "libavutil/channel_layout.h"
#include "libavutil/md5.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include <libavformat/avformat.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>
#include "libavformat/avio.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/frame.h"
#include "libavutil/opt.h"
#define INPUT_SAMPLERATE 44100
#define INPUT_FORMAT AV_SAMPLE_FMT_S16
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_STEREO
/** The output bit rate in kbit/s */
#define OUTPUT_BIT_RATE 44100
/** The number of output channels */
#define OUTPUT_CHANNELS 2
/** The audio sample output format */
#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
#define VOLUME_VAL 0.90
AVFormatContext *output_format_context = NULL;
AVCodecContext *output_codec_context = NULL;
AVFormatContext *input_format_context_0 = NULL;
AVCodecContext *input_codec_context_0 = NULL;
AVFormatContext *input_format_context_1 = NULL;
AVCodecContext *input_codec_context_1 = NULL;
AVFilterGraph *graph;
AVFilterContext *src0,*src1, *sink;
/*
 * Translate an FFmpeg error code into a human-readable string.
 * NOTE(review): returns a pointer to a single static buffer — the result is
 * overwritten by the next call and the function is not thread-safe.
 */
static char *const get_error_text(const int error)
{
    static char error_buffer[255];
    av_strerror(error, error_buffer, sizeof(error_buffer));
    return error_buffer;
}
/*
 * Build the two-input mixing graph for the old-API path:
 * abuffer("src0") + abuffer("src1") -> amix -> abuffersink("sink").
 * Source parameters come from the global input codec contexts.
 *
 * BUG FIX: the abuffersink "channel_layout" option was fed OUTPUT_CHANNELS
 * (a channel COUNT, value 2) where a channel-layout mask is required; it now
 * uses av_get_default_channel_layout(OUTPUT_CHANNELS). The result of
 * av_opt_set_int_list() is also checked immediately, and the graph dump
 * string is freed.
 */
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src0, AVFilterContext **src1,
                             AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer1_ctx;
    AVFilter *abuffer1;
    AVFilterContext *abuffer0_ctx;
    AVFilter *abuffer0;
    AVFilterContext *mix_ctx;
    AVFilter *mix_filter;
    AVFilterContext *abuffersink_ctx;
    AVFilter *abuffersink;
    char args[512];
    int err;

    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        av_log(NULL, AV_LOG_ERROR, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    /****** abuffer 0 ********/
    /* Create the abuffer filter; it feeds input 0 into the graph. */
    abuffer0 = avfilter_get_by_name("abuffer");
    if (!abuffer0) {
        av_log(NULL, AV_LOG_ERROR, "Could not find the abuffer filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }
    /* Decoders may leave channel_layout unset; derive it from the count. */
    if (!input_codec_context_0->channel_layout)
        input_codec_context_0->channel_layout = av_get_default_channel_layout(input_codec_context_0->channels);
    snprintf(args, sizeof(args),
             "sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
             input_codec_context_0->sample_rate,
             av_get_sample_fmt_name(input_codec_context_0->sample_fmt), input_codec_context_0->channel_layout);
    err = avfilter_graph_create_filter(&abuffer0_ctx, abuffer0, "src0",
                                       args, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
        return err;
    }

    /****** abuffer 1 ******* */
    /* Same as above, for input 1. */
    abuffer1 = avfilter_get_by_name("abuffer");
    if (!abuffer1) {
        av_log(NULL, AV_LOG_ERROR, "Could not find the abuffer filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }
    if (!input_codec_context_1->channel_layout)
        input_codec_context_1->channel_layout = av_get_default_channel_layout(input_codec_context_1->channels);
    snprintf(args, sizeof(args),
             "sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
             input_codec_context_1->sample_rate,
             av_get_sample_fmt_name(input_codec_context_1->sample_fmt), input_codec_context_1->channel_layout);
    err = avfilter_graph_create_filter(&abuffer1_ctx, abuffer1, "src1",
                                       args, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
        return err;
    }

    /****** amix ******* */
    /* Create the mixing filter with two inputs. */
    mix_filter = avfilter_get_by_name("amix");
    if (!mix_filter) {
        av_log(NULL, AV_LOG_ERROR, "Could not find the mix filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }
    snprintf(args, sizeof(args), "inputs=2");
    err = avfilter_graph_create_filter(&mix_ctx, mix_filter, "amix",
                                       args, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio amix filter\n");
        return err;
    }

    /* Finally create the abuffersink filter;
     * it will be used to get the filtered data out of the graph. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        av_log(NULL, AV_LOG_ERROR, "Could not find the abuffersink filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }
    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate the abuffersink instance.\n");
        return AVERROR(ENOMEM);
    }
    /* Same sample fmts as the output file. */
    err = av_opt_set_int_list(abuffersink_ctx, "sample_fmts",
                              ((int[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }),
                              AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could set options to the abuffersink instance.\n");
        return err;
    }
    uint8_t ch_layout[64];
    /* BUG FIX: pass a layout mask, not the channel count. */
    av_get_channel_layout_string((char *)ch_layout, sizeof(ch_layout), 0,
                                 av_get_default_channel_layout(OUTPUT_CHANNELS));
    err = av_opt_set(abuffersink_ctx, "channel_layout", (char *)ch_layout, AV_OPT_SEARCH_CHILDREN);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could set options to the abuffersink instance.\n");
        return err;
    }
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the abuffersink instance.\n");
        return err;
    }

    /* Connect the filters; */
    err = avfilter_link(abuffer0_ctx, 0, mix_ctx, 0);
    if (err >= 0)
        err = avfilter_link(abuffer1_ctx, 0, mix_ctx, 1);
    if (err >= 0)
        err = avfilter_link(mix_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error connecting filters\n");
        return err;
    }

    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while configuring graph : %s\n", get_error_text(err));
        return err;
    }

    /* avfilter_graph_dump() allocates the string; free it after logging. */
    char *dump = avfilter_graph_dump(filter_graph, NULL);
    av_log(NULL, AV_LOG_ERROR, "Graph :\n%s\n", dump);
    av_free(dump);

    *graph = filter_graph;
    *src0 = abuffer0_ctx;
    *src1 = abuffer1_ctx;
    *sink = abuffersink_ctx;
    return 0;
}
/** Open an input file and the required decoder.
 *
 * On success, *input_format_context and *input_codec_context are set and 0
 * is returned; on failure everything opened so far is closed and a negative
 * AVERROR (or AVERROR_EXIT) is returned.
 */
static int open_input_file(const char *filename,
                           AVFormatContext **input_format_context,
                           AVCodecContext **input_codec_context)
{
    AVCodec *input_codec;
    int error;
    /** Open the input file to read from it. */
    if ((error = avformat_open_input(input_format_context, filename, NULL,
                                     NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not open input file '%s' (error '%s')\n",
               filename, get_error_text(error));
        *input_format_context = NULL;
        return error;
    }
    /** Get information on the input file (number of streams etc.). */
    if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not open find stream info (error '%s')\n",
               get_error_text(error));
        avformat_close_input(input_format_context);
        return error;
    }
    /** Make sure that there is only one stream in the input file. */
    if ((*input_format_context)->nb_streams != 1) {
        av_log(NULL, AV_LOG_ERROR, "Expected one audio input stream, but found %d\n",
               (*input_format_context)->nb_streams);
        avformat_close_input(input_format_context);
        return AVERROR_EXIT;
    }
    /** Find a decoder for the audio stream.
     * NOTE(review): AVStream.codec is the deprecated pre-4.0 API surface;
     * newer FFmpeg needs codecpar + avcodec_parameters_to_context() instead. */
    if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
        av_log(NULL, AV_LOG_ERROR, "Could not find input codec\n");
        avformat_close_input(input_format_context);
        return AVERROR_EXIT;
    }
    /** Open the decoder for the audio stream to use it later. */
    if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
                               input_codec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not open input codec (error '%s')\n",
               get_error_text(error));
        avformat_close_input(input_format_context);
        return error;
    }
    /** Save the decoder context for easier access later.
     * NOTE(review): presumably owned by the stream — do not free separately. */
    *input_codec_context = (*input_format_context)->streams[0]->codec;
    return 0;
}
/**
 * Open an output file and the required encoder.
 * Also set some basic encoder parameters.
 * Some of these parameters are based on the input file's parameters.
 *
 * Output is always stereo PCM S16LE at the input's sample rate. Returns 0
 * on success; on failure the IO/format contexts are released and a negative
 * AVERROR (or AVERROR_EXIT) is returned.
 */
static int open_output_file(const char *filename,
                            AVCodecContext *input_codec_context,
                            AVFormatContext **output_format_context,
                            AVCodecContext **output_codec_context)
{
    AVIOContext *output_io_context = NULL;
    AVStream *stream = NULL;
    AVCodec *output_codec = NULL;
    int error;
    /** Open the output file to write to it. */
    if ((error = avio_open(&output_io_context, filename,
                           AVIO_FLAG_WRITE)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s' (error '%s')\n",
               filename, get_error_text(error));
        return error;
    }
    /** Create a new format context for the output container format.
     * NOTE(review): if this allocation fails, output_io_context is leaked —
     * the cleanup path below is only usable once the context exists. */
    if (!(*output_format_context = avformat_alloc_context())) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate output format context\n");
        return AVERROR(ENOMEM);
    }
    /** Associate the output file (pointer) with the container format context. */
    (*output_format_context)->pb = output_io_context;
    /** Guess the desired container format based on the file extension. */
    if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
                                                              NULL))) {
        av_log(NULL, AV_LOG_ERROR, "Could not find output file format\n");
        goto cleanup;
    }
    av_strlcpy((*output_format_context)->filename, filename,
               sizeof((*output_format_context)->filename));
    /** Find the encoder to be used by its name. */
    if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_PCM_S16LE))) {
        av_log(NULL, AV_LOG_ERROR, "Could not find an PCM encoder.\n");
        goto cleanup;
    }
    /** Create a new audio stream in the output file container. */
    if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
        av_log(NULL, AV_LOG_ERROR, "Could not create new stream\n");
        error = AVERROR(ENOMEM);
        goto cleanup;
    }
    /** Save the encoder context for easier access later.
     * NOTE(review): AVStream.codec is the deprecated pre-4.0 API surface. */
    *output_codec_context = stream->codec;
    /**
     * Set the basic encoder parameters: stereo S16 at the input sample rate.
     */
    (*output_codec_context)->channels = OUTPUT_CHANNELS;
    (*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
    (*output_codec_context)->sample_rate = input_codec_context->sample_rate;
    (*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
    //(*output_codec_context)->bit_rate = input_codec_context->bit_rate;
    av_log(NULL, AV_LOG_INFO, "output bitrate %d\n", (*output_codec_context)->bit_rate);
    /**
     * Some container formats (like MP4) require global headers to be present
     * Mark the encoder so that it behaves accordingly.
     */
    if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
        (*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;
    /** Open the encoder for the audio stream to use it later. */
    if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not open output codec (error '%s')\n",
               get_error_text(error));
        goto cleanup;
    }
    return 0;
cleanup:
    /* Release the IO context and the container; report the first error seen. */
    avio_close((*output_format_context)->pb);
    avformat_free_context(*output_format_context);
    *output_format_context = NULL;
    return error < 0 ? error : AVERROR_EXIT;
}
/** Allocate an empty frame for decoding into; the caller owns and frees it.
 * Returns 0 on success, AVERROR(ENOMEM) if the allocation failed. */
static int init_input_frame(AVFrame **frame)
{
    *frame = av_frame_alloc();
    if (*frame != NULL)
        return 0;
    av_log(NULL, AV_LOG_ERROR, "Could not allocate input frame\n");
    return AVERROR(ENOMEM);
}
/** Initialize one data packet for reading or writing.
 * av_init_packet() fills the remaining fields with defaults; data/size are
 * cleared here so downstream code recognizes the packet as empty. */
static void init_packet(AVPacket *packet)
{
    av_init_packet(packet);
    /** Set the packet data and size so that it is recognized as being empty. */
    packet->data = NULL;
    packet->size = 0;
}
/** Decode one audio frame from the input file.
 *
 * On return, *data_present reports whether `frame` holds decoded samples,
 * and *finished is set once the demuxer reached EOF and the decoder has
 * been fully drained. Returns 0, or a negative AVERROR on read/decode error.
 */
static int decode_audio_frame(AVFrame *frame,
                              AVFormatContext *input_format_context,
                              AVCodecContext *input_codec_context,
                              int *data_present, int *finished)
{
    /** Packet used for temporary storage. */
    AVPacket input_packet;
    int error;
    init_packet(&input_packet);
    /** Read one audio frame from the input file into a temporary packet. */
    if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
        /** If we are at the end of the file, flush the decoder below. */
        if (error == AVERROR_EOF)
            *finished = 1;
        else {
            av_log(NULL, AV_LOG_ERROR, "Could not read frame (error '%s')\n",
                   get_error_text(error));
            return error;
        }
    }
    /**
     * Decode the audio frame stored in the temporary packet.
     * The input audio stream decoder is used to do this.
     * If we are at the end of the file, pass an empty packet to the decoder
     * to flush it: on EOF input_packet still has data==NULL/size==0 from
     * init_packet(), which signals a flush to this old decode API.
     */
    if ((error = avcodec_decode_audio4(input_codec_context, frame,
                                       data_present, &input_packet)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not decode frame (error '%s')\n",
               get_error_text(error));
        av_free_packet(&input_packet);
        return error;
    }
    /**
     * If the decoder has not been flushed completely, we are not finished,
     * so that this function has to be called again.
     */
    if (*finished && *data_present)
        *finished = 0;
    av_free_packet(&input_packet);
    return 0;
}
/** Encode one frame worth of audio to the output file.
 *
 * *data_present reports whether the encoder produced a packet; a packet is
 * written only when it did. Returns 0, or a negative AVERROR on
 * encode/write failure.
 */
static int encode_audio_frame(AVFrame *frame,
                              AVFormatContext *output_format_context,
                              AVCodecContext *output_codec_context,
                              int *data_present)
{
    /** Packet used for temporary storage. */
    AVPacket output_packet;
    int error;
    init_packet(&output_packet);
    /**
     * Encode the audio frame and store it in the temporary packet.
     * The output audio stream encoder is used to do this.
     * (avcodec_encode_audio2 is the pre-send/receive encode API.)
     */
    if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
                                       frame, data_present)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not encode frame (error '%s')\n",
               get_error_text(error));
        av_free_packet(&output_packet);
        return error;
    }
    /** Write one audio frame from the temporary packet to the output file. */
    if (*data_present) {
        if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not write frame (error '%s')\n",
                   get_error_text(error));
            av_free_packet(&output_packet);
            return error;
        }
        av_free_packet(&output_packet);
    }
    return 0;
}
/**
 * Main transcode loop: round-robin over the two inputs, decode one frame
 * at a time, push decoded frames into the mixing filter graph, then drain
 * the graph's sink and encode whatever comes out, until both inputs reach
 * EOF.
 *
 * Operates entirely on file-scope state set up by main():
 * input_format_context_0/1, input_codec_context_0/1, the buffer sources
 * src0/src1, the buffersink `sink`, and the output contexts.
 *
 * @return 0 on success; on error it exits the process (see NOTE at `end:`).
 */
static int process_all(){
    int ret = 0;
    int data_present = 0;   /* decoder produced a frame this iteration */
    int finished = 0;       /* decoder reached the end of its input    */
    int nb_inputs = 2;
    /* Gather the per-input contexts into arrays so one loop serves both. */
    AVFormatContext* input_format_contexts[2];
    AVCodecContext* input_codec_contexts[2];
    input_format_contexts[0] = input_format_context_0;
    input_format_contexts[1] = input_format_context_1;
    input_codec_contexts[0] = input_codec_context_0;
    input_codec_contexts[1] = input_codec_context_1;
    /* abuffer sources of the filter graph, one per input. */
    AVFilterContext* buffer_contexts[2];
    buffer_contexts[0] = src0;
    buffer_contexts[1] = src1;
    int input_finished[2];  /* per-input EOF flags */
    input_finished[0] = 0;
    input_finished[1] = 0;
    int input_to_read[2];   /* "graph wants more data from input i"; both start hot */
    input_to_read[0] = 1;
    input_to_read[1] = 1;
    int total_samples[2];   /* running counters used for the progress logs only */
    total_samples[0] = 0;
    total_samples[1] = 0;
    int total_out_samples = 0;
    int nb_finished = 0;
    while (nb_finished < nb_inputs) {
        int data_present_in_graph = 0;
        /* Feed phase: decode one frame from every input the graph asked for. */
        for(int i = 0 ; i < nb_inputs ; i++){
            if(input_finished[i] || input_to_read[i] == 0){
                continue;
            }
            input_to_read[i] = 0;
            AVFrame *frame = NULL;
            if(init_input_frame(&frame) > 0){
                goto end;
            }
            /** Decode one frame worth of audio samples. */
            if ( (ret = decode_audio_frame(frame, input_format_contexts[i], input_codec_contexts[i], &data_present, &finished))){
                goto end;
            }
            /**
             * If we are at the end of the file and there are no more samples
             * in the decoder which are delayed, we are actually finished.
             * This must not be treated as an error.
             */
            if (finished && !data_present) {
                input_finished[i] = 1;
                nb_finished++;
                ret = 0;
                av_log(NULL, AV_LOG_INFO, "Input n°%d finished. Write NULL frame \n", i);
                /* A NULL frame signals EOF to the corresponding buffer source. */
                ret = av_buffersrc_write_frame(buffer_contexts[i], NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error writing EOF null frame for input %d\n", i);
                    goto end;
                }
            }
            else if (data_present) { /** If there is decoded data, convert and store it */
                /* push the audio data from decoded frame into the filtergraph */
                ret = av_buffersrc_write_frame(buffer_contexts[i], frame);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
                    goto end;
                }
                av_log(NULL, AV_LOG_INFO, "add %d samples on input %d (%d Hz, time=%f, ttime=%f)\n",
                       frame->nb_samples, i, input_codec_contexts[i]->sample_rate,
                       (double)frame->nb_samples / input_codec_contexts[i]->sample_rate,
                       (double)(total_samples[i] += frame->nb_samples) / input_codec_contexts[i]->sample_rate);
            }
            av_frame_free(&frame);
            data_present_in_graph = data_present | data_present_in_graph;
        }
        /* Drain phase: pull as many mixed frames from the sink as available. */
        if(data_present_in_graph){
            AVFrame *filt_frame = av_frame_alloc();
            /* pull filtered audio from the filtergraph */
            while (1) {
                ret = av_buffersink_get_frame(sink, filt_frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
                    /* Sink is dry; mark every source the graph polled
                     * unsuccessfully so it gets fed on the next pass. */
                    for(int i = 0 ; i < nb_inputs ; i++){
                        if(av_buffersrc_get_nb_failed_requests(buffer_contexts[i]) > 0){
                            input_to_read[i] = 1;
                            av_log(NULL, AV_LOG_INFO, "Need to read input %d\n", i);
                        }
                    }
                    break;
                }
                if (ret < 0)
                    goto end;
                av_log(NULL, AV_LOG_INFO, "remove %d samples from sink (%d Hz, time=%f, ttime=%f)\n",
                       filt_frame->nb_samples, output_codec_context->sample_rate,
                       (double)filt_frame->nb_samples / output_codec_context->sample_rate,
                       (double)(total_out_samples += filt_frame->nb_samples) / output_codec_context->sample_rate);
                //av_log(NULL, AV_LOG_INFO, "Data read from graph\n");
                ret = encode_audio_frame(filt_frame, output_format_context, output_codec_context, &data_present);
                if (ret < 0)
                    goto end;
                av_frame_unref(filt_frame);
            }
            av_frame_free(&filt_frame);
        } else {
            /* Nothing was fed this round; request data from every input. */
            av_log(NULL, AV_LOG_INFO, "No data in graph\n");
            for(int i = 0 ; i < nb_inputs ; i++){
                input_to_read[i] = 1;
            }
        }
    }
    return 0;
end:
    /* NOTE(review): the error path exits the process instead of returning,
     * and the cleanup below was left commented out, so the decoded frame
     * and contexts are not released on failure — confirm this is intended. */
    // avcodec_close(input_codec_context);
    // avformat_close_input(&input_format_context);
    // av_frame_free(&frame);
    // av_frame_free(&filt_frame);
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }
    exit(0);
}
/** Write the header of the output file container. */
static int write_output_file_header(AVFormatContext *output_format_context)
{
    int ret = avformat_write_header(output_format_context, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not write output file header (error '%s')\n",
               get_error_text(ret));
        return ret;
    }
    return 0;
}
/** Write the trailer of the output file container. */
static int write_output_file_trailer(AVFormatContext *output_format_context)
{
    int ret = av_write_trailer(output_format_context);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not write output file trailer (error '%s')\n",
               get_error_text(ret));
        return ret;
    }
    return 0;
}
/**
 * Entry point of the mixer demo: open two WAV inputs, build the mixing
 * filter graph, open the output file and run the full
 * decode -> filter -> encode pipeline via process_all().
 *
 * Fixes: the error message after a failed trailer write wrongly said
 * "header"; the return codes of init_filter_graph() and open_output_file()
 * were printed but never acted on — the program now aborts on failure
 * instead of continuing with NULL contexts.
 */
int main(int argc, const char * argv[])
{
    av_log_set_level(AV_LOG_VERBOSE);
    int err;
    av_register_all();
    avfilter_register_all();
    /* First input. */
    char* audio1Path = "audio10.wav";
    if (open_input_file(audio1Path, &input_format_context_0, &input_codec_context_0) < 0){
        av_log(NULL, AV_LOG_ERROR, "Error while opening file 1\n");
        exit(1);
    }
    av_dump_format(input_format_context_0, 0, audio1Path, 0);
    /* Second input. */
    char* audio2Path = "audio20.wav";
    if (open_input_file(audio2Path, &input_format_context_1, &input_codec_context_1) < 0){
        av_log(NULL, AV_LOG_ERROR, "Error while opening file 2\n");
        exit(1);
    }
    av_dump_format(input_format_context_1, 0, audio2Path, 0);
    /* Set up the filtergraph. */
    err = init_filter_graph(&graph, &src0, &src1, &sink);
    printf("Init err = %d\n", err);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while initializing filter graph\n");
        exit(1);
    }
    char* outputFile = "output.wav";
    remove(outputFile);
    av_log(NULL, AV_LOG_INFO, "Output file : %s\n", outputFile);
    err = open_output_file(outputFile, input_codec_context_0, &output_format_context, &output_codec_context);
    printf("open output file err : %d\n", err);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while opening output file\n");
        exit(1);
    }
    av_dump_format(output_format_context, 0, outputFile, 1);
    if(write_output_file_header(output_format_context) < 0){
        av_log(NULL, AV_LOG_ERROR, "Error while writing header outputfile\n");
        exit(1);
    }
    process_all();
    if(write_output_file_trailer(output_format_context) < 0){
        /* Bug fix: this message used to say "header". */
        av_log(NULL, AV_LOG_ERROR, "Error while writing trailer outputfile\n");
        exit(1);
    }
    printf("FINISHED\n");
    return 0;
}
File added
/*
* copyright (c) 2013 Andrew Kelley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libavfilter API usage example.
*
* @example filter_audio.c
* This example will generate a sine wave audio,
* pass it through a simple filter chain, and then compute the MD5 checksum of
* the output data.
*
* The filter chain it uses is:
* (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
*
* abuffer: This provides the endpoint where you can feed the decoded samples.
* volume: In this example we hardcode it to 0.90.
* aformat: This converts the samples to the samplefreq, channel layout,
* and sample format required by the audio device.
* abuffersink: This provides the endpoint where you can read the samples after
* they have passed through the filter chain.
*/
#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <libavutil/channel_layout.h>
#include <libavutil/md5.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#define INPUT_SAMPLERATE 44100
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_STEREO
#define VOLUME_VAL 0.90
/**
 * Build the chain  (input) -> abuffer -> volume -> aformat -> abuffersink
 * and hand back the configured graph plus its source and sink endpoints.
 *
 * @param graph out: the configured AVFilterGraph (caller owns and frees it)
 * @param src   out: the abuffer source to feed decoded frames into
 * @param sink  out: the abuffersink to read filtered frames from
 * @return 0 on success, a negative AVERROR code on failure
 *
 * Fixes: every error path used to return without releasing the graph,
 * leaking it together with any filters already allocated inside it; all
 * failures now funnel through one cleanup label.  The option-string
 * buffers are declared as char (not uint8_t) to match the snprintf and
 * av_opt_set prototypes.
 */
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
                             AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer_ctx;
    const AVFilter *abuffer;
    AVFilterContext *volume_ctx;
    const AVFilter *volume;
    AVFilterContext *aformat_ctx;
    const AVFilter *aformat;
    AVFilterContext *abuffersink_ctx;
    const AVFilter *abuffersink;
    AVDictionary *options_dict = NULL;
    char options_str[1024];
    char ch_layout[64];
    int err;
    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        fprintf(stderr, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }
    /* abuffer: the endpoint where decoded samples are fed into the graph. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer) {
        fprintf(stderr, "Could not find the abuffer filter.\n");
        err = AVERROR_FILTER_NOT_FOUND;
        goto fail;
    }
    abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
    if (!abuffer_ctx) {
        fprintf(stderr, "Could not allocate the abuffer instance.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }
    /* Set the filter options through the AVOptions API. */
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
    av_opt_set    (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "channels", 2, AV_OPT_SEARCH_CHILDREN);
    /* All options were set above, so initialize with NULL. */
    err = avfilter_init_str(abuffer_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffer filter.\n");
        goto fail;
    }
    /* volume: hardcoded gain of VOLUME_VAL. */
    volume = avfilter_get_by_name("volume");
    if (!volume) {
        fprintf(stderr, "Could not find the volume filter.\n");
        err = AVERROR_FILTER_NOT_FOUND;
        goto fail;
    }
    volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
    if (!volume_ctx) {
        fprintf(stderr, "Could not allocate the volume instance.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }
    /* Options can also be passed as key/value pairs in a dictionary. */
    av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
    err = avfilter_init_dict(volume_ctx, &options_dict);
    av_dict_free(&options_dict);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the volume filter.\n");
        goto fail;
    }
    /* aformat: converts to the sample rate/layout/format wanted downstream. */
    aformat = avfilter_get_by_name("aformat");
    if (!aformat) {
        fprintf(stderr, "Could not find the aformat filter.\n");
        err = AVERROR_FILTER_NOT_FOUND;
        goto fail;
    }
    aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
    if (!aformat_ctx) {
        fprintf(stderr, "Could not allocate the aformat instance.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }
    /* A third way of passing options: a key1=value1:key2=value2 string. */
    snprintf(options_str, sizeof(options_str),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
             av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
             (uint64_t)AV_CH_LAYOUT_STEREO);
    err = avfilter_init_str(aformat_ctx, options_str);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
        goto fail;
    }
    /* abuffersink: the endpoint the filtered data is read from. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        fprintf(stderr, "Could not find the abuffersink filter.\n");
        err = AVERROR_FILTER_NOT_FOUND;
        goto fail;
    }
    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) {
        fprintf(stderr, "Could not allocate the abuffersink instance.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }
    /* This filter takes no options. */
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffersink instance.\n");
        goto fail;
    }
    /* Connect the filters into a linear chain. */
    err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
    if (err >= 0)
        err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
    if (err >= 0)
        err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        fprintf(stderr, "Error connecting filters\n");
        goto fail;
    }
    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
        goto fail;
    }
    *graph = filter_graph;
    *src   = abuffer_ctx;
    *sink  = abuffersink_ctx;
    return 0;
fail:
    /* Releases the graph and every filter context allocated inside it. */
    avfilter_graph_free(&filter_graph);
    return err;
}
/* Do something useful with the filtered data: this simple
 * example just prints the MD5 checksum of each plane to stdout. */
static int process_output(struct AVMD5 *md5, AVFrame *frame)
{
    const int is_planar = av_sample_fmt_is_planar(frame->format);
    const int nb_channels = av_get_channel_layout_nb_channels(frame->channel_layout);
    /* Planar audio keeps one plane per channel; packed keeps one plane total. */
    const int nb_planes = is_planar ? nb_channels : 1;
    const int bytes_per_sample = av_get_bytes_per_sample(frame->format);
    const int plane_size = bytes_per_sample * frame->nb_samples *
                           (is_planar ? 1 : nb_channels);
    int p;
    size_t k;
    for (p = 0; p < nb_planes; p++) {
        uint8_t checksum[16];
        av_md5_init(md5);
        av_md5_sum(checksum, frame->extended_data[p], plane_size);
        fprintf(stdout, "plane %d: 0x", p);
        for (k = 0; k < sizeof(checksum); k++)
            fprintf(stdout, "%02X", checksum[k]);
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");
    return 0;
}
/* Construct a frame of audio data to be filtered;
 * this simple example just synthesizes a sine wave.
 *
 * Fix: the fill loop iterated over 5 planes, but INPUT_CHANNEL_LAYOUT is
 * stereo, so frame->extended_data only has 2 valid planes and the extra
 * iterations wrote out of bounds.  The loop now covers exactly the number
 * of channels in the configured layout. */
static int get_input(AVFrame *frame, int frame_num)
{
    int err, i, j;
    int nb_channels;
#define FRAME_SIZE 1024
    /* Set up the frame properties and allocate the buffer for the data. */
    frame->sample_rate = INPUT_SAMPLERATE;
    frame->format = INPUT_FORMAT;
    frame->channel_layout = INPUT_CHANNEL_LAYOUT;
    frame->nb_samples = FRAME_SIZE;
    frame->pts = frame_num * FRAME_SIZE;
    err = av_frame_get_buffer(frame, 0);
    if (err < 0)
        return err;
    /* One sine plane per channel of the configured layout (2 for stereo). */
    nb_channels = av_get_channel_layout_nb_channels(INPUT_CHANNEL_LAYOUT);
    /* Fill the data for each channel. */
    for (i = 0; i < nb_channels; i++) {
        float *data = (float*)frame->extended_data[i];
        for (j = 0; j < frame->nb_samples; j++)
            data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
    }
    return 0;
}
/**
 * Run decoded audio through the graph built by init_filter_graph() and
 * print the MD5 checksum of each filtered plane.  This variant replaces
 * the synthesized sine input (get_input) with frames decoded from a
 * hardcoded "./audio.mp3".
 */
int main(int argc, char *argv[])
{
    struct AVMD5 *md5;
    AVFilterGraph *graph;
    AVFilterContext *src, *sink;
    AVFrame *frame;
    /* NOTE(review): av_strerror below expects char*, not uint8_t* — confirm
     * this compiles cleanly on the target toolchain. */
    uint8_t errstr[1024];
    float duration;
    /* NOTE(review): `i` is no longer used since the frame-count loop was
     * replaced by the packet-reading loop below. */
    int err, nb_frames, i;
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
        return 1;
    }
    duration = atof(argv[1]);
    nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
    if (nb_frames <= 0) {
        fprintf(stderr, "Invalid duration: %s\n", argv[1]);
        return 1;
    }
    /* Allocate the frame we will be using to store the data. */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating the frame\n");
        return 1;
    }
    md5 = av_md5_alloc();
    if (!md5) {
        fprintf(stderr, "Error allocating the MD5 context\n");
        return 1;
    }
    /* Set up the filtergraph. */
    err = init_filter_graph(&graph, &src, &sink);
    if (err < 0) {
        fprintf(stderr, "Unable to init filter graph:");
        goto fail;
    }
    // Custom: open and decode ./audio.mp3 instead of synthesizing input.
    AVFormatContext *inputFmt = NULL;
    AVCodec *c;
    AVCodecContext *cc;
    err = avformat_open_input(&inputFmt, "./audio.mp3", NULL, NULL);
    if (!inputFmt) {
        printf("Failed to open media content\n");
        return 1;
    }
    err = avformat_find_stream_info(inputFmt, NULL);
    if (err < 0) {
        printf("Failed to find input stream infor\n");
        return 1;
    }
    /* Build a decoder for the first stream (assumed to be the audio). */
    c = avcodec_find_decoder(inputFmt->streams[0]->codecpar->codec_id);
    if (!c) {
        printf("Unable to find decoder");
        return 1;
    }
    cc = avcodec_alloc_context3(c);
    if (!cc) {
        printf("Unable to alloc memory for codec context\n");
        return 1;
    }
    if (avcodec_parameters_to_context(
            cc, inputFmt->streams[0]->codecpar) < 0) {
        printf("Unable to fill parameters to context\n");
        return 1;
    }
    if (avcodec_open2(cc, c, NULL) < 0) {
        printf("Failed to open codec\n");
        return 1;
    }
    int idx = 0;   /* frame counter used to synthesize monotonic PTS */
    int ret;
    AVPacket packet;
    /* the main filtering loop */
    while (1) {
        /* get an input frame to be filtered */
        // err = get_input(frame, i);
        ret = av_read_frame(inputFmt, &packet);
        if (ret < 0)
            break;
        ret = avcodec_send_packet(cc, &packet);
        if (ret < 0) {
            printf("Failed to decode packet\n");
            return 1;
        }
        /* NOTE(review): this drains every frame produced for the packet
         * into the same AVFrame, so only the last decoded frame survives
         * the loop — earlier frames are silently discarded. */
        while (ret >= 0) {
            ret = avcodec_receive_frame(cc, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                break;
            } else if (ret < 0) {
                printf("Error while receiving frame from decoder: %s",
                       av_err2str(ret));
                return 1;
            }
        }
        /* NOTE(review): these assignments overwrite the decoded frame's
         * real parameters without converting the sample data; if the mp3
         * is not 44100 Hz stereo FLTP with 1024-sample frames, the graph
         * will misinterpret the buffers — confirm the input format. */
        frame->sample_rate = INPUT_SAMPLERATE;
        frame->format = INPUT_FORMAT;
        frame->channel_layout = INPUT_CHANNEL_LAYOUT;
        frame->nb_samples = FRAME_SIZE;
        frame->pts = idx++ * FRAME_SIZE;
        frame->channels = 2;
        /* NOTE(review): `err` is stale here (last set before this loop);
         * this check never reflects the decode that just happened. */
        if (err < 0) {
            fprintf(stderr, "Error generating input frame:");
            goto fail;
        }
        /* Send the frame to the input of the filtergraph. */
        err = av_buffersrc_add_frame(src, frame);
        if (err < 0) {
            av_frame_unref(frame);
            fprintf(stderr, "Error submitting the frame to the filtergraph:");
            goto fail;
        }
        /* Get all the filtered output that is available. */
        while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
            /* now do something with our filtered frame */
            err = process_output(md5, frame);
            if (err < 0) {
                fprintf(stderr, "Error processing the filtered frame:");
                goto fail;
            }
            av_frame_unref(frame);
        }
        /* NOTE(review): `packet` is never av_packet_unref'd — its payload
         * leaks once per iteration. */
        if (err == AVERROR(EAGAIN)) {
            /* Need to feed more frames in. */
            continue;
        } else if (err == AVERROR_EOF) {
            /* Nothing more to do, finish. */
            break;
        } else if (err < 0) {
            /* An error occurred. */
            fprintf(stderr, "Error filtering the data:");
            goto fail;
        }
    }
    avfilter_graph_free(&graph);
    av_frame_free(&frame);
    av_freep(&md5);
    return 0;
fail:
    av_strerror(err, errstr, sizeof(errstr));
    fprintf(stderr, "%s\n", errstr);
    return 1;
}
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Clément Bœsch
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for audio decoding and filtering
* @example filtering_audio.c
*/
#include <unistd.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
/* Filter chain description: resample to 8 kHz, convert to s16 mono. */
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
/* Suggested command line for playing the raw s16le stream written to stdout. */
static const char *player       = "ffplay -f s16le -ar 8000 -ac 1 -";
/* Demuxer and decoder contexts for the selected audio stream. */
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
/* Filter-graph endpoints created by init_filters(). */
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
/* Index of the audio stream inside fmt_ctx; set by open_input_file(). */
static int audio_stream_index = -1;
/**
 * Open the given media file, pick its best audio stream and set up a
 * decoder for it.  On success the file-scope fmt_ctx, dec_ctx and
 * audio_stream_index are initialized.
 *
 * @param filename path of the media file to open
 * @return 0 on success, a negative AVERROR code on failure
 */
static int open_input_file(const char *filename)
{
    const AVCodec *dec;
    int err;
    err = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return err;
    }
    err = avformat_find_stream_info(fmt_ctx, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return err;
    }
    /* Locate the best audio stream and remember its index. */
    err = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
        return err;
    }
    audio_stream_index = err;
    /* Create and open the decoding context for that stream. */
    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        return AVERROR(ENOMEM);
    avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
    err = avcodec_open2(dec_ctx, dec, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
        return err;
    }
    return 0;
}
/**
 * Parse filters_descr into the file-scope filter_graph and wire it up:
 * an abuffer source fed from the decoder (buffersrc_ctx) is connected to
 * the parsed chain's "in" label, and the chain's "out" label is connected
 * to an abuffersink (buffersink_ctx) constrained to s16 / mono / 8000 Hz.
 *
 * Requires open_input_file() to have filled fmt_ctx, dec_ctx and
 * audio_stream_index first.
 *
 * @param filters_descr textual filter description (see filter_descr above)
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret = 0;
    const AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
    const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    /* Output constraints enforced at the sink; -1 terminates each list. */
    static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
    static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
    static const int out_sample_rates[] = { 8000, -1 };
    const AVFilterLink *outlink;
    AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    /* buffer audio source: the decoded frames from the decoder will be inserted here. */
    if (!dec_ctx->channel_layout)
        dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
    snprintf(args, sizeof(args),
             "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
             time_base.num, time_base.den, dec_ctx->sample_rate,
             av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
        goto end;
    }
    /* buffer audio sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
        goto end;
    }
    ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
        goto end;
    }
    ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
        goto end;
    }
    ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
        goto end;
    }
    /*
     * Set the endpoints for the filter graph. The filter_graph will
     * be linked to the graph described by filters_descr.
     */
    /*
     * The buffer source output must be connected to the input pad of
     * the first filter described by filters_descr; since the first
     * filter input label is not specified, it is set to "in" by
     * default.
     */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    /*
     * The buffer sink input must be connected to the output pad of
     * the last filter described by filters_descr; since the last
     * filter output label is not specified, it is set to "out" by
     * default.
     */
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;
    /* Print summary of the sink buffer
     * Note: args buffer is reused to store channel layout string */
    outlink = buffersink_ctx->inputs[0];
    av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
    av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
           (int)outlink->sample_rate,
           (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
           args);
end:
    /* Free the (possibly partially consumed) in/out descriptors on all paths. */
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}
/* Dump one filtered frame as raw little-endian s16 samples on stdout
 * (the "player" command above can consume this stream). */
static void print_frame(const AVFrame *frame)
{
    const uint16_t *sample = (uint16_t*)frame->data[0];
    const int total = frame->nb_samples *
                      av_get_channel_layout_nb_channels(frame->channel_layout);
    int i;
    for (i = 0; i < total; i++, sample++) {
        fputc(*sample & 0xff, stdout);        /* low byte first */
        fputc((*sample >> 8) & 0xff, stdout); /* then high byte */
    }
    fflush(stdout);
}
/**
 * Decode the audio stream of the file given as argv[1] and print timing
 * and format information for every decoded frame.  The filtering path
 * (buffersrc -> graph -> buffersink) is compiled out via #if 0 below.
 *
 * Fix: frame->pts, frame->pkt_dts and frame->pkt_duration are int64_t and
 * frame->channel_layout is uint64_t; printing them with %d is undefined
 * behavior.  They are now cast and printed with matching conversions.
 */
int main(int argc, char **argv)
{
    int ret;
    AVPacket *packet = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    if (!packet || !frame || !filt_frame) {
        fprintf(stderr, "Could not allocate frame or packet\n");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
        exit(1);
    }
    /* The filtering setup (open_input_file(argv[1]) / init_filters(...))
     * is currently disabled; the demuxer/decoder are set up inline. */
    const AVCodec *dec;
    printf("%s\n", argv[1]);
    if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }
    /* select the audio stream */
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
        return ret;
    }
    audio_stream_index = ret;
    /* create decoding context */
    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        return AVERROR(ENOMEM);
    avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
    /* init the audio decoder */
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
        return ret;
    }
    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
            break;
        if (packet->stream_index == audio_stream_index) {
            ret = avcodec_send_packet(dec_ctx, packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
                break;
            }
            /* Drain every frame the decoder produced for this packet. */
            while (ret >= 0) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
                    goto end;
                }
                /* int64_t/uint64_t fields must not be printed with %d. */
                printf("PTS: %lld, DTS: %lld, DUR: %lld, TB: %d/%d, sample_fmt: %d, sample_rate: %d, Channel_layout: %llu\n",
                       (long long)frame->pts,
                       (long long)frame->pkt_dts,
                       (long long)frame->pkt_duration,
                       fmt_ctx->streams[0]->time_base.num,
                       fmt_ctx->streams[0]->time_base.den,
                       frame->format,
                       frame->sample_rate,
                       (unsigned long long)frame->channel_layout);
#if 0
                if (ret >= 0) {
                    /* push the audio data from decoded frame into the filtergraph */
                    if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                        av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
                        break;
                    }
                    /* pull filtered audio from the filtergraph */
                    while (1) {
                        ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                            break;
                        if (ret < 0)
                            goto end;
                        print_frame(filt_frame);
                        av_frame_unref(filt_frame);
                    }
                    av_frame_unref(frame);
                }
#endif
            }
        }
        av_packet_unref(packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_packet_free(&packet);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }
    exit(0);
}
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
/**
 * Minimal demux/decode loop: open the media file given as argv[1], find
 * its best audio stream, decode every packet and print per-packet timing
 * information.
 *
 * Fixes:
 *  - pts/dts/duration are int64_t; they were printed with %u/%d (UB);
 *  - the old log line also printed frame->format / sample_rate /
 *    channel_layout before any frame had been decoded, i.e. the zeroed
 *    fields of a freshly allocated AVFrame — only packet info is printed;
 *  - argv[1] and the allocations are now checked;
 *  - all contexts are released on exit (the original leaked everything);
 *  - the dead "#if 0" duplicate of the loop was removed.
 */
int main(int argc, char *argv[]) {
    int ret = 0;
    AVFormatContext *fmt_ctx = NULL;
    AVCodec *dec = NULL;
    AVCodecContext *dec_ctx = NULL;
    AVPacket *packet = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();
    if (!packet || !frame) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate packet or frame\n");
        return AVERROR(ENOMEM);
    }
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <file>\n", argv[0]);
        return 1;
    }
    if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }
    /* select the audio stream */
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
        return ret;
    }
    int audio_stream_index = ret;
    /* create decoding context */
    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        return AVERROR(ENOMEM);
    avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
    /* init the audio decoder */
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
        return ret;
    }
    /* read all packets */
    while (1) {
        ret = av_read_frame(fmt_ctx, packet);
        if (ret < 0)
            break;
        /* int64_t fields must not be printed with %u/%d. */
        printf("PTS: %lld, DTS: %lld, DUR: %lld, TB: %d/%d\n",
               (long long)packet->pts, (long long)packet->dts,
               (long long)packet->duration,
               fmt_ctx->streams[0]->time_base.num,
               fmt_ctx->streams[0]->time_base.den);
        if (packet->stream_index == audio_stream_index) {
            ret = avcodec_send_packet(dec_ctx, packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
                break;
            }
            /* Drain the decoder; frames are received but not otherwise used. */
            while (ret >= 0) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
                    goto end;
                }
            }
        }
        av_packet_unref(packet);
    }
end:
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_packet_free(&packet);
    av_frame_free(&frame);
    return 0;
}
......@@ -5,6 +5,7 @@ typedef enum {
BEGIN_STREAM = 0,
V_STREAM = 0,
A_STREAM,
A_STREAM_1,
MAX_STREAM
} STREAM_IDX;
......@@ -20,6 +21,7 @@ int main(int argc, char *argv[]) {
char *fileName = "./demo2.mp4";
// This is a packing file
char *audioFile = "./audio.mp3";
char *audioFile_1 = "./audio_1.mp3";
char *outFileName = "./demo3.mp4";
// Number of seconds to shift audio away
......@@ -33,7 +35,8 @@ int main(int argc, char *argv[]) {
AVFormatContext *inputFormatContext = NULL,
*outputFormatContext = NULL,
*audioFormatContext = NULL;
*audioFormatContext = NULL,
*audioFormatContext_1 = NULL;
// Open InputFormatContext of demo2.mp4
ret = avformat_open_input(&inputFormatContext, fileName, NULL, NULL);
......@@ -55,7 +58,19 @@ int main(int argc, char *argv[]) {
}
ret = avformat_find_stream_info(audioFormatContext, NULL);
if (ret < 0) {
printf("Failed to find stream info");
printf("Failed to find stream info\n");
return 0;
}
ret = avformat_open_input(&audioFormatContext_1, audioFile_1, NULL, NULL);
if (ret < 0) {
printf("Failed to open Media_1\n");
return 0;
}
ret = avformat_find_stream_info(audioFormatContext_1, NULL);
if (ret < 0) {
printf("Failed to find stream info\n");
return 0;
}
// Check Precondition
......@@ -69,6 +84,11 @@ int main(int argc, char *argv[]) {
return 0;
}
if (audioFormatContext->nb_streams > 1) {
printf("./audio_1.mp3 has more than one streams\n");
return 0;
}
// Alloc OutputFormatContext
avformat_alloc_output_context2(
&outputFormatContext, NULL, NULL, outFileName);
......@@ -77,13 +97,14 @@ int main(int argc, char *argv[]) {
return 0;
}
int ostreams_idx[MAX_STREAM] = { -1, -1 };
int ostreams_idx[MAX_STREAM] = { -1, -1, -1 };
AVStream* streams[MAX_STREAM];
AVStream *in_stream, *out_stream;
AVCodecParameters *in_codecpar;
streams[V_STREAM] = inputFormatContext->streams[0];
streams[A_STREAM] = audioFormatContext->streams[0];
streams[A_STREAM_1] = audioFormatContext_1->streams[0];
// Create Video Stream and Audio Stream
// for OutputFormtContext.
......@@ -129,7 +150,11 @@ int main(int argc, char *argv[]) {
uint8_t idx = V_STREAM;
AVFormatContext *contexts[MAX_STREAM] = { inputFormatContext, audioFormatContext };
AVFormatContext *contexts[MAX_STREAM] = {
inputFormatContext,
audioFormatContext,
audioFormatContext_1,
};
AVFormatContext *context;
int audioFrameCount = 0;
......@@ -153,16 +178,34 @@ int main(int argc, char *argv[]) {
if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
nbFrames == -1) {
nbFrames = numFramesToMerged(streams[V_STREAM], streams[A_STREAM],
nbFrames = numFramesToMerged(streams[idx], streams[idx],
packet.duration, shift);
}
packet_rescale(&packet, in_stream, out_stream, idx);
switch (idx) {
case V_STREAM:
printf("V-STREAM:\n");
break;
case A_STREAM:
printf("A-STREAM:\n");
break;
case A_STREAM_1:
printf("A_STREAM_1:\n");
break;
}
printf("P: %d, D: %d, Dur: %d, TB: %d/%d\n",
packet.pts, packet.dts, packet.duration,
out_stream->time_base.num,
out_stream->time_base.den);
// Do audio shift
if (nbFrames > 0 && in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
packet.pts += temporal_shift;
packet.dts += temporal_shift;
audioFrameCount++;
}
......@@ -184,6 +227,8 @@ int main(int argc, char *argv[]) {
av_write_trailer(outputFormatContext);
avformat_close_input(&inputFormatContext);
avformat_close_input(&audioFormatContext);
avformat_close_input(&audioFormatContext_1);
avio_closep(&outputFormatContext->pb);
avformat_free_context(outputFormatContext);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment