Commit 1e68d12b authored by NzSN's avatar NzSN

update

parent de41646e
@@ -7,15 +7,12 @@
#include <libavfilter/buffersink.h>
#include <libavutil/opt.h>
-#define INPUT_FMT AV_SAMPLE_FMT_FLTP
-#define INPUT_SAMPLE_RATE 44100
-#define INPUT_TB_DEN 14112000
-#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_STEREO
-uint8_t errstr[1024];
/*****************************************************************************/
/* Declarations */
/*****************************************************************************/
typedef struct MixParameters {
unsigned int index;
AVCodecContext *cc;
AVStream *s;
unsigned offset;
@@ -32,7 +29,7 @@ typedef struct AudioStreamingContext {
typedef struct FilteringContext {
AVFilterGraph *graph;
-AVFilterContext *src;
AVFilterContext **src;
AVFilterContext *sink;
unsigned int numOfSrcs;
} FilteringContext;
@@ -59,6 +56,7 @@ FilterChain build_filter_chain_mix_part(AVFilterGraph *graph, int num
int mixing_audios(int numOfStream, AudioStreamingContext* inputs[],
AudioStreamingContext *output,
FilteringContext* filter);
int write_out(AudioStreamingContext *encoder, AVFrame *frame);
int prepare_audio_encoder(AudioStreamingContext *encoder,
AudioStreamingContext *decoder);
@@ -70,15 +68,42 @@ int add_to_src(AudioStreamingContext *decoder, AVFilterContext *src,
int process_filtered_frames(AudioStreamingContext *encoder, AVFrame *frame);
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
AVFilterContext **src_,
AVFilterContext **sink,
AudioStreamingContext *decoder_1,
AudioStreamingContext *decoder_2);
/*****************************************************************************/
/* Definitions */
/*****************************************************************************/
void audioStreamCtx_Release(AudioStreamingContext **s_ctx) {
int err = 0;
AudioStreamingContext *ctx = *s_ctx;
// Format Cleanup
avformat_close_input(&ctx->fmt);
// Codec context cleanup: avcodec_free_context() also closes the codec,
// so a separate avcodec_close() on the already-freed pointer is not needed.
avcodec_free_context(&ctx->cc);
}
void audioStreamCtx_Release_free(AudioStreamingContext **s_ctx) {
audioStreamCtx_Release(s_ctx);
free(*s_ctx);
*s_ctx = NULL;
}
void filterCtx_Release(FilteringContext **f_ctx) {
FilteringContext *ctx = *f_ctx;
avfilter_graph_free(&ctx->graph);
free(ctx->src);
free(*f_ctx);
*f_ctx = NULL;
}
int audioMixArbitary(int numOfAudios, AudioFile *files[], AudioFile *o_file) {
int err = 0;
AudioStreamingContext *output_ctx;
AudioStreamingContext **audio_ctx =
(AudioStreamingContext**)calloc(1, numOfAudios*sizeof(AudioStreamingContext));
@@ -112,21 +137,150 @@ int audioMixArbitary(int numOfAudios, AudioFile *files[], AudioFile *o_file) {
goto FAIL;
}
-return 0;
goto CLEANUP;
FAIL:
-/* FIXME: Need to release all allocated resources */
err = 1;
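/* Success (goto CLEANUP) and failure (fall through from FAIL) share the same
 * cleanup path, so every allocated context is released exactly once. */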
CLEANUP:
audioStreamCtx_Release_free(&output_ctx);
for (int i = 0; i < numOfAudios; ++i) {
audioStreamCtx_Release_free(&audio_ctx[i]);
}
free(audio_ctx);
filterCtx_Release(&filter_ctx);
return err;
}
int mixing_audios(int numOfStream, AudioStreamingContext* inputs[],
AudioStreamingContext *output,
FilteringContext* filter) {
int err = 0;
AVPacket *packet = av_packet_alloc();
AVFrame *frame = av_frame_alloc();
AudioStreamingContext *asctx = NULL;
int finCount = 0;
_Bool *finished = (_Bool*)calloc(numOfStream, sizeof(_Bool));
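// Main loop: demux one packet from every stream that has not yet hit EOF, decode it,
// and push the decoded frames into that stream's abuffer source. A stream that reaches
// EOF gets a NULL frame pushed instead to signal end-of-stream to the filter graph.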
while (1) {
// Read frame from AudioStreams
for (int i = 0; i < numOfStream; ++i) {
// All frames of this stream have been read.
if (finished[i] == 1)
continue;
asctx = inputs[i];
err = av_read_frame(asctx->fmt, packet);
if (err < 0) {
finished[i] = 1;
++finCount;
err = 0;
} else {
err = avcodec_send_packet(asctx->cc, packet);
if (err < 0 && err != AVERROR(EAGAIN)) {
printf("Failed to send packet to decoder: %s\n",
av_err2str(err));
goto Done;
}
av_packet_unref(packet);
}
// Send a NULL frame to notify the filter source that this stream
// has no more frames.
if (finished[i] == 1) {
err = av_buffersrc_add_frame(filter->src[i], NULL);
if (err < 0) {
printf("Failed to add frame to filter\n");
goto Done;
}
} else {
while (err >= 0) {
err = avcodec_receive_frame(asctx->cc, frame);
if (err == AVERROR(EAGAIN) || err == AVERROR_EOF) {
break;
} else if (err < 0) {
printf("Failed to decode frames: %s\n", av_err2str(err));
goto Done;
}
err = av_buffersrc_add_frame(filter->src[i], frame);
if (err < 0) {
printf("Failed to filter frame\n");
goto Done;
}
av_frame_unref(frame);
}
}
}
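// Once every input has signaled EOF, flush the encoder with a NULL frame and stop.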
if (finCount == numOfStream) {
err = 0;
write_out(output, NULL);
break;
}
// Get filtered frame
while ((err = av_buffersink_get_frame(filter->sink, frame)) >= 0) {
write_out(output, frame);
av_frame_unref(frame);
}
}
av_write_trailer(output->fmt);
Done:
free(finished);
av_packet_free(&packet);
av_frame_free(&frame);
return err;
}
int write_out(AudioStreamingContext *encoder, AVFrame *frame) {
int response = 0;
AVPacket *output_packet = av_packet_alloc();
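// A NULL frame switches the encoder into draining mode so any buffered packets are flushed.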
response = avcodec_send_frame(encoder->cc, frame);
while (response >= 0) {
response = avcodec_receive_packet(encoder->cc, output_packet);
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
break;
} else if (response < 0) {
printf("Error while receiving packet from encoder: %s", av_err2str(response));
return 1;
}
output_packet->stream_index = 0;
response = av_interleaved_write_frame(encoder->fmt, output_packet);
if (response != 0) {
printf("Error %d while receiving packet from decoder: %s",
response, av_err2str(response));
return 1;
}
}
av_packet_unref(output_packet);
av_packet_free(&output_packet);
return 0;
}
/* This function creates a filter chain for each of the input audio streams.
* The filter chain looks like:
-* abuffer -> asetpts -> amix -> abuffersink
* abuffer -> adelay -> amix -> abuffersink
-* abuffer -> asetpts -----\
* abuffer -> adelay -----\
* ........................\
-* abuffer -> asetpts -----\
* abuffer -> adelay -----\
*
-* "abuffer -> asetpts" is called shift part of filter graph
* "abuffer -> adelay" is called the shift part of the filter graph,
* "amix -> abuffersink" is called the mix part of the filter graph */
FilteringContext* create_filter_context(int numOfStream, AudioStreamingContext *s_ctx[]) {
int err = 0;
@@ -147,12 +301,14 @@ FilteringContext* create_filter_context(int numOfStream, AudioStreamingContext *
FilterChain *shift_chains = (FilterChain*)malloc(
numOfStream * sizeof(FilterChain));
-if (shift_chains->src->nb_inputs < numOfStream)
if (mix_chain.src->nb_inputs < (unsigned int)numOfStream)
return NULL;
for (int i = 0; i < numOfStream; ++i) {
// Create a filter chain for shifting purposes for
// each input stream
s_ctx[i]->mixpar.cc = s_ctx[i]->cc;
s_ctx[i]->mixpar.s = s_ctx[i]->s;
chain = build_filter_chain_shift_part(graph, s_ctx[i]->mixpar);
shift_chains[i] = chain;
@@ -162,16 +318,31 @@ FilteringContext* create_filter_context(int numOfStream, AudioStreamingContext *
}
}
// Configure AVFilterGraph
err = avfilter_graph_config(graph, NULL);
if (err < 0) {
goto FAIL;
}
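// Dump the configured graph as a debugging aid (logged at ERROR level so it is always visible).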
char *dump = avfilter_graph_dump(graph, NULL);
av_log(NULL, AV_LOG_ERROR, "Graph:\n%s\n", dump);
free(dump);
FilteringContext *f_ctx = (FilteringContext*)malloc(
sizeof(FilteringContext));
f_ctx->graph = graph;
f_ctx->sink = mix_chain.sink;
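// One abuffer source per input stream; they all feed the single shared sink through amix.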
f_ctx->src = (AVFilterContext**)malloc(numOfStream*sizeof(AVFilterContext*));
for (int i = 0; i < numOfStream; ++i) {
-f_ctx->src = shift_chains[i].src;
f_ctx->src[i] = shift_chains[i].src;
}
free(shift_chains);
return f_ctx;
FAIL:
free(shift_chains);
if (graph)
avfilter_graph_free(&graph);
return NULL;
@@ -181,7 +352,9 @@ FilterChain build_filter_chain_mix_part(AVFilterGraph *graph, int numOfStreams)
int err = 0;
AVFilterContext *amix_ctx = NULL;
AVFilterContext *abuffersink_ctx = NULL;
AVFilterContext *asetnsamples_ctx = NULL;
const AVFilter *amix = NULL;
const AVFilter *asetnsamples = NULL;
const AVFilter *abuffersink = NULL;
/* Create amix filter */
@@ -193,7 +366,8 @@ FilterChain build_filter_chain_mix_part(AVFilterGraph *graph, int numOfStreams)
if (amix_ctx == NULL) {
goto FAIL;
}
-av_opt_set_int(amix_ctx, "input", numOfStreams, AV_OPT_SEARCH_CHILDREN);
av_opt_set_int(amix_ctx, "inputs", numOfStreams, AV_OPT_SEARCH_CHILDREN);
av_opt_set(amix_ctx, "duration", "longest", AV_OPT_SEARCH_CHILDREN);
err = avfilter_init_str(amix_ctx, NULL);
if (err < 0) {
@@ -215,11 +389,30 @@ FilterChain build_filter_chain_mix_part(AVFilterGraph *graph, int numOfStreams)
goto FAIL;
}
/* Create asetnsamples filter */
asetnsamples = avfilter_get_by_name("asetnsamples");
if (asetnsamples == NULL) {
goto FAIL;
}
asetnsamples_ctx = avfilter_graph_alloc_filter(
graph, asetnsamples, "asetnsamples_out");
if (asetnsamples_ctx == NULL) {
goto FAIL;
}
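// 1152 samples per frame matches the fixed frame size of MPEG audio (MP2/MP3) encoders,
// presumably the output codec this pipeline targets.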
av_opt_set_int(asetnsamples_ctx, "n", 1152, AV_OPT_SEARCH_CHILDREN);
err = avfilter_init_str(asetnsamples_ctx, NULL);
if (err < 0) {
goto FAIL;
}
// Link amix -> asetnsamples -> abuffersink
-err = avfilter_link(amix_ctx, 0, abuffersink_ctx, 0);
err = avfilter_link(amix_ctx, 0, asetnsamples_ctx, 0);
if (err != 0) {
goto FAIL;
}
err = avfilter_link(asetnsamples_ctx, 0, abuffersink_ctx, 0);
return (FilterChain){amix_ctx, abuffersink_ctx};
@@ -234,12 +427,13 @@ FilterChain build_filter_chain_mix_part(AVFilterGraph *graph, int numOfStreams)
FilterChain build_filter_chain_shift_part(AVFilterGraph *graph, MixParameters param) {
int err = 0;
char ch_layout[64] = {0};
char filter_name[64] = {0};
const AVFilter *abuffer = NULL;
AVFilterContext *abuffer_ctx = NULL;
-const AVFilter *asetpts = NULL;
const AVFilter *adelay = NULL;
-AVFilterContext *asetpts_ctx = NULL;
AVFilterContext *adelay_ctx = NULL;
AVRational time_base = param.cc->time_base;
@@ -248,13 +442,15 @@ FilterChain build_filter_chain_shift_part(AVFilterGraph *graph, MixParameters pa
if (abuffer == NULL) {
goto FAIL;
}
abuffer_ctx = avfilter_graph_alloc_filter(graph, abuffer, "abuffer");
sprintf(filter_name, "abuffer_%d", param.index);
abuffer_ctx = avfilter_graph_alloc_filter(graph, abuffer, filter_name);
if (abuffer_ctx == NULL) {
goto FAIL;
}
av_get_channel_layout_string(
-(char*)ch_layout, sizeof(ch_layout), 0, AV_OPT_SEARCH_CHILDREN);
(char*)ch_layout, sizeof(ch_layout), 0, param.cc->channel_layout);
av_opt_set(abuffer_ctx, "channel_layout", (char*)ch_layout, AV_OPT_SEARCH_CHILDREN);
av_opt_set(abuffer_ctx, "sample_fmt",
av_get_sample_fmt_name(param.cc->sample_fmt),
@@ -265,43 +461,43 @@ FilterChain build_filter_chain_shift_part(AVFilterGraph *graph, MixParameters pa
av_opt_set_int(abuffer_ctx, "sample_rate", param.cc->sample_rate, AV_OPT_SEARCH_CHILDREN);
err = avfilter_init_str(abuffer_ctx, NULL);
if (err < 0) {
goto FAIL;
}
-// Create asetpts filter
// Create adelay filter
-asetpts = avfilter_get_by_name("asetpts");
adelay = avfilter_get_by_name("adelay");
-if (asetpts == NULL) {
if (adelay == NULL) {
goto FAIL;
}
-asetpts_ctx = avfilter_graph_alloc_filter(graph, asetpts, "asetpts");
-if (asetpts_ctx == NULL) {
sprintf(filter_name, "adelay_%d", param.index);
adelay_ctx = avfilter_graph_alloc_filter(graph, adelay, filter_name);
if (adelay_ctx == NULL) {
goto FAIL;
}
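// adelay expects one delay per channel in milliseconds; param.offset is assumed to be in
// seconds here, and two identical values are passed on the assumption of stereo input.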
-// Construct expr argument of asetpts
-char expr[128] = { 0 };
-sprintf(expr, "PTS+%u/TB", param.offset);
-av_opt_set(asetpts_ctx, "expr", expr, AV_OPT_SEARCH_CHILDREN);
char delays[32] = {0};
unsigned int offset = param.offset*1000;
sprintf(delays, "%d|%d", offset, offset);
av_opt_set(adelay_ctx, "delays", delays, AV_OPT_SEARCH_CHILDREN);
-err = avfilter_init_str(asetpts_ctx, NULL);
err = avfilter_init_str(adelay_ctx, NULL);
if (err < 0) {
goto FAIL;
}
-err = avfilter_link(abuffer_ctx, 0, asetpts_ctx, 0);
err = avfilter_link(abuffer_ctx, 0, adelay_ctx, 0);
if (err < 0) {
goto FAIL;
}
-return (FilterChain){ abuffer_ctx, asetpts_ctx };
return (FilterChain){ abuffer_ctx, adelay_ctx };
FAIL:
if (abuffer_ctx)
avfilter_free(abuffer_ctx);
-if (asetpts_ctx)
if (adelay_ctx)
-avfilter_free(asetpts_ctx);
avfilter_free(adelay_ctx);
return (FilterChain){NULL, NULL};
}
@@ -356,6 +552,14 @@ AudioStreamingContext* prepare_output_streaming_context(
}
avcodec_parameters_from_context(stream->codecpar, codec_ctx);
if (avio_open(&fmt_ctx->pb, file->filepath, AVIO_FLAG_WRITE) < 0) {
goto FAIL;
}
AVDictionary *opts = NULL;
if (avformat_write_header(fmt_ctx, NULL) < 0) {
goto FAIL;
}
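// The container header is written once here; av_write_trailer() in mixing_audios() later
// finalizes the file (fmt_ctx is presumably stored in the returned context's fmt field).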
AudioStreamingContext *o_streaming_ctx = (AudioStreamingContext*)malloc(
sizeof(AudioStreamingContext));
o_streaming_ctx->c = codec;
@@ -440,6 +644,39 @@ AudioStreamingContext* open_audio_file(AudioFile *files) {
int main(int argc, char *argv[]) {
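// Assumed usage: ./mixer <input1> <input2> <input3> <output>; argv is not validated here.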
AudioFile i_file_1 = {
argv[1],
(MixParameters){
0, NULL, NULL, 10
}
};
AudioFile i_file_2 = {
argv[2],
(MixParameters){
1, NULL, NULL, 10
}
};
AudioFile i_file_3 = {
argv[3],
(MixParameters){
2, NULL, NULL, 5
}
};
AudioFile o_file = {
argv[4],
(MixParameters){
0, NULL, NULL, 0
}
};
AudioFile *i_files[3] = {
&i_file_1,
&i_file_2,
&i_file_3
};
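// The offsets above (10, 10, 5) are interpreted as seconds by the shift chains,
// which multiply them by 1000 before handing them to adelay.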
audioMixArbitary(3, i_files, &o_file);
return 0;
}