Commit de41646e authored by NzSN

Remove trivial code

parent 2f065cc2
Pipeline #16293 failed
@@ -16,7 +16,9 @@
uint8_t errstr[1024];
typedef struct MixParameters {
AVCodecContext *cc; // codec context of this input (sample format, rate, time base)
AVStream *s;        // the input audio stream
unsigned offset;    // shift added to every PTS of this input (see build_filter_chain_shift_part)
} MixParameters;
typedef struct AudioStreamingContext {
@@ -31,8 +33,8 @@ typedef struct AudioStreamingContext {
typedef struct FilteringContext {
AVFilterGraph *graph;
AVFilterContext *src;
AVFilterContext *src_;
AVFilterContext *sink;
unsigned int numOfSrcs;
} FilteringContext;
typedef struct FilterChain {
@@ -52,7 +54,7 @@ typedef struct AudioFile {
AudioStreamingContext* open_audio_file(AudioFile*);
AudioStreamingContext* prepare_output_streaming_context(AudioFile *file, AudioStreamingContext*);
FilteringContext* create_filter_context(int numOfStream, AudioStreamingContext*[]);
FilterChain build_filter_chain_shift_part(AVFilterGraph *graph, MixParameters param);
FilterChain build_filter_chain_mix_part(AVFilterGraph *graph, int numOfStream);
int mixing_audios(int numOfStream, AudioStreamingContext* inputs[],
AudioStreamingContext *output,
@@ -77,7 +79,6 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
int audioMixArbitary(int numOfAudios, AudioFile *files[], AudioFile *o_file) {
int ret = 0;
AudioStreamingContext *output_ctx;
AudioStreamingContext **audio_ctx =
(AudioStreamingContext**)calloc(1, numOfAudios*sizeof(AudioStreamingContext));
@@ -146,19 +147,30 @@ FilteringContext* create_filter_context(int numOfStream, AudioStreamingContext *
FilterChain *shift_chains = (FilterChain*)malloc(
numOfStream * sizeof(FilterChain));
if (mix_chain.src->nb_inputs < numOfStream)
return NULL;
for (int i = 0; i < numOfStream; ++i) {
// Create a shift filter chain (abuffer -> asetpts) for each input stream
chain = build_filter_chain_shift_part(graph, s_ctx[i]->mixpar);
shift_chains[i] = chain;
// Feed this input's shifted output into pad i of the amix filter
err = avfilter_link(chain.sink, 0, mix_chain.src, i);
if (err < 0) {
goto FAIL;
}
}
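// Package the assembled graph and its endpoints so the caller can push
// decoded frames into the sources and drain mixed frames from the sink.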
FilteringContext *f_ctx = (FilteringContext*)malloc(
sizeof(FilteringContext));
f_ctx->graph = graph;
f_ctx->sink = mix_chain.sink;
for (int i = 0; i < numOfStream; ++i) {
f_ctx->src = shift_chains[i].src;
}
return f_ctx; // success: hand the context back before the FAIL cleanup path
FAIL:
if (graph)
avfilter_graph_free(&graph);
@@ -219,10 +231,78 @@ FilterChain build_filter_chain_mix_part(AVFilterGraph *graph, int numOfStreams)
return (FilterChain){ NULL, NULL };
}
FilterChain build_filter_chain_shift_part(AVFilterGraph *graph, MixParameters param) {
int err = 0;
char ch_layout[64] = {0};
const AVFilter *abuffer = NULL;
AVFilterContext *abuffer_ctx = NULL;
const AVFilter *asetpts = NULL;
AVFilterContext *asetpts_ctx = NULL;
AVRational time_base = param.cc->time_base;
// Create abuffer filter
abuffer = avfilter_get_by_name("abuffer");
if (abuffer == NULL) {
goto FAIL;
}
abuffer_ctx = avfilter_graph_alloc_filter(graph, abuffer, "abuffer");
if (abuffer_ctx == NULL) {
goto FAIL;
}
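// Configure the source to match the input: channel layout, sample format,
// time base and sample rate all come from the input's codec context.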
av_get_channel_layout_string(
(char*)ch_layout, sizeof(ch_layout), 0, param.cc->channel_layout);
av_opt_set(abuffer_ctx, "channel_layout", (char*)ch_layout, AV_OPT_SEARCH_CHILDREN);
av_opt_set(abuffer_ctx, "sample_fmt",
av_get_sample_fmt_name(param.cc->sample_fmt),
AV_OPT_SEARCH_CHILDREN);
av_opt_set_q(abuffer_ctx, "time_base",
(AVRational){time_base.num, time_base.den},
AV_OPT_SEARCH_CHILDREN);
av_opt_set_int(abuffer_ctx, "sample_rate", param.cc->sample_rate, AV_OPT_SEARCH_CHILDREN);
err = avfilter_init_str(abuffer_ctx, NULL);
if (err < 0) {
goto FAIL;
}
// Create asetpts filter
asetpts = avfilter_get_by_name("asetpts");
if (asetpts == NULL) {
goto FAIL;
}
asetpts_ctx = avfilter_graph_alloc_filter(graph, asetpts, "asetpts");
if (asetpts_ctx == NULL) {
goto FAIL;
}
// Construct the expr argument of asetpts: "PTS+offset/TB" adds param.offset
// (interpreted in seconds; dividing by the time base TB converts it into
// timestamp units) to every PTS, delaying this input before it is mixed.
char expr[128] = { 0 };
sprintf(expr, "PTS+%u/TB", param.offset);
av_opt_set(asetpts_ctx, "expr", expr, AV_OPT_SEARCH_CHILDREN);
err = avfilter_init_str(asetpts_ctx, NULL);
if (err < 0) {
goto FAIL;
}
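// Wire abuffer into asetpts; the asetpts context becomes this chain's sink
// and is linked into the shared amix filter by the caller.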
err = avfilter_link(abuffer_ctx, 0, asetpts_ctx, 0);
if (err < 0) {
goto FAIL;
}
return (FilterChain){ abuffer_ctx, asetpts_ctx };
FAIL:
if (abuffer_ctx)
avfilter_free(abuffer_ctx);
if (asetpts_ctx)
avfilter_free(asetpts_ctx);
return (FilterChain){NULL, NULL};
}
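For readability, here is a minimal sketch of how these helpers are meant to be combined, assuming FilterChain exposes src/sink pointers and each input's mixpar (cc, s, offset) has already been filled by the caller; the function name assemble_mix_graph_sketch and its signature are illustrative, not part of this patch:

static int assemble_mix_graph_sketch(int n, AudioStreamingContext *inputs[],
                                     AVFilterGraph **out_graph,
                                     AVFilterContext *out_srcs[],
                                     AVFilterContext **out_sink) {
    AVFilterGraph *graph = avfilter_graph_alloc();
    if (graph == NULL)
        return AVERROR(ENOMEM);

    // amix with n input pads feeding an abuffersink
    FilterChain mix = build_filter_chain_mix_part(graph, n);
    if (mix.src == NULL)
        goto fail;

    for (int i = 0; i < n; ++i) {
        // abuffer -> asetpts chain for input i, shifted by inputs[i]->mixpar.offset
        FilterChain shift = build_filter_chain_shift_part(graph, inputs[i]->mixpar);
        if (shift.src == NULL)
            goto fail;
        if (avfilter_link(shift.sink, 0, mix.src, i) < 0)
            goto fail;
        out_srcs[i] = shift.src; // caller pushes decoded frames into these sources
    }

    // Validate links and negotiate formats before any frames are pushed in
    if (avfilter_graph_config(graph, NULL) < 0)
        goto fail;

    *out_graph = graph;
    *out_sink = mix.sink;
    return 0;

fail:
    avfilter_graph_free(&graph);
    return -1;
}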
@@ -358,661 +438,8 @@ AudioStreamingContext* open_audio_file(AudioFile *files) {
}
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
AVFilterContext **src_,
AVFilterContext **sink,
AudioStreamingContext *decoder_1,
AudioStreamingContext *decoder_2) {
AVFilterGraph *filter_graph;
AVFilterContext *abuffer_first_ctx;
AVFilterContext *abuffer_second_ctx;
AVFilterContext *amix_ctx;
AVFilterContext *abuffersink_ctx;
const AVFilter *amix;
const AVFilter *abuffer_first;
const AVFilter *abuffer_second;
const AVFilter *abuffersink;
uint8_t ch_layout[64];
int err = 0;
AVFormatContext *fmt = decoder_1->fmt;
AVCodecContext *dec_ctx = decoder_1->cc;
AVFormatContext *fmt_ = decoder_2->fmt;
AVCodecContext *dec_ctx_ = decoder_2->cc;
AVRational time_base = fmt->streams[0]->time_base;
AVRational time_base_ = fmt_->streams[0]->time_base;
filter_graph = avfilter_graph_alloc();
if (!filter_graph) {
printf("Unable to create filter graph.\n");
return 1;
}
/* Create first abuffer filter */
abuffer_first = avfilter_get_by_name("abuffer");
if (!abuffer_first) {
printf("Unable to find the abuffer filter\n");
return 1;
}
abuffer_first_ctx = avfilter_graph_alloc_filter(
filter_graph, abuffer_first, "SRC");
if (!abuffer_first_ctx) {
printf("Could not allocate the abuffer instance.\n");
return 1;
}
av_get_channel_layout_string((char*)ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
av_opt_set (abuffer_first_ctx, "channel_layout", (char*)ch_layout, AV_OPT_SEARCH_CHILDREN);
av_opt_set (abuffer_first_ctx, "sample_fmt", av_get_sample_fmt_name(dec_ctx->sample_fmt), AV_OPT_SEARCH_CHILDREN);
av_opt_set_q (abuffer_first_ctx, "time_base", (AVRational){ time_base.num, time_base.den }, AV_OPT_SEARCH_CHILDREN);
av_opt_set_int(abuffer_first_ctx, "sample_rate", dec_ctx->sample_rate, AV_OPT_SEARCH_CHILDREN);
err = avfilter_init_str(abuffer_first_ctx, NULL);
if (err < 0) {
printf("Could not initialize the abuffer filter.\n");
return err;
}
/* Create second abuffer filter */
abuffer_second = avfilter_get_by_name("abuffer");
if (!abuffer_second) {
printf("Unable to find the abuffer filter\n");
return 1;
}
abuffer_second_ctx = avfilter_graph_alloc_filter(
filter_graph, abuffer_second, "src_2");
if (!abuffer_second_ctx) {
printf("Could not allocate the abuffer instance.\n");
return 1;
}
av_get_channel_layout_string((char*)ch_layout, sizeof(ch_layout), 0, AV_CH_LAYOUT_STEREO);
av_opt_set(abuffer_second_ctx, "channel_layout", (char*)ch_layout, AV_OPT_SEARCH_CHILDREN);
av_opt_set(abuffer_second_ctx, "sample_fmt", av_get_sample_fmt_name(dec_ctx_->sample_fmt), AV_OPT_SEARCH_CHILDREN);
av_opt_set_q(abuffer_second_ctx, "time_base", (AVRational){time_base_.num, time_base_.den}, AV_OPT_SEARCH_CHILDREN);
av_opt_set_int(abuffer_second_ctx, "sample_rate", dec_ctx_->sample_rate, AV_OPT_SEARCH_CHILDREN);
err = avfilter_init_str(abuffer_second_ctx, NULL);
if (err < 0) {
printf("Could not initialize the abuffer filter.\n");
return err;
}
/* Create amix filter */
amix = avfilter_get_by_name("amix");
if (!amix) {
printf("Unable to find the amix filter\n");
return 1;
}
amix_ctx = avfilter_graph_alloc_filter(filter_graph, amix, "amix");
if (!amix_ctx) {
printf("Could not allocate the amix instance.\n");
return 1;
}
av_opt_set_int(amix_ctx, "inputs", 2, AV_OPT_SEARCH_CHILDREN);
err = avfilter_init_str(amix_ctx, NULL);
if (err < 0) {
printf("Could not initialize the amix filter.\n");
return 1;
}
/* Create abuffersink filter */
abuffersink = avfilter_get_by_name("abuffersink");
if (!abuffersink) {
printf("Unable to find the abuffersink.\n");
return 1;
}
abuffersink_ctx = avfilter_graph_alloc_filter(
filter_graph, abuffersink, "abuffersink");
if (!abuffersink_ctx) {
printf("Could not allocate the abuffersink.\n");
return 1;
}
err = avfilter_init_str(abuffersink_ctx, NULL);
if (err < 0) {
printf("Could not initialize the abuffersink instance.\n");
return 1;
}
/* Link Filter together */
err = avfilter_link(abuffer_first_ctx, 0, amix_ctx, 0);
if (err >= 0)
avfilter_link(abuffer_second_ctx, 0, amix_ctx, 1);
if (err >= 0)
avfilter_link(amix_ctx, 0, abuffersink_ctx, 0);
if (err < 0) {
printf("Failed to connecting filters\n");
return err;
}
err = avfilter_graph_config(filter_graph, NULL);
if (err < 0) {
printf("Failed to configure filter graph\n");
return 1;
}
*graph = filter_graph;
*src = abuffer_first_ctx;
*src_ = abuffer_second_ctx;
*sink = abuffersink_ctx;
return 0;
}
int audioMix(char *argv[]) {
int ret = 0;
FilteringContext filter_ctx;
memset(&filter_ctx, 0, sizeof(FilteringContext));
// Decoder
AudioStreamingContext s1_decoder;
AudioStreamingContext s2_decoder;
memset(&s1_decoder, 0, sizeof(AudioStreamingContext));
memset(&s2_decoder, 0, sizeof(AudioStreamingContext));
// Encoder
AudioStreamingContext o_encoder;
memset(&o_encoder, 0, sizeof(AudioStreamingContext));
s1_decoder.fileName = argv[1];
s2_decoder.fileName = argv[2];
o_encoder.fileName = argv[3];
ret = avformat_open_input(&s1_decoder.fmt, s1_decoder.fileName, NULL, NULL);
if (ret < 0) {
printf("Failed to open input file 1\n");
return 1;
}
ret = avformat_find_stream_info(s1_decoder.fmt, NULL);
if (ret < 0) {
printf("Failed to find input stream infors\n");
return 1;
}
ret = avformat_open_input(&s2_decoder.fmt, s2_decoder.fileName, NULL, NULL);
if (ret < 0) {
printf("Failed to open input file: %s\n", s2_decoder.fileName);
return 1;
}
ret = avformat_find_stream_info(s2_decoder.fmt, NULL);
if (ret < 0) {
printf("Failed to find input stream infors\n");
return 1;
}
avformat_alloc_output_context2(
&o_encoder.fmt, NULL, NULL, o_encoder.fileName);
if (!o_encoder.fmt) {
printf("Could not create output context\n");
return 1;
}
/* Prepare decoder for input 1 */
prepare_audio_decoder(&s1_decoder);
/* Prepare decoder for input 2 */
prepare_audio_decoder(&s2_decoder);
/* Prepare encoder for output */
prepare_audio_encoder(&o_encoder, &s1_decoder);
if (avio_open(&o_encoder.fmt->pb, o_encoder.fileName, AVIO_FLAG_WRITE) < 0) {
printf("Could not open the output file\n");
return 1;
}
AVDictionary *opts = NULL;
if (avformat_write_header(o_encoder.fmt, &opts) < 0) {
printf("Failed to write header to output file.\n");
return 1;
}
init_filter_graph(&filter_ctx.graph, &filter_ctx.src,
&filter_ctx.src_, &filter_ctx.sink,
&s1_decoder, &s2_decoder);
int idx = 0;
AVPacket *packet = av_packet_alloc();
AVFrame *frame = av_frame_alloc();
AudioStreamingContext *audioContexts[2] = {
&s1_decoder,
&s2_decoder
};
int fin[2] = { 0, 0 };
AVFilterContext *srcs[2] = {
filter_ctx.src,
filter_ctx.src_
};
int i_ = 0;
const int nb_inputs = 2;
while (1) {
// Add Stream of frames into filter through
// source filters.
for (int i = 0; i < nb_inputs; ++i) {
if (fin[i] != 1) {
ret = av_read_frame(audioContexts[i]->fmt, packet);
if (ret < 0) {
if (ret == AVERROR_EOF) {
fin[i] = 1;
} else {
printf("Failed to read frame\n");
}
}
if (fin[i] == 1) {
add_to_src(audioContexts[i], srcs[i], NULL, NULL);
} else {
add_to_src(audioContexts[i], srcs[i], packet, frame);
}
av_packet_unref(packet);
i_ = i;
}
}
if (fin[i_] == 1) {
process_filtered_frames(&o_encoder, NULL);
} else {
// Get filtered frames
while ((ret = av_buffersink_get_frame(filter_ctx.sink, frame)) >= 0) {
process_filtered_frames(&o_encoder, frame);
av_frame_unref(frame);
}
}
if (fin[0] == 1 && fin[1] == 1) {
break;
}
}
av_frame_free(&frame);
av_packet_free(&packet);
av_write_trailer(o_encoder.fmt);
avfilter_graph_free(&filter_ctx.graph);
avformat_close_input(&s1_decoder.fmt);
avformat_close_input(&s2_decoder.fmt);
avformat_free_context(s1_decoder.fmt);
s1_decoder.fmt = NULL;
avformat_free_context(s2_decoder.fmt);
s2_decoder.fmt = NULL;
avio_close(o_encoder.fmt->pb);
avformat_free_context(o_encoder.fmt);
o_encoder.fmt = NULL;
avcodec_close(s1_decoder.cc);
avcodec_free_context(&s1_decoder.cc);
avcodec_close(s2_decoder.cc);
avcodec_free_context(&s2_decoder.cc);
avcodec_close(o_encoder.cc);
avcodec_free_context(&o_encoder.cc);
return 0;
}
int process_filtered_frames(AudioStreamingContext *encoder, AVFrame *frame) {
int response = 0;
AVPacket *output_packet = av_packet_alloc();
response = avcodec_send_frame(encoder->cc, frame);
while (response >= 0) {
response = avcodec_receive_packet(encoder->cc, output_packet);
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
break;
} else if (response < 0) {
printf("Error while receiving packet from encoder: %s", av_err2str(response));
return 1;
}
output_packet->stream_index = 0;
response = av_interleaved_write_frame(encoder->fmt, output_packet);
if (response != 0) {
printf("Error %d while receiving packet from decoder: %s",
response, av_err2str(response));
return 1;
}
}
av_packet_unref(output_packet);
av_packet_free(&output_packet);
return 0;
}
int add_to_src(AudioStreamingContext *decoder, AVFilterContext *src,
AVPacket *input_packet, AVFrame *input_frame) {
int err = 0;
if (input_packet == NULL) {
err = av_buffersrc_add_frame(src, NULL);
if (err < 0) {
printf("Failed to write NULL frame\n");
}
goto CLEANUP;
}
int response = avcodec_send_packet(decoder->cc, input_packet);
if (response < 0) {
printf("Failed to decode packet\n");
goto CLEANUP;
}
while (response >= 0) {
response = avcodec_receive_frame(decoder->cc, input_frame);
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
break;
} else if (response < 0) {
printf("Error while receiving frame from decoder: %s",
av_err2str(response));
goto CLEANUP;
}
err = av_buffersrc_add_frame(src, input_frame);
if (err < 0) {
printf("Failed to submitting the frame to the filtergraph: %s\n",
av_err2str(err));
goto CLEANUP;
}
}
CLEANUP:
av_frame_unref(input_frame);
return err;
}
int prepare_audio_encoder(AudioStreamingContext *encoder, AudioStreamingContext *decoder) {
AVStream *stream = decoder->s;
encoder->s = avformat_new_stream(encoder->fmt, NULL);
if (!encoder->s) {
return 1;
}
encoder->c = avcodec_find_encoder(stream->codecpar->codec_id);
if (!encoder->c) {
printf("Could not find the proper codec");
return 1;
}
encoder->cc = avcodec_alloc_context3(encoder->c);
if (!encoder->cc) {
printf("Could not allocated memory for codec context\n");
return 1;
}
encoder->cc->channels = decoder->cc->channels;
encoder->cc->channel_layout = decoder->cc->channel_layout;
encoder->cc->sample_rate = decoder->cc->sample_rate;
encoder->cc->sample_fmt = decoder->cc->sample_fmt;
encoder->cc->bit_rate = decoder->cc->bit_rate;
encoder->cc->time_base = decoder->cc->time_base;
encoder->cc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
encoder->s->time_base = encoder->cc->time_base;
if (avcodec_open2(encoder->cc, encoder->c, NULL) < 0) {
printf("Could not open the codec");
return 1;
}
avcodec_parameters_from_context(encoder->s->codecpar, encoder->cc);
return 0;
}
int prepare_audio_decoder(AudioStreamingContext *decoder) {
AVFormatContext *fmtCtx = decoder->fmt;
AVCodec *c = decoder->c;
AVCodecContext *cc = decoder->cc;
AVStream *inputStream1 = fmtCtx->streams[0];
c = avcodec_find_decoder(inputStream1->codecpar->codec_id);
if (!c) {
printf("Unable to find decoder for input1");
return 1;
}
cc = avcodec_alloc_context3(c);
if (!cc) {
printf("Unable to alloc memory for codec context\n");
return 1;
}
if (avcodec_parameters_to_context(cc, inputStream1->codecpar) < 0) {
printf("Failed to fill parameters to context\n");
return 1;
}
if (avcodec_open2(cc, c, NULL) < 0) {
printf("Failed to open codec\n");
return 1;
}
decoder->s = inputStream1;
decoder->c = c;
decoder->cc = cc;
return 0;
}
static int add_frame_to_fmt(AVFormatContext *oFmt, AVStream *in_stream, AVStream *out_stream, AVPacket *packet) {
int ret = 0;
packet->pts = av_rescale_q_rnd(packet->pts, in_stream->time_base, out_stream->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet->dts = av_rescale_q_rnd(packet->dts, in_stream->time_base, out_stream->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet->duration = av_rescale_q(packet->duration, in_stream->time_base, out_stream->time_base);
packet->pos = -1;
ret = av_interleaved_write_frame(oFmt, packet);
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
return 1;
}
return 0;
}
char *do_mergeAV(char *videoPath, char *audioPath, char *outputPath) {
int err = 0;
AVFormatContext *vFmtCtx = NULL,
*aFmtCtx = NULL,
*outputCtx = NULL;
/* Open Video media */
err = avformat_open_input(&vFmtCtx, videoPath, NULL, NULL);
if (err < 0) {
printf("Error occured during opening video media.\n");
return NULL;
}
err = avformat_find_stream_info(vFmtCtx, NULL);
if (err < 0) {
printf("Failed to find stream infor from video media.\n");
return NULL;
}
/* Open Mixed Audio media */
err = avformat_open_input(&aFmtCtx, audioPath, NULL, NULL);
if (err < 0) {
printf("Error occured during opening mixed audio media.\n");
return NULL;
}
err = avformat_find_stream_info(aFmtCtx, NULL);
if (err < 0) {
printf("Failed to find stream infor from mixed audio media.\n");
return NULL;
}
avformat_alloc_output_context2(&outputCtx, NULL, NULL, outputPath);
if (!outputCtx) {
printf("Failed to create output context.\n");
return NULL;
}
enum {
FIRST_STREAM = 0,
V_STREAM = 0,
A_STREAM = 1,
END_STREAM = 2,
NB_STREAMS = 2,
};
AVStream *i_streams[2] = { NULL, NULL };
AVStream *o_streams[2] = { NULL, NULL };
/* Create Video Stream for output */
AVStream *o_vstream = avformat_new_stream(outputCtx, NULL);
if (!o_vstream) {
printf("Failed to create new stream for output.\n");
return NULL;
}
o_streams[V_STREAM] = o_vstream;
// Copy parameters to Video Stream of output from input stream
int video_stream_idx = av_find_best_stream(
vFmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
i_streams[V_STREAM] = vFmtCtx->streams[video_stream_idx];
err = avcodec_parameters_copy(o_vstream->codecpar, i_streams[V_STREAM]->codecpar);
if (err < 0) {
printf("Failed to copy codec parameters.\n");
return NULL;
}
/* Create Audio Stream for output */
AVStream *o_astream = avformat_new_stream(outputCtx, NULL);
if (!o_astream) {
printf("Failed to create new stream for output.\n");
return NULL;
}
o_streams[A_STREAM] = o_astream;
// Copy parameters to Audio Stream of output from input stream
int audio_stream_idx = av_find_best_stream(
aFmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
i_streams[A_STREAM] = aFmtCtx->streams[audio_stream_idx];
err = avcodec_parameters_copy(o_astream->codecpar, i_streams[A_STREAM]->codecpar);
if (err < 0) {
printf("Failed to copy codec parameters.\n");
return NULL;
}
err = avio_open(&outputCtx->pb, outputPath, AVIO_FLAG_WRITE);
if (err < 0) {
printf("Failed to open output file %s", audioPath);
return NULL;
}
AVDictionary *opts = NULL;
err = avformat_write_header(outputCtx, &opts);
if (err < 0) {
printf("Failed to write header to output file.\n");
return NULL;
}
int idx = FIRST_STREAM;
int fin[NB_STREAMS] = { 0, 0 };
AVPacket packet;
AVFormatContext *context = NULL;
AVFormatContext *contexts[2] = {
vFmtCtx,
aFmtCtx
};
while (1) {
context = contexts[idx];
// Write frame to output fmt
if (fin[idx] == 0) {
err = av_read_frame(context, &packet);
if (err < 0) {
fin[idx] = 1;
goto NEXT;
}
packet.stream_index = idx;
add_frame_to_fmt(outputCtx, i_streams[idx], outputCtx->streams[idx], &packet);
av_packet_unref(&packet);
}
NEXT:
if (fin[V_STREAM] == 1 &&
fin[A_STREAM] == 1) {
break;
}
idx = (idx + 1) % NB_STREAMS;
}
av_write_trailer(outputCtx);
avformat_close_input(&vFmtCtx);
avformat_free_context(vFmtCtx);
avformat_close_input(&aFmtCtx);
avformat_free_context(aFmtCtx);
avio_closep(&outputCtx->pb);
avformat_free_context(outputCtx);
return outputPath;
}
char* mergeAV(char *videoPath, char *audio_1_path, char *audio_2_path) {
int err = 0;
char *output_path = "./mixed_audio.mp3";
char *output_video = "./mergeAV.mp4";
char *argv[4] = {
"",
audio_1_path,
audio_2_path,
output_path,
};
err = audioMix(argv);
if (err != 0) {
return NULL;
}
return do_mergeAV(videoPath, output_path, output_video);
}
int main(int argc, char *argv[]) {
return 0;
//mergeAV(argv[1], argv[2], argv[3]);
}