Commit cd3b010e authored by NzSN's avatar NzSN

update

parents
File added
File added
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <stdio.h>
/*
 * Remux (stream-copy) the container at argv[1] into argv[2] without
 * re-encoding: open the input, create a matching output stream for every
 * video/audio/subtitle stream, rescale packet timestamps, and write the
 * packets out interleaved.
 *
 * Returns 0 on success, non-zero on failure.
 */
int main(int argc, const char *argv[]) {
  int ret = 0;
  int rc = 1;               /* process exit code; assume failure until done */
  int stream_index = 0;
  int *streams_list = NULL; /* input stream idx -> output stream idx, -1 = dropped */
  AVPacket packet;
  AVFormatContext *input_format_context = NULL;
  AVFormatContext *output_format_context = NULL;

  if (argc < 3) {
    printf("You need to pass at least two parameters.\n");
    return -1;
  }
  const char *in_filename = argv[1];
  const char *out_filename = argv[2];

  /* Open input digital container. */
  ret = avformat_open_input(&input_format_context, in_filename, NULL, NULL);
  if (ret < 0) {
    printf("failed to open\n");
    return 1;
  }
  /* Probe the input so per-stream codec parameters are populated. */
  if (avformat_find_stream_info(input_format_context, NULL) < 0) {
    printf("failed to find stream\n");
    goto end;
  }
  /* Guess the output container format from the file name. */
  avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename);
  if (!output_format_context) {
    printf("alloc output context failed\n");
    goto end;
  }

  int number_of_streams = input_format_context->nb_streams;
  streams_list = av_mallocz_array(number_of_streams, sizeof(*streams_list));
  if (!streams_list) {
    printf("streams_list error\n");
    goto end;
  }

  /* Create one output stream per kept input stream, copying codec parameters
   * verbatim (no transcoding). */
  for (int i = 0; i < number_of_streams; i++) {
    AVStream *out_stream;
    AVStream *in_stream = input_format_context->streams[i];
    AVCodecParameters *in_codecpar = in_stream->codecpar;
    if (in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
        in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
        in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
      streams_list[i] = -1; /* drop data/attachment streams */
      continue;
    }
    streams_list[i] = stream_index++;
    out_stream = avformat_new_stream(output_format_context, NULL);
    if (!out_stream) {
      printf("fail to create stream\n");
      goto end; /* BUG FIX: original returned without releasing contexts */
    }
    ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
    if (ret < 0) {
      printf("failed to copy codecpar");
      goto end;
    }
  }

  /* Formats flagged AVFMT_NOFILE manage their own I/O; otherwise open the
   * output file ourselves. */
  if (!(output_format_context->oformat->flags & AVFMT_NOFILE)) {
    ret = avio_open(&output_format_context->pb, out_filename, AVIO_FLAG_WRITE);
    if (ret < 0) {
      printf("fail to create file\n");
      goto end;
    }
  }

  AVDictionary *opts = NULL;
  ret = avformat_write_header(output_format_context, &opts);
  if (ret < 0) {
    printf("fail to write header\n");
    goto end; /* BUG FIX: original fell through and kept muxing anyway */
  }

  while (1) {
    AVStream *in_stream, *out_stream;
    ret = av_read_frame(input_format_context, &packet);
    if (ret < 0)
      break; /* EOF or read error */
    if (packet.stream_index >= number_of_streams ||
        streams_list[packet.stream_index] < 0) {
      av_packet_unref(&packet);
      continue;
    }
    in_stream = input_format_context->streams[packet.stream_index];
    packet.stream_index = streams_list[packet.stream_index];
    out_stream = output_format_context->streams[packet.stream_index];
    /* BUG FIX: pts is int64_t and time_base is an AVRational struct; the
     * original "%d, %d, %d" specifiers were undefined behavior. */
    printf("%lld, %d/%d, %d/%d\n", (long long)packet.pts,
           in_stream->time_base.num, in_stream->time_base.den,
           out_stream->time_base.num, out_stream->time_base.den);
    /* Rescale timestamps from the input to the output stream time base. */
    packet.pts = av_rescale_q_rnd(packet.pts, in_stream->time_base, out_stream->time_base,
                                  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    packet.dts = av_rescale_q_rnd(packet.dts, in_stream->time_base, out_stream->time_base,
                                  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    packet.duration = av_rescale_q(packet.duration, in_stream->time_base, out_stream->time_base);
    packet.pos = -1; /* byte position is unknown in the new container */
    ret = av_interleaved_write_frame(output_format_context, &packet);
    if (ret < 0) {
      fprintf(stderr, "Error muxing packet\n");
      break;
    }
    av_packet_unref(&packet);
  }
  av_write_trailer(output_format_context);
  rc = 0;

end:
  avformat_close_input(&input_format_context);
  if (output_format_context && !(output_format_context->oformat->flags & AVFMT_NOFILE)) {
    avio_closep(&output_format_context->pb);
  }
  avformat_free_context(output_format_context);
  av_freep(&streams_list);
  return rc;
}
#include <stdio.h>
#include <stdlib.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
/* Logical stream slots used by the merger below: the video stream occupies
 * the first slot and the audio stream the second.  BEGIN_STREAM aliases the
 * first slot so loops can run from BEGIN_STREAM to MAX_STREAM. */
typedef enum {
BEGIN_STREAM = 0,
V_STREAM = 0,
A_STREAM,
MAX_STREAM
} STREAM_IDX;
/* Rescale packet's pts/dts/duration from in_stream's time base to
 * out_stream's and retarget the packet at output stream index `idx`. */
void packet_rescale(AVPacket *packet, AVStream *in_stream, AVStream *out_stream, STREAM_IDX idx);
/* Number of audio frames (each lasting `frameDur` ticks of aStream's time
 * base) that fit in the video's duration after delaying the audio by
 * `shift_sec` seconds; 0 when frameDur is 0 or the shift is too large. */
int numFramesToMerged(AVStream *vStream, AVStream *aStream,
uint64_t frameDur, int shift_sec);
int main(int argc, char *argv[]) {
int ret = 0;
char *fileName = "./demo2.mp4";
// This is a packing file
char *audioFile = "./audio.mp3";
char *outFileName = "./demo3.mp4";
// Number of seconds to shift audio away
// from begin of vedio.
double shift = 0;
uint64_t temporal_shift = 0;
if (argc > 1) {
shift = atof(argv[1]);
}
AVFormatContext *inputFormatContext = NULL,
*outputFormatContext = NULL,
*audioFormatContext = NULL;
// Open InputFormatContext of demo2.mp4
ret = avformat_open_input(&inputFormatContext, fileName, NULL, NULL);
if (ret < 0) {
printf("Failed to open mp4 file\n");
return 0;
}
ret = avformat_find_stream_info(inputFormatContext, NULL);
if (ret < 0) {
printf("Failed to retrieve input stream information\n");
return 0;
}
// Open Audio file
ret = avformat_open_input(&audioFormatContext, audioFile, NULL, NULL);
if (ret < 0) {
printf("Failed to open avformat_open_input");
return 0;
}
ret = avformat_find_stream_info(audioFormatContext, NULL);
if (ret < 0) {
printf("Failed to find stream info");
}
// Check Precondition
if (inputFormatContext->nb_streams > 1) {
printf("/tmp/demo2.mp4 has more than one streams\n");
return 0;
}
if (audioFormatContext->nb_streams > 1) {
printf("/tmp/audio.mp3 has more than one streams\n");
return 0;
}
// Alloc OutputFormatContext
avformat_alloc_output_context2(
&outputFormatContext, NULL, NULL, outFileName);
if (!outputFormatContext) {
printf("could not create output context\n");
return 0;
}
int ostreams_idx[MAX_STREAM] = { -1, -1 };
AVStream* streams[MAX_STREAM];
AVStream *in_stream, *out_stream;
AVCodecParameters *in_codecpar;
streams[V_STREAM] = inputFormatContext->streams[0];
streams[A_STREAM] = audioFormatContext->streams[0];
// Create Video Stream and Audio Stream
// for OutputFormtContext.
for (int i = BEGIN_STREAM; i < MAX_STREAM; ++i) {
in_stream = streams[i];
in_codecpar = in_stream->codecpar;
if (in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
printf("Media Type is not supported");
return 0;
}
out_stream = avformat_new_stream(outputFormatContext, NULL);
ostreams_idx[i] = i;
if (!out_stream) {
printf("Failed allocating output stream\n");
}
ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
if (ret < 0) {
printf("Failed to copy codec paramters\n");
}
}
ret = avio_open(&outputFormatContext->pb, outFileName, AVIO_FLAG_WRITE);
if (ret < 0) {
printf("Could not open output file /tmp/demo3.mp4\n");
return 0;
}
AVDictionary *opts = NULL;
ret = avformat_write_header(outputFormatContext, &opts);
if (ret < 0) {
printf("Failed to write header to outpu file.\n");
return 0;
}
AVPacket packet;
int64_t pts;
int64_t dts;
int64_t duration;
uint8_t idx = V_STREAM;
AVFormatContext *contexts[MAX_STREAM] = { inputFormatContext, audioFormatContext };
AVFormatContext *context;
int audioFrameCount = 0;
int nbFrames = -1;
double durSecs = packet.duration / (double)out_stream->time_base.den;
temporal_shift = (shift / durSecs) * packet.duration;
printf("%d, %d/%d\n", temporal_shift, out_stream->time_base.num, out_stream->time_base.den);
// Put video stream of /tmp/demo2.mp4 into /tmp/demo3.mp4,
// no decoding is required.
while (1) {
context = contexts[idx];
ret = av_read_frame(context, &packet);
if (ret < 0) {
break;
}
in_stream = streams[idx];
out_stream = outputFormatContext->streams[ostreams_idx[idx]];
if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
nbFrames == -1) {
nbFrames = numFramesToMerged(streams[V_STREAM], streams[A_STREAM],
packet.duration, shift);
}
packet_rescale(&packet, in_stream, out_stream, idx);
// Do audio shift
if (nbFrames > 0 && in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
packet.pts += temporal_shift;
packet.dts += temporal_shift;
audioFrameCount++;
}
// Input and Output Digitial Container is equal so
// pts,dts,duration no need to recalculate.
ret = av_interleaved_write_frame(outputFormatContext, &packet);
if (ret < 0) {
printf("Failed to write frame\n");
return 0;
}
av_packet_unref(&packet);
if (audioFrameCount != nbFrames) {
idx = (idx + 1) % MAX_STREAM;
} else {
idx = V_STREAM;
}
}
av_write_trailer(outputFormatContext);
avformat_close_input(&inputFormatContext);
avio_closep(&outputFormatContext->pb);
avformat_free_context(outputFormatContext);
return 0;
}
/* Convert a packet's pts/dts/duration from the input stream's time base to
 * the output stream's, and retarget the packet at output stream `idx`. */
void packet_rescale(AVPacket *packet, AVStream *in_stream, AVStream *out_stream, STREAM_IDX idx) {
  AVRational src = in_stream->time_base;
  AVRational dst = out_stream->time_base;
  int rnd = AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX;

  packet->pts = av_rescale_q_rnd(packet->pts, src, dst, rnd);
  packet->dts = av_rescale_q_rnd(packet->dts, src, dst, rnd);
  packet->duration = av_rescale_q(packet->duration, src, dst);
  packet->stream_index = idx;
  packet->pos = -1; /* byte offset is meaningless in the new container */
}
/*
 * Number of audio frames, each lasting `frameDur` ticks of aStream's time
 * base, that fit inside vStream's duration after delaying the audio by
 * `shift_sec` seconds.  Returns 0 when frameDur is 0 or when the shift is
 * not shorter than the video.
 */
int numFramesToMerged(AVStream *vStream, AVStream *aStream, uint64_t frameDur, int shift_sec) {
  double vDur_sec, frameDur_sec;
  if (frameDur == 0) {
    return 0;
  }
  /* BUG FIX: the original divided by time_base.den alone, silently assuming
   * time_base.num == 1; av_q2d handles any rational time base. */
  vDur_sec = vStream->duration * av_q2d(vStream->time_base);
  frameDur_sec = frameDur * av_q2d(aStream->time_base);
  if (frameDur_sec <= 0 || vDur_sec <= shift_sec) {
    return 0;
  }
  return (int)((vDur_sec - shift_sec) / frameDur_sec);
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment