Commit 75b436d8 authored by Michael Niedermayer

ffserver: Remove use of AVStream as an intermediate to store parameters

Reviewed-by: "Reynaldo H. Verdejo Pinochet" <reynaldo@osg.samsung.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
parent c8b24a68
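
In short: ffserver previously stored per-stream parameters in AVStream structs that it allocated itself with av_mallocz() and copied around with memcpy(), which is not supported outside libavformat. After this commit those parameters live in the new LayeredAVStream holder (added to ffserver.h, last hunk below), and real AVStreams are only ever created through avformat_new_stream(), with the new unlayer_stream() helper copying the stored fields across. The following is a minimal sketch of the consuming side of that pattern, not verbatim ffserver code; the function and variable names build_output_streams, layered and nb_layered are illustrative, while the struct layout and the copy helper mirror what the hunks below add.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Parameter holder that can live outside libavformat; mirrors the
 * LayeredAVStream definition this commit adds to ffserver.h. */
typedef struct LayeredAVStream {
    int index;
    int id;
    AVCodecParameters *codecpar;
    AVCodecContext *codec;
    AVRational time_base;
    int pts_wrap_bits;
    AVRational sample_aspect_ratio;
    char *recommended_encoder_configuration;
} LayeredAVStream;

/* Same role as the unlayer_stream() this commit adds to ffserver.c: drop the
 * context/parameters allocated by avformat_new_stream() and install the ones
 * held by the layered struct. */
static void unlayer_stream(AVStream *st, LayeredAVStream *lst)
{
    avcodec_free_context(&st->codec);
    avcodec_parameters_free(&st->codecpar);
    st->index               = lst->index;
    st->id                  = lst->id;
    st->codec               = lst->codec;
    st->codecpar            = lst->codecpar;
    st->time_base           = lst->time_base;
    st->pts_wrap_bits       = lst->pts_wrap_bits;
    st->sample_aspect_ratio = lst->sample_aspect_ratio;
    av_stream_set_recommended_encoder_configuration(st,
        lst->recommended_encoder_configuration);
}

/* Illustrative: what http_prepare_data() and build_feed_streams() now do.
 * Every AVStream of the output context comes from avformat_new_stream();
 * nothing is hand-allocated or copied with memcpy() any more. */
static int build_output_streams(AVFormatContext *oc,
                                LayeredAVStream **layered, int nb_layered)
{
    int i;

    for (i = 0; i < nb_layered; i++) {
        AVStream *st = avformat_new_stream(oc, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        unlayer_stream(st, layered[i]);
    }
    return 0;
}
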
@@ -235,7 +235,7 @@ static int rtp_new_av_stream(HTTPContext *c,
 /* utils */
 static size_t htmlencode (const char *src, char **dest);
 static inline void cp_html_entity (char *buffer, const char *entity);
-static inline int check_codec_match(AVStream *ccf, AVStream *ccs, int stream);
+static inline int check_codec_match(LayeredAVStream *ccf, AVStream *ccs, int stream);
 static const char *my_program_name;
@@ -254,6 +254,21 @@ static AVLFG random_state;
 static FILE *logfile = NULL;
 
+static void unlayer_stream(AVStream *st, LayeredAVStream *lst)
+{
+    avcodec_free_context(&st->codec);
+    avcodec_parameters_free(&st->codecpar);
+#define COPY(a) st->a = lst->a;
+    COPY(index)
+    COPY(id)
+    COPY(codec)
+    COPY(codecpar)
+    COPY(time_base)
+    COPY(pts_wrap_bits)
+    COPY(sample_aspect_ratio)
+    COPY(recommended_encoder_configuration)
+}
+
 static inline void cp_html_entity (char *buffer, const char *entity) {
     if (!buffer || !entity)
         return;
@@ -1864,7 +1879,7 @@ static inline void print_stream_params(AVIOContext *pb, FFServerStream *stream)
     int i, stream_no;
     const char *type = "unknown";
     char parameters[64];
-    AVStream *st;
+    LayeredAVStream *st;
     AVCodec *codec;
 
     stream_no = stream->nb_streams;
@@ -1984,7 +1999,7 @@ static void compute_status(HTTPContext *c)
         const char *video_codec_name_extra = "";
 
         for(i=0;i<stream->nb_streams;i++) {
-            AVStream *st = stream->streams[i];
+            LayeredAVStream *st = stream->streams[i];
             AVCodec *codec = avcodec_find_encoder(st->codecpar->codec_id);
 
             switch(st->codecpar->codec_type) {
@@ -2256,14 +2271,12 @@ static int http_prepare_data(HTTPContext *c)
             return AVERROR(ENOMEM);
         c->pfmt_ctx = ctx;
         av_dict_copy(&(c->pfmt_ctx->metadata), c->stream->metadata, 0);
-        c->pfmt_ctx->streams = av_mallocz_array(c->stream->nb_streams,
-                                                sizeof(AVStream *));
-        if (!c->pfmt_ctx->streams)
-            return AVERROR(ENOMEM);
 
         for(i=0;i<c->stream->nb_streams;i++) {
-            AVStream *src;
-            c->pfmt_ctx->streams[i] = av_mallocz(sizeof(AVStream));
+            LayeredAVStream *src;
+            AVStream *st = avformat_new_stream(c->pfmt_ctx, NULL);
+            if (!st)
+                return AVERROR(ENOMEM);
 
             /* if file or feed, then just take streams from FFServerStream
              * struct */
@@ -2273,14 +2286,14 @@ static int http_prepare_data(HTTPContext *c)
             else
                 src = c->stream->feed->streams[c->stream->feed_streams[i]];
 
-            *(c->pfmt_ctx->streams[i]) = *src;
-            c->pfmt_ctx->streams[i]->priv_data = 0;
+            unlayer_stream(c->pfmt_ctx->streams[i], src); //TODO we no longer copy st->internal, does this matter?
+            av_assert0(!c->pfmt_ctx->streams[i]->priv_data);
             /* XXX: should be done in AVStream, not in codec */
             c->pfmt_ctx->streams[i]->codec->frame_number = 0;
         }
         /* set output format parameters */
         c->pfmt_ctx->oformat = c->stream->fmt;
-        c->pfmt_ctx->nb_streams = c->stream->nb_streams;
+        av_assert0(c->pfmt_ctx->nb_streams == c->stream->nb_streams);
 
         c->got_key_frame = 0;
...@@ -2807,7 +2820,7 @@ static int http_receive_data(HTTPContext *c) ...@@ -2807,7 +2820,7 @@ static int http_receive_data(HTTPContext *c)
} }
for (i = 0; i < s->nb_streams; i++) { for (i = 0; i < s->nb_streams; i++) {
AVStream *fst = feed->streams[i]; LayeredAVStream *fst = feed->streams[i];
AVStream *st = s->streams[i]; AVStream *st = s->streams[i];
avcodec_copy_context(fst->codec, st->codec); avcodec_copy_context(fst->codec, st->codec);
} }
@@ -3424,19 +3437,16 @@ static int rtp_new_av_stream(HTTPContext *c,
     if (!st)
         goto fail;
-    av_freep(&st->codec);
-    av_freep(&st->info);
     st_internal = st->internal;
 
     if (!c->stream->feed ||
         c->stream->feed == c->stream)
-        memcpy(st, c->stream->streams[stream_index], sizeof(AVStream));
+        unlayer_stream(st, c->stream->streams[stream_index]);
     else
-        memcpy(st,
-               c->stream->feed->streams[c->stream->feed_streams[stream_index]],
-               sizeof(AVStream));
-    st->priv_data = NULL;
-    st->internal = st_internal;
+        unlayer_stream(st,
+               c->stream->feed->streams[c->stream->feed_streams[stream_index]]);
+    av_assert0(st->priv_data == NULL);
+    av_assert0(st->internal == st_internal);
 
     /* build destination RTP address */
     ipaddr = inet_ntoa(dest_addr->sin_addr);
@@ -3504,15 +3514,15 @@ static int rtp_new_av_stream(HTTPContext *c,
 /* ffserver initialization */
 
 /* FIXME: This code should use avformat_new_stream() */
-static AVStream *add_av_stream1(FFServerStream *stream,
+static LayeredAVStream *add_av_stream1(FFServerStream *stream,
                                 AVCodecContext *codec, int copy)
 {
-    AVStream *fst;
+    LayeredAVStream *fst;
 
     if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
         return NULL;
 
-    fst = av_mallocz(sizeof(AVStream));
+    fst = av_mallocz(sizeof(*fst));
     if (!fst)
         return NULL;
     if (copy) {
@@ -3528,20 +3538,20 @@ static AVStream *add_av_stream1(FFServerStream *stream,
      */
     fst->codec = codec;
-    fst->internal = av_mallocz(sizeof(*fst->internal));
-    fst->internal->avctx = avcodec_alloc_context3(NULL);
+    //NOTE we previously allocated internal & internal->avctx, these seemed uneeded though
     fst->codecpar = avcodec_parameters_alloc();
     fst->index = stream->nb_streams;
-    avpriv_set_pts_info(fst, 33, 1, 90000);
+    fst->time_base = (AVRational) {1, 90000};
+    fst->pts_wrap_bits = 33;
     fst->sample_aspect_ratio = codec->sample_aspect_ratio;
     stream->streams[stream->nb_streams++] = fst;
     return fst;
 }
 
 /* return the stream number in the feed */
-static int add_av_stream(FFServerStream *feed, AVStream *st)
+static int add_av_stream(FFServerStream *feed, LayeredAVStream *st)
 {
-    AVStream *fst;
+    LayeredAVStream *fst;
     AVCodecContext *av, *av1;
     int i;
@@ -3575,9 +3585,9 @@ static int add_av_stream(FFServerStream *feed, AVStream *st)
     fst = add_av_stream1(feed, av, 0);
     if (!fst)
         return -1;
-    if (av_stream_get_recommended_encoder_configuration(st))
-        av_stream_set_recommended_encoder_configuration(fst,
-            av_strdup(av_stream_get_recommended_encoder_configuration(st)));
+    if (st->recommended_encoder_configuration)
+        fst->recommended_encoder_configuration =
+            av_strdup(st->recommended_encoder_configuration);
     return feed->nb_streams - 1;
 }
...@@ -3653,7 +3663,7 @@ static void build_file_streams(void) ...@@ -3653,7 +3663,7 @@ static void build_file_streams(void)
} }
static inline static inline
int check_codec_match(AVStream *ccf, AVStream *ccs, int stream) int check_codec_match(LayeredAVStream *ccf, AVStream *ccs, int stream)
{ {
int matches = 1; int matches = 1;
@@ -3741,7 +3751,8 @@ static int build_feed_streams(void)
             matches = 1;
             for(i=0;i<s->nb_streams;i++) {
-                AVStream *sf, *ss;
+                AVStream *ss;
+                LayeredAVStream *sf;
 
                 sf = feed->streams[i];
                 ss = s->streams[i];
...@@ -3796,8 +3807,14 @@ drop: ...@@ -3796,8 +3807,14 @@ drop:
goto bail; goto bail;
} }
s->oformat = feed->fmt; s->oformat = feed->fmt;
s->nb_streams = feed->nb_streams; for (i = 0; i<feed->nb_streams; i++) {
s->streams = feed->streams; AVStream *st = avformat_new_stream(s, NULL); // FIXME free this
if (!st) {
http_log("Failed to allocate stream\n");
goto bail;
}
unlayer_stream(st, feed->streams[i]);
}
if (avformat_write_header(s, NULL) < 0) { if (avformat_write_header(s, NULL) < 0) {
http_log("Container doesn't support the required parameters\n"); http_log("Container doesn't support the required parameters\n");
avio_closep(&s->pb); avio_closep(&s->pb);
...@@ -3847,7 +3864,7 @@ static void compute_bandwidth(void) ...@@ -3847,7 +3864,7 @@ static void compute_bandwidth(void)
for(stream = config.first_stream; stream; stream = stream->next) { for(stream = config.first_stream; stream; stream = stream->next) {
bandwidth = 0; bandwidth = 0;
for(i=0;i<stream->nb_streams;i++) { for(i=0;i<stream->nb_streams;i++) {
AVStream *st = stream->streams[i]; LayeredAVStream *st = stream->streams[i];
switch(st->codec->codec_type) { switch(st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_VIDEO:
@@ -182,7 +182,7 @@ bail:
 static void add_codec(FFServerStream *stream, AVCodecContext *av,
                       FFServerConfig *config)
 {
-    AVStream *st;
+    LayeredAVStream *st;
     AVDictionary **opts, *recommended = NULL;
     char *enc_config;
@@ -313,12 +313,12 @@ static void add_codec(FFServerStream *stream, AVCodecContext *av,
     }
 
 done:
-    st = av_mallocz(sizeof(AVStream));
+    st = av_mallocz(sizeof(*st));
     if (!st)
         return;
     av_dict_get_string(recommended, &enc_config, '=', ',');
     av_dict_free(&recommended);
-    av_stream_set_recommended_encoder_configuration(st, enc_config);
+    st->recommended_encoder_configuration = enc_config;
     st->codec = av;
     stream->streams[stream->nb_streams++] = st;
 }
@@ -49,6 +49,24 @@ typedef struct FFServerIPAddressACL {
     struct in_addr last;
 } FFServerIPAddressACL;
 
+/**
+ * This holds the stream parameters for an AVStream, it cannot be a AVStream
+ * because AVStreams cannot be instanciated without a AVFormatContext, especially
+ * not outside libavformat.
+ *
+ * The fields of this struct have the same semantics as the fields of an AVStream.
+ */
+typedef struct LayeredAVStream {
+    int index;
+    int id;
+    AVCodecParameters *codecpar;
+    AVCodecContext *codec;
+    AVRational time_base;
+    int pts_wrap_bits;
+    AVRational sample_aspect_ratio;
+    char *recommended_encoder_configuration;
+} LayeredAVStream;
+
 /* description of each stream of the ffserver.conf file */
 typedef struct FFServerStream {
     enum FFServerStreamType stream_type;
@@ -64,7 +82,7 @@ typedef struct FFServerStream {
     int prebuffer;      /* Number of milliseconds early to start */
     int64_t max_time;      /* Number of milliseconds to run */
     int send_on_key;
-    AVStream *streams[FFSERVER_MAX_STREAMS];
+    LayeredAVStream *streams[FFSERVER_MAX_STREAMS];
     int feed_streams[FFSERVER_MAX_STREAMS]; /* index of streams in the feed */
     char feed_filename[1024];     /* file name of the feed storage, or
                                      input file name for a stream */
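
On the producing side, add_av_stream1() in the ffserver.c hunks above now fills one of these holders instead of a hand-allocated AVStream. The following is a condensed sketch of that step, assuming the LayeredAVStream definition just above; the wrapper name new_layered_stream() is illustrative and the real function additionally handles the copy case and other bookkeeping.

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* LayeredAVStream as defined in the ffserver.h hunk above. */

/* Illustrative: record stream parameters at configuration time without
 * touching libavformat; compare with the add_av_stream1() hunk above. */
static LayeredAVStream *new_layered_stream(AVCodecContext *codec, int index)
{
    LayeredAVStream *fst = av_mallocz(sizeof(*fst));
    if (!fst)
        return NULL;
    fst->codecpar = avcodec_parameters_alloc();
    if (!fst->codecpar) {
        av_freep(&fst);
        return NULL;
    }
    fst->codec               = codec;
    fst->index               = index;
    fst->time_base           = (AVRational) { 1, 90000 }; /* replaces avpriv_set_pts_info(fst, 33, 1, 90000) */
    fst->pts_wrap_bits       = 33;
    fst->sample_aspect_ratio = codec->sample_aspect_ratio;
    return fst;
}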