Commit 484e59a0 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  avs: call release_buffer() at the end.
  Add minor bumps and APIchanges entries for lavc/lavfi changes.
  mpegvideo.c: K&R formatting and cosmetics.
  avconv: avoid memcpy in vsrc_buffer when possible.
  avconv: implement get_buffer()/release_buffer().
  lavfi: add a new function av_buffersrc_buffer().
  lavfi: add avfilter_copy_frame_props()
  lavc: add format field to AVFrame
  lavc: add width and height fields to AVFrame
  lavc: add a sample_aspect_ratio field to AVFrame
  doxy: add website-alike style to the html output
  FAQ: add an entry for common error when using -profile

Conflicts:
	avconv.c
	cmdutils.c
	doc/APIchanges
	libavcodec/avcodec.h
	libavcodec/mpegvideo.c
	libavcodec/utils.c
	libavcodec/version.h
	libavfilter/Makefile
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/src_movie.c
	libavfilter/vsrc_buffer.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents c48f67f0 80dc7c01
@@ -33,6 +33,12 @@ PROJECT_NAME = FFmpeg
PROJECT_NUMBER =

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will
# copy the logo to the output directory.
PROJECT_LOGO =

# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
# If a relative path is entered, it will be relative to the location
@@ -761,7 +767,7 @@ ALPHABETICAL_INDEX = YES
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])
-COLS_IN_ALPHA_INDEX = 5
+COLS_IN_ALPHA_INDEX = 2
# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
@@ -795,13 +801,13 @@ HTML_FILE_EXTENSION = .html
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.
-HTML_HEADER =
+HTML_HEADER = doc/doxy/header.html
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
-HTML_FOOTER =
+HTML_FOOTER = doc/doxy/footer.html
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
@@ -810,7 +816,7 @@ HTML_FOOTER =
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!
-HTML_STYLESHEET =
+HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images
...
@@ -44,6 +44,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavformat/os_support.h"
#include "libavformat/ffm.h" // not public API
@@ -53,6 +54,7 @@
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
# include "libavfilter/vsrc_buffer.h"
#endif
@@ -157,6 +159,19 @@ static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];
int h, w;
enum PixelFormat pix_fmt;
int refcount;
struct InputStream *ist;
struct FrameBuffer *next;
} FrameBuffer;
typedef struct InputStream {
int file_index;
AVStream *st;
@@ -174,6 +189,9 @@ typedef struct InputStream {
int is_start; /* is 1 at the start and after a discontinuity */
int showed_multi_packet_warning;
AVDictionary *opts;
/* a pool of free buffers for decoded data */
FrameBuffer *buffer_pool;
} InputStream;
typedef struct InputFile {
@@ -416,6 +434,131 @@ static void reset_options(OptionsContext *o)
init_opts();
}
static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf)
{
AVCodecContext *s = ist->st->codec;
FrameBuffer *buf = av_mallocz(sizeof(*buf));
int ret;
const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
int h_chroma_shift, v_chroma_shift;
int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
int w = s->width, h = s->height;
if (!buf)
return AVERROR(ENOMEM);
if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
w += 2*edge;
h += 2*edge;
}
avcodec_align_dimensions(s, &w, &h);
if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
s->pix_fmt, 32)) < 0) {
av_freep(&buf);
return ret;
}
/* XXX this shouldn't be needed, but some tests break without this line
* those decoders are buggy and need to be fixed.
* the following tests fail:
* bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
*/
memset(buf->base[0], 128, ret);
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
const int h_shift = i==0 ? 0 : h_chroma_shift;
const int v_shift = i==0 ? 0 : v_chroma_shift;
if (s->flags & CODEC_FLAG_EMU_EDGE)
buf->data[i] = buf->base[i];
else
buf->data[i] = buf->base[i] +
FFALIGN((buf->linesize[i]*edge >> v_shift) +
(pixel_size*edge >> h_shift), 32);
}
buf->w = s->width;
buf->h = s->height;
buf->pix_fmt = s->pix_fmt;
buf->ist = ist;
*pbuf = buf;
return 0;
}
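To make the pointer arithmetic in alloc_buffer() above easier to follow, here is a small standalone sketch (not part of the patch) that reproduces the data-pointer offset computation for one assumed geometry: a 4:2:0 frame with pixel_size 1, edge 32 and already-aligned linesizes of 704 (luma) and 352 (chroma). All concrete numbers are illustrative only.

#include <stdio.h>

#define FFALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    /* assumed: YUV 4:2:0, luma linesize 704, chroma linesize 352 */
    const int edge = 32, pixel_size = 1;
    const int linesize[2] = { 704, 352 };
    const int h_chroma_shift = 1, v_chroma_shift = 1;

    for (int i = 0; i < 2; i++) {
        const int h_shift = i == 0 ? 0 : h_chroma_shift;
        const int v_shift = i == 0 ? 0 : v_chroma_shift;
        /* same expression as alloc_buffer(): skip 'edge' padded rows plus
         * 'edge' padded pixels, then round up to a 32-byte boundary */
        int offset = FFALIGN((linesize[i] * edge >> v_shift) +
                             (pixel_size * edge >> h_shift), 32);
        printf("plane %d: data[%d] = base[%d] + %d\n", i, i, i, offset);
    }
    return 0;
}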
static void free_buffer_pool(InputStream *ist)
{
FrameBuffer *buf = ist->buffer_pool;
while (buf) {
ist->buffer_pool = buf->next;
av_freep(&buf->base[0]);
av_free(buf);
buf = ist->buffer_pool;
}
}
static void unref_buffer(InputStream *ist, FrameBuffer *buf)
{
av_assert0(buf->refcount);
buf->refcount--;
if (!buf->refcount) {
buf->next = ist->buffer_pool;
ist->buffer_pool = buf;
}
}
static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf;
int ret, i;
if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0)
return ret;
buf = ist->buffer_pool;
ist->buffer_pool = buf->next;
buf->next = NULL;
if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
av_freep(&buf->base[0]);
av_free(buf);
if ((ret = alloc_buffer(ist, &buf)) < 0)
return ret;
}
buf->refcount++;
frame->opaque = buf;
frame->type = FF_BUFFER_TYPE_USER;
frame->extended_data = frame->data;
frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
frame->data[i] = buf->data[i];
frame->linesize[i] = buf->linesize[i];
}
return 0;
}
static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf = frame->opaque;
int i;
for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
frame->data[i] = NULL;
unref_buffer(ist, buf);
}
static void filter_release_buffer(AVFilterBuffer *fb)
{
FrameBuffer *buf = fb->priv;
av_free(fb);
unref_buffer(buf->ist, buf);
}
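The helpers above form a small free list of reference-counted frame buffers shared between the decoder and the filtergraph: codec_get_buffer() pops (or allocates) a buffer and takes the first reference, the transcode path below takes a second one before handing the frame to the filter, and unref_buffer() puts the buffer back on the pool only when the last holder releases it. A minimal standalone sketch of the same life cycle, with purely illustrative names (not part of the patch):

#include <stdio.h>
#include <stdlib.h>

typedef struct PoolBuffer {
    int refcount;
    struct PoolBuffer *next;
} PoolBuffer;

static PoolBuffer *pool; /* singly linked free list, like ist->buffer_pool */

static PoolBuffer *pool_get(void)
{
    PoolBuffer *b = pool;
    if (b) {
        pool = b->next;            /* reuse a recycled buffer */
    } else {
        b = calloc(1, sizeof(*b)); /* or allocate a fresh one */
        if (!b)
            abort();
    }
    b->next = NULL;
    b->refcount++;                 /* first reference: the decoder */
    return b;
}

static void pool_unref(PoolBuffer *b)
{
    if (--b->refcount == 0) {      /* last reference gone: recycle, don't free */
        b->next = pool;
        pool    = b;
    }
}

int main(void)
{
    PoolBuffer *b = pool_get();
    b->refcount++;  /* second reference: the filtergraph keeps the frame */
    pool_unref(b);  /* decoder releases its reference */
    pool_unref(b);  /* filter releases the last one -> buffer re-enters the pool */
    printf("buffer %s back in the pool\n", pool == b ? "is" : "is not");
    free(b);        /* demo cleanup; avconv frees the pool in exit_program() */
    return 0;
}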
#if CONFIG_AVFILTER
static int configure_video_filters(InputStream *ist, OutputStream *ost)
@@ -618,6 +761,7 @@ void exit_program(int ret)
av_freep(&input_streams[i].decoded_frame);
av_freep(&input_streams[i].filtered_frame);
av_dict_free(&input_streams[i].opts);
free_buffer_pool(&input_streams[i]);
}
if (vstats_file)
@@ -1855,20 +1999,35 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
continue;
#if CONFIG_AVFILTER
-        if (ost->input_video_filter) {
-            if (!decoded_frame->sample_aspect_ratio.num)
-                decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
-            decoded_frame->pts = ist->pts;
-            av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
-            if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
-                av_free(buffer_to_free);
-                return AVERROR(ENOMEM);
-            } else
-                avcodec_get_frame_defaults(ist->filtered_frame);
-            filtered_frame = ist->filtered_frame;
-            frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
-        }
+        if (!decoded_frame->sample_aspect_ratio.num)
+            decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
+        decoded_frame->pts = ist->pts;
+        if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
+            FrameBuffer *buf = decoded_frame->opaque;
+            AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
+                decoded_frame->data, decoded_frame->linesize,
+                AV_PERM_READ | AV_PERM_PRESERVE,
+                ist->st->codec->width, ist->st->codec->height,
+                ist->st->codec->pix_fmt);
+            avfilter_copy_frame_props(fb, decoded_frame);
+            fb->pts = ist->pts;
+            fb->buf->priv = buf;
+            fb->buf->free = filter_release_buffer;
+            buf->refcount++;
+            av_buffersrc_buffer(ost->input_video_filter, fb);
+        } else
+            av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
+        if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
+            av_free(buffer_to_free);
+            return AVERROR(ENOMEM);
+        } else
+            avcodec_get_frame_defaults(ist->filtered_frame);
+        filtered_frame = ist->filtered_frame;
+        frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
while (frame_available) {
if (ost->output_video_filter) {
AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
@@ -2055,6 +2214,12 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb
return AVERROR(EINVAL);
}
if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
ist->st->codec->get_buffer = codec_get_buffer;
ist->st->codec->release_buffer = codec_release_buffer;
ist->st->codec->opaque = ist;
}
if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
ist->file_index, ist->st->index);
...
</div>
<div id="footer">
<!--BEGIN GENERATE_TREEVIEW-->
<li class="footer">$generatedby
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
</ul>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<address class="footer"><small>
$generatedby &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="$relpath$doxygen.png" alt="doxygen"/>
</a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
</div>
</div>
</body>
</html>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
$treeview
$search
$mathjax
</head>
<div id="container">
<div id="body">
<div>
@@ -272,6 +272,26 @@ ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
rm temp[12].[av] all.[av]
@end example
@section -profile option fails when encoding H.264 video with AAC audio
@command{ffmpeg} prints an error like
@example
Undefined constant or missing '(' in 'baseline'
Unable to parse option value "baseline"
Error setting option profile to value baseline.
@end example
Short answer: write @option{-profile:v} instead of @option{-profile}.
Long answer: this happens because the @option{-profile} option can apply to both
video and audio. Specifically the AAC encoder also defines some profiles, none
of which are named @var{baseline}.
The solution is to apply the @option{-profile} option to the video stream only
by using @url{http://ffmpeg.org/ffmpeg.html#Stream-specifiers-1, Stream specifiers}.
Appending @code{:v} to it will do exactly that.
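For instance (the file names and encoder choices below are illustrative, not part of the FAQ entry):
@example
ffmpeg -i input.mov -vcodec libx264 -profile:v baseline -acodec libfaac output.mp4
@end example
Only the @code{:v} suffix differs from the failing command; it restricts @option{-profile} to the video encoder, so the AAC encoder never sees the @var{baseline} value.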
@chapter Development
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
...
@@ -44,6 +44,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavformat/os_support.h"
#include "libswresample/swresample.h"
@@ -54,6 +55,7 @@
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
# include "libavfilter/vsrc_buffer.h"
#endif
@@ -172,6 +174,19 @@ static uint8_t *input_tmp= NULL;
#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];
int h, w;
enum PixelFormat pix_fmt;
int refcount;
struct InputStream *ist;
struct FrameBuffer *next;
} FrameBuffer;
typedef struct InputStream {
int file_index;
AVStream *st;
@@ -189,6 +204,9 @@ typedef struct InputStream {
int is_start; /* is 1 at the start and after a discontinuity */
int showed_multi_packet_warning;
AVDictionary *opts;
/* a pool of free buffers for decoded data */
FrameBuffer *buffer_pool;
} InputStream;
typedef struct InputFile {
@@ -441,6 +459,131 @@ static void reset_options(OptionsContext *o, int is_input)
init_opts();
}
static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf)
{
AVCodecContext *s = ist->st->codec;
FrameBuffer *buf = av_mallocz(sizeof(*buf));
int ret;
const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
int h_chroma_shift, v_chroma_shift;
int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
int w = s->width, h = s->height;
if (!buf)
return AVERROR(ENOMEM);
if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
w += 2*edge;
h += 2*edge;
}
avcodec_align_dimensions(s, &w, &h);
if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
s->pix_fmt, 32)) < 0) {
av_freep(&buf);
return ret;
}
/* XXX this shouldn't be needed, but some tests break without this line
* those decoders are buggy and need to be fixed.
* the following tests fail:
* bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
*/
memset(buf->base[0], 128, ret);
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
const int h_shift = i==0 ? 0 : h_chroma_shift;
const int v_shift = i==0 ? 0 : v_chroma_shift;
if (s->flags & CODEC_FLAG_EMU_EDGE)
buf->data[i] = buf->base[i];
else
buf->data[i] = buf->base[i] +
FFALIGN((buf->linesize[i]*edge >> v_shift) +
(pixel_size*edge >> h_shift), 32);
}
buf->w = s->width;
buf->h = s->height;
buf->pix_fmt = s->pix_fmt;
buf->ist = ist;
*pbuf = buf;
return 0;
}
static void free_buffer_pool(InputStream *ist)
{
FrameBuffer *buf = ist->buffer_pool;
while (buf) {
ist->buffer_pool = buf->next;
av_freep(&buf->base[0]);
av_free(buf);
buf = ist->buffer_pool;
}
}
static void unref_buffer(InputStream *ist, FrameBuffer *buf)
{
av_assert0(buf->refcount);
buf->refcount--;
if (!buf->refcount) {
buf->next = ist->buffer_pool;
ist->buffer_pool = buf;
}
}
static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf;
int ret, i;
if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0)
return ret;
buf = ist->buffer_pool;
ist->buffer_pool = buf->next;
buf->next = NULL;
if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
av_freep(&buf->base[0]);
av_free(buf);
if ((ret = alloc_buffer(ist, &buf)) < 0)
return ret;
}
buf->refcount++;
frame->opaque = buf;
frame->type = FF_BUFFER_TYPE_USER;
frame->extended_data = frame->data;
frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
frame->data[i] = buf->data[i];
frame->linesize[i] = buf->linesize[i];
}
return 0;
}
static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf = frame->opaque;
int i;
for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
frame->data[i] = NULL;
unref_buffer(ist, buf);
}
static void filter_release_buffer(AVFilterBuffer *fb)
{
FrameBuffer *buf = fb->priv;
av_free(fb);
unref_buffer(buf->ist, buf);
}
#if CONFIG_AVFILTER
static int configure_video_filters(InputStream *ist, OutputStream *ost)
@@ -667,6 +810,7 @@ void av_noreturn exit_program(int ret)
av_freep(&input_streams[i].decoded_frame);
av_freep(&input_streams[i].filtered_frame);
av_dict_free(&input_streams[i].opts);
free_buffer_pool(&input_streams[i]);
}
if (vstats_file)
...
@@ -1285,51 +1285,45 @@ typedef struct AVFrame {
uint8_t **extended_data;
-    /**
-     * frame timestamp estimated using various heuristics, in stream time base
-     * Code outside libavcodec should access this field using:
-     * av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
-     * - encoding: unused
-     * - decoding: set by libavcodec, read by user.
-     */
-    int64_t best_effort_timestamp;
-
-    /**
-     * reordered pos from the last AVPacket that has been input into the decoder
-     * Code outside libavcodec should access this field using:
-     * av_opt_ptr(avcodec_get_frame_class(), frame, "pkt_pos");
-     * - encoding: unused
-     * - decoding: Read by user.
-     */
-    int64_t pkt_pos;
-
-    /**
-     * reordered sample aspect ratio for the video frame, 0/1 if unknown\unspecified
-     * Code outside libavcodec should access this field using:
-     * av_opt_ptr(avcodec_get_frame_class(), frame, "sample_aspect_ratio");
-     * - encoding: unused
-     * - decoding: Read by user.
-     */
-    AVRational sample_aspect_ratio;
-
-    /**
-     * width and height of the video frame
-     * Code outside libavcodec should access this field using:
-     * av_opt_ptr(avcodec_get_frame_class(), frame, "width");
-     * - encoding: unused
-     * - decoding: Read by user.
-     */
-    int width, height;
-
-    /**
-     * format of the frame, -1 if unknown or unset
-     * It should be cast to the corresponding enum (enum PixelFormat
-     * for video, enum AVSampleFormat for audio)
-     * Code outside libavcodec should access this field using:
-     * av_opt_ptr(avcodec_get_frame_class(), frame, "format");
-     * - encoding: unused
-     * - decoding: Read by user.
-     */
-    int format;
+    /**
+     * sample aspect ratio for the video frame, 0/1 if unknown\unspecified
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    AVRational sample_aspect_ratio;
+
+    /**
+     * width and height of the video frame
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int width, height;
+
+    /**
+     * format of the frame, -1 if unknown or unset
+     * Values correspond to enum PixelFormat for video frames,
+     * enum AVSampleFormat for audio)
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int format;
+
+    /**
+     * frame timestamp estimated using various heuristics, in stream time base
+     * Code outside libavcodec should access this field using:
+     * av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
+     * - encoding: unused
+     * - decoding: set by libavcodec, read by user.
+     */
+    int64_t best_effort_timestamp;
+
+    /**
+     * reordered pos from the last AVPacket that has been input into the decoder
+     * Code outside libavcodec should access this field using:
+     * av_opt_ptr(avcodec_get_frame_class(), frame, "pkt_pos");
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int64_t pkt_pos;
} AVFrame;
...
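A minimal sketch of how application code can read these fields after the change above (not part of the patch; the frame is assumed to come from a successful decode call, which is omitted). The new sample_aspect_ratio, width, height and format members are plain struct fields, while best_effort_timestamp keeps its documented av_opt_ptr() access path:

#include <stdio.h>
#include <inttypes.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

void inspect_frame(const AVFrame *frame)
{
    /* fields added to AVFrame by this merge: read them directly */
    printf("%dx%d, format %d, SAR %d:%d\n",
           frame->width, frame->height, frame->format,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den);

    /* still documented for av_opt_ptr() access from outside libavcodec */
    int64_t best_effort = *(int64_t *)av_opt_ptr(avcodec_get_frame_class(),
                                                 (void *)frame,
                                                 "best_effort_timestamp");
    printf("best_effort_timestamp: %"PRId64"\n", best_effort);
}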
@@ -165,6 +165,15 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
return 0;
}
static av_cold int avs_decode_end(AVCodecContext *avctx)
{
AvsContext *s = avctx->priv_data;
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
return 0;
}
AVCodec ff_avs_decoder = {
.name = "avs",
.type = AVMEDIA_TYPE_VIDEO,
@@ -172,6 +181,7 @@ AVCodec ff_avs_decoder = {
.priv_data_size = sizeof(AvsContext),
.init = avs_decode_init,
.decode = avs_decode_frame,
.close = avs_decode_end,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("AVS (Audio Video Standard) video"),
};
...
@@ -604,6 +604,10 @@ int ff_thread_decode_frame(AVCodecContext *avctx,
*picture = p->frame;
*got_picture_ptr = p->got_frame;
picture->pkt_dts = p->avpkt.dts;
picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
picture->width = avctx->width;
picture->height = avctx->height;
picture->format = avctx->pix_fmt;
/*
* A later call with avkpt->size == 0 may loop over all threads,
...
@@ -1093,6 +1093,8 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
if (ret >= 0 && *got_frame_ptr) {
avctx->frame_number++;
frame->pkt_dts = avpkt->dts;
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
}
}
return ret;
...
@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 47
+#define LIBAVCODEC_VERSION_MINOR 48
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
...
@@ -23,7 +23,7 @@
#include "avcodec.h"
-void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
+int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
{
dst->pts = src->pts;
dst->pos = src->pkt_pos;
@@ -39,6 +39,8 @@ void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
dst->video->key_frame = src->key_frame;
dst->video->pict_type = src->pict_type;
}
return 0;
}
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame,
...
@@ -36,7 +36,7 @@
* Copy the frame properties of src to dst, without copying the actual
* image data.
*/
-void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
+int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
/**
* Create and return a picref reference from the data and properties
...
@@ -27,9 +27,10 @@
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "libavcodec/avcodec.h"
#define LIBAVFILTER_VERSION_MAJOR 2
-#define LIBAVFILTER_VERSION_MINOR 53
+#define LIBAVFILTER_VERSION_MINOR 54
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
@@ -953,4 +954,12 @@ static inline void avfilter_insert_outpad(AVFilterContext *f, unsigned index,
&f->output_pads, &f->outputs, p);
}
/**
* Copy the frame properties of src to dst, without copying the actual
* image data.
*
* @return 0 on success, a negative number on error.
*/
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
#endif /* AVFILTER_AVFILTER_H */
/*
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_BUFFERSRC_H
#define AVFILTER_BUFFERSRC_H
/**
* @file
* Memory buffer source API.
*/
#include "avfilter.h"
/**
* Add a buffer to the filtergraph s.
*
* @param buf buffer containing frame data to be passed down the filtergraph.
* This function will take ownership of buf, the user must not free it.
*/
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);
#endif /* AVFILTER_BUFFERSRC_H */
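A minimal usage sketch for the new entry point (not part of the patch). It assumes 'src' and 'sink' are the buffer-source and buffersink contexts of an already configured graph, that 'picref' was obtained e.g. via avfilter_get_video_buffer_ref_from_arrays() as in the avconv change above, and that the sink is drained with av_buffersink_get_buffer_ref() from this tree's buffersink API (an assumption; adapt to your build). Because the source stores only one pending frame (see the check in av_buffersrc_buffer() below), each push is followed by a drain:

#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>

/* push one frame into the graph, then pull everything that is ready */
int push_and_drain(AVFilterContext *src, AVFilterContext *sink,
                   AVFilterBufferRef *picref)
{
    int ret = av_buffersrc_buffer(src, picref); /* the graph now owns picref */
    if (ret < 0)
        return ret;

    while (avfilter_poll_frame(sink->inputs[0]) > 0) {
        AVFilterBufferRef *out = NULL;
        if ((ret = av_buffersink_get_buffer_ref(sink, &out, 0)) < 0)
            return ret;
        /* ... consume 'out' here ... */
        avfilter_unref_buffer(out);
    }
    return 0;
}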
@@ -264,6 +264,7 @@ static int movie_get_frame(AVFilterLink *outlink)
/* use pkt_dts if pkt_pts is not available */
movie->picref->pts = movie->frame->pkt_pts == AV_NOPTS_VALUE ?
movie->frame->pkt_dts : movie->frame->pkt_pts;
if (!movie->frame->sample_aspect_ratio.num)
movie->picref->video->sample_aspect_ratio = st->sample_aspect_ratio;
av_dlog(outlink->src,
...
@@ -26,6 +26,7 @@
#include "avfilter.h"
#include "internal.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "vsrc_buffer.h"
#include "libavutil/imgutils.h"
@@ -112,6 +113,23 @@ int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
return 0;
}
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
BufferSourceContext *c = s->priv;
if (c->picref) {
av_log(s, AV_LOG_ERROR,
"Buffering several frames is not supported. "
"Please consume all available frames before adding a new one.\n"
);
return AVERROR(EINVAL);
}
c->picref = buf;
return 0;
}
#if CONFIG_AVCODEC
#include "avcodec.h"
...