Commit cc80caff authored by S.N. Hemanth Meenakshisundaram, committed by Stefano Sabatini

Separate video specific BufferRef properties into VideoProps.

Define a new struct AVFilterBufferRefVideoProps and add a type field
to AVFilterBufferRef.

Video specific properties in AVFilterBufferRefVideoProps are now
accessed through the *video pointer in AVFilterBufferRef.

Patch by S.N. Hemanth Meenakshisundaram smeenaks->ucsd.edu.

Originally committed as revision 24763 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent fd7b11d0
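
For orientation before the diff, here is a rough before/after sketch of the access pattern this patch introduces. The snippet is illustrative only and not part of the commit; it assumes a video AVFilterLink named link and dimensions w/h obtained elsewhere, while avfilter_get_video_buffer(), AV_PERM_WRITE and the field names are taken from the patch itself:

    AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, w, h);

    /* before this patch: video properties sat directly on the reference,
     * e.g. picref->w, picref->h, picref->pixel_aspect, picref->interlaced */

    /* after this patch: they live behind the new video pointer; code that can
     * also see non-video references should check it first, as ffmpeg.c does */
    if (picref->video) {
        picref->video->pixel_aspect    = (AVRational) {1, 1};
        picref->video->interlaced      = 0;
        picref->video->top_field_first = 0;
    }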
@@ -377,8 +377,8 @@ static int get_filtered_video_pic(AVFilterContext *ctx,
     memcpy(pic2->data, pic->data, sizeof(pic->data));
     memcpy(pic2->linesize, pic->linesize, sizeof(pic->linesize));
-    pic2->interlaced_frame = pic->interlaced;
-    pic2->top_field_first = pic->top_field_first;
+    pic2->interlaced_frame = pic->video->interlaced;
+    pic2->top_field_first = pic->video->top_field_first;
     return 1;
 }
@@ -1701,7 +1701,8 @@ static int output_packet(AVInputStream *ist, int ist_index,
         break;
     case AVMEDIA_TYPE_VIDEO:
 #if CONFIG_AVFILTER
-        ost->st->codec->sample_aspect_ratio = ist->picref->pixel_aspect;
+        if (ist->picref->video)
+            ost->st->codec->sample_aspect_ratio = ist->picref->video->pixel_aspect;
 #endif
         do_video_out(os, ost, ist, &picture, &frame_size);
         if (vstats_filename && frame_size)
......
@@ -694,10 +694,10 @@ static void video_image_display(VideoState *is)
     vp = &is->pictq[is->pictq_rindex];
     if (vp->bmp) {
 #if CONFIG_AVFILTER
-        if (vp->picref->pixel_aspect.num == 0)
+        if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
-            aspect_ratio = av_q2d(vp->picref->pixel_aspect);
+            aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
 #else
         /* XXX: use variable in the frame */
@@ -1582,8 +1582,8 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
         return -1;
-    ref->w = codec->width;
-    ref->h = codec->height;
+    ref->video->w = codec->width;
+    ref->video->h = codec->height;
     for(i = 0; i < 4; i ++) {
         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
@@ -1616,7 +1616,7 @@ static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
         return codec->get_buffer(codec, pic);
     }
-    if ((codec->width != ref->w) || (codec->height != ref->h) ||
+    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
         (codec->pix_fmt != ref->format)) {
         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
         return -1;
@@ -1677,7 +1677,7 @@ static int input_request_frame(AVFilterLink *link)
     picref->pts = pts;
     picref->pos = pkt.pos;
-    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
+    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
     avfilter_start_frame(link, picref);
     avfilter_draw_slice(link, 0, link->h, 1);
     avfilter_end_frame(link);
......
@@ -49,6 +49,10 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
 {
     AVFilterBufferRef *ret = av_malloc(sizeof(AVFilterBufferRef));
     *ret = *ref;
+    if (ref->type == AVMEDIA_TYPE_VIDEO) {
+        ret->video = av_malloc(sizeof(AVFilterBufferRefVideoProps));
+        *ret->video = *ref->video;
+    }
     ret->perms &= pmask;
     ret->buf->refcount ++;
     return ret;
@@ -58,6 +62,7 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref)
 {
     if(!(--ref->buf->refcount))
         ref->buf->free(ref->buf);
+    av_free(ref->video);
     av_free(ref);
 }
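
Note on the ownership implied by the two hunks above: every reference now carries its own copy of the video properties, since avfilter_ref_buffer() allocates and copies a fresh AVFilterBufferRefVideoProps for video references and avfilter_unref_buffer() frees it along with the reference. A minimal sketch, not part of the patch, assuming picref is a video reference whose type field is set:

    AVFilterBufferRef *ref2 = avfilter_ref_buffer(picref, AV_PERM_READ);
    ref2->video->w = 320;          /* private copy; picref->video->w is untouched */
    avfilter_unref_buffer(ref2);   /* frees ref2->video and ref2; the underlying
                                    * AVFilterBuffer is released only when its
                                    * refcount drops to zero */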
@@ -179,7 +184,7 @@ void ff_dprintf_picref(void *ctx, AVFilterBufferRef *picref, int end)
             picref->data [0], picref->data [1], picref->data [2], picref->data [3],
             picref->linesize[0], picref->linesize[1], picref->linesize[2], picref->linesize[3],
             picref->pts, picref->pos,
-            picref->pixel_aspect.num, picref->pixel_aspect.den, picref->w, picref->h,
+            picref->video->pixel_aspect.num, picref->video->pixel_aspect.den, picref->video->w, picref->video->h,
             end ? "\n" : "");
 }
@@ -314,7 +319,7 @@ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
     for(i = 0; i < 4; i ++) {
         int planew =
-            ff_get_plane_bytewidth(link->format, link->cur_buf->w, i);
+            ff_get_plane_bytewidth(link->format, link->cur_buf->video->w, i);
         if(!src[i]) continue;
......
@@ -25,7 +25,7 @@
 #include "libavutil/avutil.h"
 #define LIBAVFILTER_VERSION_MAJOR 1
-#define LIBAVFILTER_VERSION_MINOR 31
+#define LIBAVFILTER_VERSION_MINOR 32
 #define LIBAVFILTER_VERSION_MICRO 0
 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
@@ -88,6 +88,20 @@ typedef struct AVFilterBuffer
 #define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time
 #define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time
+/**
+ * Video specific properties in a reference to an AVFilterBuffer. Since
+ * AVFilterBufferRef is common to different media formats, video specific
+ * per reference properties must be separated out.
+ */
+typedef struct AVFilterBufferRefVideoProps
+{
+    int w;                   ///< image width
+    int h;                   ///< image height
+    AVRational pixel_aspect; ///< pixel aspect ratio
+    int interlaced;          ///< is frame interlaced
+    int top_field_first;     ///< field order
+} AVFilterBufferRefVideoProps;
 /**
  * A reference to an AVFilterBuffer. Since filters can manipulate the origin of
  * a buffer to, for example, crop image without any memcpy, the buffer origin
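
Since AVFilterBufferRef is now shared between media types, consumers are expected to dispatch on the new type field (or check the video pointer) before touching video-specific properties, in the same spirit as the ff_dprintf_picref change above. A hedged sketch of such a consumer follows; the function name log_ref_props is made up for illustration and is not part of the patch:

    static void log_ref_props(void *ctx, AVFilterBufferRef *ref)
    {
        switch (ref->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_log(ctx, AV_LOG_DEBUG, "video %dx%d PAR %d:%d\n",
                   ref->video->w, ref->video->h,
                   ref->video->pixel_aspect.num, ref->video->pixel_aspect.den);
            break;
        default:
            av_log(ctx, AV_LOG_DEBUG, "no video properties attached\n");
            break;
        }
    }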
@@ -101,19 +115,15 @@ typedef struct AVFilterBufferRef
     AVFilterBuffer *buf;     ///< the buffer that this is a reference to
     uint8_t *data[4];        ///< picture data for each plane
     int linesize[4];         ///< number of bytes per line
-    int w;                   ///< image width
-    int h;                   ///< image height
     int format;              ///< media format
     int64_t pts;             ///< presentation timestamp in units of 1/AV_TIME_BASE
     int64_t pos;             ///< byte position in stream, -1 if unknown
-    AVRational pixel_aspect; ///< pixel aspect ratio
     int perms;               ///< permissions, see the AV_PERM_* flags
-    int interlaced;          ///< is frame interlaced
-    int top_field_first;
+    enum AVMediaType type;   ///< media type of buffer data
+    AVFilterBufferRefVideoProps *video; ///< video buffer specific properties
 } AVFilterBufferRef;
 /**
@@ -122,13 +132,13 @@ typedef struct AVFilterBufferRef
  */
 static inline void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src)
 {
+    // copy common properties
     dst->pts = src->pts;
     dst->pos = src->pos;
-    dst->pixel_aspect = src->pixel_aspect;
-    dst->interlaced = src->interlaced;
-    dst->top_field_first = src->top_field_first;
-    dst->w = src->w;
-    dst->h = src->h;
+    switch (src->type) {
+    case AVMEDIA_TYPE_VIDEO: *dst->video = *src->video; break;
+    }
 }
 /**
......
@@ -39,9 +39,10 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
     int i, tempsize;
     char *buf;
-    ref->buf = pic;
-    ref->w = w;
-    ref->h = h;
+    ref->buf = pic;
+    ref->video = av_mallocz(sizeof(AVFilterBufferRefVideoProps));
+    ref->video->w = w;
+    ref->video->h = h;
     /* make sure the buffer gets read permission or it's useless for output */
     ref->perms = perms | AV_PERM_READ;
@@ -49,15 +50,15 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
     pic->refcount = 1;
     ref->format = link->format;
     pic->free = avfilter_default_free_buffer;
-    av_fill_image_linesizes(pic->linesize, ref->format, ref->w);
+    av_fill_image_linesizes(pic->linesize, ref->format, ref->video->w);
     for (i=0; i<4;i++)
         pic->linesize[i] = FFALIGN(pic->linesize[i], 16);
-    tempsize = av_fill_image_pointers(pic->data, ref->format, ref->h, NULL, pic->linesize);
+    tempsize = av_fill_image_pointers(pic->data, ref->format, ref->video->h, NULL, pic->linesize);
     buf = av_malloc(tempsize + 16); // +2 is needed for swscaler, +16 to be
                                     // SIMD-friendly
-    av_fill_image_pointers(pic->data, ref->format, ref->h, buf, pic->linesize);
+    av_fill_image_pointers(pic->data, ref->format, ref->video->h, buf, pic->linesize);
     memcpy(ref->data, pic->data, 4*sizeof(pic->data[0]));
     memcpy(ref->linesize, pic->linesize, 4*sizeof(pic->linesize[0]));
......
@@ -60,7 +60,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
 {
     AspectContext *aspect = link->dst->priv;
-    picref->pixel_aspect = aspect->aspect;
+    picref->video->pixel_aspect = aspect->aspect;
     avfilter_start_frame(link->dst->outputs[0], picref);
 }
......
@@ -135,8 +135,8 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     AVFilterBufferRef *ref2 = avfilter_ref_buffer(picref, ~0);
     int i;
-    ref2->w = crop->w;
-    ref2->h = crop->h;
+    picref->video->w = crop->w;
+    picref->video->h = crop->h;
     ref2->data[0] += crop->y * ref2->linesize[0];
     ref2->data[0] += (crop->x * crop->max_step[0]);
......
@@ -428,9 +428,9 @@ static int color_request_frame(AVFilterLink *link)
 {
     ColorContext *color = link->src->priv;
     AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
-    picref->pixel_aspect = (AVRational) {1, 1};
-    picref->pts = av_rescale_q(color->pts++, color->time_base, AV_TIME_BASE_Q);
-    picref->pos = 0;
+    picref->video->pixel_aspect = (AVRational) {1, 1};
+    picref->pts = av_rescale_q(color->pts++, color->time_base, AV_TIME_BASE_Q);
+    picref->pos = 0;
     avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
     draw_rectangle(picref,
......
@@ -156,9 +156,9 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     outlink->out_buf = outpicref;
-    av_reduce(&outpicref->pixel_aspect.num, &outpicref->pixel_aspect.den,
-              (int64_t)picref->pixel_aspect.num * outlink->h * link->w,
-              (int64_t)picref->pixel_aspect.den * outlink->w * link->h,
+    av_reduce(&outpicref->video->pixel_aspect.num, &outpicref->video->pixel_aspect.den,
+              (int64_t)picref->video->pixel_aspect.num * outlink->h * link->w,
+              (int64_t)picref->video->pixel_aspect.den * outlink->w * link->h,
               INT_MAX);
     scale->slice_y = 0;
......
@@ -122,10 +122,10 @@ static int request_frame(AVFilterLink *link)
     av_picture_copy((AVPicture *)&picref->data, (AVPicture *)&c->frame,
                     picref->format, link->w, link->h);
-    picref->pts = c->pts;
-    picref->pixel_aspect = c->pixel_aspect;
-    picref->interlaced = c->frame.interlaced_frame;
-    picref->top_field_first = c->frame.top_field_first;
+    picref->pts = c->pts;
+    picref->video->pixel_aspect = c->pixel_aspect;
+    picref->video->interlaced = c->frame.interlaced_frame;
+    picref->video->top_field_first = c->frame.top_field_first;
     avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
     avfilter_draw_slice(link, 0, link->h, 1);
     avfilter_end_frame(link);
......