Commit d0ad91c2 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  os_support: Define SHUT_RD, SHUT_WR and SHUT_RDWR on OS/2
  http: Add support for reading http POST reply headers
  http: Add http_shutdown() for ending writing of posts
  tcp: Allow signalling end of reading/writing
  avio: Add a function for signalling end of reading/writing
  lavfi: fix comment, audio is supported now.
  lavfi: fix incorrect comment.
  lavfi: remove avfilter_null_* from public API on next bump.
  lavfi: remove avfilter_default_* from public API on next bump.
  lavfi: deprecate default config_props() callback and refactor avfilter_config_links()
  avfiltergraph: smarter sample format selection.
  avconv: rename transcode_audio/video to decode_audio/video.
  asyncts: reset delta to 0 when it's not used.
  x86: lavc: use %if HAVE_AVX guards around AVX functions in yasm code.
  dwt: return errors from ff_slice_buffer_init()

Conflicts:
	ffmpeg.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/formats.c
	libavfilter/version.h
	libavfilter/vf_blackframe.c
	libavfilter/vf_drawtext.c
	libavfilter/vf_fade.c
	libavfilter/vf_format.c
	libavfilter/vf_showinfo.c
	libavfilter/video.c
	libavfilter/video.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents d0f78e77 3f9d6e42
@@ -2538,7 +2538,7 @@ static int guess_input_channel_layout(InputStream *ist)
     return 1;
 }
 
-static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
+static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 {
     AVFrame *decoded_frame;
     AVCodecContext *avctx = ist->st->codec;
@@ -2639,7 +2639,7 @@ static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     return ret;
 }
 
-static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output)
+static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
 {
     AVFrame *decoded_frame;
     void *buffer_to_free = NULL;
@@ -2824,10 +2824,10 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
     switch (ist->st->codec->codec_type) {
     case AVMEDIA_TYPE_AUDIO:
-        ret = transcode_audio (ist, &avpkt, &got_output);
+        ret = decode_audio    (ist, &avpkt, &got_output);
         break;
     case AVMEDIA_TYPE_VIDEO:
-        ret = transcode_video (ist, &avpkt, &got_output);
+        ret = decode_video    (ist, &avpkt, &got_output);
         if (avpkt.duration) {
             duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
         } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {

@@ -24,9 +24,9 @@
 #include "dwt.h"
 #include "libavcodec/x86/dwt.h"
 
-void ff_slice_buffer_init(slice_buffer *buf, int line_count,
-                          int max_allocated_lines, int line_width,
-                          IDWTELEM *base_buffer)
+int ff_slice_buffer_init(slice_buffer *buf, int line_count,
+                         int max_allocated_lines, int line_width,
+                         IDWTELEM *base_buffer)
 {
     int i;
@@ -55,6 +55,7 @@ void ff_slice_buffer_init(slice_buffer *buf, int line_count,
     }
 
     buf->data_stack_top = max_allocated_lines - 1;
+    return 0;
 }
 
 IDWTELEM *ff_slice_buffer_load_line(slice_buffer *buf, int line)

@@ -228,9 +228,9 @@ void ff_spatial_idwt_slice2(DWTContext *d, int y);
                        : ff_slice_buffer_load_line((slice_buf), \
                                                    (line_num)))
 
-void ff_slice_buffer_init(slice_buffer *buf, int line_count,
-                          int max_allocated_lines, int line_width,
-                          IDWTELEM *base_buffer);
+int ff_slice_buffer_init(slice_buffer *buf, int line_count,
+                         int max_allocated_lines, int line_width,
+                         IDWTELEM *base_buffer);
 void ff_slice_buffer_release(slice_buffer *buf, int line);
 void ff_slice_buffer_flush(slice_buffer *buf);
 void ff_slice_buffer_destroy(slice_buffer *buf);

@@ -396,7 +396,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
         // realloc slice buffer for the case that spatial_decomposition_count changed
         ff_slice_buffer_destroy(&s->sb);
-        ff_slice_buffer_init(&s->sb, s->plane[0].height, (MB_SIZE >> s->block_max_depth) + s->spatial_decomposition_count * 8 + 1, s->plane[0].width, s->spatial_idwt_buffer);
+        if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
+                                        (MB_SIZE >> s->block_max_depth) +
+                                        s->spatial_decomposition_count * 8 + 1,
+                                        s->plane[0].width,
+                                        s->spatial_idwt_buffer)) < 0)
+            return res;
 
     for(plane_index=0; plane_index<3; plane_index++){
         Plane *p= &s->plane[plane_index];

@@ -1156,8 +1156,10 @@ ALIGN 16
 INIT_XMM sse
 VECTOR_FMUL
+%if HAVE_AVX
 INIT_YMM avx
 VECTOR_FMUL
+%endif
 
 ;-----------------------------------------------------------------------------
 ; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
@@ -1198,8 +1200,10 @@ ALIGN 16
 INIT_XMM sse
 VECTOR_FMUL_REVERSE
+%if HAVE_AVX
 INIT_YMM avx
 VECTOR_FMUL_REVERSE
+%endif
 
 ;-----------------------------------------------------------------------------
 ; vector_fmul_add(float *dst, const float *src0, const float *src1,
@@ -1231,8 +1235,10 @@ ALIGN 16
 INIT_XMM sse
 VECTOR_FMUL_ADD
+%if HAVE_AVX
 INIT_YMM avx
 VECTOR_FMUL_ADD
+%endif
 
 ;-----------------------------------------------------------------------------
 ; void ff_butterflies_float_interleave(float *dst, const float *src0,

@@ -182,10 +182,13 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
     if (labs(delta) > s->min_delta) {
         av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta);
         out_size += delta;
-    } else if (s->resample) {
-        int comp = av_clip(delta, -s->max_comp, s->max_comp);
-        av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
-        avresample_set_compensation(s->avr, delta, inlink->sample_rate);
+    } else {
+        if (s->resample) {
+            int comp = av_clip(delta, -s->max_comp, s->max_comp);
+            av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
+            avresample_set_compensation(s->avr, delta, inlink->sample_rate);
+        }
+        delta = 0;
     }
 
     if (out_size > 0) {

@@ -202,7 +202,6 @@ int avfilter_config_links(AVFilterContext *filter)
             link->sample_aspect_ratio = inlink ?
                 inlink->sample_aspect_ratio : (AVRational){1,1};
 
-#if 1
             if (inlink) {
                 if (!link->w)
                     link->w = inlink->w;
@@ -233,7 +232,6 @@ int avfilter_config_links(AVFilterContext *filter)
                     link->time_base = (AVRational) {1, link->sample_rate};
             }
-#endif
 
             if ((config_link = link->dstpad->config_props))
                 if ((ret = config_link(link)) < 0)
                     return ret;

@@ -383,7 +383,7 @@ struct AVFilterPad {
     const char *name;
 
     /**
-     * AVFilterPad type. Can be AVMEDIA_TYPE_VIDEO or AVMEDIA_TYPE_AUDIO.
+     * AVFilterPad type.
      */
     enum AVMediaType type;
 
@@ -465,7 +465,7 @@ struct AVFilterPad {
      *
      * Defaults to just calling the source poll_frame() method.
      *
-     * Output video pads only.
+     * Output pads only.
      */
     int (*poll_frame)(AVFilterLink *link);
 
@@ -476,7 +476,7 @@ struct AVFilterPad {
      * See avfilter_request_frame() for the error codes with a specific
      * meaning.
      *
-     * Output video pads only.
+     * Output pads only.
      */
     int (*request_frame)(AVFilterLink *link);
 
@@ -500,19 +500,28 @@ struct AVFilterPad {
     int (*config_props)(AVFilterLink *link);
 };
 
+#if FF_API_FILTERS_PUBLIC
 /** default handler for start_frame() for video inputs */
+attribute_deprecated
 void avfilter_default_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
 
 /** default handler for draw_slice() for video inputs */
+attribute_deprecated
 void avfilter_default_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
 
 /** default handler for end_frame() for video inputs */
+attribute_deprecated
 void avfilter_default_end_frame(AVFilterLink *link);
 
 /** default handler for get_video_buffer() for video inputs */
+attribute_deprecated
 AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link,
                                                      int perms, int w, int h);
+
+/** Default handler for query_formats() */
+attribute_deprecated
+int avfilter_default_query_formats(AVFilterContext *ctx);
+#endif
 
 /**
  * Helpers for query_formats() which set all links to the same list of
@@ -527,21 +536,24 @@ void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *
 void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *formats);
 #endif
 
-/** Default handler for query_formats() */
-int avfilter_default_query_formats(AVFilterContext *ctx);
-
+#if FF_API_FILTERS_PUBLIC
 /** start_frame() handler for filters which simply pass video along */
+attribute_deprecated
 void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
 
 /** draw_slice() handler for filters which simply pass video along */
+attribute_deprecated
 void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
 
 /** end_frame() handler for filters which simply pass video along */
+attribute_deprecated
 void avfilter_null_end_frame(AVFilterLink *link);
 
 /** get_video_buffer() handler for filters which simply pass video along */
+attribute_deprecated
 AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link,
                                                   int perms, int w, int h);
+#endif
 
 /**
  * Filter definition. This defines the pads a filter contains, and all the

@@ -227,7 +227,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
         if (graph->filters[i]->filter->query_formats)
             graph->filters[i]->filter->query_formats(graph->filters[i]);
         else
-            avfilter_default_query_formats(graph->filters[i]);
+            ff_default_query_formats(graph->filters[i]);
     }
 
     /* go through and merge as many format lists as possible */
@@ -571,6 +571,74 @@ static void swap_channel_layouts(AVFilterGraph *graph)
         swap_channel_layouts_on_filter(graph->filters[i]);
 }
 
+static void swap_sample_fmts_on_filter(AVFilterContext *filter)
+{
+    AVFilterLink *link = NULL;
+    int format, bps;
+    int i, j;
+
+    for (i = 0; i < filter->input_count; i++) {
+        link = filter->inputs[i];
+
+        if (link->type == AVMEDIA_TYPE_AUDIO &&
+            link->out_formats->format_count == 1)
+            break;
+    }
+    if (i == filter->input_count)
+        return;
+
+    format = link->out_formats->formats[0];
+    bps    = av_get_bytes_per_sample(format);
+
+    for (i = 0; i < filter->output_count; i++) {
+        AVFilterLink *outlink = filter->outputs[i];
+        int best_idx, best_score = INT_MIN;
+
+        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
+            outlink->in_formats->format_count < 2)
+            continue;
+
+        for (j = 0; j < outlink->in_formats->format_count; j++) {
+            int out_format = outlink->in_formats->formats[j];
+            int out_bps    = av_get_bytes_per_sample(out_format);
+            int score;
+
+            if (av_get_packed_sample_fmt(out_format) == format ||
+                av_get_planar_sample_fmt(out_format) == format) {
+                best_idx = j;
+                break;
+            }
+
+            /* for s32 and float prefer double to prevent loss of information */
+            if (bps == 4 && out_bps == 8) {
+                best_idx = j;
+                break;
+            }
+
+            /* prefer closest higher or equal bps */
+            score = -abs(out_bps - bps);
+            if (out_bps >= bps)
+                score += INT_MAX/2;
+
+            if (score > best_score) {
+                best_score = score;
+                best_idx   = j;
+            }
+        }
+        FFSWAP(int, outlink->in_formats->formats[0],
+               outlink->in_formats->formats[best_idx]);
+    }
+}
+
+static void swap_sample_fmts(AVFilterGraph *graph)
+{
+    int i;
+
+    for (i = 0; i < graph->filter_count; i++)
+        swap_sample_fmts_on_filter(graph->filters[i]);
+}
+
 static int pick_formats(AVFilterGraph *graph)
 {
     int i, j, ret;
@@ -633,8 +701,9 @@ int ff_avfilter_graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
      * of format conversion inside filters */
     reduce_formats(graph);
 
-    /* for audio filters, ensure the best sample rate and channel layout
+    /* for audio filters, ensure the best format, sample rate and channel layout
      * is selected */
+    swap_sample_fmts(graph);
     swap_samplerates(graph);
     swap_channel_layouts(graph);

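The swap_sample_fmts_on_filter() function added above reorders each audio output link's format list so the first entry best matches the single input format: exact packed/planar matches win outright, 4-byte formats (s32/float) promote to 8-byte double, and otherwise the closest bytes-per-sample that does not lose precision is preferred. A minimal standalone sketch of just that scoring rule (pick_closest_bps is a hypothetical helper, not part of this patch; it only uses the libavutil call already shown in the hunk):

#include <limits.h>
#include <stdlib.h>
#include "libavutil/samplefmt.h"

/* Return the index of the candidate whose bytes-per-sample is closest to
 * 'bps', strongly preferring candidates that are at least as wide so no
 * precision is lost when a wider format is available. */
static int pick_closest_bps(const enum AVSampleFormat *candidates, int nb, int bps)
{
    int best_idx = 0, best_score = INT_MIN, j;

    for (j = 0; j < nb; j++) {
        int out_bps = av_get_bytes_per_sample(candidates[j]);
        int score   = -abs(out_bps - bps);  /* closeness to the input width */

        if (out_bps >= bps)                 /* large bonus: no loss of information */
            score += INT_MAX / 2;

        if (score > best_score) {
            best_score = score;
            best_idx   = j;
        }
    }
    return best_idx;
}
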
@@ -449,7 +449,7 @@ void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
                        avfilter_formats_ref, formats);
 }
 
-int avfilter_default_query_formats(AVFilterContext *ctx)
+int ff_default_query_formats(AVFilterContext *ctx)
 {
     enum AVMediaType type = ctx->inputs  && ctx->inputs [0] ? ctx->inputs [0]->type :
                             ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
@@ -534,6 +534,12 @@ int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx)
     return 0;
 }
 
+#if FF_API_FILTERS_PUBLIC
+int avfilter_default_query_formats(AVFilterContext *ctx)
+{
+    return ff_default_query_formats(ctx);
+}
+#endif
+
 #ifdef TEST
 
 #undef printf
@@ -552,4 +558,3 @@ int main(void)
 }
 #endif

@@ -78,4 +78,6 @@ void ff_channel_layouts_unref(AVFilterChannelLayouts **ref);
 void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
                                   AVFilterChannelLayouts **newref);
 
+int ff_default_query_formats(AVFilterContext *ctx);
+
 #endif // AVFILTER_FORMATS_H

@@ -25,6 +25,7 @@
 #include "avfilter.h"
 #include "audio.h"
+#include "video.h"
 
 static int split_init(AVFilterContext *ctx, const char *args, void *opaque)
 {
@@ -100,7 +101,7 @@ AVFilter avfilter_vf_split = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer= avfilter_null_get_video_buffer,
+                                          .get_video_buffer= ff_null_get_video_buffer,
                                           .start_frame = start_frame,
                                           .draw_slice = draw_slice,
                                           .end_frame = end_frame, },

@@ -56,5 +56,11 @@
 #ifndef FF_API_PACKING
 #define FF_API_PACKING (LIBAVFILTER_VERSION_MAJOR < 3)
 #endif
+#ifndef FF_API_DEFAULT_CONFIG_OUTPUT_LINK
+#define FF_API_DEFAULT_CONFIG_OUTPUT_LINK (LIBAVFILTER_VERSION_MAJOR < 3)
+#endif
+#ifndef FF_API_FILTERS_PUBLIC
+#define FF_API_FILTERS_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 3)
+#endif
 
 #endif // AVFILTER_VERSION_H

@@ -26,6 +26,7 @@
 #include "libavutil/mathematics.h"
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
+#include "video.h"
 
 typedef struct {
     AVRational ratio;
@@ -86,9 +87,9 @@ AVFilter avfilter_vf_setdar = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
                                           .config_props = setdar_config_props,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .start_frame = start_frame,
-                                          .end_frame = avfilter_null_end_frame },
+                                          .end_frame = ff_null_end_frame },
                                         { .name = NULL}},
 
     .outputs   = (const AVFilterPad[]) {{ .name = "default",
@@ -118,9 +119,9 @@ AVFilter avfilter_vf_setsar = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
                                           .config_props = setsar_config_props,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .start_frame = start_frame,
-                                          .end_frame = avfilter_null_end_frame },
+                                          .end_frame = ff_null_end_frame },
                                         { .name = NULL}},
 
     .outputs   = (const AVFilterPad[]) {{ .name = "default",

@@ -29,6 +29,7 @@
 #include "avfilter.h"
 #include "internal.h"
+#include "video.h"
 
 typedef struct {
     unsigned int bamount; ///< black amount
@@ -127,7 +128,7 @@ AVFilter avfilter_vf_blackframe = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
                                           .draw_slice = draw_slice,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .start_frame = ff_null_start_frame_keep_ref,
                                           .end_frame = end_frame, },
                                         { .name = NULL}},

@@ -22,6 +22,7 @@
  */
 
 #include "avfilter.h"
+#include "video.h"
 
 AVFilter avfilter_vf_copy = {
     .name      = "copy",
@@ -29,9 +30,9 @@ AVFilter avfilter_vf_copy = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
-                                          .start_frame = avfilter_null_start_frame,
-                                          .end_frame = avfilter_null_end_frame,
+                                          .get_video_buffer = ff_null_get_video_buffer,
+                                          .start_frame = ff_null_start_frame,
+                                          .end_frame = ff_null_end_frame,
                                           .rej_perms = ~0 },
                                         { .name = NULL}},
     .outputs   = (const AVFilterPad[]) {{ .name = "default",

@@ -26,6 +26,7 @@
 /* #define DEBUG */
 
 #include "avfilter.h"
+#include "video.h"
 #include "libavutil/eval.h"
 #include "libavutil/avstring.h"
 #include "libavutil/libm.h"
@@ -352,7 +353,7 @@ AVFilter avfilter_vf_crop = {
                                           .start_frame = start_frame,
                                           .draw_slice = draw_slice,
                                           .end_frame = end_frame,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .config_props = config_input, },
                                         { .name = NULL}},
     .outputs   = (const AVFilterPad[]) {{ .name = "default",

@@ -25,6 +25,7 @@
 #include "libavutil/imgutils.h"
 #include "avfilter.h"
+#include "video.h"
 
 typedef struct {
     int x1, y1, x2, y2;
@@ -203,8 +204,8 @@ AVFilter avfilter_vf_cropdetect = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
                                           .config_props = config_input,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
-                                          .start_frame = avfilter_null_start_frame,
+                                          .get_video_buffer = ff_null_get_video_buffer,
+                                          .start_frame = ff_null_start_frame,
                                           .end_frame = end_frame, },
                                         { .name = NULL}},

@@ -29,6 +29,7 @@
 #include "libavutil/opt.h"
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
+#include "video.h"
 
 /**
  * Apply a simple delogo algorithm to the image in dst and put the
@@ -275,7 +276,7 @@ AVFilter avfilter_vf_delogo = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .start_frame = start_frame,
                                           .draw_slice = null_draw_slice,
                                           .end_frame = end_frame,

@@ -28,6 +28,7 @@
 #include "libavutil/pixdesc.h"
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
+#include "video.h"
 
 enum { Y, U, V, A };
@@ -130,10 +131,10 @@ AVFilter avfilter_vf_drawbox = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
                                           .config_props = config_input,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
-                                          .start_frame = avfilter_null_start_frame,
+                                          .get_video_buffer = ff_null_get_video_buffer,
+                                          .start_frame = ff_null_start_frame,
                                           .draw_slice = draw_slice,
-                                          .end_frame = avfilter_null_end_frame,
+                                          .end_frame = ff_null_end_frame,
                                           .min_perms = AV_PERM_WRITE | AV_PERM_READ,
                                           .rej_perms = AV_PERM_PRESERVE },
                                         { .name = NULL}},

@@ -41,6 +41,7 @@
 #include "libavutil/lfg.h"
 #include "avfilter.h"
 #include "drawutils.h"
+#include "video.h"
 
 #undef time
@@ -830,8 +831,8 @@ AVFilter avfilter_vf_drawtext = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
-                                          .start_frame = avfilter_null_start_frame,
+                                          .get_video_buffer = ff_null_get_video_buffer,
+                                          .start_frame = ff_null_start_frame,
                                           .draw_slice = null_draw_slice,
                                           .end_frame = end_frame,
                                           .config_props = config_input,

@@ -32,6 +32,7 @@
 #include "avfilter.h"
 #include "drawutils.h"
 #include "internal.h"
+#include "video.h"
 
 #define R 0
 #define G 1
@@ -288,8 +289,8 @@ AVFilter avfilter_vf_fade = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
                                           .config_props = config_props,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
-                                          .start_frame = avfilter_null_start_frame,
+                                          .get_video_buffer = ff_null_get_video_buffer,
+                                          .start_frame = ff_null_start_frame,
                                           .draw_slice = draw_slice,
                                           .end_frame = end_frame,
                                           .min_perms = AV_PERM_READ | AV_PERM_WRITE,

@@ -24,6 +24,7 @@
  */
 
 #include "avfilter.h"
+#include "video.h"
 
 typedef struct BufPic {
     AVFilterBufferRef *picref;
@@ -106,7 +107,7 @@ AVFilter avfilter_vf_fifo = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer= avfilter_null_get_video_buffer,
+                                          .get_video_buffer= ff_null_get_video_buffer,
                                           .start_frame = start_frame,
                                           .draw_slice = draw_slice,
                                           .end_frame = end_frame,

@@ -26,6 +26,7 @@
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
 #include "internal.h"
+#include "video.h"
 
 typedef struct {
     /**
@@ -102,10 +103,10 @@ AVFilter avfilter_vf_format = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer= avfilter_null_get_video_buffer,
-                                          .start_frame = avfilter_null_start_frame,
-                                          .draw_slice = avfilter_null_draw_slice,
-                                          .end_frame = avfilter_null_end_frame, },
+                                          .get_video_buffer= ff_null_get_video_buffer,
+                                          .start_frame = ff_null_start_frame,
+                                          .draw_slice = ff_null_draw_slice,
+                                          .end_frame = ff_null_end_frame, },
                                         { .name = NULL}},
     .outputs   = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO },
@@ -132,10 +133,10 @@ AVFilter avfilter_vf_noformat = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer= avfilter_null_get_video_buffer,
-                                          .start_frame = avfilter_null_start_frame,
-                                          .draw_slice = avfilter_null_draw_slice,
-                                          .end_frame = avfilter_null_end_frame, },
+                                          .get_video_buffer= ff_null_get_video_buffer,
+                                          .start_frame = ff_null_start_frame,
+                                          .draw_slice = ff_null_draw_slice,
+                                          .end_frame = ff_null_end_frame, },
                                         { .name = NULL}},
     .outputs   = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO },

@@ -22,6 +22,7 @@
  */
 
 #include "avfilter.h"
+#include "video.h"
 
 AVFilter avfilter_vf_null = {
     .name      = "null",
@@ -31,9 +32,9 @@ AVFilter avfilter_vf_null = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
-                                          .start_frame = avfilter_null_start_frame,
-                                          .end_frame = avfilter_null_end_frame },
+                                          .get_video_buffer = ff_null_get_video_buffer,
+                                          .start_frame = ff_null_start_frame,
+                                          .end_frame = ff_null_end_frame },
                                         { .name = NULL}},
     .outputs   = (const AVFilterPad[]) {{ .name = "default",

@@ -26,6 +26,7 @@
 #include "libavutil/eval.h"
 #include "libavutil/fifo.h"
 #include "avfilter.h"
+#include "video.h"
 
 static const char *const var_names[] = {
     "TB",                ///< timebase
@@ -326,7 +327,7 @@ AVFilter avfilter_vf_select = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .config_props = config_input,
                                           .start_frame = start_frame,
                                           .draw_slice = draw_slice,

@@ -29,6 +29,7 @@
 #include "libavutil/eval.h"
 #include "libavutil/mathematics.h"
 #include "avfilter.h"
+#include "video.h"
 
 static const char *const var_names[] = {
     "INTERLACED",  ///< tell if the current frame is interlaced
@@ -139,7 +140,7 @@ AVFilter avfilter_vf_setpts = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .config_props = config_input,
                                           .start_frame = start_frame, },
                                         { .name = NULL }},

@@ -29,6 +29,7 @@
 #include "libavutil/rational.h"
 #include "avfilter.h"
 #include "internal.h"
+#include "video.h"
 
 static const char *const var_names[] = {
     "AVTB",   /* default timebase 1/AV_TIME_BASE */
@@ -121,9 +122,9 @@ AVFilter avfilter_vf_settb = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .start_frame = start_frame,
-                                          .end_frame = avfilter_null_end_frame },
+                                          .end_frame = ff_null_end_frame },
                                         { .name = NULL }},
     .outputs   = (const AVFilterPad[]) {{ .name = "default",

@@ -28,6 +28,7 @@
 #include "libavutil/timestamp.h"
 #include "avfilter.h"
 #include "internal.h"
+#include "video.h"
 
 typedef struct {
     unsigned int frame;
@@ -93,7 +94,7 @@ AVFilter avfilter_vf_showinfo = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .start_frame = ff_null_start_frame_keep_ref,
                                           .end_frame = end_frame,
                                           .min_perms = AV_PERM_READ, },

@@ -24,6 +24,7 @@
  */
 
 #include "avfilter.h"
+#include "video.h"
 #include "libavutil/pixdesc.h"
 
 typedef struct {
@@ -105,11 +106,11 @@ AVFilter avfilter_vf_slicify = {
     .inputs    = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO,
-                                          .get_video_buffer = avfilter_null_get_video_buffer,
+                                          .get_video_buffer = ff_null_get_video_buffer,
                                           .start_frame = start_frame,
                                           .draw_slice = draw_slice,
                                           .config_props = config_props,
-                                          .end_frame = avfilter_null_end_frame, },
+                                          .end_frame = ff_null_end_frame, },
                                         { .name = NULL}},
     .outputs   = (const AVFilterPad[]) {{ .name = "default",
                                           .type = AVMEDIA_TYPE_VIDEO, },

@@ -25,6 +25,7 @@
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
+#include "video.h"
 
 typedef struct {
     int vsub;   ///< vertical chroma subsampling
@@ -47,7 +48,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
     int i;
 
     if (!(perms & AV_PERM_NEG_LINESIZES))
-        return avfilter_default_get_video_buffer(link, perms, w, h);
+        return ff_default_get_video_buffer(link, perms, w, h);
 
     picref = avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
     for (i = 0; i < 4; i ++) {

@@ -22,6 +22,7 @@
 #include "libavutil/common.h"
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
+#include "video.h"
 #include "yadif.h"
 
 #undef NDEBUG
@@ -179,7 +180,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int w,
     int height= FFALIGN(h+2, 32);
     int i;
 
-    picref = avfilter_default_get_video_buffer(link, perms, width, height);
+    picref = ff_default_get_video_buffer(link, perms, width, height);
     picref->video->w = w;
     picref->video->h = h;

@@ -20,6 +20,7 @@
 #include "avfilter.h"
 #include "internal.h"
+#include "video.h"
 
 static char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms)
 {
@@ -61,12 +62,12 @@ static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
     av_dlog(ctx, "]%s", end ? "\n" : "");
 }
 
-AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
 {
     return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
 }
 
-AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
 {
     int linesize[4];
     uint8_t *data[4];
@@ -174,7 +175,7 @@ AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int
     ret = link->dstpad->get_video_buffer(link, perms, w, h);
 
     if (!ret)
-        ret = avfilter_default_get_video_buffer(link, perms, w, h);
+        ret = ff_default_get_video_buffer(link, perms, w, h);
 
     if (ret)
         ret->type = AVMEDIA_TYPE_VIDEO;
@@ -184,12 +185,12 @@ AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int
     return ret;
 }
 
-void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
 {
     avfilter_start_frame(link->dst->outputs[0], picref);
 }
 
-void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+static void default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
 {
     AVFilterLink *outlink = NULL;
@@ -215,7 +216,7 @@ void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     FF_DPRINTF_START(NULL, start_frame); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " "); ff_dlog_ref(NULL, picref, 1);
 
     if (!(start_frame = dst->start_frame))
-        start_frame = avfilter_default_start_frame;
+        start_frame = default_start_frame;
 
     if (picref->linesize[0] < 0)
         perms |= AV_PERM_NEG_LINESIZES;
@@ -246,12 +247,12 @@ void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     ff_update_link_current_pts(link, link->cur_buf->pts);
 }
 
-void avfilter_null_end_frame(AVFilterLink *link)
+void ff_null_end_frame(AVFilterLink *link)
 {
     avfilter_end_frame(link->dst->outputs[0]);
 }
 
-void avfilter_default_end_frame(AVFilterLink *inlink)
+static void default_end_frame(AVFilterLink *inlink)
 {
     AVFilterLink *outlink = NULL;
@@ -275,7 +276,7 @@ void avfilter_end_frame(AVFilterLink *link)
     void (*end_frame)(AVFilterLink *);
 
     if (!(end_frame = link->dstpad->end_frame))
-        end_frame = avfilter_default_end_frame;
+        end_frame = default_end_frame;
 
     end_frame(link);
@@ -287,12 +288,12 @@ void avfilter_end_frame(AVFilterLink *link)
     }
 }
 
-void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
 {
     avfilter_draw_slice(link->dst->outputs[0], y, h, slice_dir);
 }
 
-void avfilter_default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+static void default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
 {
     AVFilterLink *outlink = NULL;
@@ -340,7 +341,41 @@ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
     }
 
     if (!(draw_slice = link->dstpad->draw_slice))
-        draw_slice = avfilter_default_draw_slice;
+        draw_slice = default_draw_slice;
 
     draw_slice(link, y, h, slice_dir);
 }
 
+#if FF_API_FILTERS_PUBLIC
+AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+{
+    return ff_default_get_video_buffer(link, perms, w, h);
+}
+void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+{
+    default_start_frame(inlink, picref);
+}
+void avfilter_default_end_frame(AVFilterLink *inlink)
+{
+    default_end_frame(inlink);
+}
+void avfilter_default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+{
+    default_draw_slice(inlink, y, h, slice_dir);
+}
+AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+{
+    return ff_null_get_video_buffer(link, perms, w, h);
+}
+void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+{
+    ff_null_start_frame(link, picref);
+}
+void avfilter_null_end_frame(AVFilterLink *link)
+{
+    ff_null_end_frame(link);
+}
+void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+{
+    ff_null_draw_slice(link, y, h, slice_dir);
+}
+#endif

/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_VIDEO_H
#define AVFILTER_VIDEO_H
AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h);
void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
void ff_null_end_frame(AVFilterLink *link);
#endif /* AVFILTER_VIDEO_H */
@@ -376,6 +376,13 @@ int ffurl_get_file_handle(URLContext *h)
     return h->prot->url_get_file_handle(h);
 }
 
+int ffurl_shutdown(URLContext *h, int flags)
+{
+    if (!h->prot->url_shutdown)
+        return AVERROR(EINVAL);
+    return h->prot->url_shutdown(h, flags);
+}
+
 int ff_check_interrupt(AVIOInterruptCB *cb)
 {
     int ret;

@@ -52,6 +52,8 @@ typedef struct {
     char *headers;
     int willclose;          /**< Set if the server correctly handles Connection: close and will close the connection after feeding us the content. */
     int chunked_post;
+    int end_chunked_post;   /**< A flag which indicates if the end of chunked encoding has been sent. */
+    int end_header;         /**< A flag which indicates we have finished to read POST reply. */
 } HTTPContext;
 
 #define OFFSET(x) offsetof(HTTPContext, x)
@@ -251,8 +253,10 @@ static int process_line(URLContext *h, char *line, int line_count,
     char *tag, *p, *end;
 
     /* end of header */
-    if (line[0] == '\0')
+    if (line[0] == '\0') {
+        s->end_header = 1;
         return 0;
+    }
 
     p = line;
     if (line_count == 0) {
@@ -419,6 +423,7 @@ static int http_connect(URLContext *h, const char *path, const char *local_path,
     s->off = 0;
     s->filesize = -1;
     s->willclose = 0;
+    s->end_chunked_post = 0;
     if (post) {
         /* Pretend that it did work. We didn't read any header yet, since
          * we've still to send the POST data, but the code calling this
@@ -464,6 +469,17 @@ static int http_buf_read(URLContext *h, uint8_t *buf, int size)
 static int http_read(URLContext *h, uint8_t *buf, int size)
 {
     HTTPContext *s = h->priv_data;
+    int err, new_location;
+
+    if (s->end_chunked_post) {
+        if (!s->end_header) {
+            err = http_read_header(h, &new_location);
+            if (err < 0)
+                return err;
+        }
+
+        return http_buf_read(h, buf, size);
+    }
 
     if (s->chunksize >= 0) {
         if (!s->chunksize) {
@@ -516,16 +532,30 @@ static int http_write(URLContext *h, const uint8_t *buf, int size)
     return size;
 }
 
-static int http_close(URLContext *h)
+static int http_shutdown(URLContext *h, int flags)
 {
     int ret = 0;
     char footer[] = "0\r\n\r\n";
     HTTPContext *s = h->priv_data;
 
     /* signal end of chunked encoding if used */
-    if ((h->flags & AVIO_FLAG_WRITE) && s->chunked_post) {
+    if ((flags & AVIO_FLAG_WRITE) && s->chunked_post) {
         ret = ffurl_write(s->hd, footer, sizeof(footer) - 1);
         ret = ret > 0 ? 0 : ret;
+        s->end_chunked_post = 1;
+    }
+
+    return ret;
+}
+
+static int http_close(URLContext *h)
+{
+    int ret = 0;
+    HTTPContext *s = h->priv_data;
+
+    if (!s->end_chunked_post) {
+        /* Close the write direction by sending the end of chunked encoding. */
+        ret = http_shutdown(h, h->flags);
     }
 
     if (s->hd)
@@ -585,6 +615,7 @@ URLProtocol ff_http_protocol = {
     .url_seek            = http_seek,
     .url_close           = http_close,
     .url_get_file_handle = http_get_file_handle,
+    .url_shutdown        = http_shutdown,
    .priv_data_size      = sizeof(HTTPContext),
     .priv_data_class     = &http_context_class,
     .flags               = URL_PROTOCOL_FLAG_NETWORK,

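With the end_chunked_post and end_header flags above, the POST reply is picked up lazily: once the terminating "0\r\n\r\n" chunk has been sent through the new shutdown path, the next read first parses the reply headers and then returns body data. A rough caller-side sketch of that flow (post_and_read_reply is a hypothetical helper, not part of the patch; it assumes 'h' is an already-opened http URLContext with chunked post enabled and uses the internal ffurl_* wrappers):

/* Write the POST body, end the write side, then read the reply. */
static int post_and_read_reply(URLContext *h,
                               const uint8_t *body, int body_size,
                               uint8_t *reply, int reply_size)
{
    int ret;

    if ((ret = ffurl_write(h, body, body_size)) < 0)
        return ret;

    /* signals end of chunked encoding via http_shutdown() */
    if ((ret = ffurl_shutdown(h, AVIO_FLAG_WRITE)) < 0)
        return ret;

    /* the first read now parses the reply headers before returning data */
    return ffurl_read(h, reply, reply_size);
}
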
@@ -48,6 +48,18 @@ static inline int is_dos_path(const char *path)
     return 0;
 }
 
+#if defined(__OS2__)
+#define SHUT_RD 0
+#define SHUT_WR 1
+#define SHUT_RDWR 2
+#endif
+
+#if defined(_WIN32)
+#define SHUT_RD SD_RECEIVE
+#define SHUT_WR SD_SEND
+#define SHUT_RDWR SD_BOTH
+#endif
+
 #if defined(_WIN32) && !defined(__MINGW32CE__)
 int ff_win32_open(const char *filename, int oflag, int pmode);
 #define open ff_win32_open

@@ -182,6 +182,22 @@ static int tcp_write(URLContext *h, const uint8_t *buf, int size)
     return ret < 0 ? ff_neterrno() : ret;
 }
 
+static int tcp_shutdown(URLContext *h, int flags)
+{
+    TCPContext *s = h->priv_data;
+    int how;
+
+    if (flags & AVIO_FLAG_WRITE && flags & AVIO_FLAG_READ) {
+        how = SHUT_RDWR;
+    } else if (flags & AVIO_FLAG_WRITE) {
+        how = SHUT_WR;
+    } else {
+        how = SHUT_RD;
+    }
+
+    return shutdown(s->fd, how);
+}
+
 static int tcp_close(URLContext *h)
 {
     TCPContext *s = h->priv_data;
@@ -202,6 +218,7 @@ URLProtocol ff_tcp_protocol = {
     .url_write           = tcp_write,
     .url_close           = tcp_close,
     .url_get_file_handle = tcp_get_file_handle,
+    .url_shutdown        = tcp_shutdown,
     .priv_data_size      = sizeof(TCPContext),
     .flags               = URL_PROTOCOL_FLAG_NETWORK,
 };

@@ -81,6 +81,7 @@ typedef struct URLProtocol {
     int64_t (*url_read_seek)(URLContext *h, int stream_index,
                              int64_t timestamp, int flags);
     int (*url_get_file_handle)(URLContext *h);
+    int (*url_shutdown)(URLContext *h, int flags);
     int priv_data_size;
     const AVClass *priv_data_class;
     int flags;
@@ -200,6 +201,18 @@ int64_t ffurl_size(URLContext *h);
  */
 int ffurl_get_file_handle(URLContext *h);
 
+/**
+ * Signal the URLContext that we are done reading or writing the stream.
+ *
+ * @param h pointer to the resource
+ * @param flags flags which control how the resource indicated by url
+ *        is to be shutdown
+ *
+ * @return a negative value if an error condition occurred, 0
+ *         otherwise
+ */
+int ffurl_shutdown(URLContext *h, int flags);
+
 /**
  * Register the URLProtocol protocol.
  *

@@ -22,6 +22,7 @@
 #include "libavutil/pixdesc.h"
 #include "libavutil/samplefmt.h"
 #include "libavfilter/avfilter.h"
+#include "libavfilter/formats.h"
 
 static void print_formats(AVFilterContext *filter_ctx)
 {
@@ -114,7 +115,7 @@ int main(int argc, char **argv)
     if (filter->query_formats)
         filter->query_formats(filter_ctx);
     else
-        avfilter_default_query_formats(filter_ctx);
+        ff_default_query_formats(filter_ctx);
 
     print_formats(filter_ctx);
