Commit 8381ab14 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master: (29 commits)
  ARM: disable ff_vector_fmul_vfp on VFPv3 systems
  ARM: check for VFPv3
  swscale: Remove unused variables in x86 code.
  doc: Drop DJGPP section, Libav now compiles out-of-the-box on FreeDOS.
  x86: Add appropriate ifdefs around certain AVX functions.
  cmdutils: use sws_freeContext() instead of av_freep().
  swscale: delay allocation of formatConvBuffer().
  swscale: fix build with --disable-swscale-alpha.
  movenc: Deprecate the global RTP hinting flag, use a private AVOption instead
  movenc: Add an AVClass for setting muxer specific options
  swscale: fix non-bitexact yuv2yuv[X2]() MMX/MMX2 functions.
  configure: report yasm/nasm presence properly
  tcp: make connect() timeout properly
  rawdec: factor video demuxer definitions into a macro.
  rtspdec: add initial_pause private option.
  lavf: deprecate AVFormatParameters.width/height.
  tty: add video_size private option.
  rawdec: add video_size private option.
  x11grab: add video_size private option.
  x11grab: factorize returning error codes.
  ...
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents b8773e44 79aeade6
@@ -76,7 +76,8 @@ void uninit_opts(void)
     av_freep(&avformat_opts->key);
     av_freep(&avformat_opts);
 #if CONFIG_SWSCALE
-    av_freep(&sws_opts);
+    sws_freeContext(sws_opts);
+    sws_opts = NULL;
 #endif
     for (i = 0; i < opt_name_count; i++) {
         av_freep(&opt_names[i]);
......
@@ -1024,6 +1024,7 @@ ARCH_EXT_LIST='
     ppc4xx
     sse
     ssse3
+    vfpv3
     vis
 '
@@ -1212,6 +1213,7 @@ armv6t2_deps="arm"
 armvfp_deps="arm"
 iwmmxt_deps="arm"
 neon_deps="arm"
+vfpv3_deps="armvfp"
 mmi_deps="mips"
@@ -2659,6 +2661,7 @@ EOF
 enabled armvfp && check_asm armvfp '"fadds s0, s0, s0"'
 enabled iwmmxt && check_asm iwmmxt '"wunpckelub wr6, wr4"'
 enabled neon && check_asm neon '"vadd.i16 q0, q0, q0"'
+enabled vfpv3 && check_asm vfpv3 '"vmov.f32 s0, #1.0"'
 enabled_all armv6t2 shared !pic && enable_pic
@@ -3158,7 +3161,7 @@ fi
 echo "big-endian ${bigendian-no}"
 echo "runtime cpu detection ${runtime_cpudetect-no}"
 if enabled x86; then
-    echo "yasm ${yasm-no}"
+    echo "${yasmexe} ${yasm-no}"
     echo "MMX enabled ${mmx-no}"
     echo "MMX2 enabled ${mmx2-no}"
     echo "3DNow! enabled ${amd3dnow-no}"
......
@@ -762,12 +762,6 @@ performance on systems without hardware floating point support).
 Using a cross-compiler is preferred for various reasons.
-@subsection DJGPP
-FFmpeg cannot be compiled because of broken system headers, add
-@code{--extra-cflags=-U__STRICT_ANSI__} to the configure options as a
-workaround.
 @section OS/2
 For information about compiling FFmpeg on OS/2 see
......
@@ -28,6 +28,7 @@ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
 void ff_dsputil_init_vfp(DSPContext* c, AVCodecContext *avctx)
 {
-    c->vector_fmul = ff_vector_fmul_vfp;
+    if (!HAVE_VFPV3)
+        c->vector_fmul = ff_vector_fmul_vfp;
     c->vector_fmul_reverse = ff_vector_fmul_reverse_vfp;
 }
@@ -300,7 +300,6 @@ IF%1 mova Z(1), m5
 INIT_YMM
 %ifdef HAVE_AVX
 align 16
 fft8_avx:
     mova m0, Z(0)
@@ -536,6 +535,7 @@ DEFINE_ARGS z, w, n, o1, o3
 INIT_YMM
+%ifdef HAVE_AVX
 %macro INTERL_AVX 5
     vunpckhps %3, %2, %1
     vunpcklps %2, %2, %1
@@ -547,7 +547,6 @@ INIT_YMM
 %define INTERL INTERL_AVX
-%ifdef HAVE_AVX
 DECL_PASS pass_avx, PASS_BIG 1
 DECL_PASS pass_interleave_avx, PASS_BIG 0
 %endif
......
@@ -22,6 +22,7 @@
 #include "libavutil/x86_cpu.h"
 #include "libavcodec/dsputil.h"
 #include "fft.h"
+#include "config.h"
 DECLARE_ASM_CONST(16, int, ff_m1m1m1m1)[4] =
     { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
......
@@ -26,6 +26,7 @@
 #include "libavutil/log.h"
 #include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
 #if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H
 # include <dev/bktr/ioctl_meteor.h>
 # include <dev/bktr/ioctl_bt848.h>
@@ -57,6 +58,7 @@ typedef struct {
     int frame_rate_base;
     uint64_t per_frame;
     int standard;
+    char *video_size; /**< String describing video size, set by a private option. */
 } VideoData;
@@ -249,18 +251,31 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     int width, height;
     int frame_rate;
     int frame_rate_base;
+    int ret = 0;
-    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0)
-        return -1;
+    if (ap->time_base.den <= 0) {
+        ret = AVERROR(EINVAL);
+        goto out;
+    }
-    width = ap->width;
-    height = ap->height;
+    if ((ret = av_parse_video_size(&width, &height, s->video_size)) < 0) {
+        av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
+        goto out;
+    }
+#if FF_API_FORMAT_PARAMETERS
+    if (ap->width > 0)
+        width = ap->width;
+    if (ap->height > 0)
+        height = ap->height;
+#endif
     frame_rate = ap->time_base.den;
     frame_rate_base = ap->time_base.num;
     st = av_new_stream(s1, 0);
-    if (!st)
-        return AVERROR(ENOMEM);
+    if (!st) {
+        ret = AVERROR(ENOMEM);
+        goto out;
+    }
     av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */
     s->width = width;
@@ -289,13 +304,17 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
 #endif
     if (bktr_init(s1->filename, width, height, s->standard,
-                  &(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0)
-        return AVERROR(EIO);
+                  &(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0) {
+        ret = AVERROR(EIO);
+        goto out;
+    }
     nsignals = 0;
     last_frame_time = 0;
-    return 0;
+out:
+    av_freep(&s->video_size);
+    return ret;
 }
 static int grab_read_close(AVFormatContext *s1)
@@ -316,6 +335,8 @@ static int grab_read_close(AVFormatContext *s1)
     return 0;
 }
+#define OFFSET(x) offsetof(VideoData, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
 static const AVOption options[] = {
     { "standard", "", offsetof(VideoData, standard), FF_OPT_TYPE_INT, {.dbl = VIDEO_FORMAT}, PAL, NTSCJ, AV_OPT_FLAG_DECODING_PARAM, "standard" },
     { "PAL", "", 0, FF_OPT_TYPE_CONST, {.dbl = PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
@@ -324,6 +345,7 @@ static const AVOption options[] = {
     { "PALN", "", 0, FF_OPT_TYPE_CONST, {.dbl = PALN}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
     { "PALM", "", 0, FF_OPT_TYPE_CONST, {.dbl = PALM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
     { "NTSCJ", "", 0, FF_OPT_TYPE_CONST, {.dbl = NTSCJ}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
     { NULL },
 };
......
@@ -28,6 +28,7 @@
 #include <stdlib.h>
 #include <string.h>
+#include "libavutil/parseutils.h"
 #include <dc1394/dc1394.h>
@@ -40,6 +41,7 @@ typedef struct dc1394_data {
     dc1394video_frame_t *frame;
     int current_frame;
     int fps;
+    char *video_size; /**< String describing video size, set by a private option. */
     AVPacket packet;
 } dc1394_data;
@@ -76,7 +78,10 @@ struct dc1394_frame_rate {
     { 0, 0 } /* gotta be the last one */
 };
+#define OFFSET(x) offsetof(dc1394_data, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
 static const AVOption options[] = {
+    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC },
     { NULL },
 };
@@ -103,6 +108,7 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
     int score, max_score;
     int final_width, final_height, final_pix_fmt, final_frame_rate;
     int res, i, j;
+    int ret=-1;
     /* Now let us prep the hardware. */
     dc1394->d = dc1394_new();
@@ -127,6 +133,14 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
         av_log(c, AV_LOG_ERROR, "Could not get video formats.\n");
         goto out_camera;
     }
+    if (dc1394->video_size) {
+        if ((ret = av_parse_video_size(&ap->width, &ap->height, dc1394->video_size)) < 0) {
+            av_log(c, AV_LOG_ERROR, "Couldn't parse video size.\n");
+            goto out;
+        }
+    }
     /* Choose the best mode. */
     rate = (ap->time_base.num ? av_rescale(1000, ap->time_base.den, ap->time_base.num) : -1);
     max_score = -1;
@@ -290,7 +304,7 @@ out_camera:
     dc1394_camera_free (dc1394->camera);
 out:
     dc1394_free(dc1394->d);
-    return -1;
+    return ret;
 }
 static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
......
@@ -46,6 +46,7 @@
 #include "libavutil/log.h"
 #include "libavutil/opt.h"
 #include "avdevice.h"
+#include "libavutil/parseutils.h"
 static const int desired_video_buffers = 256;
@@ -69,6 +70,7 @@ struct video_data {
     unsigned int *buf_len;
     char *standard;
     int channel;
+    char *video_size; /**< String describing video size, set by a private option. */
 };
 struct buff_data {
@@ -577,23 +579,33 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
 {
     struct video_data *s = s1->priv_data;
     AVStream *st;
-    int res;
+    int res = 0;
     uint32_t desired_format, capabilities;
     enum CodecID codec_id;
     st = av_new_stream(s1, 0);
     if (!st) {
-        return AVERROR(ENOMEM);
+        res = AVERROR(ENOMEM);
+        goto out;
     }
     av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
-    s->width  = ap->width;
-    s->height = ap->height;
+    if (s->video_size && (res = av_parse_video_size(&s->width, &s->height, s->video_size)) < 0) {
+        av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
+        goto out;
+    }
+#if FF_API_FORMAT_PARAMETERS
+    if (ap->width > 0)
+        s->width  = ap->width;
+    if (ap->height > 0)
+        s->height = ap->height;
+#endif
     capabilities = 0;
     s->fd = device_open(s1, &capabilities);
     if (s->fd < 0) {
-        return AVERROR(EIO);
+        res = AVERROR(EIO);
+        goto out;
     }
     av_log(s1, AV_LOG_VERBOSE, "[%d]Capabilities: %x\n", s->fd, capabilities);
@@ -604,7 +616,8 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
     if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
         av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", strerror(errno));
-        return AVERROR(errno);
+        res = AVERROR(errno);
+        goto out;
     }
     s->width  = fmt.fmt.pix.width;
     s->height = fmt.fmt.pix.height;
@@ -617,14 +630,15 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
                "codec_id %d, pix_fmt %d.\n", s1->video_codec_id, ap->pix_fmt);
         close(s->fd);
-        return AVERROR(EIO);
+        res = AVERROR(EIO);
+        goto out;
     }
-    if (av_image_check_size(s->width, s->height, 0, s1) < 0)
-        return AVERROR(EINVAL);
+    if ((res = av_image_check_size(s->width, s->height, 0, s1) < 0))
+        goto out;
     s->frame_format = desired_format;
-    if (v4l2_set_parameters(s1, ap) < 0)
-        return AVERROR(EIO);
+    if ((res = v4l2_set_parameters(s1, ap) < 0))
+        goto out;
     st->codec->pix_fmt = fmt_v4l2ff(desired_format, codec_id);
     s->frame_size = avpicture_get_size(st->codec->pix_fmt, s->width, s->height);
@@ -641,7 +655,8 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     if (res < 0) {
         close(s->fd);
-        return AVERROR(EIO);
+        res = AVERROR(EIO);
+        goto out;
     }
     s->top_field_first = first_field(s->fd);
@@ -653,7 +668,9 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     st->codec->time_base.num = ap->time_base.num;
     st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;
-    return 0;
+out:
+    av_freep(&s->video_size);
+    return res;
 }
 static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt)
@@ -696,9 +713,12 @@ static int v4l2_read_close(AVFormatContext *s1)
     return 0;
 }
+#define OFFSET(x) offsetof(struct video_data, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
 static const AVOption options[] = {
     { "standard", "", offsetof(struct video_data, standard), FF_OPT_TYPE_STRING, {.str = "NTSC" }, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
     { "channel", "", offsetof(struct video_data, channel), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
+    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
     { NULL },
 };
......
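Usage sketch (illustrative only, not part of the merged diff): the bktr, libdc1394 and video4linux2 demuxers above now read the frame size from a private "video_size" AVOption parsed with av_parse_video_size(), instead of the deprecated AVFormatParameters width/height fields. Assuming the ffmpeg command line forwards it to the demuxer like other private options, an invocation could look like:

    ffmpeg -f video4linux2 -video_size 640x480 -i /dev/video0 output.mpg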
@@ -19,6 +19,9 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
 #include <windows.h>
 #include <vfw.h>
 #include "avdevice.h"
@@ -32,12 +35,14 @@
 /* End of missing MinGW defines */
 struct vfw_ctx {
+    const AVClass *class;
     HWND hwnd;
     HANDLE mutex;
     HANDLE event;
     AVPacketList *pktl;
     unsigned int curbufsize;
     unsigned int frame_num;
+    char *video_size; /**< A string describing video size, set by a private option. */
 };
 static enum PixelFormat vfw_pixfmt(DWORD biCompression, WORD biBitCount)
@@ -228,6 +233,8 @@ static int vfw_read_close(AVFormatContext *s)
         pktl = next;
     }
+    av_freep(&ctx->video_size);
     return 0;
 }
@@ -242,8 +249,6 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
     CAPTUREPARMS cparms;
     DWORD biCompression;
     WORD biBitCount;
-    int width;
-    int height;
     int ret;
     if (!strcmp(s->filename, "list")) {
@@ -316,10 +321,20 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
     dump_bih(s, &bi->bmiHeader);
-    width  = ap->width  ? ap->width  : bi->bmiHeader.biWidth ;
-    height = ap->height ? ap->height : bi->bmiHeader.biHeight;
-    bi->bmiHeader.biWidth  = width ;
-    bi->bmiHeader.biHeight = height;
+    if (ctx->video_size) {
+        ret = av_parse_video_size(&bi->bmiHeader.biWidth, &bi->bmiHeader.biHeight, ctx->video_size);
+        if (ret < 0) {
+            av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
+            goto fail_bi;
+        }
+    }
+#if FF_API_FORMAT_PARAMETERS
+    if (ap->width > 0)
+        bi->bmiHeader.biWidth = ap->width;
+    if (ap->height > 0)
+        bi->bmiHeader.biHeight = ap->height;
+#endif
     if (0) {
         /* For testing yet unsupported compressions
@@ -368,8 +383,8 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
     codec = st->codec;
     codec->time_base = ap->time_base;
     codec->codec_type = AVMEDIA_TYPE_VIDEO;
-    codec->width  = width;
-    codec->height = height;
+    codec->width  = bi->bmiHeader.biWidth;
+    codec->height = bi->bmiHeader.biHeight;
     codec->pix_fmt = vfw_pixfmt(biCompression, biBitCount);
     if(codec->pix_fmt == PIX_FMT_NONE) {
         codec->codec_id = vfw_codecid(biCompression);
@@ -450,6 +465,20 @@ static int vfw_read_packet(AVFormatContext *s, AVPacket *pkt)
     return pkt->size;
 }
+#define OFFSET(x) offsetof(struct vfw_ctx, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+    { NULL },
+};
+static const AVClass vfw_class = {
+    .class_name = "VFW indev",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
 AVInputFormat ff_vfwcap_demuxer = {
     "vfwcap",
     NULL_IF_CONFIG_SMALL("VFW video capture"),
@@ -459,4 +488,5 @@ AVInputFormat ff_vfwcap_demuxer = {
     vfw_read_packet,
     vfw_read_close,
     .flags = AVFMT_NOFILE,
+    .priv_class = &vfw_class,
 };
@@ -36,6 +36,9 @@
  */
 #include "config.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
 #include <time.h>
 #include <X11/X.h>
 #include <X11/Xlib.h>
@@ -52,10 +55,12 @@
  */
 struct x11_grab
 {
+    const AVClass *class;    /**< Class for private options. */
     int frame_size;          /**< Size in bytes of a grabbed frame */
    AVRational time_base;    /**< Time base */
     int64_t time_frame;      /**< Current time */
+    char *video_size;        /**< String describing video size, set by a private option. */
     int height;              /**< Height of the grab frame */
     int width;               /**< Width of the grab frame */
     int x_off;               /**< Horizontal top-left corner coordinate */
@@ -91,6 +96,7 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     int y_off = 0;
     int use_shm;
     char *dpyname, *offset;
+    int ret = 0;
     dpyname = av_strdup(s1->filename);
     offset = strchr(dpyname, '+');
@@ -100,23 +106,37 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         *offset= 0;
     }
-    av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n", s1->filename, dpyname, x_off, y_off, ap->width, ap->height);
+    if ((ret = av_parse_video_size(&x11grab->width, &x11grab->height, x11grab->video_size)) < 0) {
+        av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
+        goto out;
+    }
+#if FF_API_FORMAT_PARAMETERS
+    if (ap->width > 0)
+        x11grab->width = ap->width;
+    if (ap->height > 0)
+        x11grab->height = ap->height;
+#endif
+    av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
+           s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height);
     dpy = XOpenDisplay(dpyname);
     av_freep(&dpyname);
     if(!dpy) {
         av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
-        return AVERROR(EIO);
+        ret = AVERROR(EIO);
+        goto out;
     }
-    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
+    if (ap->time_base.den <= 0) {
         av_log(s1, AV_LOG_ERROR, "AVParameters don't have video size and/or rate. Use -s and -r.\n");
-        return AVERROR(EIO);
+        ret = AVERROR(EINVAL);
+        goto out;
     }
     st = av_new_stream(s1, 0);
     if (!st) {
-        return AVERROR(ENOMEM);
+        ret = AVERROR(ENOMEM);
+        goto out;
     }
     av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
@@ -131,13 +151,14 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
                             ZPixmap,
                             NULL,
                             &x11grab->shminfo,
-                            ap->width, ap->height);
+                            x11grab->width, x11grab->height);
         x11grab->shminfo.shmid = shmget(IPC_PRIVATE,
                                         image->bytes_per_line * image->height,
                                         IPC_CREAT|0777);
         if (x11grab->shminfo.shmid == -1) {
             av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n");
-            return AVERROR(ENOMEM);
+            ret = AVERROR(ENOMEM);
+            goto out;
         }
         x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0);
         x11grab->shminfo.readOnly = False;
@@ -145,12 +166,13 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         if (!XShmAttach(dpy, &x11grab->shminfo)) {
             av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n");
             /* needs some better error subroutine :) */
-            return AVERROR(EIO);
+            ret = AVERROR(EIO);
+            goto out;
         }
     } else {
         image = XGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)),
                           x_off,y_off,
-                          ap->width,ap->height,
+                          x11grab->width, x11grab->height,
                           AllPlanes, ZPixmap);
     }
@@ -173,7 +195,8 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         } else {
             av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
             av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
-            return AVERROR(EIO);
+            ret = AVERROR(EIO);
+            goto out;
         }
         break;
     case 24:
@@ -188,7 +211,8 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         } else {
            av_log(s1, AV_LOG_ERROR,"rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
            av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
-            return AVERROR(EIO);
+            ret = AVERROR(EIO);
+            goto out;
         }
         break;
     case 32:
@@ -211,13 +235,12 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         break;
     default:
         av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel);
-        return -1;
+        ret = AVERROR(EINVAL);
+        goto out;
     }
-    x11grab->frame_size = ap->width * ap->height * image->bits_per_pixel/8;
+    x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8;
     x11grab->dpy = dpy;
-    x11grab->width = ap->width;
-    x11grab->height = ap->height;
     x11grab->time_base = ap->time_base;
     x11grab->time_frame = av_gettime() / av_q2d(ap->time_base);
     x11grab->x_off = x_off;
@@ -227,13 +250,15 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
     st->codec->codec_id = CODEC_ID_RAWVIDEO;
-    st->codec->width  = ap->width;
-    st->codec->height = ap->height;
+    st->codec->width  = x11grab->width;
+    st->codec->height = x11grab->height;
     st->codec->pix_fmt = input_pixfmt;
     st->codec->time_base = ap->time_base;
     st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(ap->time_base) * 8;
-    return 0;
+out:
+    av_freep(&x11grab->video_size);
+    return ret;
 }
 /**
@@ -436,6 +461,20 @@ x11grab_read_close(AVFormatContext *s1)
     return 0;
 }
+#define OFFSET(x) offsetof(struct x11_grab, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
+    { NULL },
+};
+static const AVClass x11_class = {
+    .class_name = "X11grab indev",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
 /** x11 grabber device demuxer declaration */
 AVInputFormat ff_x11_grab_device_demuxer =
 {
@@ -447,4 +486,5 @@ AVInputFormat ff_x11_grab_device_demuxer =
     x11grab_read_packet,
     x11grab_read_close,
     .flags = AVFMT_NOFILE,
+    .priv_class = &x11_class,
 };
@@ -231,9 +231,9 @@ typedef struct AVFormatParameters {
 #if FF_API_FORMAT_PARAMETERS
     attribute_deprecated int sample_rate;
     attribute_deprecated int channels;
+    attribute_deprecated int width;
+    attribute_deprecated int height;
 #endif
-    int width;
-    int height;
     enum PixelFormat pix_fmt;
 #if FF_API_FORMAT_PARAMETERS
     attribute_deprecated int channel; /**< Used to select DV channel. */
@@ -241,9 +241,9 @@ typedef struct AVFormatParameters {
     attribute_deprecated unsigned int mpeg2ts_raw:1;  /**< deprecated, use mpegtsraw demuxer */
                                                       /**< deprecated, use mpegtsraw demuxer-specific options instead */
     attribute_deprecated unsigned int mpeg2ts_compute_pcr:1;
+    attribute_deprecated unsigned int initial_pause:1;       /**< Do not begin to play the stream
+                                                                   immediately (RTSP only). */
 #endif
-    unsigned int initial_pause:1;       /**< Do not begin to play the stream
-                                             immediately (RTSP only). */
     unsigned int prealloced_context:1;
 } AVFormatParameters;
@@ -733,7 +733,9 @@ typedef struct AVFormatContext {
 #define AVFMT_FLAG_IGNDTS       0x0008 ///< Ignore DTS on frames that contain both DTS & PTS
 #define AVFMT_FLAG_NOFILLIN     0x0010 ///< Do not infer any values from other values, just return what is stored in the container
 #define AVFMT_FLAG_NOPARSE      0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled
-#define AVFMT_FLAG_RTP_HINT     0x0040 ///< Add RTP hinting to the output file
+#if FF_API_FLAG_RTP_HINT
+#define AVFMT_FLAG_RTP_HINT     0x0040 ///< Deprecated, use the -movflags rtphint muxer specific AVOption instead
+#endif
 #define AVFMT_FLAG_MP4A_LATM    0x0080 ///< Enable RTP MP4A-LATM payload
 #define AVFMT_FLAG_SORT_DTS    0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)
 #define AVFMT_FLAG_PRIV_OPT    0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)
......
@@ -65,13 +65,4 @@ static int cavsvideo_probe(AVProbeData *p)
     return 0;
 }
-AVInputFormat ff_cavsvideo_demuxer = {
-    "cavsvideo",
-    NULL_IF_CONFIG_SMALL("raw Chinese AVS video"),
-    0,
-    cavsvideo_probe,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-    .value = CODEC_ID_CAVS,
-};
+FF_DEF_RAWVIDEO_DEMUXER(cavsvideo, "raw Chinese AVS video", cavsvideo_probe, NULL, CODEC_ID_CAVS)
@@ -31,13 +31,4 @@ static int dirac_probe(AVProbeData *p)
     return 0;
 }
-AVInputFormat ff_dirac_demuxer = {
-    "dirac",
-    NULL_IF_CONFIG_SMALL("raw Dirac"),
-    0,
-    dirac_probe,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-    .value = CODEC_ID_DIRAC,
-};
+FF_DEF_RAWVIDEO_DEMUXER(dirac, "raw Dirac", dirac_probe, NULL, CODEC_ID_DIRAC)
@@ -42,13 +42,4 @@ static int dnxhd_probe(AVProbeData *p)
     return AVPROBE_SCORE_MAX;
 }
-AVInputFormat ff_dnxhd_demuxer = {
-    "dnxhd",
-    NULL_IF_CONFIG_SMALL("raw DNxHD (SMPTE VC-3)"),
-    0,
-    dnxhd_probe,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-    .value = CODEC_ID_DNXHD,
-};
+FF_DEF_RAWVIDEO_DEMUXER(dnxhd, "raw DNxHD (SMPTE VC-3)", dnxhd_probe, NULL, CODEC_ID_DNXHD)
@@ -62,14 +62,4 @@ static int h261_probe(AVProbeData *p)
     return 0;
 }
-AVInputFormat ff_h261_demuxer = {
-    "h261",
-    NULL_IF_CONFIG_SMALL("raw H.261"),
-    0,
-    h261_probe,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-    .extensions = "h261",
-    .value = CODEC_ID_H261,
-};
+FF_DEF_RAWVIDEO_DEMUXER(h261, "raw H.261", h261_probe, "h261", CODEC_ID_H261)
@@ -64,14 +64,4 @@ static int h263_probe(AVProbeData *p)
     return 0;
 }
-AVInputFormat ff_h263_demuxer = {
-    "h263",
-    NULL_IF_CONFIG_SMALL("raw H.263"),
-    0,
-    h263_probe,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-//    .extensions = "h263", //FIXME remove after writing mpeg4_probe
-    .value = CODEC_ID_H263,
-};
+FF_DEF_RAWVIDEO_DEMUXER(h263, "raw H.263", h263_probe, NULL, CODEC_ID_H263)
@@ -67,14 +67,4 @@ static int h264_probe(AVProbeData *p)
     return 0;
 }
-AVInputFormat ff_h264_demuxer = {
-    "h264",
-    NULL_IF_CONFIG_SMALL("raw H.264 video format"),
-    0,
-    h264_probe,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-    .extensions = "h26l,h264,264", //FIXME remove after writing mpeg4_probe
-    .value = CODEC_ID_H264,
-};
+FF_DEF_RAWVIDEO_DEMUXER(h264 , "raw H.264 video format", h264_probe, "h26l,h264,264", CODEC_ID_H264)
@@ -228,7 +228,7 @@ static void ff_id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t
         avio_skip(s->pb, get_size(s->pb, 4));
     while (len >= taghdrlen) {
-        unsigned int tflags;
+        unsigned int tflags = 0;
         int tunsync = 0;
         if (isv34) {
@@ -245,7 +245,7 @@ static void ff_id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t
             tag[3] = 0;
             tlen = avio_rb24(s->pb);
         }
-        if (tlen > (1<<28))
+        if (tlen > (1<<28) || !tlen)
             break;
         len -= taghdrlen + tlen;
@@ -268,6 +268,10 @@ static void ff_id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t
             if (unsync || tunsync) {
                 int i, j;
                 av_fast_malloc(&buffer, &buffer_size, tlen);
+                if (!buffer) {
+                    av_log(s, AV_LOG_ERROR, "Failed to alloc %d bytes\n", tlen);
+                    goto seek;
+                }
                 for (i = 0, j = 0; i < tlen; i++, j++) {
                     buffer[j] = avio_r8(s->pb);
                     if (j > 0 && !buffer[j] && buffer[j - 1] == 0xff) {
@@ -288,6 +292,7 @@ static void ff_id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t
             break;
         }
         /* Skip to end of tag */
+seek:
         avio_seek(s->pb, next, SEEK_SET);
     }
......
@@ -49,14 +49,4 @@ static int mpeg4video_probe(AVProbeData *probe_packet)
     return 0;
 }
-AVInputFormat ff_m4v_demuxer = {
-    "m4v",
-    NULL_IF_CONFIG_SMALL("raw MPEG-4 video format"),
-    0,
-    mpeg4video_probe, /** probing for MPEG-4 data */
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-    .extensions = "m4v",
-    .value = CODEC_ID_MPEG4,
-};
+FF_DEF_RAWVIDEO_DEMUXER(m4v, "raw MPEG-4 video format", mpeg4video_probe, "m4v", CODEC_ID_MPEG4)
@@ -32,10 +32,24 @@
 #include "libavcodec/put_bits.h"
 #include "internal.h"
 #include "libavutil/avstring.h"
+#include "libavutil/opt.h"
 #undef NDEBUG
 #include <assert.h>
+static const AVOption options[] = {
+    { "movflags", "MOV muxer flags", offsetof(MOVMuxContext, flags), FF_OPT_TYPE_FLAGS, {.dbl = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
+    { "rtphint", "Add RTP hint tracks", 0, FF_OPT_TYPE_CONST, {.dbl = FF_MOV_FLAG_RTP_HINT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
+    { NULL },
+};
+static const AVClass mov_muxer_class = {
+    .class_name = "MOV/3GP/MP4/3G2 muxer",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
 //FIXME support 64 bit variant with wide placeholders
 static int64_t updateSize(AVIOContext *pb, int64_t pos)
 {
@@ -2125,7 +2139,15 @@ static int mov_write_header(AVFormatContext *s)
     if (mov->mode & (MODE_MOV|MODE_IPOD) && s->nb_chapters)
         mov->chapter_track = mov->nb_streams++;
+#if FF_API_FLAG_RTP_HINT
     if (s->flags & AVFMT_FLAG_RTP_HINT) {
+        av_log(s, AV_LOG_WARNING, "The RTP_HINT flag is deprecated, enable it "
+               "via the -movflags rtphint muxer option "
+               "instead.\n");
+        mov->flags |= FF_MOV_FLAG_RTP_HINT;
+    }
+#endif
+    if (mov->flags & FF_MOV_FLAG_RTP_HINT) {
         /* Add hint tracks for each audio and video stream */
         hint_track = mov->nb_streams;
         for (i = 0; i < s->nb_streams; i++) {
@@ -2221,7 +2243,7 @@ static int mov_write_header(AVFormatContext *s)
     if (mov->chapter_track)
         mov_create_chapter_track(s, mov->chapter_track);
-    if (s->flags & AVFMT_FLAG_RTP_HINT) {
+    if (mov->flags & FF_MOV_FLAG_RTP_HINT) {
         /* Initialize the hint tracks for each audio and video stream */
         for (i = 0; i < s->nb_streams; i++) {
             AVStream *st = s->streams[i];
@@ -2298,6 +2320,7 @@ AVOutputFormat ff_mov_muxer = {
     mov_write_trailer,
     .flags = AVFMT_GLOBALHEADER,
     .codec_tag = (const AVCodecTag* const []){codec_movvideo_tags, codec_movaudio_tags, 0},
+    .priv_class = &mov_muxer_class,
 };
 #endif
 #if CONFIG_TGP_MUXER
@@ -2314,6 +2337,7 @@ AVOutputFormat ff_tgp_muxer = {
     mov_write_trailer,
     .flags = AVFMT_GLOBALHEADER,
     .codec_tag = (const AVCodecTag* const []){codec_3gp_tags, 0},
+    .priv_class = &mov_muxer_class,
 };
 #endif
 #if CONFIG_MP4_MUXER
@@ -2330,6 +2354,7 @@ AVOutputFormat ff_mp4_muxer = {
     mov_write_trailer,
     .flags = AVFMT_GLOBALHEADER,
     .codec_tag = (const AVCodecTag* const []){ff_mp4_obj_type, 0},
+    .priv_class = &mov_muxer_class,
 };
 #endif
 #if CONFIG_PSP_MUXER
@@ -2346,6 +2371,7 @@ AVOutputFormat ff_psp_muxer = {
     mov_write_trailer,
     .flags = AVFMT_GLOBALHEADER,
     .codec_tag = (const AVCodecTag* const []){ff_mp4_obj_type, 0},
+    .priv_class = &mov_muxer_class,
 };
 #endif
 #if CONFIG_TG2_MUXER
@@ -2362,6 +2388,7 @@ AVOutputFormat ff_tg2_muxer = {
     mov_write_trailer,
     .flags = AVFMT_GLOBALHEADER,
     .codec_tag = (const AVCodecTag* const []){codec_3gp_tags, 0},
+    .priv_class = &mov_muxer_class,
 };
 #endif
 #if CONFIG_IPOD_MUXER
@@ -2378,5 +2405,6 @@ AVOutputFormat ff_ipod_muxer = {
     mov_write_trailer,
     .flags = AVFMT_GLOBALHEADER,
     .codec_tag = (const AVCodecTag* const []){codec_ipod_tags, 0},
+    .priv_class = &mov_muxer_class,
 };
 #endif
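Usage sketch (illustrative only): with the movflags private option registered above, RTP hinting is requested per muxer instead of through the deprecated AVFMT_FLAG_RTP_HINT format flag; the deprecation warning in mov_write_header() spells out the new form. Assuming the usual CLI-to-AVOption mapping, this replaces the old -fflags +rtphint:

    ffmpeg -i input.avi -movflags rtphint output.mov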
@@ -101,6 +101,7 @@ typedef struct MOVIndex {
 } MOVTrack;
 typedef struct MOVMuxContext {
+    const AVClass *av_class;
     int mode;
     int64_t time;
     int nb_streams;
@@ -108,8 +109,12 @@ typedef struct MOVMuxContext {
     int64_t mdat_pos;
     uint64_t mdat_size;
     MOVTrack *tracks;
+    int flags;
 } MOVMuxContext;
+#define FF_MOV_FLAG_RTP_HINT 1
 int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt);
 int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index);
......
@@ -55,13 +55,4 @@ static int mpegvideo_probe(AVProbeData *p)
     return 0;
 }
-AVInputFormat ff_mpegvideo_demuxer = {
-    "mpegvideo",
-    NULL_IF_CONFIG_SMALL("raw MPEG video"),
-    0,
-    mpegvideo_probe,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-    .value = CODEC_ID_MPEG1VIDEO,
-};
+FF_DEF_RAWVIDEO_DEMUXER(mpegvideo, "raw MPEG video", mpegvideo_probe, NULL, CODEC_ID_MPEG1VIDEO)
@@ -49,7 +49,9 @@ static const AVOption options[]={
 {"nofillin", "do not fill in missing values that can be exactly calculated", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_NOFILLIN }, INT_MIN, INT_MAX, D, "fflags"},
 {"noparse", "disable AVParsers, this needs nofillin too", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_NOPARSE }, INT_MIN, INT_MAX, D, "fflags"},
 {"igndts", "ignore dts", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_IGNDTS }, INT_MIN, INT_MAX, D, "fflags"},
-{"rtphint", "add rtp hinting", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_RTP_HINT }, INT_MIN, INT_MAX, E, "fflags"},
+#if FF_API_FLAG_RTP_HINT
+{"rtphint", "add rtp hinting (deprecated, use the -movflags rtphint option instead)", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_RTP_HINT }, INT_MIN, INT_MAX, E, "fflags"},
+#endif
 {"sortdts", "try to interleave outputted packets by dts", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_SORT_DTS }, INT_MIN, INT_MAX, D, "fflags"},
 {"keepside", "dont merge side data", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_KEEP_SIDE_DATA }, INT_MIN, INT_MAX, D, "fflags"},
 {"latm", "enable RTP MP4A-LATM payload", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_MP4A_LATM }, INT_MIN, INT_MAX, E, "fflags"},
......
@@ -24,6 +24,7 @@
 #include "avio_internal.h"
 #include "rawdec.h"
 #include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
 /* raw input */
 int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
@@ -66,17 +67,34 @@ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
         av_set_pts_info(st, 64, 1, st->codec->sample_rate);
         break;
         }
-    case AVMEDIA_TYPE_VIDEO:
+    case AVMEDIA_TYPE_VIDEO: {
+        FFRawVideoDemuxerContext *s1 = s->priv_data;
+        int width = 0, height = 0, ret;
         if(ap->time_base.num)
             av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
         else
            av_set_pts_info(st, 64, 1, 25);
-        st->codec->width = ap->width;
-        st->codec->height = ap->height;
+        if (s1->video_size) {
+            ret = av_parse_video_size(&width, &height, s1->video_size);
+            av_freep(&s1->video_size);
+            if (ret < 0) {
+                av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
+                return ret;
+            }
+        }
+#if FF_API_FORMAT_PARAMETERS
+        if (ap->width > 0)
+            width = ap->width;
+        if (ap->height > 0)
+            height = ap->height;
+#endif
+        st->codec->width  = width;
+        st->codec->height = height;
         st->codec->pix_fmt = ap->pix_fmt;
         if(st->codec->pix_fmt == PIX_FMT_NONE)
            st->codec->pix_fmt= PIX_FMT_YUV420P;
        break;
+        }
     default:
         return -1;
     }
@@ -166,6 +184,22 @@ const AVClass ff_rawaudio_demuxer_class = {
     .version = LIBAVUTIL_VERSION_INT,
 };
+#define OFFSET(x) offsetof(FFRawVideoDemuxerContext, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+static const AVOption video_options[] = {
+    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+    { NULL },
+};
+#undef OFFSET
+#undef DEC
+const AVClass ff_rawvideo_demuxer_class = {
+    .class_name = "rawvideo demuxer",
+    .item_name  = av_default_item_name,
+    .option     = video_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
 #if CONFIG_G722_DEMUXER
 AVInputFormat ff_g722_demuxer = {
     "g722",
@@ -196,17 +230,7 @@ AVInputFormat ff_gsm_demuxer = {
 #endif
 #if CONFIG_MJPEG_DEMUXER
-AVInputFormat ff_mjpeg_demuxer = {
-    "mjpeg",
-    NULL_IF_CONFIG_SMALL("raw MJPEG video"),
-    0,
-    NULL,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .flags= AVFMT_GENERIC_INDEX,
-    .extensions = "mjpg,mjpeg",
-    .value = CODEC_ID_MJPEG,
-};
+FF_DEF_RAWVIDEO_DEMUXER(mjpeg, "raw MJPEG video", NULL, "mjpg,mjpeg", CODEC_ID_MJPEG)
 #endif
 #if CONFIG_MLP_DEMUXER
@@ -252,14 +276,5 @@ AVInputFormat ff_shorten_demuxer = {
 #endif
 #if CONFIG_VC1_DEMUXER
-AVInputFormat ff_vc1_demuxer = {
-    "vc1",
-    NULL_IF_CONFIG_SMALL("raw VC-1"),
-    0,
-    NULL /* vc1_probe */,
-    ff_raw_video_read_header,
-    ff_raw_read_partial_packet,
-    .extensions = "vc1",
-    .value = CODEC_ID_VC1,
-};
+FF_DEF_RAWVIDEO_DEMUXER(vc1, "raw VC-1", NULL, "vc1", CODEC_ID_VC1)
 #endif
@@ -31,7 +31,13 @@ typedef struct RawAudioDemuxerContext {
     int channels;
 } RawAudioDemuxerContext;
+typedef struct FFRawVideoDemuxerContext {
+    const AVClass *class;     /**< Class for private options. */
+    char *video_size;         /**< String describing video size, set by a private option. */
+} FFRawVideoDemuxerContext;
 extern const AVClass ff_rawaudio_demuxer_class;
+extern const AVClass ff_rawvideo_demuxer_class;
 int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap);
@@ -41,4 +47,16 @@ int ff_raw_audio_read_header(AVFormatContext *s, AVFormatParameters *ap);
 int ff_raw_video_read_header(AVFormatContext *s, AVFormatParameters *ap);
+#define FF_DEF_RAWVIDEO_DEMUXER(shortname, longname, probe, ext, id)\
+AVInputFormat ff_ ## shortname ## _demuxer = {\
+    .name = #shortname,\
+    .long_name = NULL_IF_CONFIG_SMALL(longname),\
+    .read_probe = probe,\
+    .read_header = ff_raw_video_read_header,\
+    .read_packet = ff_raw_read_partial_packet,\
+    .extensions = ext,\
+    .flags = AVFMT_GENERIC_INDEX,\
+    .value = id,\
+};
 #endif /* AVFORMAT_RAWDEC_H */
@@ -47,11 +47,12 @@ static int rawvideo_read_packet(AVFormatContext *s, AVPacket *pkt)
 AVInputFormat ff_rawvideo_demuxer = {
     "rawvideo",
     NULL_IF_CONFIG_SMALL("raw video format"),
-    0,
+    sizeof(FFRawVideoDemuxerContext),
     NULL,
     ff_raw_read_header,
     rawvideo_read_packet,
     .flags= AVFMT_GENERIC_INDEX,
     .extensions = "yuv,cif,qcif,rgb",
     .value = CODEC_ID_RAWVIDEO,
+    .priv_class = &ff_rawvideo_demuxer_class,
 };
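Usage sketch (illustrative only): the rawvideo demuxer now carries a private context, so its frame size can be supplied through the video_size option defined in rawdec.c above; the pixel format and frame rate still come from the old AVFormatParameters path at this point in the series. Assuming the CLI forwards the option as usual:

    ffmpeg -f rawvideo -video_size 320x240 -i input.yuv output.mpg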
@@ -28,6 +28,8 @@
 #include "network.h"
 #include "httpauth.h"
+#include "libavutil/log.h"
 /**
  * Network layer over which RTP/etc packet data will be transported.
  */
@@ -196,6 +198,7 @@ enum RTSPServerType {
  * @todo Use AVIOContext instead of URLContext
  */
 typedef struct RTSPState {
+    const AVClass *class;             /**< Class for private options. */
     URLContext *rtsp_hd; /* RTSP TCP connection handle */
     /** number of items in the 'rtsp_streams' variable */
@@ -336,6 +339,11 @@ typedef struct RTSPState {
      * Whether the server supports the GET_PARAMETER method.
      */
     int get_parameter_supported;
+    /**
+     * Do not begin to play the stream immediately.
+     */
+    int initial_pause;
 } RTSPState;
 /**
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "avformat.h" #include "avformat.h"
#include "internal.h" #include "internal.h"
@@ -165,7 +166,12 @@ static int rtsp_read_header(AVFormatContext *s,
        return AVERROR(ENOMEM);
    rt->real_setup = rt->real_setup_cache + s->nb_streams;
-   if (ap->initial_pause) {
#if FF_API_FORMAT_PARAMETERS
    if (ap->initial_pause)
        rt->initial_pause = ap->initial_pause;
#endif
    if (rt->initial_pause) {
        /* do not start immediately */
    } else {
        if (rtsp_read_play(s) < 0) {
@@ -399,6 +405,18 @@ static int rtsp_read_close(AVFormatContext *s)
    return 0;
}
static const AVOption options[] = {
{ "initial_pause", "Don't start playing the stream immediately", offsetof(RTSPState, initial_pause), FF_OPT_TYPE_INT, {.dbl = 0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
const AVClass rtsp_demuxer_class = {
.class_name = "RTSP demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_rtsp_demuxer = {
    "rtsp",
    NULL_IF_CONFIG_SMALL("RTSP input format"),
@@ -411,4 +429,5 @@ AVInputFormat ff_rtsp_demuxer = {
    .flags = AVFMT_NOFILE,
    .read_play = rtsp_read_play,
    .read_pause = rtsp_read_pause,
.priv_class = &rtsp_demuxer_class,
};
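A brief usage sketch (not part of the patch): when initial_pause is enabled through the private-option handling added in this series, rtsp_read_header() skips the PLAY request, and the caller starts playback later through the public API:

/* "s" is an AVFormatContext already opened with the RTSP demuxer and with
 * the initial_pause private option enabled (mechanism not shown here). */
if (av_read_play(s) < 0)
    av_log(s, AV_LOG_ERROR, "Could not start RTSP playback\n");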
@@ -45,6 +45,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
    char buf[256];
    int ret;
    socklen_t optlen;
    int timeout = 100;
    char hostname[1024],proto[1024],path[1024];
    char portstr[10];
@@ -57,6 +58,9 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
    if (p) {
        if (av_find_info_tag(buf, sizeof(buf), "listen", p))
            listen_socket = 1;
        if (av_find_info_tag(buf, sizeof(buf), "timeout", p)) {
            timeout = strtol(buf, NULL, 10);
        }
    }
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
@@ -73,6 +77,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
    cur_ai = ai;
 restart:
    ret = AVERROR(EIO);
    fd = socket(cur_ai->ai_family, cur_ai->ai_socktype, cur_ai->ai_protocol);
    if (fd < 0)
        goto fail;
@@ -84,29 +89,30 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
        fd1 = accept(fd, NULL, NULL);
        closesocket(fd);
        fd = fd1;
        ff_socket_nonblock(fd, 1);
    } else {
 redo:
        ff_socket_nonblock(fd, 1);
        ret = connect(fd, cur_ai->ai_addr, cur_ai->ai_addrlen);
    }
-   ff_socket_nonblock(fd, 1);
    if (ret < 0) {
-       int timeout=50;
        struct pollfd p = {fd, POLLOUT, 0};
-       if (ff_neterrno() == AVERROR(EINTR)) {
        ret = ff_neterrno();
        if (ret == AVERROR(EINTR)) {
            if (url_interrupt_cb()) {
                ret = AVERROR_EXIT;
                goto fail1;
            }
            goto redo;
        }
-       if (ff_neterrno() != AVERROR(EINPROGRESS) &&
-           ff_neterrno() != AVERROR(EAGAIN))
        if (ret != AVERROR(EINPROGRESS) &&
            ret != AVERROR(EAGAIN))
            goto fail;
        /* wait until we are connected or until abort */
-       for(;;) {
        while(timeout--) {
            if (url_interrupt_cb()) {
                ret = AVERROR_EXIT;
                goto fail1;
@@ -121,7 +127,10 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
                goto fail;
            }
        }
        if (ret <= 0) {
            ret = AVERROR(ETIMEDOUT);
            goto fail;
        }
        /* test error */
        optlen = sizeof(ret);
        getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen);
@@ -129,6 +138,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
            av_log(h, AV_LOG_ERROR,
                   "TCP connection to %s:%d failed: %s\n",
                   hostname, port, strerror(ret));
            ret = AVERROR(ret);
            goto fail;
        }
    }
@@ -151,7 +161,6 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
        closesocket(fd);
        goto restart;
    }
-   ret = AVERROR(EIO);
 fail1:
    if (fd >= 0)
        closesocket(fd);
...
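With this change the connect phase can be bounded from the URL itself via a timeout query parameter; the value feeds the poll/retry loop in tcp_open() above, so it is a retry count rather than a number of seconds. A minimal sketch (host, port and value are placeholders; availability of avio_open()/AVIO_FLAG_READ depends on the libavformat revision):

AVIOContext *io = NULL;
if (avio_open(&io, "tcp://example.com:1234?timeout=50", AVIO_FLAG_READ) < 0)
    av_log(NULL, AV_LOG_ERROR, "connect failed or timed out\n");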
@@ -28,6 +28,7 @@
#include "libavutil/avstring.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "sauce.h"
@@ -35,6 +36,7 @@ typedef struct {
    AVClass *class;
    int chars_per_frame;
    uint64_t fsize; /**< file size less metadata buffer */
    char *video_size;/**< A string describing video size, set by a private option. */
} TtyDemuxContext;
/**
@@ -71,14 +73,30 @@ static int read_header(AVFormatContext *avctx,
                       AVFormatParameters *ap)
{
    TtyDemuxContext *s = avctx->priv_data;
    int width = 0, height = 0, ret;
    AVStream *st = av_new_stream(avctx, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_tag = 0;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_ANSI;
-   if (ap->width) st->codec->width = ap->width;
-   if (ap->height) st->codec->height = ap->height;
    if (s->video_size) {
        ret = av_parse_video_size(&width, &height, s->video_size);
        av_freep(&s->video_size);
        if (ret < 0) {
            av_log (avctx, AV_LOG_ERROR, "Couldn't parse video size.\n");
            return ret;
        }
    }
#if FF_API_FORMAT_PARAMETERS
    if (ap->width > 0)
        width = ap->width;
    if (ap->height > 0)
        height = ap->height;
#endif
    st->codec->width = width;
    st->codec->height = height;
    if (!ap->time_base.num) {
        av_set_pts_info(st, 60, 1, 25);
@@ -129,8 +147,11 @@ static int read_packet(AVFormatContext *avctx, AVPacket *pkt)
    return 0;
}
#define OFFSET(x) offsetof(TtyDemuxContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "chars_per_frame", "", offsetof(TtyDemuxContext, chars_per_frame), FF_OPT_TYPE_INT, {.dbl = 6000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM},
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { NULL },
};
...
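For reference, av_parse_video_size() from libavutil/parseutils.h accepts both explicit WxH strings and size abbreviations; a small illustrative sketch (not part of the patch):

#include "libavutil/parseutils.h"

static int parse_size_example(void)
{
    int width = 0, height = 0;
    int ret = av_parse_video_size(&width, &height, "hd720");
    /* on success, width is 1280 and height is 720 */
    return ret;
}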
@@ -74,5 +74,8 @@
#ifndef FF_API_FORMAT_PARAMETERS
#define FF_API_FORMAT_PARAMETERS (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_FLAG_RTP_HINT
#define FF_API_FLAG_RTP_HINT (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
#endif /* AVFORMAT_VERSION_H */
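FF_API_FLAG_RTP_HINT follows the same pattern as the guards above: code that still honours the deprecated global RTP hinting flag stays compiled only while LIBAVFORMAT_VERSION_MAJOR is below 54. A hypothetical sketch of such a guarded block (the warning text is illustrative, not the exact movenc.c change):

#if FF_API_FLAG_RTP_HINT
    if (s->flags & AVFMT_FLAG_RTP_HINT)
        av_log(s, AV_LOG_WARNING,
               "The RTP_HINT flag is deprecated; enable hinting via the "
               "muxer's private AVOption instead\n");
#endif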