Commit 4606ac8d authored by Zdenek Kabelac

* some minor modifications by Philip Gladstone

* grab contains new code

Originally committed as revision 388 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent a266644f
@@ -29,6 +29,7 @@ typedef struct {
     /* use for reading */
     AVPacket pkt;
     int frag_offset;
+    INT64 duration;
 } ASFStream;
 
 typedef struct {
@@ -120,6 +121,21 @@ static const GUID my_guid = {
     0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0 },
 };
 
+CodecTag codec_asf_bmp_tags[] = {
+    { CODEC_ID_H263, MKTAG('U', '2', '6', '3') },
+    { CODEC_ID_H263P, MKTAG('U', '2', '6', '3') },
+    { CODEC_ID_H263I, MKTAG('I', '2', '6', '3') }, /* intel h263 */
+    { CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') },
+    { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') },
+    { CODEC_ID_MPEG4, MKTAG('d', 'i', 'v', 'x') },
+    { CODEC_ID_MPEG4, MKTAG(0x04, 0, 0, 0) }, /* some broken avi use this */
+    { CODEC_ID_MSMPEG4, MKTAG('M', 'P', '4', '3') }, /* default signature when using MSMPEG4 */
+    { CODEC_ID_MSMPEG4, MKTAG('D', 'I', 'V', '3') },
+    { 0, 0 },
+};
+
 static void put_guid(ByteIOContext *s, const GUID *g)
 {
     int i;
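The new codec_asf_bmp_tags table is consumed through the existing codec_get_tag()/codec_get_id() helpers, which translate between CODEC_ID_* values and the FourCC stored in the ASF stream header. A minimal sketch of how such a sentinel-terminated lookup presumably works (illustration only, not the library's own implementation; the my_ prefix marks the names as hypothetical):

    /* Walk a { id, tag } table terminated by { 0, 0 }. */
    unsigned int my_codec_get_tag(const CodecTag *tags, int id)
    {
        for (; tags->id != 0; tags++)
            if (tags->id == id)
                return tags->tag;   /* e.g. MKTAG('M','P','4','3') for CODEC_ID_MSMPEG4 */
        return 0;
    }

    int my_codec_get_id(const CodecTag *tags, unsigned int tag)
    {
        for (; tags->id != 0; tags++)
            if (tags->tag == tag)
                return tags->id;
        return 0;                   /* 0 means "unknown codec" */
    }

Assuming first-match lookups like these, the duplicate 'U','2','6','3' rows mean both H.263 variants are written with the same tag, while reading that tag back resolves to CODEC_ID_H263.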
@@ -237,7 +253,7 @@ static int asf_write_header1(AVFormatContext *s, INT64 file_size, INT64 data_chu
     put_le64(pb, asf->duration); /* duration (in 100ns units) */
     put_le32(pb, 0); /* start time stamp */
     put_le32(pb, 0); /* ??? */
-    put_le32(pb, 0); /* ??? */
+    put_le32(pb, url_is_streamed(&s->pb) ? 1 : 0); /* ??? */
     put_le32(pb, asf->packet_size); /* packet size */
     put_le32(pb, asf->packet_size); /* packet size */
     put_le32(pb, 80 * asf->packet_size); /* frame_size ??? */
@@ -310,7 +326,7 @@ static int asf_write_header1(AVFormatContext *s, INT64 file_size, INT64 data_chu
         put_le16(pb, 40); /* size */
         /* BITMAPINFOHEADER header */
-        put_bmp_header(pb, enc);
+        put_bmp_header(pb, enc, codec_asf_bmp_tags);
     }
     end_header(pb, hpos);
 }
@@ -332,7 +348,7 @@ static int asf_write_header1(AVFormatContext *s, INT64 file_size, INT64 data_chu
         put_le16(pb, codec_get_tag(codec_wav_tags, enc->codec_id));
     } else {
         put_le16(pb, 4);
-        put_le32(pb, codec_get_tag(codec_bmp_tags, enc->codec_id));
+        put_le32(pb, codec_get_tag(codec_asf_bmp_tags, enc->codec_id));
     }
 }
 end_header(pb, hpos);
@@ -735,7 +751,7 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
     get_le16(pb); /* depth */
     tag1 = get_le32(pb);
     st->codec.codec_tag = tag1;
-    st->codec.codec_id = codec_get_id(codec_bmp_tags, tag1);
+    st->codec.codec_id = codec_get_id(codec_asf_bmp_tags, tag1);
     url_fskip(pb, size - 5 * 4);
 }
 pos2 = url_ftell(pb);
@@ -944,7 +960,11 @@ AVFormat asf_format = {
     "asf format",
     "application/octet-stream",
     "asf,wmv",
+#ifdef CONFIG_MP3LAME
+    CODEC_ID_MP3LAME,
+#else
     CODEC_ID_MP2,
+#endif
     CODEC_ID_MSMPEG4,
     asf_write_header,
     asf_write_packet,
......
@@ -248,6 +248,11 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
         ret = read(s->fd, pkt->data, pkt->size);
         if (ret > 0)
             break;
+        if (ret == -1 && (errno == EAGAIN || errno == EINTR)) {
+            av_free_packet(pkt);
+            pkt->size = 0;
+            return 0;
+        }
         if (!(ret == 0 || (ret == -1 && (errno == EAGAIN || errno == EINTR)))) {
             av_free_packet(pkt);
             return -EIO;
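With this hunk, audio_read_packet no longer spins on a transient EAGAIN/EINTR: it frees the buffer, returns a zero-size packet and reports success, so the caller decides what to do with the empty read. A minimal caller-side sketch (the loop, av_read_packet as the entry point, and process_packet are assumptions for illustration):

    AVPacket pkt;
    for (;;) {
        if (av_read_packet(ic, &pkt) < 0)
            break;                 /* real error or end of stream */
        if (pkt.size == 0)
            continue;              /* transient: nothing captured yet */
        process_packet(&pkt);      /* hypothetical consumer */
        av_free_packet(&pkt);
    }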
......
@@ -12,6 +12,7 @@ typedef struct AVPacket {
     int stream_index;
     int flags;
 #define PKT_FLAG_KEY 0x0001
+#define PKT_FLAG_DROPPED_FRAME 0x0002
 } AVPacket;
 
 int av_new_packet(AVPacket *pkt, int size);
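PKT_FLAG_DROPPED_FRAME lets the video grabber mark a packet whose capture slot was missed (see the grab.c hunks below). How a consumer reacts is up to the caller; a minimal sketch, with the encode_* calls as placeholders rather than real API:

    if (pkt.flags & PKT_FLAG_DROPPED_FRAME) {
        encode_duplicate_of_last_frame();   /* e.g. repeat the previous picture to keep sync */
    } else {
        encode_frame(pkt.data, pkt.size);
    }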
@@ -29,7 +30,7 @@ typedef struct AVFormatParameters {
     int channels;
     int width;
     int height;
-    int pix_fmt;
+    enum PixelFormat pix_fmt;
 } AVFormatParameters;
 
 typedef struct AVFormat {
......
@@ -9,15 +9,15 @@
 offset_t start_tag(ByteIOContext *pb, char *tag);
 void end_tag(ByteIOContext *pb, offset_t start);
 
-void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc);
-int put_wav_header(ByteIOContext *pb, AVCodecContext *enc);
-int wav_codec_get_id(unsigned int tag, int bps);
-
 typedef struct CodecTag {
     int id;
     unsigned int tag;
 } CodecTag;
 
+void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, CodecTag *tags);
+int put_wav_header(ByteIOContext *pb, AVCodecContext *enc);
+int wav_codec_get_id(unsigned int tag, int bps);
+
 extern CodecTag codec_bmp_tags[];
 extern CodecTag codec_wav_tags[];
......
@@ -19,7 +19,7 @@ typedef struct URLFormat {
     int channels;
     int height;
     int width;
-    int pix_fmt;
+    enum PixelFormat pix_fmt;
 } URLFormat;
 
 typedef struct URLContext URLContext;
......
@@ -101,7 +101,7 @@ offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
     if (s->write_flag) {
         if (whence == SEEK_CUR) {
-            offset1 = s->pos + s->buf_ptr - s->buffer;
+            offset1 = s->pos + (s->buf_ptr - s->buffer);
             if (offset == 0)
                 return offset1;
             offset += offset1;
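The added parentheses change the evaluation order, presumably to keep the arithmetic in the 64-bit offset type: without them the INT64 position is first added to the buf_ptr pointer and the subtraction is done as pointer arithmetic, whose result is only as wide as a pointer difference; with them the small pointer difference is computed first and then added to the 64-bit position. A tiny illustration (types spelled out, values invented):

    typedef long long INT64;               /* stand-in for the tree's INT64 */
    INT64 pos = 0x100000000LL;             /* a file position above 4 GB */
    unsigned char buffer[16], *buf_ptr = buffer + 8;
    /* old grouping: (pos + buf_ptr) - buffer goes through pointer arithmetic
     * and can truncate or overflow where pointers are 32 bits wide */
    INT64 offset1 = pos + (buf_ptr - buffer);   /* new grouping: plain 64-bit add, 0x100000008 */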
......
@@ -39,7 +39,7 @@ enum {
 typedef struct FFMContext {
     /* only reading mode */
     offset_t write_index, file_size;
     int read_state;
     UINT8 header[FRAME_HEADER_SIZE];
     /* read and write */
@@ -59,9 +59,9 @@ static void flush_packet(AVFormatContext *s)
     fill_size = ffm->packet_end - ffm->packet_ptr;
     memset(ffm->packet_ptr, 0, fill_size);
     /* put header */
     put_be16(pb, PACKET_ID);
     put_be16(pb, fill_size);
     put_be64(pb, ffm->pts);
     h = ffm->frame_offset;
@@ -78,8 +78,8 @@ static void flush_packet(AVFormatContext *s)
 }
 
 /* 'first' is true if first data of a frame */
 static void ffm_write_data(AVFormatContext *s,
                            UINT8 *buf, int size,
                            INT64 pts, int first)
 {
     FFMContext *ffm = s->priv_data;
@@ -96,7 +96,7 @@ static void ffm_write_data(AVFormatContext *s,
         if (len > size)
             len = size;
         memcpy(ffm->packet_ptr, buf, len);
         ffm->packet_ptr += len;
         buf += len;
         size -= len;
@@ -125,7 +125,7 @@ static int ffm_write_header(AVFormatContext *s)
     s->priv_data = ffm;
     ffm->packet_size = FFM_PACKET_SIZE;
 
     /* header */
     put_tag(pb, "FFM1");
     put_be32(pb, ffm->packet_size);
@@ -159,6 +159,11 @@ static int ffm_write_header(AVFormatContext *s)
             put_be32(pb, (codec->frame_rate * 1000) / FRAME_RATE_BASE);
             put_be16(pb, codec->width);
             put_be16(pb, codec->height);
+            put_byte(pb, codec->qmin);
+            put_byte(pb, codec->qmax);
+            put_byte(pb, codec->max_qdiff);
+            put_be16(pb, (int) (codec->qcompress * 10000.0));
+            put_be16(pb, (int) (codec->qblur * 10000.0));
             break;
         case CODEC_TYPE_AUDIO:
             put_be32(pb, codec->sample_rate);
@@ -268,7 +273,7 @@ static int ffm_is_avail_data(AVFormatContext *s, int size)
 }
 
 /* first is true if we read the frame header */
 static int ffm_read_data(AVFormatContext *s,
                          UINT8 *buf, int size, int first)
 {
     FFMContext *ffm = s->priv_data;
@@ -367,6 +372,11 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
             codec->frame_rate = ((INT64)get_be32(pb) * FRAME_RATE_BASE) / 1000;
             codec->width = get_be16(pb);
             codec->height = get_be16(pb);
+            codec->qmin = get_byte(pb);
+            codec->qmax = get_byte(pb);
+            codec->max_qdiff = get_byte(pb);
+            codec->qcompress = get_be16(pb) / 10000.0;
+            codec->qblur = get_be16(pb) / 10000.0;
             break;
         case CODEC_TYPE_AUDIO:
             codec->sample_rate = get_be32(pb);
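qcompress and qblur are small floating-point ratios, and the FFM header now carries them as 16-bit big-endian fixed point with a scale factor of 10000, so the writer/reader pair above loses less than 1/10000 per value. A worked round trip with an invented value:

    float qcompress = 0.5f;                  /* example value only */
    int wire = (int) (qcompress * 10000.0);  /* 5000, written with put_be16() */
    float back = wire / 10000.0;             /* 0.5 again after get_be16() */
    /* worst case: 0.12345 -> 1234 on the wire -> 0.1234 back, error below 0.0001 */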
@@ -414,12 +424,12 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
         if (!ffm_is_avail_data(s, FRAME_HEADER_SIZE))
             return -EAGAIN;
 #if 0
         printf("pos=%08Lx spos=%Lx, write_index=%Lx size=%Lx\n",
                url_ftell(&s->pb), s->pb.pos, ffm->write_index, ffm->file_size);
 #endif
         if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) != FRAME_HEADER_SIZE)
             return -EAGAIN;
 #if 0
         {
             int i;
@@ -440,7 +450,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
         pkt->stream_index = ffm->header[0];
         if (ffm->header[1] & FLAG_KEY_FRAME)
             pkt->flags |= PKT_FLAG_KEY;
         ffm->read_state = READ_HEADER;
         if (ffm_read_data(s, pkt->data, size, 0) != size) {
             /* bad case: desynchronized packet. we cancel all the packet loading */
@@ -476,7 +486,7 @@ static INT64 get_pts(AVFormatContext *s, offset_t pos)
     ByteIOContext *pb = &s->pb;
     INT64 pts;
 
     ffm_seek1(s, pos);
     url_fskip(pb, 4);
     pts = get_be64(pb);
 #ifdef DEBUG_SEEK
@@ -506,7 +516,7 @@ static int ffm_seek(AVFormatContext *s, INT64 wanted_pts)
         pts_min = get_pts(s, pos_min);
         pts_max = get_pts(s, pos_max);
         /* linear interpolation */
         pos1 = (double)(pos_max - pos_min) * (double)(wanted_pts - pts_min) /
                (double)(pts_max - pts_min);
         pos = (((INT64)pos1) / FFM_PACKET_SIZE) * FFM_PACKET_SIZE;
         if (pos <= pos_min)
@@ -540,7 +550,7 @@ offset_t ffm_read_write_index(int fd)
     lseek(fd, 8, SEEK_SET);
     read(fd, buf, 8);
     pos = 0;
     for(i=0;i<8;i++)
         pos |= buf[i] << (56 - i * 8);
     return pos;
 }
@@ -550,7 +560,7 @@ void ffm_write_write_index(int fd, offset_t pos)
     UINT8 buf[8];
     int i;
 
     for(i=0;i<8;i++)
         buf[i] = (pos >> (56 - i * 8)) & 0xff;
     lseek(fd, 8, SEEK_SET);
     write(fd, buf, 8);
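Both helpers keep the writer's current position as a big-endian 64-bit integer at byte offset 8 of the FFM file (right after the "FFM1" tag and the packet size), so a reader can poll it while the file is still growing. A usage sketch under those assumptions (path, polling interval and error handling are illustrative only):

    #include <fcntl.h>
    #include <unistd.h>

    int fd = open("/tmp/feed.ffm", O_RDONLY);      /* example path */
    offset_t last = 0;
    for (;;) {
        offset_t w = ffm_read_write_index(fd);
        if (w != last) {
            /* new packets were appended between 'last' and 'w' */
            last = w;
        }
        usleep(100000);                            /* poll every 100 ms */
    }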
......
@@ -53,6 +53,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     int width, height;
     int video_fd, frame_size;
     int ret, frame_rate;
+    int desired_palette;
 
     if (!ap || ap->width <= 0 || ap->height <= 0 || ap->frame_rate <= 0)
         return -1;
@@ -92,6 +93,15 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         fprintf(stderr, "Fatal: grab device does not handle capture\n");
         goto fail;
     }
+
+    desired_palette = -1;
+    if (st->codec.pix_fmt == PIX_FMT_YUV420P) {
+        desired_palette = VIDEO_PALETTE_YUV420P;
+    } else if (st->codec.pix_fmt == PIX_FMT_YUV422) {
+        desired_palette = VIDEO_PALETTE_YUV422;
+    } else if (st->codec.pix_fmt == PIX_FMT_BGR24) {
+        desired_palette = VIDEO_PALETTE_RGB24;
+    }
 
     /* unmute audio */
     ioctl(video_fd, VIDIOCGAUDIO, &audio);
@@ -125,16 +135,19 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
            pict.whiteness);
 #endif
     /* try to choose a suitable video format */
-    pict.palette=VIDEO_PALETTE_YUV420P;
-    ret = ioctl(video_fd, VIDIOCSPICT, &pict);
-    if (ret < 0) {
-        pict.palette=VIDEO_PALETTE_YUV422;
-        ret = ioctl(video_fd, VIDIOCSPICT, &pict);
-        if (ret < 0) {
-            pict.palette=VIDEO_PALETTE_RGB24;
-            ret = ioctl(video_fd, VIDIOCSPICT, &pict);
-            if (ret < 0)
-                goto fail1;
-        }
-    }
+    pict.palette = desired_palette;
+    if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
+        pict.palette=VIDEO_PALETTE_YUV420P;
+        ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+        if (ret < 0) {
+            pict.palette=VIDEO_PALETTE_YUV422;
+            ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+            if (ret < 0) {
+                pict.palette=VIDEO_PALETTE_RGB24;
+                ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+                if (ret < 0)
+                    goto fail1;
+            }
+        }
+    }
@@ -155,22 +168,26 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     s->time_frame = gettime();
 
     /* start to grab the first frame */
-    gb_buf.frame = (gb_frame + 1) % gb_buffers.frames;
+    gb_buf.frame = gb_frame % gb_buffers.frames;
     gb_buf.height = height;
     gb_buf.width = width;
-    gb_buf.format = VIDEO_PALETTE_YUV420P;
-    ret = ioctl(video_fd, VIDIOCMCAPTURE, &gb_buf);
-    if (ret < 0 && errno != EAGAIN) {
-        /* try YUV422 */
-        gb_buf.format = VIDEO_PALETTE_YUV422;
-        ret = ioctl(video_fd, VIDIOCMCAPTURE, &gb_buf);
-        if (ret < 0 && errno != EAGAIN) {
-            /* try RGB24 */
-            gb_buf.format = VIDEO_PALETTE_RGB24;
-            ret = ioctl(video_fd, VIDIOCMCAPTURE, &gb_buf);
-        }
+    gb_buf.format = desired_palette;
+    if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCMCAPTURE, &gb_buf)) < 0) {
+        gb_buf.format = VIDEO_PALETTE_YUV420P;
+        ret = ioctl(video_fd, VIDIOCMCAPTURE, &gb_buf);
+        if (ret < 0 && errno != EAGAIN) {
+            /* try YUV422 */
+            gb_buf.format = VIDEO_PALETTE_YUV422;
+            ret = ioctl(video_fd, VIDIOCMCAPTURE, &gb_buf);
+            if (ret < 0 && errno != EAGAIN) {
+                /* try RGB24 */
+                gb_buf.format = VIDEO_PALETTE_RGB24;
+                ret = ioctl(video_fd, VIDIOCMCAPTURE, &gb_buf);
+            }
+        }
     }
 
     if (ret < 0) {
         if (errno != EAGAIN) {
@@ -221,8 +238,11 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
 static int v4l_mm_read_picture(VideoData *s, UINT8 *buf)
 {
     UINT8 *ptr;
+    struct timeval tv_s, tv_e;
+    int delay;
 
-    gb_buf.frame = gb_frame;
+    /* Setup to capture the next frame */
+    gb_buf.frame = (gb_frame + 1) % gb_buffers.frames;
     if (ioctl(s->fd, VIDIOCMCAPTURE, &gb_buf) < 0) {
         if (errno == EAGAIN)
             fprintf(stderr,"Cannot Sync\n");
@@ -230,13 +250,26 @@ static int v4l_mm_read_picture(VideoData *s, UINT8 *buf)
         perror("VIDIOCMCAPTURE");
         return -EIO;
     }
-    gb_frame = (gb_frame + 1) % gb_buffers.frames;
+    gettimeofday(&tv_s, 0);
 
     while (ioctl(s->fd, VIDIOCSYNC, &gb_frame) < 0 &&
            (errno == EAGAIN || errno == EINTR));
 
+    /*
+    gettimeofday(&tv_e, 0);
+    delay = (tv_e.tv_sec - tv_s.tv_sec) * 1000000 + tv_e.tv_usec - tv_s.tv_usec;
+    if (delay > 10000)
+        printf("VIDIOCSYNC took %d us\n", delay);
+    */
 
     ptr = video_buf + gb_buffers.offsets[gb_frame];
     memcpy(buf, ptr, s->frame_size);
+
+    /* This is now the grabbing frame */
+    gb_frame = gb_buf.frame;
+
     return s->frame_size;
 }
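With these two hunks the memory-mapped capture path keeps the driver busy: each call first queues the next buffer with VIDIOCMCAPTURE, blocks in VIDIOCSYNC on the buffer queued by the previous call (or by grab_read_header for the very first frame), copies that frame out, and only then makes the freshly queued buffer the current one. A schematic of the rotation, with queue()/wait_for()/copy_out() as placeholders for the two ioctls and the memcpy:

    void queue(int frame);     /* stands in for VIDIOCMCAPTURE */
    void wait_for(int frame);  /* stands in for VIDIOCSYNC */
    void copy_out(int frame);  /* stands in for the memcpy above */

    void rotate(int nframes)
    {
        int current = 0;
        queue(current);                     /* done once in grab_read_header */
        for (;;) {
            int next = (current + 1) % nframes;
            queue(next);                    /* start the next capture early */
            wait_for(current);              /* sync on the frame queued last time */
            copy_out(current);
            current = next;                 /* the queued buffer becomes current */
        }
    }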
@@ -245,14 +278,25 @@ static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
     VideoData *s = s1->priv_data;
     INT64 curtime, delay;
     struct timespec ts;
+    int first;
+    INT64 per_frame = (INT64_C(1000000) * FRAME_RATE_BASE) / s->frame_rate;
+    int dropped = 0;
+
+    /* Calculate the time of the next frame */
+    s->time_frame += per_frame;
 
     /* wait based on the frame rate */
-    s->time_frame += (INT64_C(1000000) * FRAME_RATE_BASE) / s->frame_rate;
-    for(;;) {
+    for(first = 1;; first = 0) {
         curtime = gettime();
         delay = s->time_frame - curtime;
-        if (delay <= 0)
+        if (delay <= 0) {
+            if (delay < -per_frame) {
+                /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
+                dropped = 1;
+                s->time_frame += per_frame;
+            }
             break;
+        }
         ts.tv_sec = delay / 1000000;
         ts.tv_nsec = (delay % 1000000) * 1000;
         nanosleep(&ts, NULL);
@@ -261,6 +305,9 @@ static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
     if (av_new_packet(pkt, s->frame_size) < 0)
         return -EIO;
 
+    if (dropped)
+        pkt->flags |= PKT_FLAG_DROPPED_FRAME;
+
     /* read one frame */
     if (s->use_mmap) {
         return v4l_mm_read_picture(s, pkt->data);
......
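For reference, per_frame in the new grab_read_packet is the frame period in microseconds: s->frame_rate is frames per second scaled by FRAME_RATE_BASE (as in the ffm.c header code above), so the scale factor cancels out. A worked example with an assumed 25 fps source:

    /* illustrative numbers only: frame_rate == 25 * FRAME_RATE_BASE */
    INT64 per_frame = (INT64_C(1000000) * FRAME_RATE_BASE) / (25 * FRAME_RATE_BASE);
    /* per_frame == 40000 us; if grabbing falls more than one such period behind
     * schedule, the packet is flagged PKT_FLAG_DROPPED_FRAME and the schedule is
     * advanced by one period. */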