Commit 1e491e29 authored by Michael Niedermayer's avatar Michael Niedermayer

cleanup

 adding AVVideoFrame
 moving quality, pict_type, key_frame, qscale_table, ... to AVVideoFrame
 removing obsolete variables in AVCodecContext
 skipping of MBs in B frames
 correctly initializing AVCodecContext
 picture buffer cleanup

Originally committed as revision 1302 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 855ea723
...@@ -285,6 +285,7 @@ int read_ffserver_streams(AVFormatContext *s, const char *filename) ...@@ -285,6 +285,7 @@ int read_ffserver_streams(AVFormatContext *s, const char *filename)
s->nb_streams = ic->nb_streams; s->nb_streams = ic->nb_streams;
for(i=0;i<ic->nb_streams;i++) { for(i=0;i<ic->nb_streams;i++) {
AVStream *st; AVStream *st;
st = av_mallocz(sizeof(AVFormatContext)); st = av_mallocz(sizeof(AVFormatContext));
memcpy(st, ic->streams[i], sizeof(AVStream)); memcpy(st, ic->streams[i], sizeof(AVStream));
s->streams[i] = st; s->streams[i] = st;
...@@ -605,15 +606,21 @@ static void do_video_out(AVFormatContext *s, ...@@ -605,15 +606,21 @@ static void do_video_out(AVFormatContext *s,
/* XXX: pb because no interleaving */ /* XXX: pb because no interleaving */
for(i=0;i<nb_frames;i++) { for(i=0;i<nb_frames;i++) {
if (enc->codec_id != CODEC_ID_RAWVIDEO) { if (enc->codec_id != CODEC_ID_RAWVIDEO) {
AVVideoFrame big_picture;
memset(&big_picture, 0, sizeof(AVVideoFrame));
*(AVPicture*)&big_picture= *picture;
/* handles sameq here. This is not correct because it may /* handles sameq here. This is not correct because it may
not be a global option */ not be a global option */
if (same_quality) { if (same_quality) {
enc->quality = dec->quality; big_picture.quality = ist->st->quality;
} }else
big_picture.quality = ost->st->quality;
ret = avcodec_encode_video(enc, ret = avcodec_encode_video(enc,
video_buffer, VIDEO_BUFFER_SIZE, video_buffer, VIDEO_BUFFER_SIZE,
picture); &big_picture);
//enc->frame_number = enc->real_pict_num; //enc->frame_number = enc->real_pict_num;
av_write_frame(s, ost->index, video_buffer, ret); av_write_frame(s, ost->index, video_buffer, ret);
*frame_size = ret; *frame_size = ret;
...@@ -674,7 +681,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost, ...@@ -674,7 +681,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
total_size += frame_size; total_size += frame_size;
if (enc->codec_type == CODEC_TYPE_VIDEO) { if (enc->codec_type == CODEC_TYPE_VIDEO) {
frame_number = ost->frame_number; frame_number = ost->frame_number;
fprintf(fvstats, "frame= %5d q= %2d ", frame_number, enc->quality); fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_picture->quality);
if (do_psnr) if (do_psnr)
fprintf(fvstats, "PSNR= %6.2f ", enc->psnr_y); fprintf(fvstats, "PSNR= %6.2f ", enc->psnr_y);
...@@ -688,7 +695,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost, ...@@ -688,7 +695,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
avg_bitrate = (double)(total_size * 8) / ti1 / 1000.0; avg_bitrate = (double)(total_size * 8) / ti1 / 1000.0;
fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ", fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
(double)total_size / 1024, ti1, bitrate, avg_bitrate); (double)total_size / 1024, ti1, bitrate, avg_bitrate);
fprintf(fvstats,"type= %s\n", enc->key_frame == 1 ? "I" : "P"); fprintf(fvstats,"type= %s\n", enc->coded_picture->key_frame == 1 ? "I" : "P");
} }
} }
...@@ -731,13 +738,13 @@ void print_report(AVFormatContext **output_files, ...@@ -731,13 +738,13 @@ void print_report(AVFormatContext **output_files,
os = output_files[ost->file_index]; os = output_files[ost->file_index];
enc = &ost->st->codec; enc = &ost->st->codec;
if (vid && enc->codec_type == CODEC_TYPE_VIDEO) { if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
sprintf(buf + strlen(buf), "q=%2d ", sprintf(buf + strlen(buf), "q=%2.1f ",
enc->quality); enc->coded_picture->quality);
} }
if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) { if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
frame_number = ost->frame_number; frame_number = ost->frame_number;
sprintf(buf + strlen(buf), "frame=%5d q=%2d ", sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
frame_number, enc->quality); frame_number, enc->coded_picture ? enc->coded_picture->quality : 0);
if (do_psnr) if (do_psnr)
sprintf(buf + strlen(buf), "PSNR=%6.2f ", enc->psnr_y); sprintf(buf + strlen(buf), "PSNR=%6.2f ", enc->psnr_y);
vid = 1; vid = 1;
...@@ -1236,9 +1243,13 @@ static int av_encode(AVFormatContext **output_files, ...@@ -1236,9 +1243,13 @@ static int av_encode(AVFormatContext **output_files,
ist->st->codec.height); ist->st->codec.height);
ret = len; ret = len;
} else { } else {
AVVideoFrame big_picture;
data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2; data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
ret = avcodec_decode_video(&ist->st->codec, ret = avcodec_decode_video(&ist->st->codec,
&picture, &got_picture, ptr, len); &big_picture, &got_picture, ptr, len);
picture= *(AVPicture*)&big_picture;
ist->st->quality= big_picture.quality;
if (ret < 0) { if (ret < 0) {
fail_decode: fail_decode:
fprintf(stderr, "Error while decoding stream #%d.%d\n", fprintf(stderr, "Error while decoding stream #%d.%d\n",
...@@ -2046,6 +2057,7 @@ void opt_output_file(const char *filename) ...@@ -2046,6 +2057,7 @@ void opt_output_file(const char *filename)
fprintf(stderr, "Could not alloc stream\n"); fprintf(stderr, "Could not alloc stream\n");
exit(1); exit(1);
} }
avcodec_get_context_defaults(&st->codec);
video_enc = &st->codec; video_enc = &st->codec;
if (video_stream_copy) { if (video_stream_copy) {
...@@ -2074,7 +2086,7 @@ void opt_output_file(const char *filename) ...@@ -2074,7 +2086,7 @@ void opt_output_file(const char *filename)
video_enc->gop_size = 0; video_enc->gop_size = 0;
if (video_qscale || same_quality) { if (video_qscale || same_quality) {
video_enc->flags |= CODEC_FLAG_QSCALE; video_enc->flags |= CODEC_FLAG_QSCALE;
video_enc->quality = video_qscale; st->quality = video_qscale;
} }
if (use_hq) { if (use_hq) {
...@@ -2181,6 +2193,7 @@ void opt_output_file(const char *filename) ...@@ -2181,6 +2193,7 @@ void opt_output_file(const char *filename)
fprintf(stderr, "Could not alloc stream\n"); fprintf(stderr, "Could not alloc stream\n");
exit(1); exit(1);
} }
avcodec_get_context_defaults(&st->codec);
audio_enc = &st->codec; audio_enc = &st->codec;
audio_enc->codec_type = CODEC_TYPE_AUDIO; audio_enc->codec_type = CODEC_TYPE_AUDIO;
......
...@@ -1955,7 +1955,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt) ...@@ -1955,7 +1955,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
/* we use the codec indication because it is /* we use the codec indication because it is
more accurate than the demux flags */ more accurate than the demux flags */
pkt->flags = 0; pkt->flags = 0;
if (st->codec.key_frame) if (st->codec.coded_picture->key_frame)
pkt->flags |= PKT_FLAG_KEY; pkt->flags |= PKT_FLAG_KEY;
return 0; return 0;
} }
...@@ -3942,7 +3942,7 @@ int parse_ffconfig(const char *filename) ...@@ -3942,7 +3942,7 @@ int parse_ffconfig(const char *filename)
} else if (!strcasecmp(cmd, "AudioQuality")) { } else if (!strcasecmp(cmd, "AudioQuality")) {
get_arg(arg, sizeof(arg), &p); get_arg(arg, sizeof(arg), &p);
if (stream) { if (stream) {
audio_enc.quality = atof(arg) * 1000; // audio_enc.quality = atof(arg) * 1000;
} }
} else if (!strcasecmp(cmd, "VideoBitRateRange")) { } else if (!strcasecmp(cmd, "VideoBitRateRange")) {
if (stream) { if (stream) {
......
This diff is collapsed.
...@@ -33,6 +33,7 @@ typedef struct DVVideoDecodeContext { ...@@ -33,6 +33,7 @@ typedef struct DVVideoDecodeContext {
int sampling_411; /* 0 = 420, 1 = 411 */ int sampling_411; /* 0 = 420, 1 = 411 */
int width, height; int width, height;
UINT8 *current_picture[3]; /* picture structure */ UINT8 *current_picture[3]; /* picture structure */
AVVideoFrame picture;
int linesize[3]; int linesize[3];
DCTELEM block[5*6][64] __align8; DCTELEM block[5*6][64] __align8;
UINT8 dv_zigzag[2][64]; UINT8 dv_zigzag[2][64];
...@@ -128,7 +129,7 @@ static int dvvideo_decode_init(AVCodecContext *avctx) ...@@ -128,7 +129,7 @@ static int dvvideo_decode_init(AVCodecContext *avctx)
/* XXX: do it only for constant case */ /* XXX: do it only for constant case */
dv_build_unquantize_tables(s); dv_build_unquantize_tables(s);
return 0; return 0;
} }
...@@ -499,7 +500,6 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, ...@@ -499,7 +500,6 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
unsigned size; unsigned size;
UINT8 *buf_ptr; UINT8 *buf_ptr;
const UINT16 *mb_pos_ptr; const UINT16 *mb_pos_ptr;
AVPicture *picture;
/* parse id */ /* parse id */
init_get_bits(&s->gb, buf, buf_size); init_get_bits(&s->gb, buf, buf_size);
...@@ -561,45 +561,20 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, ...@@ -561,45 +561,20 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
avctx->width = width; avctx->width = width;
avctx->height = height; avctx->height = height;
if (avctx->flags & CODEC_FLAG_DR1) s->picture.reference= 0;
{ if(avctx->get_buffer(avctx, &s->picture) < 0) {
s->width = -1; fprintf(stderr, "get_buffer() failed\n");
avctx->dr_buffer[0] = avctx->dr_buffer[1] = avctx->dr_buffer[2] = 0; return -1;
if(avctx->get_buffer_callback(avctx, width, height, I_TYPE) < 0
&& avctx->flags & CODEC_FLAG_DR1) {
fprintf(stderr, "get_buffer() failed\n");
return -1;
}
} }
/* (re)alloc picture if needed */ for(i=0;i<3;i++) {
if (s->width != width || s->height != height) { s->current_picture[i] = s->picture.data[i];
if (!(avctx->flags & CODEC_FLAG_DR1)) s->linesize[i] = s->picture.linesize[i];
for(i=0;i<3;i++) { if (!s->current_picture[i])
if (avctx->dr_buffer[i] != s->current_picture[i]) return -1;
av_freep(&s->current_picture[i]);
avctx->dr_buffer[i] = 0;
}
for(i=0;i<3;i++) {
if (avctx->dr_buffer[i]) {
s->current_picture[i] = avctx->dr_buffer[i];
s->linesize[i] = (i == 0) ? avctx->dr_stride : avctx->dr_uvstride;
} else {
size = width * height;
s->linesize[i] = width;
if (i >= 1) {
size >>= 2;
s->linesize[i] >>= s->sampling_411 ? 2 : 1;
}
s->current_picture[i] = av_malloc(size);
}
if (!s->current_picture[i])
return -1;
}
s->width = width;
s->height = height;
} }
s->width = width;
s->height = height;
/* for each DIF segment */ /* for each DIF segment */
buf_ptr = buf; buf_ptr = buf;
...@@ -620,12 +595,11 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, ...@@ -620,12 +595,11 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
emms_c(); emms_c();
/* return image */ /* return image */
*data_size = sizeof(AVPicture); *data_size = sizeof(AVVideoFrame);
picture = data; *(AVVideoFrame*)data= s->picture;
for(i=0;i<3;i++) {
picture->data[i] = s->current_picture[i]; avctx->release_buffer(avctx, &s->picture);
picture->linesize[i] = s->linesize[i];
}
return packet_size; return packet_size;
} }
...@@ -633,10 +607,15 @@ static int dvvideo_decode_end(AVCodecContext *avctx) ...@@ -633,10 +607,15 @@ static int dvvideo_decode_end(AVCodecContext *avctx)
{ {
DVVideoDecodeContext *s = avctx->priv_data; DVVideoDecodeContext *s = avctx->priv_data;
int i; int i;
if(avctx->get_buffer == avcodec_default_get_buffer){
for(i=0; i<4; i++){
av_freep(&s->picture.base[i]);
s->picture.data[i]= NULL;
}
av_freep(&s->picture.opaque);
}
for(i=0;i<3;i++)
if (avctx->dr_buffer[i] != s->current_picture[i])
av_freep(&s->current_picture[i]);
return 0; return 0;
} }
......
...@@ -464,7 +464,7 @@ int score_sum=0; ...@@ -464,7 +464,7 @@ int score_sum=0;
s->mb_y= mb_y; s->mb_y= mb_y;
for(j=0; j<pred_count; j++){ for(j=0; j<pred_count; j++){
int score=0; int score=0;
UINT8 *src= s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize; UINT8 *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
s->motion_val[mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0]; s->motion_val[mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
s->motion_val[mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1]; s->motion_val[mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];
...@@ -556,8 +556,8 @@ static int is_intra_more_likely(MpegEncContext *s){ ...@@ -556,8 +556,8 @@ static int is_intra_more_likely(MpegEncContext *s){
if((j%skip_amount) != 0) continue; //skip a few to speed things up if((j%skip_amount) != 0) continue; //skip a few to speed things up
if(s->pict_type==I_TYPE){ if(s->pict_type==I_TYPE){
UINT8 *mb_ptr = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize; UINT8 *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
UINT8 *last_mb_ptr= s->last_picture [0] + mb_x*16 + mb_y*16*s->linesize; UINT8 *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
is_intra_likely += s->dsp.pix_abs16x16(last_mb_ptr, mb_ptr , s->linesize); is_intra_likely += s->dsp.pix_abs16x16(last_mb_ptr, mb_ptr , s->linesize);
is_intra_likely -= s->dsp.pix_abs16x16(last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize); is_intra_likely -= s->dsp.pix_abs16x16(last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize);
...@@ -802,9 +802,9 @@ void ff_error_resilience(MpegEncContext *s){ ...@@ -802,9 +802,9 @@ void ff_error_resilience(MpegEncContext *s){
if(s->mb_type[i]&MB_TYPE_INTRA) continue; //intra if(s->mb_type[i]&MB_TYPE_INTRA) continue; //intra
// if(error&MV_ERROR) continue; //inter data damaged FIXME is this good? // if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
dest_y = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize; dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
dest_cb= s->current_picture[1] + mb_x*8 + mb_y*8 *s->uvlinesize; dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
dest_cr= s->current_picture[2] + mb_x*8 + mb_y*8 *s->uvlinesize; dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
dc_ptr= &s->dc_val[0][mb_x*2+1 + (mb_y*2+1)*(s->mb_width*2+2)]; dc_ptr= &s->dc_val[0][mb_x*2+1 + (mb_y*2+1)*(s->mb_width*2+2)];
for(n=0; n<4; n++){ for(n=0; n<4; n++){
...@@ -852,9 +852,9 @@ void ff_error_resilience(MpegEncContext *s){ ...@@ -852,9 +852,9 @@ void ff_error_resilience(MpegEncContext *s){
if(!(s->mb_type[i]&MB_TYPE_INTRA)) continue; //inter if(!(s->mb_type[i]&MB_TYPE_INTRA)) continue; //inter
if(!(error&AC_ERROR)) continue; //undamaged if(!(error&AC_ERROR)) continue; //undamaged
dest_y = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize; dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
dest_cb= s->current_picture[1] + mb_x*8 + mb_y*8 *s->uvlinesize; dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
dest_cr= s->current_picture[2] + mb_x*8 + mb_y*8 *s->uvlinesize; dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y); put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
} }
...@@ -863,14 +863,14 @@ void ff_error_resilience(MpegEncContext *s){ ...@@ -863,14 +863,14 @@ void ff_error_resilience(MpegEncContext *s){
if(s->avctx->error_concealment&FF_EC_DEBLOCK){ if(s->avctx->error_concealment&FF_EC_DEBLOCK){
/* filter horizontal block boundaries */ /* filter horizontal block boundaries */
h_block_filter(s, s->current_picture[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
h_block_filter(s, s->current_picture[1], s->mb_width , s->mb_height , s->uvlinesize, 0); h_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
h_block_filter(s, s->current_picture[2], s->mb_width , s->mb_height , s->uvlinesize, 0); h_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
/* filter vertical block boundaries */ /* filter vertical block boundaries */
v_block_filter(s, s->current_picture[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
v_block_filter(s, s->current_picture[1], s->mb_width , s->mb_height , s->uvlinesize, 0); v_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
v_block_filter(s, s->current_picture[2], s->mb_width , s->mb_height , s->uvlinesize, 0); v_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
} }
/* clean a few tables */ /* clean a few tables */
......
This diff is collapsed.
...@@ -199,6 +199,7 @@ static int decode_slice(MpegEncContext *s){ ...@@ -199,6 +199,7 @@ static int decode_slice(MpegEncContext *s){
s->mv_dir = MV_DIR_FORWARD; s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16; s->mv_type = MV_TYPE_16X16;
// s->mb_skiped = 0;
//printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24)); //printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
ret= s->decode_mb(s, s->block); ret= s->decode_mb(s, s->block);
...@@ -347,7 +348,7 @@ static int h263_decode_frame(AVCodecContext *avctx, ...@@ -347,7 +348,7 @@ static int h263_decode_frame(AVCodecContext *avctx,
{ {
MpegEncContext *s = avctx->priv_data; MpegEncContext *s = avctx->priv_data;
int ret,i; int ret,i;
AVPicture *pict = data; AVVideoFrame *pict = data;
float new_aspect; float new_aspect;
#ifdef PRINT_FRAME_TIME #ifdef PRINT_FRAME_TIME
...@@ -357,7 +358,6 @@ uint64_t time= rdtsc(); ...@@ -357,7 +358,6 @@ uint64_t time= rdtsc();
printf("*****frame %d size=%d\n", avctx->frame_number, buf_size); printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
printf("bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]); printf("bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
#endif #endif
s->flags= avctx->flags; s->flags= avctx->flags;
*data_size = 0; *data_size = 0;
...@@ -523,8 +523,9 @@ retry: ...@@ -523,8 +523,9 @@ retry:
return -1; return -1;
} }
s->avctx->key_frame = (s->pict_type == I_TYPE); // for hurry_up==5
s->avctx->pict_type = s->pict_type; s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == I_TYPE;
/* skip b frames if we dont have reference frames */ /* skip b frames if we dont have reference frames */
if(s->num_available_buffers<2 && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size); if(s->num_available_buffers<2 && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
...@@ -580,7 +581,9 @@ retry: ...@@ -580,7 +581,9 @@ retry:
} }
if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==I_TYPE) if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==I_TYPE)
if(msmpeg4_decode_ext_header(s, buf_size) < 0) return -1; if(msmpeg4_decode_ext_header(s, buf_size) < 0){
s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR;
}
/* divx 5.01+ bistream reorder stuff */ /* divx 5.01+ bistream reorder stuff */
if(s->codec_id==CODEC_ID_MPEG4 && s->bitstream_buffer_size==0 && s->divx_version>=500){ if(s->codec_id==CODEC_ID_MPEG4 && s->bitstream_buffer_size==0 && s->divx_version>=500){
...@@ -644,7 +647,7 @@ retry: ...@@ -644,7 +647,7 @@ retry:
int y= mb_y*16 + 8; int y= mb_y*16 + 8;
for(mb_x=0; mb_x<s->mb_width; mb_x++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){
int x= mb_x*16 + 8; int x= mb_x*16 + 8;
uint8_t *ptr= s->last_picture[0]; uint8_t *ptr= s->last_picture.data[0];
int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2); int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
int mx= (s->motion_val[xy][0]>>1) + x; int mx= (s->motion_val[xy][0]>>1) + x;
int my= (s->motion_val[xy][1]>>1) + y; int my= (s->motion_val[xy][1]>>1) + y;
...@@ -669,21 +672,12 @@ retry: ...@@ -669,21 +672,12 @@ retry:
} }
} }
#endif #endif
if(s->pict_type==B_TYPE || (!s->has_b_frames)){ if(s->pict_type==B_TYPE || (!s->has_b_frames)){
pict->data[0] = s->current_picture[0]; *pict= *(AVVideoFrame*)&s->current_picture;
pict->data[1] = s->current_picture[1];
pict->data[2] = s->current_picture[2];
} else { } else {
pict->data[0] = s->last_picture[0]; *pict= *(AVVideoFrame*)&s->last_picture;
pict->data[1] = s->last_picture[1];
pict->data[2] = s->last_picture[2];
} }
pict->linesize[0] = s->linesize;
pict->linesize[1] = s->uvlinesize;
pict->linesize[2] = s->uvlinesize;
avctx->quality = s->qscale;
/* Return the Picture timestamp as the frame number */ /* Return the Picture timestamp as the frame number */
/* we substract 1 because it is added on utils.c */ /* we substract 1 because it is added on utils.c */
...@@ -692,7 +686,7 @@ retry: ...@@ -692,7 +686,7 @@ retry:
/* dont output the last pic after seeking /* dont output the last pic after seeking
note we allready added +1 for the current pix in MPV_frame_end(s) */ note we allready added +1 for the current pix in MPV_frame_end(s) */
if(s->num_available_buffers>=2 || (!s->has_b_frames)) if(s->num_available_buffers>=2 || (!s->has_b_frames))
*data_size = sizeof(AVPicture); *data_size = sizeof(AVVideoFrame);
#ifdef PRINT_FRAME_TIME #ifdef PRINT_FRAME_TIME
printf("%Ld\n", rdtsc()-time); printf("%Ld\n", rdtsc()-time);
#endif #endif
......
This diff is collapsed.
...@@ -1180,9 +1180,11 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) ...@@ -1180,9 +1180,11 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
get_bits(&s->gb, 8), get_bits(&s->gb, 8)); get_bits(&s->gb, 8), get_bits(&s->gb, 8));
if (get_bits(&s->gb, 8) == 0) if (get_bits(&s->gb, 8) == 0)
{ {
s->avctx->aspect_ratio_info = FF_ASPECT_EXTENDED; int x_density = get_bits(&s->gb, 16);
s->avctx->aspected_width = get_bits(&s->gb, 16); int y_density = get_bits(&s->gb, 16);
s->avctx->aspected_height = get_bits(&s->gb, 16);
//MN: needs to be checked
s->avctx->aspect_ratio= s->width*y_density/((float)s->height*x_density);
} }
else else
{ {
...@@ -1468,7 +1470,7 @@ eoi_parser: ...@@ -1468,7 +1470,7 @@ eoi_parser:
} }
/* dummy quality */ /* dummy quality */
/* XXX: infer it with matrix */ /* XXX: infer it with matrix */
avctx->quality = 3; // avctx->quality = 3;
goto the_end; goto the_end;
} }
break; break;
...@@ -1635,7 +1637,7 @@ read_header: ...@@ -1635,7 +1637,7 @@ read_header:
} }
/* dummy quality */ /* dummy quality */
/* XXX: infer it with matrix */ /* XXX: infer it with matrix */
avctx->quality = 3; // avctx->quality = 3;
return buf_ptr - buf; return buf_ptr - buf;
} }
......
...@@ -92,7 +92,7 @@ static int full_motion_search(MpegEncContext * s, ...@@ -92,7 +92,7 @@ static int full_motion_search(MpegEncContext * s,
y2 = yy + range - 1; y2 = yy + range - 1;
if (y2 > ymax) if (y2 > ymax)
y2 = ymax; y2 = ymax;
pix = s->new_picture[0] + (yy * s->linesize) + xx; pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
dmin = 0x7fffffff; dmin = 0x7fffffff;
mx = 0; mx = 0;
my = 0; my = 0;
...@@ -155,7 +155,7 @@ static int log_motion_search(MpegEncContext * s, ...@@ -155,7 +155,7 @@ static int log_motion_search(MpegEncContext * s,
if (y2 > ymax) if (y2 > ymax)
y2 = ymax; y2 = ymax;
pix = s->new_picture[0] + (yy * s->linesize) + xx; pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
dmin = 0x7fffffff; dmin = 0x7fffffff;
mx = 0; mx = 0;
my = 0; my = 0;
...@@ -231,7 +231,7 @@ static int phods_motion_search(MpegEncContext * s, ...@@ -231,7 +231,7 @@ static int phods_motion_search(MpegEncContext * s,
if (y2 > ymax) if (y2 > ymax)
y2 = ymax; y2 = ymax;
pix = s->new_picture[0] + (yy * s->linesize) + xx; pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
mx = 0; mx = 0;
my = 0; my = 0;
...@@ -560,7 +560,7 @@ static int epzs_motion_search(MpegEncContext * s, ...@@ -560,7 +560,7 @@ static int epzs_motion_search(MpegEncContext * s,
uint16_t *score_map= s->me_score_map; uint16_t *score_map= s->me_score_map;
int map_generation; int map_generation;
new_pic = s->new_picture[0] + pic_xy; new_pic = s->new_picture.data[0] + pic_xy;
old_pic = ref_picture + pic_xy; old_pic = ref_picture + pic_xy;
map_generation= update_map_generation(s); map_generation= update_map_generation(s);
...@@ -649,7 +649,7 @@ static int epzs_motion_search4(MpegEncContext * s, int block, ...@@ -649,7 +649,7 @@ static int epzs_motion_search4(MpegEncContext * s, int block,
uint16_t *score_map= s->me_score_map; uint16_t *score_map= s->me_score_map;
int map_generation; int map_generation;
new_pic = s->new_picture[0] + pic_xy; new_pic = s->new_picture.data[0] + pic_xy;
old_pic = ref_picture + pic_xy; old_pic = ref_picture + pic_xy;
map_generation= update_map_generation(s); map_generation= update_map_generation(s);
...@@ -723,7 +723,7 @@ static inline int halfpel_motion_search(MpegEncContext * s, ...@@ -723,7 +723,7 @@ static inline int halfpel_motion_search(MpegEncContext * s,
xx = 16 * s->mb_x + 8*(n&1); xx = 16 * s->mb_x + 8*(n&1);
yy = 16 * s->mb_y + 8*(n>>1); yy = 16 * s->mb_y + 8*(n>>1);
pix = s->new_picture[0] + (yy * s->linesize) + xx; pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
mx = *mx_ptr; mx = *mx_ptr;
my = *my_ptr; my = *my_ptr;
...@@ -789,7 +789,7 @@ static inline int fast_halfpel_motion_search(MpegEncContext * s, ...@@ -789,7 +789,7 @@ static inline int fast_halfpel_motion_search(MpegEncContext * s,
xx = 16 * s->mb_x + 8*(n&1); xx = 16 * s->mb_x + 8*(n&1);
yy = 16 * s->mb_y + 8*(n>>1); yy = 16 * s->mb_y + 8*(n>>1);
pix = s->new_picture[0] + (yy * s->linesize) + xx; pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
mx = *mx_ptr; mx = *mx_ptr;
my = *my_ptr; my = *my_ptr;
...@@ -931,7 +931,7 @@ static inline int mv4_search(MpegEncContext *s, int xmin, int ymin, int xmax, in ...@@ -931,7 +931,7 @@ static inline int mv4_search(MpegEncContext *s, int xmin, int ymin, int xmax, in
{ {
int block; int block;
int P[10][2]; int P[10][2];
uint8_t *ref_picture= s->last_picture[0]; uint8_t *ref_picture= s->last_picture.data[0];
int dmin_sum=0; int dmin_sum=0;
for(block=0; block<4; block++){ for(block=0; block<4; block++){
...@@ -1019,7 +1019,8 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, ...@@ -1019,7 +1019,8 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
int P[10][2]; int P[10][2];
const int shift= 1+s->quarter_sample; const int shift= 1+s->quarter_sample;
int mb_type=0; int mb_type=0;
uint8_t *ref_picture= s->last_picture[0]; uint8_t *ref_picture= s->last_picture.data[0];
Picture * const pic= &s->current_picture;
get_limits(s, &range, &xmin, &ymin, &xmax, &ymax, s->f_code); get_limits(s, &range, &xmin, &ymin, &xmax, &ymax, s->f_code);
rel_xmin= xmin - mb_x*16; rel_xmin= xmin - mb_x*16;
...@@ -1104,7 +1105,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, ...@@ -1104,7 +1105,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
xx = mb_x * 16; xx = mb_x * 16;
yy = mb_y * 16; yy = mb_y * 16;
pix = s->new_picture[0] + (yy * s->linesize) + xx; pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
/* At this point (mx,my) are full-pell and the relative displacement */ /* At this point (mx,my) are full-pell and the relative displacement */
ppix = ref_picture + ((yy+my) * s->linesize) + (xx+mx); ppix = ref_picture + ((yy+my) * s->linesize) + (xx+mx);
...@@ -1115,11 +1116,11 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, ...@@ -1115,11 +1116,11 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
vard = (s->dsp.pix_norm(pix, ppix, s->linesize)+128)>>8; vard = (s->dsp.pix_norm(pix, ppix, s->linesize)+128)>>8;
//printf("%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout); //printf("%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
s->mb_var [s->mb_width * mb_y + mb_x] = varc; pic->mb_var [s->mb_width * mb_y + mb_x] = varc;
s->mc_mb_var[s->mb_width * mb_y + mb_x] = vard; pic->mc_mb_var[s->mb_width * mb_y + mb_x] = vard;
s->mb_mean [s->mb_width * mb_y + mb_x] = (sum+128)>>8; pic->mb_mean [s->mb_width * mb_y + mb_x] = (sum+128)>>8;
s->mb_var_sum += varc; pic->mb_var_sum += varc;
s->mc_mb_var_sum += vard; pic->mc_mb_var_sum += vard;
//printf("E%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout); //printf("E%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
#if 0 #if 0
...@@ -1318,7 +1319,7 @@ static inline int check_bidir_mv(MpegEncContext * s, ...@@ -1318,7 +1319,7 @@ static inline int check_bidir_mv(MpegEncContext * s,
if (src_y == s->height) if (src_y == s->height)
dxy&= 1; dxy&= 1;
ptr = s->last_picture[0] + (src_y * s->linesize) + src_x; ptr = s->last_picture.data[0] + (src_y * s->linesize) + src_x;
s->dsp.put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16); s->dsp.put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
fbmin += (mv_penalty[motion_bx-pred_bx] + mv_penalty[motion_by-pred_by])*s->qscale; fbmin += (mv_penalty[motion_bx-pred_bx] + mv_penalty[motion_by-pred_by])*s->qscale;
...@@ -1333,10 +1334,10 @@ static inline int check_bidir_mv(MpegEncContext * s, ...@@ -1333,10 +1334,10 @@ static inline int check_bidir_mv(MpegEncContext * s,
if (src_y == s->height) if (src_y == s->height)
dxy&= 1; dxy&= 1;
ptr = s->next_picture[0] + (src_y * s->linesize) + src_x; ptr = s->next_picture.data[0] + (src_y * s->linesize) + src_x;
s->dsp.avg_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16); s->dsp.avg_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
fbmin += s->dsp.pix_abs16x16(s->new_picture[0] + mb_x*16 + mb_y*16*s->linesize, dest_y, s->linesize); fbmin += s->dsp.pix_abs16x16(s->new_picture.data[0] + mb_x*16 + mb_y*16*s->linesize, dest_y, s->linesize);
return fbmin; return fbmin;
} }
...@@ -1418,7 +1419,7 @@ static inline int direct_search(MpegEncContext * s, ...@@ -1418,7 +1419,7 @@ static inline int direct_search(MpegEncContext * s,
src_y = clip(src_y, -16, height); src_y = clip(src_y, -16, height);
if (src_y == height) dxy &= ~2; if (src_y == height) dxy &= ~2;
ptr = s->last_picture[0] + (src_y * s->linesize) + src_x; ptr = s->last_picture.data[0] + (src_y * s->linesize) + src_x;
s->dsp.put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16); s->dsp.put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
dxy = ((motion_by & 1) << 1) | (motion_bx & 1); dxy = ((motion_by & 1) << 1) | (motion_bx & 1);
...@@ -1511,8 +1512,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s, ...@@ -1511,8 +1512,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
dmin= direct_search(s, mb_x, mb_y); dmin= direct_search(s, mb_x, mb_y);
fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, s->last_picture[0], s->f_code); fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, s->last_picture.data[0], s->f_code);
bmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, s->next_picture[0], s->b_code) - quant; bmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, s->next_picture.data[0], s->b_code) - quant;
//printf(" %d %d ", s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]); //printf(" %d %d ", s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]);
fbmin= bidir_refine(s, mb_x, mb_y); fbmin= bidir_refine(s, mb_x, mb_y);
...@@ -1534,8 +1535,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s, ...@@ -1534,8 +1535,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
type= MB_TYPE_BIDIR; type= MB_TYPE_BIDIR;
} }
score= ((unsigned)(score*score + 128*256))>>16; score= ((unsigned)(score*score + 128*256))>>16;
s->mc_mb_var_sum += score; s->current_picture.mc_mb_var_sum += score;
s->mc_mb_var[mb_y*s->mb_width + mb_x] = score; //FIXME use SSD s->current_picture.mc_mb_var[mb_y*s->mb_width + mb_x] = score; //FIXME use SSD
} }
if(s->flags&CODEC_FLAG_HQ){ if(s->flags&CODEC_FLAG_HQ){
...@@ -1581,7 +1582,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type) ...@@ -1581,7 +1582,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
int j; int j;
for(j=0; j<fcode && j<8; j++){ for(j=0; j<fcode && j<8; j++){
if(s->pict_type==B_TYPE || s->mc_mb_var[i] < s->mb_var[i]) if(s->pict_type==B_TYPE || s->current_picture.mc_mb_var[i] < s->current_picture.mb_var[i])
score[j]-= 170; score[j]-= 170;
} }
} }
......
...@@ -134,7 +134,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) ...@@ -134,7 +134,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
int n; int n;
UINT64 time_code; UINT64 time_code;
if (s->picture_in_gop_number == 0) { if (s->current_picture.key_frame) {
/* mpeg1 header repeated every gop */ /* mpeg1 header repeated every gop */
put_header(s, SEQ_START_CODE); put_header(s, SEQ_START_CODE);
...@@ -1359,7 +1359,6 @@ static int mpeg_decode_init(AVCodecContext *avctx) ...@@ -1359,7 +1359,6 @@ static int mpeg_decode_init(AVCodecContext *avctx)
s->mpeg_enc_ctx.picture_number = 0; s->mpeg_enc_ctx.picture_number = 0;
s->repeat_field = 0; s->repeat_field = 0;
s->mpeg_enc_ctx.codec_id= avctx->codec->id; s->mpeg_enc_ctx.codec_id= avctx->codec->id;
avctx->mbskip_table= s->mpeg_enc_ctx.mbskip_table;
return 0; return 0;
} }
...@@ -1403,9 +1402,6 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, ...@@ -1403,9 +1402,6 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->pict_type = get_bits(&s->gb, 3); s->pict_type = get_bits(&s->gb, 3);
dprintf("pict_type=%d number=%d\n", s->pict_type, s->picture_number); dprintf("pict_type=%d number=%d\n", s->pict_type, s->picture_number);
avctx->pict_type= s->pict_type;
avctx->key_frame= s->pict_type == I_TYPE;
skip_bits(&s->gb, 16); skip_bits(&s->gb, 16);
if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) { if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
s->full_pel[0] = get_bits1(&s->gb); s->full_pel[0] = get_bits1(&s->gb);
...@@ -1423,6 +1419,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, ...@@ -1423,6 +1419,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[1][0] = f_code; s->mpeg_f_code[1][0] = f_code;
s->mpeg_f_code[1][1] = f_code; s->mpeg_f_code[1][1] = f_code;
} }
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == I_TYPE;
s->y_dc_scale = 8; s->y_dc_scale = 8;
s->c_dc_scale = 8; s->c_dc_scale = 8;
s->first_slice = 1; s->first_slice = 1;
...@@ -1576,7 +1574,7 @@ static void mpeg_decode_extension(AVCodecContext *avctx, ...@@ -1576,7 +1574,7 @@ static void mpeg_decode_extension(AVCodecContext *avctx,
* DECODE_SLICE_EOP if the end of the picture is reached * DECODE_SLICE_EOP if the end of the picture is reached
*/ */
static int mpeg_decode_slice(AVCodecContext *avctx, static int mpeg_decode_slice(AVCodecContext *avctx,
AVPicture *pict, AVVideoFrame *pict,
int start_code, int start_code,
UINT8 *buf, int buf_size) UINT8 *buf, int buf_size)
{ {
...@@ -1677,38 +1675,25 @@ eos: //end of slice ...@@ -1677,38 +1675,25 @@ eos: //end of slice
if (/*s->mb_x == 0 &&*/ if (/*s->mb_x == 0 &&*/
s->mb_y == s->mb_height) { s->mb_y == s->mb_height) {
/* end of image */ /* end of image */
UINT8 **picture;
if(s->mpeg2)
s->qscale >>=1;
MPV_frame_end(s); MPV_frame_end(s);
if (s->pict_type == B_TYPE) { if (s->pict_type == B_TYPE) {
picture = s->current_picture; *pict= *(AVVideoFrame*)&s->current_picture;
avctx->quality = s->qscale;
} else { } else {
s->picture_number++;
/* latency of 1 frame for I and P frames */ /* latency of 1 frame for I and P frames */
/* XXX: use another variable than picture_number */ /* XXX: use another variable than picture_number */
if (s->picture_number == 0) { if (s->picture_number == 1) {
picture = NULL; return DECODE_SLICE_OK;
} else { } else {
picture = s->last_picture; *pict= *(AVVideoFrame*)&s->last_picture;
avctx->quality = s->last_qscale;
} }
s->last_qscale = s->qscale;
s->picture_number++;
}
if(s->mpeg2)
avctx->quality>>=1;
if (picture) {
pict->data[0] = picture[0];
pict->data[1] = picture[1];
pict->data[2] = picture[2];
pict->linesize[0] = s->linesize;
pict->linesize[1] = s->uvlinesize;
pict->linesize[2] = s->uvlinesize;
return DECODE_SLICE_EOP;
} else {
return DECODE_SLICE_OK;
} }
return DECODE_SLICE_EOP;
} else { } else {
return DECODE_SLICE_OK; return DECODE_SLICE_OK;
} }
...@@ -1827,7 +1812,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, ...@@ -1827,7 +1812,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
Mpeg1Context *s = avctx->priv_data; Mpeg1Context *s = avctx->priv_data;
UINT8 *buf_end, *buf_ptr, *buf_start; UINT8 *buf_end, *buf_ptr, *buf_start;
int len, start_code_found, ret, code, start_code, input_size; int len, start_code_found, ret, code, start_code, input_size;
AVPicture *picture = data; AVVideoFrame *picture = data;
MpegEncContext *s2 = &s->mpeg_enc_ctx; MpegEncContext *s2 = &s->mpeg_enc_ctx;
dprintf("fill_buffer\n"); dprintf("fill_buffer\n");
...@@ -1837,13 +1822,9 @@ static int mpeg_decode_frame(AVCodecContext *avctx, ...@@ -1837,13 +1822,9 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
/* special case for last picture */ /* special case for last picture */
if (buf_size == 0) { if (buf_size == 0) {
if (s2->picture_number > 0) { if (s2->picture_number > 0) {
picture->data[0] = s2->next_picture[0]; *picture= *(AVVideoFrame*)&s2->next_picture;
picture->data[1] = s2->next_picture[1];
picture->data[2] = s2->next_picture[2]; *data_size = sizeof(AVVideoFrame);
picture->linesize[0] = s2->linesize;
picture->linesize[1] = s2->uvlinesize;
picture->linesize[2] = s2->uvlinesize;
*data_size = sizeof(AVPicture);
} }
return 0; return 0;
} }
......
This diff is collapsed.
...@@ -28,6 +28,8 @@ enum OutputFormat { ...@@ -28,6 +28,8 @@ enum OutputFormat {
FMT_MJPEG, FMT_MJPEG,
}; };
#define EDGE_WIDTH 16
#define MPEG_BUF_SIZE (16 * 1024) #define MPEG_BUF_SIZE (16 * 1024)
#define QMAT_SHIFT_MMX 16 #define QMAT_SHIFT_MMX 16
...@@ -35,7 +37,8 @@ enum OutputFormat { ...@@ -35,7 +37,8 @@ enum OutputFormat {
#define MAX_FCODE 7 #define MAX_FCODE 7
#define MAX_MV 2048 #define MAX_MV 2048
#define REORDER_BUFFER_SIZE (FF_MAX_B_FRAMES+2)
#define MAX_PICTURE_COUNT 7
#define ME_MAP_SIZE 64 #define ME_MAP_SIZE 64
#define ME_MAP_SHIFT 3 #define ME_MAP_SHIFT 3
...@@ -90,14 +93,6 @@ typedef struct RateControlContext{ ...@@ -90,14 +93,6 @@ typedef struct RateControlContext{
int last_non_b_pict_type; int last_non_b_pict_type;
}RateControlContext; }RateControlContext;
typedef struct ReorderBuffer{
UINT8 *picture[3];
int pict_type;
int qscale;
int force_type;
int picture_number;
int picture_in_gop_number;
} ReorderBuffer;
typedef struct ScanTable{ typedef struct ScanTable{
const UINT8 *scantable; const UINT8 *scantable;
...@@ -109,6 +104,16 @@ typedef struct ScanTable{ ...@@ -109,6 +104,16 @@ typedef struct ScanTable{
#endif #endif
} ScanTable; } ScanTable;
typedef struct Picture{
FF_COMMON_PICTURE
int mb_var_sum; /* sum of MB variance for current frame */
int mc_mb_var_sum; /* motion compensated MB variance for current frame */
uint16_t *mb_var; /* Table for MB variances */
uint16_t *mc_mb_var; /* Table for motion compensated MB variances */
uint8_t *mb_mean; /* Table for MB luminance */
} Picture;
typedef struct ParseContext{ typedef struct ParseContext{
UINT8 *buffer; UINT8 *buffer;
int index; int index;
...@@ -145,7 +150,6 @@ typedef struct MpegEncContext { ...@@ -145,7 +150,6 @@ typedef struct MpegEncContext {
int max_qdiff; /* max qscale difference between frames */ int max_qdiff; /* max qscale difference between frames */
int encoding; /* true if we are encoding (vs decoding) */ int encoding; /* true if we are encoding (vs decoding) */
int flags; /* AVCodecContext.flags (HQ, MV4, ...) */ int flags; /* AVCodecContext.flags (HQ, MV4, ...) */
int force_input_type;/* 0= no force, otherwise I_TYPE, P_TYPE, ... */
int max_b_frames; /* max number of b-frames for encoding */ int max_b_frames; /* max number of b-frames for encoding */
int b_frame_strategy; int b_frame_strategy;
int luma_elim_threshold; int luma_elim_threshold;
...@@ -160,10 +164,7 @@ typedef struct MpegEncContext { ...@@ -160,10 +164,7 @@ typedef struct MpegEncContext {
/* sequence parameters */ /* sequence parameters */
int context_initialized; int context_initialized;
int input_picture_number; int input_picture_number;
int input_picture_in_gop_number; /* 0-> first pic in gop, ... */
int picture_number; int picture_number;
int fake_picture_number; /* picture number at the bitstream frame rate */
int gop_picture_number; /* index of the first picture of a GOP based on fake_pic_num & mpeg1 specific */
int picture_in_gop_number; /* 0-> first pic in gop, ... */ int picture_in_gop_number; /* 0-> first pic in gop, ... */
int b_frames_since_non_b; /* used for encoding, relative to not yet reordered input */ int b_frames_since_non_b; /* used for encoding, relative to not yet reordered input */
int mb_width, mb_height; /* number of MBs horizontally & vertically */ int mb_width, mb_height; /* number of MBs horizontally & vertically */
...@@ -171,20 +172,13 @@ typedef struct MpegEncContext { ...@@ -171,20 +172,13 @@ typedef struct MpegEncContext {
int mb_num; /* number of MBs of a picture */ int mb_num; /* number of MBs of a picture */
int linesize; /* line size, in bytes, may be different from width */ int linesize; /* line size, in bytes, may be different from width */
int uvlinesize; /* line size, for chroma in bytes, may be different from width */ int uvlinesize; /* line size, for chroma in bytes, may be different from width */
UINT8 *new_picture[3]; /* picture to be compressed */ Picture picture[MAX_PICTURE_COUNT]; /* main picture buffer */
UINT8 *picture_buffer[REORDER_BUFFER_SIZE][3]; /* internal buffers used for reordering of input pictures */ Picture *input_picture[MAX_PICTURE_COUNT]; /* next pictures on display order for encoding*/
int picture_buffer_index; Picture *reordered_input_picture[MAX_PICTURE_COUNT]; /* pointer to the next pictures in codedorder for encoding*/
ReorderBuffer coded_order[REORDER_BUFFER_SIZE]; Picture last_picture; /* previous picture */
UINT8 *last_picture[3]; /* previous picture */ Picture next_picture; /* previous picture (for bidir pred) */
UINT8 *last_picture_base[3]; /* real start of the picture */ Picture new_picture; /* source picture for encoding */
UINT8 *next_picture[3]; /* previous picture (for bidir pred) */ Picture current_picture; /* buffer to store the decompressed current picture */
UINT8 *next_picture_base[3]; /* real start of the picture */
UINT8 *aux_picture[3]; /* aux picture (for B frames only) */
UINT8 *aux_picture_base[3]; /* real start of the picture */
UINT8 *current_picture[3]; /* buffer to store the decompressed current picture */
void *last_dr_opaque;
void *next_dr_opaque;
int ip_buffer_count; /* number of buffers, currently only >2 if dr1 is used */
int num_available_buffers; /* is 0 at the start & after seeking, after the first I frame its 1 after next I/P 2 */ int num_available_buffers; /* is 0 at the start & after seeking, after the first I frame its 1 after next I/P 2 */
int last_dc[3]; /* last DC values for MPEG1 */ int last_dc[3]; /* last DC values for MPEG1 */
INT16 *dc_val[3]; /* used for mpeg4 DC prediction, all 3 arrays must be continuous */ INT16 *dc_val[3]; /* used for mpeg4 DC prediction, all 3 arrays must be continuous */
...@@ -200,17 +194,10 @@ typedef struct MpegEncContext { ...@@ -200,17 +194,10 @@ typedef struct MpegEncContext {
UINT8 *mbintra_table; /* used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding */ UINT8 *mbintra_table; /* used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding */
UINT8 *cbp_table; /* used to store cbp, ac_pred for partitioned decoding */ UINT8 *cbp_table; /* used to store cbp, ac_pred for partitioned decoding */
UINT8 *pred_dir_table; /* used to store pred_dir for partitioned decoding */ UINT8 *pred_dir_table; /* used to store pred_dir for partitioned decoding */
INT8 *qscale_table; /* used to store qscale */
INT8 *aux_qscale_table;
INT8 *next_qscale_table;
INT8 *last_qscale_table; //FIXME move these into some picture struct (MpegEncContext.aux.qscale_table[])
UINT8 *edge_emu_buffer; UINT8 *edge_emu_buffer;
int input_qscale; /* qscale prior to reordering of frames */
int input_pict_type; /* pict_type prior to reordering of frames */
int force_type; /* 0= no force, otherwise I_TYPE, P_TYPE, ... */
int qscale; /* QP */ int qscale; /* QP */
float frame_qscale; /* qscale from the frame level rc */ float frame_qscale; /* qscale from the frame level rc FIXME remove*/
int adaptive_quant; /* use adaptive quantization */ int adaptive_quant; /* use adaptive quantization */
int dquant; /* qscale difference to prev qscale */ int dquant; /* qscale difference to prev qscale */
int pict_type; /* I_TYPE, P_TYPE, B_TYPE, ... */ int pict_type; /* I_TYPE, P_TYPE, B_TYPE, ... */
...@@ -272,9 +259,6 @@ typedef struct MpegEncContext { ...@@ -272,9 +259,6 @@ typedef struct MpegEncContext {
int mb_x, mb_y; int mb_x, mb_y;
int mb_incr; int mb_incr;
int mb_intra; int mb_intra;
UINT16 *mb_var; /* Table for MB variances */
UINT16 *mc_mb_var; /* Table for motion compensated MB variances */
UINT8 *mb_mean; /* Table for MB luminance */
UINT8 *mb_type; /* Table for MB type */ UINT8 *mb_type; /* Table for MB type */
#define MB_TYPE_INTRA 0x01 #define MB_TYPE_INTRA 0x01
#define MB_TYPE_INTER 0x02 #define MB_TYPE_INTER 0x02
...@@ -325,8 +309,6 @@ typedef struct MpegEncContext { ...@@ -325,8 +309,6 @@ typedef struct MpegEncContext {
/* bit rate control */ /* bit rate control */
int I_frame_bits; //FIXME used in mpeg12 ... int I_frame_bits; //FIXME used in mpeg12 ...
int mb_var_sum; /* sum of MB variance for current frame */
int mc_mb_var_sum; /* motion compensated MB variance for current frame */
INT64 wanted_bits; INT64 wanted_bits;
INT64 total_bits; INT64 total_bits;
int frame_bits; /* bits used for the current frame */ int frame_bits; /* bits used for the current frame */
...@@ -476,6 +458,10 @@ typedef struct MpegEncContext { ...@@ -476,6 +458,10 @@ typedef struct MpegEncContext {
/* decompression specific */ /* decompression specific */
GetBitContext gb; GetBitContext gb;
/* Mpeg1 specific */
int fake_picture_number; /* picture number at the bitstream frame rate */
int gop_picture_number; /* index of the first picture of a GOP based on fake_pic_num & mpeg1 specific */
/* MPEG2 specific - I wish I had not to support this mess. */ /* MPEG2 specific - I wish I had not to support this mess. */
int progressive_sequence; int progressive_sequence;
int mpeg_f_code[2][2]; int mpeg_f_code[2][2];
...@@ -498,7 +484,6 @@ typedef struct MpegEncContext { ...@@ -498,7 +484,6 @@ typedef struct MpegEncContext {
int mpeg2; int mpeg2;
int full_pel[2]; int full_pel[2];
int interlaced_dct; int interlaced_dct;
int last_qscale;
int first_slice; int first_slice;
/* RTP specific */ /* RTP specific */
......
...@@ -759,10 +759,10 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n, ...@@ -759,10 +759,10 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
}else{ }else{
if(n<4){ if(n<4){
wrap= s->linesize; wrap= s->linesize;
dest= s->current_picture[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8; dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
}else{ }else{
wrap= s->uvlinesize; wrap= s->uvlinesize;
dest= s->current_picture[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8; dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
} }
if(s->mb_x==0) a= (1024 + (scale>>1))/scale; if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
else a= get_dc(dest-8, wrap, scale*8); else a= get_dc(dest-8, wrap, scale*8);
......
...@@ -41,7 +41,7 @@ void ff_write_pass1_stats(MpegEncContext *s){ ...@@ -41,7 +41,7 @@ void ff_write_pass1_stats(MpegEncContext *s){
sprintf(s->avctx->stats_out, "in:%d out:%d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d;\n", sprintf(s->avctx->stats_out, "in:%d out:%d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d;\n",
s->picture_number, s->input_picture_number - s->max_b_frames, s->pict_type, s->picture_number, s->input_picture_number - s->max_b_frames, s->pict_type,
s->frame_qscale, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits, s->frame_qscale, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
s->f_code, s->b_code, s->mc_mb_var_sum, s->mb_var_sum, s->i_count); s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count);
} }
int ff_rate_control_init(MpegEncContext *s) int ff_rate_control_init(MpegEncContext *s)
...@@ -475,11 +475,12 @@ static void adaptive_quantization(MpegEncContext *s, double q){ ...@@ -475,11 +475,12 @@ static void adaptive_quantization(MpegEncContext *s, double q){
float bits_tab[s->mb_num]; float bits_tab[s->mb_num];
const int qmin= 2; //s->avctx->mb_qmin; const int qmin= 2; //s->avctx->mb_qmin;
const int qmax= 31; //s->avctx->mb_qmax; const int qmax= 31; //s->avctx->mb_qmax;
Picture * const pic= &s->current_picture;
for(i=0; i<s->mb_num; i++){ for(i=0; i<s->mb_num; i++){
float temp_cplx= sqrt(s->mc_mb_var[i]); float temp_cplx= sqrt(pic->mc_mb_var[i]);
float spat_cplx= sqrt(s->mb_var[i]); float spat_cplx= sqrt(pic->mb_var[i]);
const int lumi= s->mb_mean[i]; const int lumi= pic->mb_mean[i];
float bits, cplx, factor; float bits, cplx, factor;
if(spat_cplx < q/3) spat_cplx= q/3; //FIXME finetune if(spat_cplx < q/3) spat_cplx= q/3; //FIXME finetune
...@@ -533,8 +534,8 @@ static void adaptive_quantization(MpegEncContext *s, double q){ ...@@ -533,8 +534,8 @@ static void adaptive_quantization(MpegEncContext *s, double q){
newq*= bits_sum/cplx_sum; newq*= bits_sum/cplx_sum;
} }
if(i && ABS(s->qscale_table[i-1] - newq)<0.75) if(i && ABS(pic->qscale_table[i-1] - newq)<0.75)
intq= s->qscale_table[i-1]; intq= pic->qscale_table[i-1];
else else
intq= (int)(newq + 0.5); intq= (int)(newq + 0.5);
...@@ -542,7 +543,7 @@ static void adaptive_quantization(MpegEncContext *s, double q){ ...@@ -542,7 +543,7 @@ static void adaptive_quantization(MpegEncContext *s, double q){
else if(intq < qmin) intq= qmin; else if(intq < qmin) intq= qmin;
//if(i%s->mb_width==0) printf("\n"); //if(i%s->mb_width==0) printf("\n");
//printf("%2d%3d ", intq, ff_sqrt(s->mc_mb_var[i])); //printf("%2d%3d ", intq, ff_sqrt(s->mc_mb_var[i]));
s->qscale_table[i]= intq; pic->qscale_table[i]= intq;
} }
} }
...@@ -562,6 +563,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) ...@@ -562,6 +563,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
double rate_factor; double rate_factor;
int var; int var;
const int pict_type= s->pict_type; const int pict_type= s->pict_type;
Picture * const pic= &s->current_picture;
emms_c(); emms_c();
get_qminmax(&qmin, &qmax, s, pict_type); get_qminmax(&qmin, &qmax, s, pict_type);
...@@ -588,7 +590,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) ...@@ -588,7 +590,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
br_compensation= (s->bit_rate_tolerance - diff)/s->bit_rate_tolerance; br_compensation= (s->bit_rate_tolerance - diff)/s->bit_rate_tolerance;
if(br_compensation<=0.0) br_compensation=0.001; if(br_compensation<=0.0) br_compensation=0.001;
var= pict_type == I_TYPE ? s->mb_var_sum : s->mc_mb_var_sum; var= pict_type == I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum;
if(s->flags&CODEC_FLAG_PASS2){ if(s->flags&CODEC_FLAG_PASS2){
if(pict_type!=I_TYPE) if(pict_type!=I_TYPE)
...@@ -599,8 +601,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s) ...@@ -599,8 +601,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
}else{ }else{
rce->pict_type= rce->pict_type=
rce->new_pict_type= pict_type; rce->new_pict_type= pict_type;
rce->mc_mb_var_sum= s->mc_mb_var_sum; rce->mc_mb_var_sum= pic->mc_mb_var_sum;
rce->mb_var_sum = s-> mb_var_sum; rce->mb_var_sum = pic-> mb_var_sum;
rce->qscale = 2; rce->qscale = 2;
rce->f_code = s->f_code; rce->f_code = s->f_code;
rce->b_code = s->b_code; rce->b_code = s->b_code;
...@@ -663,10 +665,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s) ...@@ -663,10 +665,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
else if(q>qmax) q=qmax; else if(q>qmax) q=qmax;
// printf("%f %d %d %d\n", q, picture_number, (int)wanted_bits, (int)s->total_bits); // printf("%f %d %d %d\n", q, picture_number, (int)wanted_bits, (int)s->total_bits);
//printf("%f %f %f\n", q, br_compensation, short_term_q); //printf("diff:%d comp:%f st_q:%f last_size:%d type:%d\n", (int)diff, br_compensation,
//printf("q:%d diff:%d comp:%f st_q:%f last_size:%d type:%d\n", qscale, (int)diff, br_compensation,
// short_term_q, s->frame_bits, pict_type); // short_term_q, s->frame_bits, pict_type);
//printf("%d %d\n", s->bit_rate, (int)fps); //printf("%d %d\n", s->bit_rate, (int)fps);
...@@ -676,8 +676,16 @@ float ff_rate_estimate_qscale(MpegEncContext *s) ...@@ -676,8 +676,16 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
q= (int)(q + 0.5); q= (int)(q + 0.5);
rcc->last_qscale= q; rcc->last_qscale= q;
rcc->last_mc_mb_var_sum= s->mc_mb_var_sum; rcc->last_mc_mb_var_sum= pic->mc_mb_var_sum;
rcc->last_mb_var_sum= s->mb_var_sum; rcc->last_mb_var_sum= pic->mb_var_sum;
#if 0
{
static int mvsum=0, texsum=0;
mvsum += s->mv_bits;
texsum += s->i_tex_bits + s->p_tex_bits;
printf("%d %d//\n\n", mvsum, texsum);
}
#endif
return q; return q;
} }
......
...@@ -472,7 +472,7 @@ static int rv10_decode_frame(AVCodecContext *avctx, ...@@ -472,7 +472,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
{ {
MpegEncContext *s = avctx->priv_data; MpegEncContext *s = avctx->priv_data;
int i; int i;
AVPicture *pict = data; AVVideoFrame *pict = data;
#ifdef DEBUG #ifdef DEBUG
printf("*****frame %d size=%d\n", avctx->frame_number, buf_size); printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
...@@ -505,15 +505,9 @@ static int rv10_decode_frame(AVCodecContext *avctx, ...@@ -505,15 +505,9 @@ static int rv10_decode_frame(AVCodecContext *avctx,
if(s->mb_y>=s->mb_height){ if(s->mb_y>=s->mb_height){
MPV_frame_end(s); MPV_frame_end(s);
pict->data[0] = s->current_picture[0]; *pict= *(AVVideoFrame*)&s->current_picture;
pict->data[1] = s->current_picture[1];
pict->data[2] = s->current_picture[2];
pict->linesize[0] = s->linesize;
pict->linesize[1] = s->uvlinesize;
pict->linesize[2] = s->uvlinesize;
avctx->quality = s->qscale; *data_size = sizeof(AVVideoFrame);
*data_size = sizeof(AVPicture);
}else{ }else{
*data_size = 0; *data_size = 0;
} }
......
...@@ -1063,7 +1063,7 @@ static int svq1_decode_frame(AVCodecContext *avctx, ...@@ -1063,7 +1063,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
MpegEncContext *s=avctx->priv_data; MpegEncContext *s=avctx->priv_data;
uint8_t *current, *previous; uint8_t *current, *previous;
int result, i, x, y, width, height; int result, i, x, y, width, height;
AVPicture *pict = data; AVVideoFrame *pict = data;
/* initialize bit buffer */ /* initialize bit buffer */
init_get_bits(&s->gb,buf,buf_size); init_get_bits(&s->gb,buf,buf_size);
...@@ -1084,9 +1084,6 @@ static int svq1_decode_frame(AVCodecContext *avctx, ...@@ -1084,9 +1084,6 @@ static int svq1_decode_frame(AVCodecContext *avctx,
} }
result = svq1_decode_frame_header (&s->gb, s); result = svq1_decode_frame_header (&s->gb, s);
if(MPV_frame_start(s, avctx) < 0)
return -1;
if (result != 0) if (result != 0)
{ {
...@@ -1098,6 +1095,9 @@ static int svq1_decode_frame(AVCodecContext *avctx, ...@@ -1098,6 +1095,9 @@ static int svq1_decode_frame(AVCodecContext *avctx,
if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size; if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size;
if(MPV_frame_start(s, avctx) < 0)
return -1;
/* decode y, u and v components */ /* decode y, u and v components */
for (i=0; i < 3; i++) { for (i=0; i < 3; i++) {
int linesize; int linesize;
...@@ -1112,12 +1112,12 @@ static int svq1_decode_frame(AVCodecContext *avctx, ...@@ -1112,12 +1112,12 @@ static int svq1_decode_frame(AVCodecContext *avctx,
linesize= s->uvlinesize; linesize= s->uvlinesize;
} }
current = s->current_picture[i]; current = s->current_picture.data[i];
if(s->pict_type==B_TYPE){ if(s->pict_type==B_TYPE){
previous = s->next_picture[i]; previous = s->next_picture.data[i];
}else{ }else{
previous = s->last_picture[i]; previous = s->last_picture.data[i];
} }
if (s->pict_type == I_TYPE) { if (s->pict_type == I_TYPE) {
...@@ -1159,12 +1159,14 @@ static int svq1_decode_frame(AVCodecContext *avctx, ...@@ -1159,12 +1159,14 @@ static int svq1_decode_frame(AVCodecContext *avctx,
current += 16*linesize; current += 16*linesize;
} }
} }
pict->data[i] = s->current_picture[i];
pict->linesize[i] = linesize;
} }
*pict = *(AVVideoFrame*)&s->current_picture;
*data_size=sizeof(AVPicture);
MPV_frame_end(s);
*data_size=sizeof(AVVideoFrame);
return buf_size; return buf_size;
} }
...@@ -1176,7 +1178,6 @@ static int svq1_decode_init(AVCodecContext *avctx) ...@@ -1176,7 +1178,6 @@ static int svq1_decode_init(AVCodecContext *avctx)
s->width = (avctx->width+3)&~3; s->width = (avctx->width+3)&~3;
s->height = (avctx->height+3)&~3; s->height = (avctx->height+3)&~3;
s->codec_id= avctx->codec->id; s->codec_id= avctx->codec->id;
avctx->mbskip_table= s->mbskip_table;
avctx->pix_fmt = PIX_FMT_YUV410P; avctx->pix_fmt = PIX_FMT_YUV410P;
avctx->has_b_frames= s->has_b_frames=1; // not true, but DP frames and these behave like unidirectional b frames avctx->has_b_frames= s->has_b_frames=1; // not true, but DP frames and these behave like unidirectional b frames
s->flags= avctx->flags; s->flags= avctx->flags;
......
...@@ -86,6 +86,123 @@ void register_avcodec(AVCodec *format) ...@@ -86,6 +86,123 @@ void register_avcodec(AVCodec *format)
format->next = NULL; format->next = NULL;
} }
void avcodec_get_chroma_sub_sample(int fmt, int *h_shift, int *v_shift){
switch(fmt){
case PIX_FMT_YUV410P:
*h_shift=2;
*v_shift=2;
break;
case PIX_FMT_YUV420P:
*h_shift=1;
*v_shift=1;
break;
case PIX_FMT_YUV411P:
*h_shift=2;
*v_shift=0;
break;
case PIX_FMT_YUV422P:
case PIX_FMT_YUV422:
*h_shift=1;
*v_shift=0;
break;
default: //RGB/...
*h_shift=0;
*v_shift=0;
break;
}
}
typedef struct DefaultPicOpaque{
int last_pic_num;
uint8_t *data[4];
}DefaultPicOpaque;
int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){
int i;
const int width = s->width;
const int height= s->height;
DefaultPicOpaque *opaque;
if(pic->opaque){
opaque= (DefaultPicOpaque *)pic->opaque;
for(i=0; i<3; i++)
pic->data[i]= opaque->data[i];
// printf("get_buffer %X coded_pic_num:%d last:%d\n", pic->opaque, pic->coded_picture_number, opaque->last_pic_num);
pic->age= pic->coded_picture_number - opaque->last_pic_num;
opaque->last_pic_num= pic->coded_picture_number;
//printf("age: %d %d %d\n", pic->age, c->picture_number, pic->coded_picture_number);
}else{
int align, h_chroma_shift, v_chroma_shift;
int w, h, pixel_size;
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
switch(s->pix_fmt){
case PIX_FMT_YUV422:
pixel_size=2;
break;
case PIX_FMT_RGB24:
case PIX_FMT_BGR24:
pixel_size=3;
break;
case PIX_FMT_BGRA32:
case PIX_FMT_RGBA32:
pixel_size=4;
break;
default:
pixel_size=1;
}
if(s->codec_id==CODEC_ID_SVQ1) align=63;
else align=15;
w= (width +align)&~align;
h= (height+align)&~align;
if(!(s->flags&CODEC_FLAG_EMU_EDGE)){
w+= EDGE_WIDTH*2;
h+= EDGE_WIDTH*2;
}
opaque= av_mallocz(sizeof(DefaultPicOpaque));
if(opaque==NULL) return -1;
pic->opaque= opaque;
opaque->last_pic_num= -256*256*256*64;
for(i=0; i<3; i++){
int h_shift= i==0 ? 0 : h_chroma_shift;
int v_shift= i==0 ? 0 : v_chroma_shift;
pic->linesize[i]= pixel_size*w>>h_shift;
pic->base[i]= av_mallocz((pic->linesize[i]*h>>v_shift)+16); //FIXME 16
if(pic->base[i]==NULL) return -1;
memset(pic->base[i], 128, pic->linesize[i]*h>>v_shift);
if(s->flags&CODEC_FLAG_EMU_EDGE)
pic->data[i] = pic->base[i];
else
pic->data[i] = pic->base[i] + (pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift);
opaque->data[i]= pic->data[i];
}
pic->age= 256*256*256*64;
}
return 0;
}
void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic){
int i;
for(i=0; i<3; i++)
pic->data[i]=NULL;
//printf("R%X\n", pic->opaque);
}
void avcodec_get_context_defaults(AVCodecContext *s){ void avcodec_get_context_defaults(AVCodecContext *s){
s->bit_rate= 800*1000; s->bit_rate= 800*1000;
s->bit_rate_tolerance= s->bit_rate*10; s->bit_rate_tolerance= s->bit_rate*10;
...@@ -104,6 +221,8 @@ void avcodec_get_context_defaults(AVCodecContext *s){ ...@@ -104,6 +221,8 @@ void avcodec_get_context_defaults(AVCodecContext *s){
s->frame_rate = 25 * FRAME_RATE_BASE; s->frame_rate = 25 * FRAME_RATE_BASE;
s->gop_size= 50; s->gop_size= 50;
s->me_method= ME_EPZS; s->me_method= ME_EPZS;
s->get_buffer= avcodec_default_get_buffer;
s->release_buffer= avcodec_default_release_buffer;
} }
/** /**
...@@ -120,6 +239,16 @@ AVCodecContext *avcodec_alloc_context(void){ ...@@ -120,6 +239,16 @@ AVCodecContext *avcodec_alloc_context(void){
return avctx; return avctx;
} }
/**
* allocates a AVPicture and set it to defaults.
* this can be deallocated by simply calling free()
*/
AVVideoFrame *avcodec_alloc_picture(void){
AVVideoFrame *pic= av_mallocz(sizeof(AVVideoFrame));
return pic;
}
int avcodec_open(AVCodecContext *avctx, AVCodec *codec) int avcodec_open(AVCodecContext *avctx, AVCodec *codec)
{ {
int ret; int ret;
...@@ -152,7 +281,7 @@ int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size, ...@@ -152,7 +281,7 @@ int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size,
} }
int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size, int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
const AVPicture *pict) const AVVideoFrame *pict)
{ {
int ret; int ret;
...@@ -167,17 +296,17 @@ int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size, ...@@ -167,17 +296,17 @@ int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
/* decode a frame. return -1 if error, otherwise return the number of /* decode a frame. return -1 if error, otherwise return the number of
bytes used. If no frame could be decompressed, *got_picture_ptr is bytes used. If no frame could be decompressed, *got_picture_ptr is
zero. Otherwise, it is non zero */ zero. Otherwise, it is non zero */
int avcodec_decode_video(AVCodecContext *avctx, AVPicture *picture, int avcodec_decode_video(AVCodecContext *avctx, AVVideoFrame *picture,
int *got_picture_ptr, int *got_picture_ptr,
UINT8 *buf, int buf_size) UINT8 *buf, int buf_size)
{ {
int ret; int ret;
ret = avctx->codec->decode(avctx, picture, got_picture_ptr, ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
buf, buf_size); buf, buf_size);
emms_c(); //needed to avoid a emms_c() call before every return; emms_c(); //needed to avoid a emms_c() call before every return;
if (*got_picture_ptr) if (*got_picture_ptr)
avctx->frame_number++; avctx->frame_number++;
return ret; return ret;
......
...@@ -556,7 +556,7 @@ static void put_frame_header(AVFormatContext *s, ASFStream *stream, int timestam ...@@ -556,7 +556,7 @@ static void put_frame_header(AVFormatContext *s, ASFStream *stream, int timestam
int val; int val;
val = stream->num; val = stream->num;
if (s->streams[val - 1]->codec.key_frame /* && frag_offset == 0 */) if (s->streams[val - 1]->codec.coded_picture->key_frame /* && frag_offset == 0 */)
val |= 0x80; val |= 0x80;
put_byte(pb, val); put_byte(pb, val);
put_byte(pb, stream->seq); put_byte(pb, stream->seq);
...@@ -793,6 +793,7 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap) ...@@ -793,6 +793,7 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
st = av_mallocz(sizeof(AVStream)); st = av_mallocz(sizeof(AVStream));
if (!st) if (!st)
goto fail; goto fail;
avcodec_get_context_defaults(&st->codec);
s->streams[s->nb_streams] = st; s->streams[s->nb_streams] = st;
asf_st = av_mallocz(sizeof(ASFStream)); asf_st = av_mallocz(sizeof(ASFStream));
if (!asf_st) if (!asf_st)
......
...@@ -143,6 +143,8 @@ static int au_read_header(AVFormatContext *s, ...@@ -143,6 +143,8 @@ static int au_read_header(AVFormatContext *s,
st = av_malloc(sizeof(AVStream)); st = av_malloc(sizeof(AVStream));
if (!st) if (!st)
return -1; return -1;
avcodec_get_context_defaults(&st->codec);
s->nb_streams = 1; s->nb_streams = 1;
s->streams[0] = st; s->streams[0] = st;
......
...@@ -144,6 +144,9 @@ typedef struct AVStream { ...@@ -144,6 +144,9 @@ typedef struct AVStream {
AVFrac pts; AVFrac pts;
/* ffmpeg.c private use */ /* ffmpeg.c private use */
int stream_copy; /* if TRUE, just copy stream */ int stream_copy; /* if TRUE, just copy stream */
/* quality, as it has been removed from AVCodecContext and put in AVVideoFrame
* MN:dunno if thats the right place, for it */
float quality;
} AVStream; } AVStream;
#define MAX_STREAMS 20 #define MAX_STREAMS 20
......
...@@ -103,6 +103,8 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) ...@@ -103,6 +103,8 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
AVStream *st = av_mallocz(sizeof(AVStream)); AVStream *st = av_mallocz(sizeof(AVStream));
if (!st) if (!st)
goto fail; goto fail;
avcodec_get_context_defaults(&st->codec);
s->streams[i] = st; s->streams[i] = st;
} }
url_fskip(pb, size - 7 * 4); url_fskip(pb, size - 7 * 4);
......
...@@ -320,7 +320,7 @@ static int avi_write_packet(AVFormatContext *s, int stream_index, ...@@ -320,7 +320,7 @@ static int avi_write_packet(AVFormatContext *s, int stream_index,
if (enc->codec_type == CODEC_TYPE_VIDEO) { if (enc->codec_type == CODEC_TYPE_VIDEO) {
tag[2] = 'd'; tag[2] = 'd';
tag[3] = 'c'; tag[3] = 'c';
flags = enc->key_frame ? 0x10 : 0x00; flags = enc->coded_picture->key_frame ? 0x10 : 0x00;
} else { } else {
tag[2] = 'w'; tag[2] = 'w';
tag[3] = 'b'; tag[3] = 'b';
......
...@@ -151,7 +151,7 @@ static int ffm_write_header(AVFormatContext *s) ...@@ -151,7 +151,7 @@ static int ffm_write_header(AVFormatContext *s)
put_be32(pb, codec->codec_id); put_be32(pb, codec->codec_id);
put_byte(pb, codec->codec_type); put_byte(pb, codec->codec_type);
put_be32(pb, codec->bit_rate); put_be32(pb, codec->bit_rate);
put_be32(pb, codec->quality); put_be32(pb, st->quality);
put_be32(pb, codec->flags); put_be32(pb, codec->flags);
/* specific info */ /* specific info */
switch(codec->codec_type) { switch(codec->codec_type) {
...@@ -232,7 +232,7 @@ static int ffm_write_packet(AVFormatContext *s, int stream_index, ...@@ -232,7 +232,7 @@ static int ffm_write_packet(AVFormatContext *s, int stream_index,
/* packet size & key_frame */ /* packet size & key_frame */
header[0] = stream_index; header[0] = stream_index;
header[1] = 0; header[1] = 0;
if (st->codec.key_frame) if (st->codec.coded_picture->key_frame)
header[1] |= FLAG_KEY_FRAME; header[1] |= FLAG_KEY_FRAME;
header[2] = (size >> 16) & 0xff; header[2] = (size >> 16) & 0xff;
header[3] = (size >> 8) & 0xff; header[3] = (size >> 8) & 0xff;
...@@ -394,6 +394,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap) ...@@ -394,6 +394,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
st = av_mallocz(sizeof(AVStream)); st = av_mallocz(sizeof(AVStream));
if (!st) if (!st)
goto fail; goto fail;
avcodec_get_context_defaults(&st->codec);
s->streams[i] = st; s->streams[i] = st;
fst = av_mallocz(sizeof(FFMStream)); fst = av_mallocz(sizeof(FFMStream));
if (!fst) if (!fst)
...@@ -405,7 +406,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap) ...@@ -405,7 +406,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec.codec_id = get_be32(pb); st->codec.codec_id = get_be32(pb);
st->codec.codec_type = get_byte(pb); /* codec_type */ st->codec.codec_type = get_byte(pb); /* codec_type */
codec->bit_rate = get_be32(pb); codec->bit_rate = get_be32(pb);
codec->quality = get_be32(pb); st->quality = get_be32(pb);
codec->flags = get_be32(pb); codec->flags = get_be32(pb);
/* specific info */ /* specific info */
switch(codec->codec_type) { switch(codec->codec_type) {
......
...@@ -170,6 +170,8 @@ static int jpeg_read_header(AVFormatContext *s1, AVFormatParameters *ap) ...@@ -170,6 +170,8 @@ static int jpeg_read_header(AVFormatContext *s1, AVFormatParameters *ap)
av_free(s); av_free(s);
return -ENOMEM; return -ENOMEM;
} }
avcodec_get_context_defaults(&st->codec);
s1->streams[0] = st; s1->streams[0] = st;
s->img_number = 0; s->img_number = 0;
......
...@@ -352,7 +352,7 @@ static int rm_write_video(AVFormatContext *s, UINT8 *buf, int size) ...@@ -352,7 +352,7 @@ static int rm_write_video(AVFormatContext *s, UINT8 *buf, int size)
RMContext *rm = s->priv_data; RMContext *rm = s->priv_data;
ByteIOContext *pb = &s->pb; ByteIOContext *pb = &s->pb;
StreamInfo *stream = rm->video_stream; StreamInfo *stream = rm->video_stream;
int key_frame = stream->enc->key_frame; int key_frame = stream->enc->coded_picture->key_frame;
/* XXX: this is incorrect: should be a parameter */ /* XXX: this is incorrect: should be a parameter */
...@@ -527,6 +527,7 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap) ...@@ -527,6 +527,7 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
st = av_mallocz(sizeof(AVStream)); st = av_mallocz(sizeof(AVStream));
if (!st) if (!st)
goto fail; goto fail;
avcodec_get_context_defaults(&st->codec);
s->streams[s->nb_streams++] = st; s->streams[s->nb_streams++] = st;
st->id = get_be16(pb); st->id = get_be16(pb);
get_be32(pb); /* max bit rate */ get_be32(pb); /* max bit rate */
......
...@@ -482,6 +482,8 @@ static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap) ...@@ -482,6 +482,8 @@ static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
st = av_mallocz(sizeof(AVStream)); st = av_mallocz(sizeof(AVStream));
if (!st) if (!st)
return -ENOMEM; return -ENOMEM;
avcodec_get_context_defaults(&st->codec);
if (v & 0x01) if (v & 0x01)
st->codec.channels = 2; st->codec.channels = 2;
else else
......
...@@ -458,7 +458,7 @@ int av_find_stream_info(AVFormatContext *ic) ...@@ -458,7 +458,7 @@ int av_find_stream_info(AVFormatContext *ic)
AVCodec *codec; AVCodec *codec;
AVStream *st; AVStream *st;
AVPacket *pkt; AVPacket *pkt;
AVPicture picture; AVVideoFrame picture;
AVPacketList *pktl=NULL, **ppktl; AVPacketList *pktl=NULL, **ppktl;
short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2]; short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
UINT8 *ptr; UINT8 *ptr;
...@@ -694,6 +694,8 @@ AVStream *av_new_stream(AVFormatContext *s, int id) ...@@ -694,6 +694,8 @@ AVStream *av_new_stream(AVFormatContext *s, int id)
st = av_mallocz(sizeof(AVStream)); st = av_mallocz(sizeof(AVStream));
if (!st) if (!st)
return NULL; return NULL;
avcodec_get_context_defaults(&st->codec);
st->index = s->nb_streams; st->index = s->nb_streams;
st->id = id; st->id = id;
s->streams[s->nb_streams++] = st; s->streams[s->nb_streams++] = st;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment