Commit 1e491e29 authored by Michael Niedermayer's avatar Michael Niedermayer

cleanup

 adding AVVideoFrame
 moving quality, pict_type, key_frame, qscale_table, ... to AVVideoFrame
 removing obsolete variables in AVCodecContext
 skipping of MBs in b frames
 correctly initializing AVCodecContext
 picture buffer cleanup

Originally committed as revision 1302 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 855ea723
......@@ -285,6 +285,7 @@ int read_ffserver_streams(AVFormatContext *s, const char *filename)
s->nb_streams = ic->nb_streams;
for(i=0;i<ic->nb_streams;i++) {
AVStream *st;
st = av_mallocz(sizeof(AVFormatContext));
memcpy(st, ic->streams[i], sizeof(AVStream));
s->streams[i] = st;
......@@ -605,15 +606,21 @@ static void do_video_out(AVFormatContext *s,
/* XXX: pb because no interleaving */
for(i=0;i<nb_frames;i++) {
if (enc->codec_id != CODEC_ID_RAWVIDEO) {
AVVideoFrame big_picture;
memset(&big_picture, 0, sizeof(AVVideoFrame));
*(AVPicture*)&big_picture= *picture;
/* handles sameq here. This is not correct because it may
not be a global option */
if (same_quality) {
enc->quality = dec->quality;
}
big_picture.quality = ist->st->quality;
}else
big_picture.quality = ost->st->quality;
ret = avcodec_encode_video(enc,
video_buffer, VIDEO_BUFFER_SIZE,
picture);
&big_picture);
//enc->frame_number = enc->real_pict_num;
av_write_frame(s, ost->index, video_buffer, ret);
*frame_size = ret;
......@@ -674,7 +681,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
total_size += frame_size;
if (enc->codec_type == CODEC_TYPE_VIDEO) {
frame_number = ost->frame_number;
fprintf(fvstats, "frame= %5d q= %2d ", frame_number, enc->quality);
fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_picture->quality);
if (do_psnr)
fprintf(fvstats, "PSNR= %6.2f ", enc->psnr_y);
......@@ -688,7 +695,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
avg_bitrate = (double)(total_size * 8) / ti1 / 1000.0;
fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
(double)total_size / 1024, ti1, bitrate, avg_bitrate);
fprintf(fvstats,"type= %s\n", enc->key_frame == 1 ? "I" : "P");
fprintf(fvstats,"type= %s\n", enc->coded_picture->key_frame == 1 ? "I" : "P");
}
}
......@@ -731,13 +738,13 @@ void print_report(AVFormatContext **output_files,
os = output_files[ost->file_index];
enc = &ost->st->codec;
if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
sprintf(buf + strlen(buf), "q=%2d ",
enc->quality);
sprintf(buf + strlen(buf), "q=%2.1f ",
enc->coded_picture->quality);
}
if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
frame_number = ost->frame_number;
sprintf(buf + strlen(buf), "frame=%5d q=%2d ",
frame_number, enc->quality);
sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
frame_number, enc->coded_picture ? enc->coded_picture->quality : 0);
if (do_psnr)
sprintf(buf + strlen(buf), "PSNR=%6.2f ", enc->psnr_y);
vid = 1;
......@@ -1236,9 +1243,13 @@ static int av_encode(AVFormatContext **output_files,
ist->st->codec.height);
ret = len;
} else {
AVVideoFrame big_picture;
data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
ret = avcodec_decode_video(&ist->st->codec,
&picture, &got_picture, ptr, len);
&big_picture, &got_picture, ptr, len);
picture= *(AVPicture*)&big_picture;
ist->st->quality= big_picture.quality;
if (ret < 0) {
fail_decode:
fprintf(stderr, "Error while decoding stream #%d.%d\n",
......@@ -2046,6 +2057,7 @@ void opt_output_file(const char *filename)
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
avcodec_get_context_defaults(&st->codec);
video_enc = &st->codec;
if (video_stream_copy) {
......@@ -2074,7 +2086,7 @@ void opt_output_file(const char *filename)
video_enc->gop_size = 0;
if (video_qscale || same_quality) {
video_enc->flags |= CODEC_FLAG_QSCALE;
video_enc->quality = video_qscale;
st->quality = video_qscale;
}
if (use_hq) {
......@@ -2181,6 +2193,7 @@ void opt_output_file(const char *filename)
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
avcodec_get_context_defaults(&st->codec);
audio_enc = &st->codec;
audio_enc->codec_type = CODEC_TYPE_AUDIO;
......
......@@ -1955,7 +1955,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
/* we use the codec indication because it is
more accurate than the demux flags */
pkt->flags = 0;
if (st->codec.key_frame)
if (st->codec.coded_picture->key_frame)
pkt->flags |= PKT_FLAG_KEY;
return 0;
}
......@@ -3942,7 +3942,7 @@ int parse_ffconfig(const char *filename)
} else if (!strcasecmp(cmd, "AudioQuality")) {
get_arg(arg, sizeof(arg), &p);
if (stream) {
audio_enc.quality = atof(arg) * 1000;
// audio_enc.quality = atof(arg) * 1000;
}
} else if (!strcasecmp(cmd, "VideoBitRateRange")) {
if (stream) {
......
This diff is collapsed.
......@@ -33,6 +33,7 @@ typedef struct DVVideoDecodeContext {
int sampling_411; /* 0 = 420, 1 = 411 */
int width, height;
UINT8 *current_picture[3]; /* picture structure */
AVVideoFrame picture;
int linesize[3];
DCTELEM block[5*6][64] __align8;
UINT8 dv_zigzag[2][64];
......@@ -128,7 +129,7 @@ static int dvvideo_decode_init(AVCodecContext *avctx)
/* XXX: do it only for constant case */
dv_build_unquantize_tables(s);
return 0;
}
......@@ -499,7 +500,6 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
unsigned size;
UINT8 *buf_ptr;
const UINT16 *mb_pos_ptr;
AVPicture *picture;
/* parse id */
init_get_bits(&s->gb, buf, buf_size);
......@@ -561,45 +561,20 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
avctx->width = width;
avctx->height = height;
if (avctx->flags & CODEC_FLAG_DR1)
{
s->width = -1;
avctx->dr_buffer[0] = avctx->dr_buffer[1] = avctx->dr_buffer[2] = 0;
if(avctx->get_buffer_callback(avctx, width, height, I_TYPE) < 0
&& avctx->flags & CODEC_FLAG_DR1) {
fprintf(stderr, "get_buffer() failed\n");
return -1;
}
s->picture.reference= 0;
if(avctx->get_buffer(avctx, &s->picture) < 0) {
fprintf(stderr, "get_buffer() failed\n");
return -1;
}
/* (re)alloc picture if needed */
if (s->width != width || s->height != height) {
if (!(avctx->flags & CODEC_FLAG_DR1))
for(i=0;i<3;i++) {
if (avctx->dr_buffer[i] != s->current_picture[i])
av_freep(&s->current_picture[i]);
avctx->dr_buffer[i] = 0;
}
for(i=0;i<3;i++) {
if (avctx->dr_buffer[i]) {
s->current_picture[i] = avctx->dr_buffer[i];
s->linesize[i] = (i == 0) ? avctx->dr_stride : avctx->dr_uvstride;
} else {
size = width * height;
s->linesize[i] = width;
if (i >= 1) {
size >>= 2;
s->linesize[i] >>= s->sampling_411 ? 2 : 1;
}
s->current_picture[i] = av_malloc(size);
}
if (!s->current_picture[i])
return -1;
}
s->width = width;
s->height = height;
for(i=0;i<3;i++) {
s->current_picture[i] = s->picture.data[i];
s->linesize[i] = s->picture.linesize[i];
if (!s->current_picture[i])
return -1;
}
s->width = width;
s->height = height;
/* for each DIF segment */
buf_ptr = buf;
......@@ -620,12 +595,11 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
emms_c();
/* return image */
*data_size = sizeof(AVPicture);
picture = data;
for(i=0;i<3;i++) {
picture->data[i] = s->current_picture[i];
picture->linesize[i] = s->linesize[i];
}
*data_size = sizeof(AVVideoFrame);
*(AVVideoFrame*)data= s->picture;
avctx->release_buffer(avctx, &s->picture);
return packet_size;
}
......@@ -633,10 +607,15 @@ static int dvvideo_decode_end(AVCodecContext *avctx)
{
DVVideoDecodeContext *s = avctx->priv_data;
int i;
if(avctx->get_buffer == avcodec_default_get_buffer){
for(i=0; i<4; i++){
av_freep(&s->picture.base[i]);
s->picture.data[i]= NULL;
}
av_freep(&s->picture.opaque);
}
for(i=0;i<3;i++)
if (avctx->dr_buffer[i] != s->current_picture[i])
av_freep(&s->current_picture[i]);
return 0;
}
......
......@@ -464,7 +464,7 @@ int score_sum=0;
s->mb_y= mb_y;
for(j=0; j<pred_count; j++){
int score=0;
UINT8 *src= s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize;
UINT8 *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
s->motion_val[mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
s->motion_val[mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];
......@@ -556,8 +556,8 @@ static int is_intra_more_likely(MpegEncContext *s){
if((j%skip_amount) != 0) continue; //skip a few to speed things up
if(s->pict_type==I_TYPE){
UINT8 *mb_ptr = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize;
UINT8 *last_mb_ptr= s->last_picture [0] + mb_x*16 + mb_y*16*s->linesize;
UINT8 *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
UINT8 *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
is_intra_likely += s->dsp.pix_abs16x16(last_mb_ptr, mb_ptr , s->linesize);
is_intra_likely -= s->dsp.pix_abs16x16(last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize);
......@@ -802,9 +802,9 @@ void ff_error_resilience(MpegEncContext *s){
if(s->mb_type[i]&MB_TYPE_INTRA) continue; //intra
// if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
dest_y = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize;
dest_cb= s->current_picture[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
dest_cr= s->current_picture[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
dc_ptr= &s->dc_val[0][mb_x*2+1 + (mb_y*2+1)*(s->mb_width*2+2)];
for(n=0; n<4; n++){
......@@ -852,9 +852,9 @@ void ff_error_resilience(MpegEncContext *s){
if(!(s->mb_type[i]&MB_TYPE_INTRA)) continue; //inter
if(!(error&AC_ERROR)) continue; //undamaged
dest_y = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize;
dest_cb= s->current_picture[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
dest_cr= s->current_picture[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
}
......@@ -863,14 +863,14 @@ void ff_error_resilience(MpegEncContext *s){
if(s->avctx->error_concealment&FF_EC_DEBLOCK){
/* filter horizontal block boundaries */
h_block_filter(s, s->current_picture[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
h_block_filter(s, s->current_picture[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
h_block_filter(s, s->current_picture[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
h_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
h_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
/* filter vertical block boundaries */
v_block_filter(s, s->current_picture[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
v_block_filter(s, s->current_picture[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
v_block_filter(s, s->current_picture[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
v_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
v_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
}
/* clean a few tables */
......
This diff is collapsed.
......@@ -199,6 +199,7 @@ static int decode_slice(MpegEncContext *s){
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
// s->mb_skiped = 0;
//printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
ret= s->decode_mb(s, s->block);
......@@ -347,7 +348,7 @@ static int h263_decode_frame(AVCodecContext *avctx,
{
MpegEncContext *s = avctx->priv_data;
int ret,i;
AVPicture *pict = data;
AVVideoFrame *pict = data;
float new_aspect;
#ifdef PRINT_FRAME_TIME
......@@ -357,7 +358,6 @@ uint64_t time= rdtsc();
printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
printf("bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
#endif
s->flags= avctx->flags;
*data_size = 0;
......@@ -523,8 +523,9 @@ retry:
return -1;
}
s->avctx->key_frame = (s->pict_type == I_TYPE);
s->avctx->pict_type = s->pict_type;
// for hurry_up==5
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == I_TYPE;
/* skip b frames if we dont have reference frames */
if(s->num_available_buffers<2 && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
......@@ -580,7 +581,9 @@ retry:
}
if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==I_TYPE)
if(msmpeg4_decode_ext_header(s, buf_size) < 0) return -1;
if(msmpeg4_decode_ext_header(s, buf_size) < 0){
s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR;
}
/* divx 5.01+ bistream reorder stuff */
if(s->codec_id==CODEC_ID_MPEG4 && s->bitstream_buffer_size==0 && s->divx_version>=500){
......@@ -644,7 +647,7 @@ retry:
int y= mb_y*16 + 8;
for(mb_x=0; mb_x<s->mb_width; mb_x++){
int x= mb_x*16 + 8;
uint8_t *ptr= s->last_picture[0];
uint8_t *ptr= s->last_picture.data[0];
int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
int mx= (s->motion_val[xy][0]>>1) + x;
int my= (s->motion_val[xy][1]>>1) + y;
......@@ -669,21 +672,12 @@ retry:
}
}
#endif
#endif
if(s->pict_type==B_TYPE || (!s->has_b_frames)){
pict->data[0] = s->current_picture[0];
pict->data[1] = s->current_picture[1];
pict->data[2] = s->current_picture[2];
*pict= *(AVVideoFrame*)&s->current_picture;
} else {
pict->data[0] = s->last_picture[0];
pict->data[1] = s->last_picture[1];
pict->data[2] = s->last_picture[2];
*pict= *(AVVideoFrame*)&s->last_picture;
}
pict->linesize[0] = s->linesize;
pict->linesize[1] = s->uvlinesize;
pict->linesize[2] = s->uvlinesize;
avctx->quality = s->qscale;
/* Return the Picture timestamp as the frame number */
/* we substract 1 because it is added on utils.c */
......@@ -692,7 +686,7 @@ retry:
/* dont output the last pic after seeking
note we allready added +1 for the current pix in MPV_frame_end(s) */
if(s->num_available_buffers>=2 || (!s->has_b_frames))
*data_size = sizeof(AVPicture);
*data_size = sizeof(AVVideoFrame);
#ifdef PRINT_FRAME_TIME
printf("%Ld\n", rdtsc()-time);
#endif
......
This diff is collapsed.
......@@ -1180,9 +1180,11 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
get_bits(&s->gb, 8), get_bits(&s->gb, 8));
if (get_bits(&s->gb, 8) == 0)
{
s->avctx->aspect_ratio_info = FF_ASPECT_EXTENDED;
s->avctx->aspected_width = get_bits(&s->gb, 16);
s->avctx->aspected_height = get_bits(&s->gb, 16);
int x_density = get_bits(&s->gb, 16);
int y_density = get_bits(&s->gb, 16);
//MN: needs to be checked
s->avctx->aspect_ratio= s->width*y_density/((float)s->height*x_density);
}
else
{
......@@ -1468,7 +1470,7 @@ eoi_parser:
}
/* dummy quality */
/* XXX: infer it with matrix */
avctx->quality = 3;
// avctx->quality = 3;
goto the_end;
}
break;
......@@ -1635,7 +1637,7 @@ read_header:
}
/* dummy quality */
/* XXX: infer it with matrix */
avctx->quality = 3;
// avctx->quality = 3;
return buf_ptr - buf;
}
......
......@@ -92,7 +92,7 @@ static int full_motion_search(MpegEncContext * s,
y2 = yy + range - 1;
if (y2 > ymax)
y2 = ymax;
pix = s->new_picture[0] + (yy * s->linesize) + xx;
pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
dmin = 0x7fffffff;
mx = 0;
my = 0;
......@@ -155,7 +155,7 @@ static int log_motion_search(MpegEncContext * s,
if (y2 > ymax)
y2 = ymax;
pix = s->new_picture[0] + (yy * s->linesize) + xx;
pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
dmin = 0x7fffffff;
mx = 0;
my = 0;
......@@ -231,7 +231,7 @@ static int phods_motion_search(MpegEncContext * s,
if (y2 > ymax)
y2 = ymax;
pix = s->new_picture[0] + (yy * s->linesize) + xx;
pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
mx = 0;
my = 0;
......@@ -560,7 +560,7 @@ static int epzs_motion_search(MpegEncContext * s,
uint16_t *score_map= s->me_score_map;
int map_generation;
new_pic = s->new_picture[0] + pic_xy;
new_pic = s->new_picture.data[0] + pic_xy;
old_pic = ref_picture + pic_xy;
map_generation= update_map_generation(s);
......@@ -649,7 +649,7 @@ static int epzs_motion_search4(MpegEncContext * s, int block,
uint16_t *score_map= s->me_score_map;
int map_generation;
new_pic = s->new_picture[0] + pic_xy;
new_pic = s->new_picture.data[0] + pic_xy;
old_pic = ref_picture + pic_xy;
map_generation= update_map_generation(s);
......@@ -723,7 +723,7 @@ static inline int halfpel_motion_search(MpegEncContext * s,
xx = 16 * s->mb_x + 8*(n&1);
yy = 16 * s->mb_y + 8*(n>>1);
pix = s->new_picture[0] + (yy * s->linesize) + xx;
pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
mx = *mx_ptr;
my = *my_ptr;
......@@ -789,7 +789,7 @@ static inline int fast_halfpel_motion_search(MpegEncContext * s,
xx = 16 * s->mb_x + 8*(n&1);
yy = 16 * s->mb_y + 8*(n>>1);
pix = s->new_picture[0] + (yy * s->linesize) + xx;
pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
mx = *mx_ptr;
my = *my_ptr;
......@@ -931,7 +931,7 @@ static inline int mv4_search(MpegEncContext *s, int xmin, int ymin, int xmax, in
{
int block;
int P[10][2];
uint8_t *ref_picture= s->last_picture[0];
uint8_t *ref_picture= s->last_picture.data[0];
int dmin_sum=0;
for(block=0; block<4; block++){
......@@ -1019,7 +1019,8 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
int P[10][2];
const int shift= 1+s->quarter_sample;
int mb_type=0;
uint8_t *ref_picture= s->last_picture[0];
uint8_t *ref_picture= s->last_picture.data[0];
Picture * const pic= &s->current_picture;
get_limits(s, &range, &xmin, &ymin, &xmax, &ymax, s->f_code);
rel_xmin= xmin - mb_x*16;
......@@ -1104,7 +1105,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
xx = mb_x * 16;
yy = mb_y * 16;
pix = s->new_picture[0] + (yy * s->linesize) + xx;
pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
/* At this point (mx,my) are full-pell and the relative displacement */
ppix = ref_picture + ((yy+my) * s->linesize) + (xx+mx);
......@@ -1115,11 +1116,11 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
vard = (s->dsp.pix_norm(pix, ppix, s->linesize)+128)>>8;
//printf("%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
s->mb_var [s->mb_width * mb_y + mb_x] = varc;
s->mc_mb_var[s->mb_width * mb_y + mb_x] = vard;
s->mb_mean [s->mb_width * mb_y + mb_x] = (sum+128)>>8;
s->mb_var_sum += varc;
s->mc_mb_var_sum += vard;
pic->mb_var [s->mb_width * mb_y + mb_x] = varc;
pic->mc_mb_var[s->mb_width * mb_y + mb_x] = vard;
pic->mb_mean [s->mb_width * mb_y + mb_x] = (sum+128)>>8;
pic->mb_var_sum += varc;
pic->mc_mb_var_sum += vard;
//printf("E%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
#if 0
......@@ -1318,7 +1319,7 @@ static inline int check_bidir_mv(MpegEncContext * s,
if (src_y == s->height)
dxy&= 1;
ptr = s->last_picture[0] + (src_y * s->linesize) + src_x;
ptr = s->last_picture.data[0] + (src_y * s->linesize) + src_x;
s->dsp.put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
fbmin += (mv_penalty[motion_bx-pred_bx] + mv_penalty[motion_by-pred_by])*s->qscale;
......@@ -1333,10 +1334,10 @@ static inline int check_bidir_mv(MpegEncContext * s,
if (src_y == s->height)
dxy&= 1;
ptr = s->next_picture[0] + (src_y * s->linesize) + src_x;
ptr = s->next_picture.data[0] + (src_y * s->linesize) + src_x;
s->dsp.avg_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
fbmin += s->dsp.pix_abs16x16(s->new_picture[0] + mb_x*16 + mb_y*16*s->linesize, dest_y, s->linesize);
fbmin += s->dsp.pix_abs16x16(s->new_picture.data[0] + mb_x*16 + mb_y*16*s->linesize, dest_y, s->linesize);
return fbmin;
}
......@@ -1418,7 +1419,7 @@ static inline int direct_search(MpegEncContext * s,
src_y = clip(src_y, -16, height);
if (src_y == height) dxy &= ~2;
ptr = s->last_picture[0] + (src_y * s->linesize) + src_x;
ptr = s->last_picture.data[0] + (src_y * s->linesize) + src_x;
s->dsp.put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
dxy = ((motion_by & 1) << 1) | (motion_bx & 1);
......@@ -1511,8 +1512,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
dmin= direct_search(s, mb_x, mb_y);
fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, s->last_picture[0], s->f_code);
bmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, s->next_picture[0], s->b_code) - quant;
fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, s->last_picture.data[0], s->f_code);
bmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, s->next_picture.data[0], s->b_code) - quant;
//printf(" %d %d ", s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]);
fbmin= bidir_refine(s, mb_x, mb_y);
......@@ -1534,8 +1535,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
type= MB_TYPE_BIDIR;
}
score= ((unsigned)(score*score + 128*256))>>16;
s->mc_mb_var_sum += score;
s->mc_mb_var[mb_y*s->mb_width + mb_x] = score; //FIXME use SSD
s->current_picture.mc_mb_var_sum += score;
s->current_picture.mc_mb_var[mb_y*s->mb_width + mb_x] = score; //FIXME use SSD
}
if(s->flags&CODEC_FLAG_HQ){
......@@ -1581,7 +1582,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
int j;
for(j=0; j<fcode && j<8; j++){
if(s->pict_type==B_TYPE || s->mc_mb_var[i] < s->mb_var[i])
if(s->pict_type==B_TYPE || s->current_picture.mc_mb_var[i] < s->current_picture.mb_var[i])
score[j]-= 170;
}
}
......
......@@ -134,7 +134,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
int n;
UINT64 time_code;
if (s->picture_in_gop_number == 0) {
if (s->current_picture.key_frame) {
/* mpeg1 header repeated every gop */
put_header(s, SEQ_START_CODE);
......@@ -1359,7 +1359,6 @@ static int mpeg_decode_init(AVCodecContext *avctx)
s->mpeg_enc_ctx.picture_number = 0;
s->repeat_field = 0;
s->mpeg_enc_ctx.codec_id= avctx->codec->id;
avctx->mbskip_table= s->mpeg_enc_ctx.mbskip_table;
return 0;
}
......@@ -1403,9 +1402,6 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->pict_type = get_bits(&s->gb, 3);
dprintf("pict_type=%d number=%d\n", s->pict_type, s->picture_number);
avctx->pict_type= s->pict_type;
avctx->key_frame= s->pict_type == I_TYPE;
skip_bits(&s->gb, 16);
if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
s->full_pel[0] = get_bits1(&s->gb);
......@@ -1423,6 +1419,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[1][0] = f_code;
s->mpeg_f_code[1][1] = f_code;
}
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == I_TYPE;
s->y_dc_scale = 8;
s->c_dc_scale = 8;
s->first_slice = 1;
......@@ -1576,7 +1574,7 @@ static void mpeg_decode_extension(AVCodecContext *avctx,
* DECODE_SLICE_EOP if the end of the picture is reached
*/
static int mpeg_decode_slice(AVCodecContext *avctx,
AVPicture *pict,
AVVideoFrame *pict,
int start_code,
UINT8 *buf, int buf_size)
{
......@@ -1677,38 +1675,25 @@ eos: //end of slice
if (/*s->mb_x == 0 &&*/
s->mb_y == s->mb_height) {
/* end of image */
UINT8 **picture;
if(s->mpeg2)
s->qscale >>=1;
MPV_frame_end(s);
if (s->pict_type == B_TYPE) {
picture = s->current_picture;
avctx->quality = s->qscale;
*pict= *(AVVideoFrame*)&s->current_picture;
} else {
s->picture_number++;
/* latency of 1 frame for I and P frames */
/* XXX: use another variable than picture_number */
if (s->picture_number == 0) {
picture = NULL;
if (s->picture_number == 1) {
return DECODE_SLICE_OK;
} else {
picture = s->last_picture;
avctx->quality = s->last_qscale;
*pict= *(AVVideoFrame*)&s->last_picture;
}
s->last_qscale = s->qscale;
s->picture_number++;
}
if(s->mpeg2)
avctx->quality>>=1;
if (picture) {
pict->data[0] = picture[0];
pict->data[1] = picture[1];
pict->data[2] = picture[2];
pict->linesize[0] = s->linesize;
pict->linesize[1] = s->uvlinesize;
pict->linesize[2] = s->uvlinesize;
return DECODE_SLICE_EOP;
} else {
return DECODE_SLICE_OK;
}
return DECODE_SLICE_EOP;
} else {
return DECODE_SLICE_OK;
}
......@@ -1827,7 +1812,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
Mpeg1Context *s = avctx->priv_data;
UINT8 *buf_end, *buf_ptr, *buf_start;
int len, start_code_found, ret, code, start_code, input_size;
AVPicture *picture = data;
AVVideoFrame *picture = data;
MpegEncContext *s2 = &s->mpeg_enc_ctx;
dprintf("fill_buffer\n");
......@@ -1837,13 +1822,9 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
/* special case for last picture */
if (buf_size == 0) {
if (s2->picture_number > 0) {
picture->data[0] = s2->next_picture[0];
picture->data[1] = s2->next_picture[1];
picture->data[2] = s2->next_picture[2];
picture->linesize[0] = s2->linesize;
picture->linesize[1] = s2->uvlinesize;
picture->linesize[2] = s2->uvlinesize;
*data_size = sizeof(AVPicture);
*picture= *(AVVideoFrame*)&s2->next_picture;
*data_size = sizeof(AVVideoFrame);
}
return 0;
}
......
This diff is collapsed.
......@@ -28,6 +28,8 @@ enum OutputFormat {
FMT_MJPEG,
};
#define EDGE_WIDTH 16
#define MPEG_BUF_SIZE (16 * 1024)
#define QMAT_SHIFT_MMX 16
......@@ -35,7 +37,8 @@ enum OutputFormat {
#define MAX_FCODE 7
#define MAX_MV 2048
#define REORDER_BUFFER_SIZE (FF_MAX_B_FRAMES+2)
#define MAX_PICTURE_COUNT 7
#define ME_MAP_SIZE 64
#define ME_MAP_SHIFT 3
......@@ -90,14 +93,6 @@ typedef struct RateControlContext{
int last_non_b_pict_type;
}RateControlContext;
typedef struct ReorderBuffer{
UINT8 *picture[3];
int pict_type;
int qscale;
int force_type;
int picture_number;
int picture_in_gop_number;
} ReorderBuffer;
typedef struct ScanTable{
const UINT8 *scantable;
......@@ -109,6 +104,16 @@ typedef struct ScanTable{
#endif
} ScanTable;
typedef struct Picture{
FF_COMMON_PICTURE
int mb_var_sum; /* sum of MB variance for current frame */
int mc_mb_var_sum; /* motion compensated MB variance for current frame */
uint16_t *mb_var; /* Table for MB variances */
uint16_t *mc_mb_var; /* Table for motion compensated MB variances */
uint8_t *mb_mean; /* Table for MB luminance */
} Picture;
typedef struct ParseContext{
UINT8 *buffer;
int index;
......@@ -145,7 +150,6 @@ typedef struct MpegEncContext {
int max_qdiff; /* max qscale difference between frames */
int encoding; /* true if we are encoding (vs decoding) */
int flags; /* AVCodecContext.flags (HQ, MV4, ...) */
int force_input_type;/* 0= no force, otherwise I_TYPE, P_TYPE, ... */
int max_b_frames; /* max number of b-frames for encoding */
int b_frame_strategy;
int luma_elim_threshold;
......@@ -160,10 +164,7 @@ typedef struct MpegEncContext {
/* sequence parameters */
int context_initialized;
int input_picture_number;
int input_picture_in_gop_number; /* 0-> first pic in gop, ... */
int picture_number;
int fake_picture_number; /* picture number at the bitstream frame rate */
int gop_picture_number; /* index of the first picture of a GOP based on fake_pic_num & mpeg1 specific */
int picture_in_gop_number; /* 0-> first pic in gop, ... */
int b_frames_since_non_b; /* used for encoding, relative to not yet reordered input */
int mb_width, mb_height; /* number of MBs horizontally & vertically */
......@@ -171,20 +172,13 @@ typedef struct MpegEncContext {
int mb_num; /* number of MBs of a picture */
int linesize; /* line size, in bytes, may be different from width */
int uvlinesize; /* line size, for chroma in bytes, may be different from width */
UINT8 *new_picture[3]; /* picture to be compressed */
UINT8 *picture_buffer[REORDER_BUFFER_SIZE][3]; /* internal buffers used for reordering of input pictures */
int picture_buffer_index;
ReorderBuffer coded_order[REORDER_BUFFER_SIZE];
UINT8 *last_picture[3]; /* previous picture */
UINT8 *last_picture_base[3]; /* real start of the picture */
UINT8 *next_picture[3]; /* previous picture (for bidir pred) */
UINT8 *next_picture_base[3]; /* real start of the picture */
UINT8 *aux_picture[3]; /* aux picture (for B frames only) */
UINT8 *aux_picture_base[3]; /* real start of the picture */
UINT8 *current_picture[3]; /* buffer to store the decompressed current picture */
void *last_dr_opaque;
void *next_dr_opaque;
int ip_buffer_count; /* number of buffers, currently only >2 if dr1 is used */
Picture picture[MAX_PICTURE_COUNT]; /* main picture buffer */
Picture *input_picture[MAX_PICTURE_COUNT]; /* next pictures on display order for encoding*/
Picture *reordered_input_picture[MAX_PICTURE_COUNT]; /* pointer to the next pictures in codedorder for encoding*/
Picture last_picture; /* previous picture */
Picture next_picture; /* previous picture (for bidir pred) */
Picture new_picture; /* source picture for encoding */
Picture current_picture; /* buffer to store the decompressed current picture */
int num_available_buffers; /* is 0 at the start & after seeking, after the first I frame its 1 after next I/P 2 */
int last_dc[3]; /* last DC values for MPEG1 */
INT16 *dc_val[3]; /* used for mpeg4 DC prediction, all 3 arrays must be continuous */
......@@ -200,17 +194,10 @@ typedef struct MpegEncContext {
UINT8 *mbintra_table; /* used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding */
UINT8 *cbp_table; /* used to store cbp, ac_pred for partitioned decoding */
UINT8 *pred_dir_table; /* used to store pred_dir for partitioned decoding */
INT8 *qscale_table; /* used to store qscale */
INT8 *aux_qscale_table;
INT8 *next_qscale_table;
INT8 *last_qscale_table; //FIXME move these into some picture struct (MpegEncContext.aux.qscale_table[])
UINT8 *edge_emu_buffer;
int input_qscale; /* qscale prior to reordering of frames */
int input_pict_type; /* pict_type prior to reordering of frames */
int force_type; /* 0= no force, otherwise I_TYPE, P_TYPE, ... */
int qscale; /* QP */
float frame_qscale; /* qscale from the frame level rc */
float frame_qscale; /* qscale from the frame level rc FIXME remove*/
int adaptive_quant; /* use adaptive quantization */
int dquant; /* qscale difference to prev qscale */
int pict_type; /* I_TYPE, P_TYPE, B_TYPE, ... */
......@@ -272,9 +259,6 @@ typedef struct MpegEncContext {
int mb_x, mb_y;
int mb_incr;
int mb_intra;
UINT16 *mb_var; /* Table for MB variances */
UINT16 *mc_mb_var; /* Table for motion compensated MB variances */
UINT8 *mb_mean; /* Table for MB luminance */
UINT8 *mb_type; /* Table for MB type */
#define MB_TYPE_INTRA 0x01
#define MB_TYPE_INTER 0x02
......@@ -325,8 +309,6 @@ typedef struct MpegEncContext {
/* bit rate control */
int I_frame_bits; //FIXME used in mpeg12 ...
int mb_var_sum; /* sum of MB variance for current frame */
int mc_mb_var_sum; /* motion compensated MB variance for current frame */
INT64 wanted_bits;
INT64 total_bits;
int frame_bits; /* bits used for the current frame */
......@@ -476,6 +458,10 @@ typedef struct MpegEncContext {
/* decompression specific */
GetBitContext gb;
/* Mpeg1 specific */
int fake_picture_number; /* picture number at the bitstream frame rate */
int gop_picture_number; /* index of the first picture of a GOP based on fake_pic_num & mpeg1 specific */
/* MPEG2 specific - I wish I had not to support this mess. */
int progressive_sequence;
int mpeg_f_code[2][2];
......@@ -498,7 +484,6 @@ typedef struct MpegEncContext {
int mpeg2;
int full_pel[2];
int interlaced_dct;
int last_qscale;
int first_slice;
/* RTP specific */
......
......@@ -759,10 +759,10 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
}else{
if(n<4){
wrap= s->linesize;
dest= s->current_picture[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
}else{
wrap= s->uvlinesize;
dest= s->current_picture[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
}
if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
else a= get_dc(dest-8, wrap, scale*8);
......
......@@ -41,7 +41,7 @@ void ff_write_pass1_stats(MpegEncContext *s){
sprintf(s->avctx->stats_out, "in:%d out:%d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d;\n",
s->picture_number, s->input_picture_number - s->max_b_frames, s->pict_type,
s->frame_qscale, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
s->f_code, s->b_code, s->mc_mb_var_sum, s->mb_var_sum, s->i_count);
s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count);
}
int ff_rate_control_init(MpegEncContext *s)
......@@ -475,11 +475,12 @@ static void adaptive_quantization(MpegEncContext *s, double q){
float bits_tab[s->mb_num];
const int qmin= 2; //s->avctx->mb_qmin;
const int qmax= 31; //s->avctx->mb_qmax;
Picture * const pic= &s->current_picture;
for(i=0; i<s->mb_num; i++){
float temp_cplx= sqrt(s->mc_mb_var[i]);
float spat_cplx= sqrt(s->mb_var[i]);
const int lumi= s->mb_mean[i];
float temp_cplx= sqrt(pic->mc_mb_var[i]);
float spat_cplx= sqrt(pic->mb_var[i]);
const int lumi= pic->mb_mean[i];
float bits, cplx, factor;
if(spat_cplx < q/3) spat_cplx= q/3; //FIXME finetune
......@@ -533,8 +534,8 @@ static void adaptive_quantization(MpegEncContext *s, double q){
newq*= bits_sum/cplx_sum;
}
if(i && ABS(s->qscale_table[i-1] - newq)<0.75)
intq= s->qscale_table[i-1];
if(i && ABS(pic->qscale_table[i-1] - newq)<0.75)
intq= pic->qscale_table[i-1];
else
intq= (int)(newq + 0.5);
......@@ -542,7 +543,7 @@ static void adaptive_quantization(MpegEncContext *s, double q){
else if(intq < qmin) intq= qmin;
//if(i%s->mb_width==0) printf("\n");
//printf("%2d%3d ", intq, ff_sqrt(s->mc_mb_var[i]));
s->qscale_table[i]= intq;
pic->qscale_table[i]= intq;
}
}
......@@ -562,6 +563,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
double rate_factor;
int var;
const int pict_type= s->pict_type;
Picture * const pic= &s->current_picture;
emms_c();
get_qminmax(&qmin, &qmax, s, pict_type);
......@@ -588,7 +590,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
br_compensation= (s->bit_rate_tolerance - diff)/s->bit_rate_tolerance;
if(br_compensation<=0.0) br_compensation=0.001;
var= pict_type == I_TYPE ? s->mb_var_sum : s->mc_mb_var_sum;
var= pict_type == I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum;
if(s->flags&CODEC_FLAG_PASS2){
if(pict_type!=I_TYPE)
......@@ -599,8 +601,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
}else{
rce->pict_type=
rce->new_pict_type= pict_type;
rce->mc_mb_var_sum= s->mc_mb_var_sum;
rce->mb_var_sum = s-> mb_var_sum;
rce->mc_mb_var_sum= pic->mc_mb_var_sum;
rce->mb_var_sum = pic-> mb_var_sum;
rce->qscale = 2;
rce->f_code = s->f_code;
rce->b_code = s->b_code;
......@@ -663,10 +665,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
else if(q>qmax) q=qmax;
// printf("%f %d %d %d\n", q, picture_number, (int)wanted_bits, (int)s->total_bits);
//printf("%f %f %f\n", q, br_compensation, short_term_q);
//printf("q:%d diff:%d comp:%f st_q:%f last_size:%d type:%d\n", qscale, (int)diff, br_compensation,
//printf("diff:%d comp:%f st_q:%f last_size:%d type:%d\n", (int)diff, br_compensation,
// short_term_q, s->frame_bits, pict_type);
//printf("%d %d\n", s->bit_rate, (int)fps);
......@@ -676,8 +676,16 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
q= (int)(q + 0.5);
rcc->last_qscale= q;
rcc->last_mc_mb_var_sum= s->mc_mb_var_sum;
rcc->last_mb_var_sum= s->mb_var_sum;
rcc->last_mc_mb_var_sum= pic->mc_mb_var_sum;
rcc->last_mb_var_sum= pic->mb_var_sum;
#if 0
{
static int mvsum=0, texsum=0;
mvsum += s->mv_bits;
texsum += s->i_tex_bits + s->p_tex_bits;
printf("%d %d//\n\n", mvsum, texsum);
}
#endif
return q;
}
......
......@@ -472,7 +472,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
{
MpegEncContext *s = avctx->priv_data;
int i;
AVPicture *pict = data;
AVVideoFrame *pict = data;
#ifdef DEBUG
printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
......@@ -505,15 +505,9 @@ static int rv10_decode_frame(AVCodecContext *avctx,
if(s->mb_y>=s->mb_height){
MPV_frame_end(s);
pict->data[0] = s->current_picture[0];
pict->data[1] = s->current_picture[1];
pict->data[2] = s->current_picture[2];
pict->linesize[0] = s->linesize;
pict->linesize[1] = s->uvlinesize;
pict->linesize[2] = s->uvlinesize;
*pict= *(AVVideoFrame*)&s->current_picture;
avctx->quality = s->qscale;
*data_size = sizeof(AVPicture);
*data_size = sizeof(AVVideoFrame);
}else{
*data_size = 0;
}
......
......@@ -1063,7 +1063,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
MpegEncContext *s=avctx->priv_data;
uint8_t *current, *previous;
int result, i, x, y, width, height;
AVPicture *pict = data;
AVVideoFrame *pict = data;
/* initialize bit buffer */
init_get_bits(&s->gb,buf,buf_size);
......@@ -1084,9 +1084,6 @@ static int svq1_decode_frame(AVCodecContext *avctx,
}
result = svq1_decode_frame_header (&s->gb, s);
if(MPV_frame_start(s, avctx) < 0)
return -1;
if (result != 0)
{
......@@ -1098,6 +1095,9 @@ static int svq1_decode_frame(AVCodecContext *avctx,
if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size;
if(MPV_frame_start(s, avctx) < 0)
return -1;
/* decode y, u and v components */
for (i=0; i < 3; i++) {
int linesize;
......@@ -1112,12 +1112,12 @@ static int svq1_decode_frame(AVCodecContext *avctx,
linesize= s->uvlinesize;
}
current = s->current_picture[i];
current = s->current_picture.data[i];
if(s->pict_type==B_TYPE){
previous = s->next_picture[i];
previous = s->next_picture.data[i];
}else{
previous = s->last_picture[i];
previous = s->last_picture.data[i];
}
if (s->pict_type == I_TYPE) {
......@@ -1159,12 +1159,14 @@ static int svq1_decode_frame(AVCodecContext *avctx,
current += 16*linesize;
}
}
pict->data[i] = s->current_picture[i];
pict->linesize[i] = linesize;
}
*pict = *(AVVideoFrame*)&s->current_picture;
*data_size=sizeof(AVPicture);
MPV_frame_end(s);
*data_size=sizeof(AVVideoFrame);
return buf_size;
}
......@@ -1176,7 +1178,6 @@ static int svq1_decode_init(AVCodecContext *avctx)
s->width = (avctx->width+3)&~3;
s->height = (avctx->height+3)&~3;
s->codec_id= avctx->codec->id;
avctx->mbskip_table= s->mbskip_table;
avctx->pix_fmt = PIX_FMT_YUV410P;
avctx->has_b_frames= s->has_b_frames=1; // not true, but DP frames and these behave like unidirectional b frames
s->flags= avctx->flags;
......
......@@ -86,6 +86,123 @@ void register_avcodec(AVCodec *format)
format->next = NULL;
}
/**
 * Returns the log2 chroma subsampling factors for a pixel format.
 * The horizontal and vertical shifts are stored through h_shift / v_shift;
 * formats without chroma subsampling (RGB and friends) report 0/0.
 */
void avcodec_get_chroma_sub_sample(int fmt, int *h_shift, int *v_shift){
    int h, v;

    if (fmt == PIX_FMT_YUV410P) {
        h = 2;
        v = 2;
    } else if (fmt == PIX_FMT_YUV420P) {
        h = 1;
        v = 1;
    } else if (fmt == PIX_FMT_YUV411P) {
        h = 2;
        v = 0;
    } else if (fmt == PIX_FMT_YUV422P || fmt == PIX_FMT_YUV422) {
        h = 1;
        v = 0;
    } else {
        /* RGB / other packed formats: no subsampling */
        h = 0;
        v = 0;
    }

    *h_shift = h;
    *v_shift = v;
}
/* Per-picture state of the default get_buffer() allocator: caches the
 * plane pointers so repeated calls on the same AVVideoFrame reuse the
 * buffers, and remembers the last coded picture number so the buffer
 * "age" (frames since last use) can be computed on reuse. */
typedef struct DefaultPicOpaque{
    int last_pic_num;  /* coded_picture_number of the previous frame stored here */
    uint8_t *data[4];  /* cached plane pointers handed back on reuse */
}DefaultPicOpaque;
/**
 * Default get_buffer() callback: allocates (or reuses) the picture planes.
 *
 * On first use for a given AVVideoFrame the planes are allocated with
 * alignment and optional edge padding, and cached in a DefaultPicOpaque;
 * subsequent calls reuse the cached planes and update the buffer age.
 *
 * Returns 0 on success, -1 on allocation failure.
 *
 * Fix: on out-of-memory the original leaked the opaque struct and any
 * planes already allocated, and left a half-initialized pic->opaque that
 * the reuse path would later trust; all of that is now cleaned up before
 * returning -1.
 */
int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){
    int i;
    const int width = s->width;
    const int height= s->height;
    DefaultPicOpaque *opaque;

    if(pic->opaque){
        /* buffers already exist for this frame: hand back the cached
           plane pointers and compute how many pictures old they are */
        opaque= (DefaultPicOpaque *)pic->opaque;

        for(i=0; i<3; i++)
            pic->data[i]= opaque->data[i];

        pic->age= pic->coded_picture_number - opaque->last_pic_num;
        opaque->last_pic_num= pic->coded_picture_number;
    }else{
        int align, h_chroma_shift, v_chroma_shift;
        int w, h, pixel_size;

        avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);

        switch(s->pix_fmt){
        case PIX_FMT_YUV422:
            pixel_size=2;
            break;
        case PIX_FMT_RGB24:
        case PIX_FMT_BGR24:
            pixel_size=3;
            break;
        case PIX_FMT_BGRA32:
        case PIX_FMT_RGBA32:
            pixel_size=4;
            break;
        default:
            pixel_size=1;
        }

        /* SVQ1 needs 64-byte alignment, everything else is fine with 16 */
        if(s->codec_id==CODEC_ID_SVQ1) align=63;
        else                           align=15;

        w= (width +align)&~align;
        h= (height+align)&~align;

        /* reserve room for the motion-compensation edge unless the codec
           emulates edges itself */
        if(!(s->flags&CODEC_FLAG_EMU_EDGE)){
            w+= EDGE_WIDTH*2;
            h+= EDGE_WIDTH*2;
        }

        opaque= av_mallocz(sizeof(DefaultPicOpaque));
        if(opaque==NULL) return -1;

        pic->opaque= opaque;
        opaque->last_pic_num= -256*256*256*64;

        for(i=0; i<3; i++){
            int h_shift= i==0 ? 0 : h_chroma_shift;
            int v_shift= i==0 ? 0 : v_chroma_shift;

            pic->linesize[i]= pixel_size*w>>h_shift;

            pic->base[i]= av_mallocz((pic->linesize[i]*h>>v_shift)+16); //FIXME 16
            if(pic->base[i]==NULL){
                /* OOM: release everything allocated so far so the caller
                   does not leak and a retry starts from a clean state */
                while(--i >= 0){
                    av_free(pic->base[i]);
                    pic->base[i]= NULL;
                    pic->data[i]= NULL;
                }
                av_free(opaque);
                pic->opaque= NULL;
                return -1;
            }
            memset(pic->base[i], 128, pic->linesize[i]*h>>v_shift);

            if(s->flags&CODEC_FLAG_EMU_EDGE)
                pic->data[i] = pic->base[i];
            else
                pic->data[i] = pic->base[i] + (pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift);

            opaque->data[i]= pic->data[i];
        }
        pic->age= 256*256*256*64; /* "very old": forces full redraw on first use */
    }

    return 0;
}
/**
 * Default release_buffer() callback.
 * The default get_buffer() keeps the planes cached in pic->opaque for
 * reuse, so releasing a frame only clears the visible data pointers;
 * nothing is freed here.
 */
void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic){
    int plane;

    plane = 0;
    while (plane < 3) {
        pic->data[plane] = NULL;
        plane++;
    }
//printf("R%X\n", pic->opaque);
}
void avcodec_get_context_defaults(AVCodecContext *s){
s->bit_rate= 800*1000;
s->bit_rate_tolerance= s->bit_rate*10;
......@@ -104,6 +221,8 @@ void avcodec_get_context_defaults(AVCodecContext *s){
s->frame_rate = 25 * FRAME_RATE_BASE;
s->gop_size= 50;
s->me_method= ME_EPZS;
s->get_buffer= avcodec_default_get_buffer;
s->release_buffer= avcodec_default_release_buffer;
}
/**
......@@ -120,6 +239,16 @@ AVCodecContext *avcodec_alloc_context(void){
return avctx;
}
/**
 * allocates an AVVideoFrame and sets its fields to defaults (zeroed).
 * NOTE(review): the memory comes from av_mallocz(), so it should be
 * released with av_free() rather than plain free().
 */
AVVideoFrame *avcodec_alloc_picture(void){
    AVVideoFrame *pic= av_mallocz(sizeof(AVVideoFrame));
    return pic;
}
int avcodec_open(AVCodecContext *avctx, AVCodec *codec)
{
int ret;
......@@ -152,7 +281,7 @@ int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size,
}
int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
const AVPicture *pict)
const AVVideoFrame *pict)
{
int ret;
......@@ -167,17 +296,17 @@ int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
/* decode a frame. return -1 if error, otherwise return the number of
bytes used. If no frame could be decompressed, *got_picture_ptr is
zero. Otherwise, it is non zero */
int avcodec_decode_video(AVCodecContext *avctx, AVPicture *picture,
int avcodec_decode_video(AVCodecContext *avctx, AVVideoFrame *picture,
int *got_picture_ptr,
UINT8 *buf, int buf_size)
{
int ret;
ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
buf, buf_size);
emms_c(); //needed to avoid a emms_c() call before every return;
if (*got_picture_ptr)
avctx->frame_number++;
return ret;
......
......@@ -556,7 +556,7 @@ static void put_frame_header(AVFormatContext *s, ASFStream *stream, int timestam
int val;
val = stream->num;
if (s->streams[val - 1]->codec.key_frame /* && frag_offset == 0 */)
if (s->streams[val - 1]->codec.coded_picture->key_frame /* && frag_offset == 0 */)
val |= 0x80;
put_byte(pb, val);
put_byte(pb, stream->seq);
......@@ -793,6 +793,7 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
st = av_mallocz(sizeof(AVStream));
if (!st)
goto fail;
avcodec_get_context_defaults(&st->codec);
s->streams[s->nb_streams] = st;
asf_st = av_mallocz(sizeof(ASFStream));
if (!asf_st)
......
......@@ -143,6 +143,8 @@ static int au_read_header(AVFormatContext *s,
st = av_malloc(sizeof(AVStream));
if (!st)
return -1;
avcodec_get_context_defaults(&st->codec);
s->nb_streams = 1;
s->streams[0] = st;
......
......@@ -144,6 +144,9 @@ typedef struct AVStream {
AVFrac pts;
/* ffmpeg.c private use */
int stream_copy; /* if TRUE, just copy stream */
/* quality, as it has been removed from AVCodecContext and put in AVVideoFrame
* MN:dunno if thats the right place, for it */
float quality;
} AVStream;
#define MAX_STREAMS 20
......
......@@ -103,6 +103,8 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
AVStream *st = av_mallocz(sizeof(AVStream));
if (!st)
goto fail;
avcodec_get_context_defaults(&st->codec);
s->streams[i] = st;
}
url_fskip(pb, size - 7 * 4);
......
......@@ -320,7 +320,7 @@ static int avi_write_packet(AVFormatContext *s, int stream_index,
if (enc->codec_type == CODEC_TYPE_VIDEO) {
tag[2] = 'd';
tag[3] = 'c';
flags = enc->key_frame ? 0x10 : 0x00;
flags = enc->coded_picture->key_frame ? 0x10 : 0x00;
} else {
tag[2] = 'w';
tag[3] = 'b';
......
......@@ -151,7 +151,7 @@ static int ffm_write_header(AVFormatContext *s)
put_be32(pb, codec->codec_id);
put_byte(pb, codec->codec_type);
put_be32(pb, codec->bit_rate);
put_be32(pb, codec->quality);
put_be32(pb, st->quality);
put_be32(pb, codec->flags);
/* specific info */
switch(codec->codec_type) {
......@@ -232,7 +232,7 @@ static int ffm_write_packet(AVFormatContext *s, int stream_index,
/* packet size & key_frame */
header[0] = stream_index;
header[1] = 0;
if (st->codec.key_frame)
if (st->codec.coded_picture->key_frame)
header[1] |= FLAG_KEY_FRAME;
header[2] = (size >> 16) & 0xff;
header[3] = (size >> 8) & 0xff;
......@@ -394,6 +394,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
st = av_mallocz(sizeof(AVStream));
if (!st)
goto fail;
avcodec_get_context_defaults(&st->codec);
s->streams[i] = st;
fst = av_mallocz(sizeof(FFMStream));
if (!fst)
......@@ -405,7 +406,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec.codec_id = get_be32(pb);
st->codec.codec_type = get_byte(pb); /* codec_type */
codec->bit_rate = get_be32(pb);
codec->quality = get_be32(pb);
st->quality = get_be32(pb);
codec->flags = get_be32(pb);
/* specific info */
switch(codec->codec_type) {
......
......@@ -170,6 +170,8 @@ static int jpeg_read_header(AVFormatContext *s1, AVFormatParameters *ap)
av_free(s);
return -ENOMEM;
}
avcodec_get_context_defaults(&st->codec);
s1->streams[0] = st;
s->img_number = 0;
......
......@@ -352,7 +352,7 @@ static int rm_write_video(AVFormatContext *s, UINT8 *buf, int size)
RMContext *rm = s->priv_data;
ByteIOContext *pb = &s->pb;
StreamInfo *stream = rm->video_stream;
int key_frame = stream->enc->key_frame;
int key_frame = stream->enc->coded_picture->key_frame;
/* XXX: this is incorrect: should be a parameter */
......@@ -527,6 +527,7 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
st = av_mallocz(sizeof(AVStream));
if (!st)
goto fail;
avcodec_get_context_defaults(&st->codec);
s->streams[s->nb_streams++] = st;
st->id = get_be16(pb);
get_be32(pb); /* max bit rate */
......
......@@ -482,6 +482,8 @@ static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
st = av_mallocz(sizeof(AVStream));
if (!st)
return -ENOMEM;
avcodec_get_context_defaults(&st->codec);
if (v & 0x01)
st->codec.channels = 2;
else
......
......@@ -458,7 +458,7 @@ int av_find_stream_info(AVFormatContext *ic)
AVCodec *codec;
AVStream *st;
AVPacket *pkt;
AVPicture picture;
AVVideoFrame picture;
AVPacketList *pktl=NULL, **ppktl;
short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
UINT8 *ptr;
......@@ -694,6 +694,8 @@ AVStream *av_new_stream(AVFormatContext *s, int id)
st = av_mallocz(sizeof(AVStream));
if (!st)
return NULL;
avcodec_get_context_defaults(&st->codec);
st->index = s->nb_streams;
st->id = id;
s->streams[s->nb_streams++] = st;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment