Commit 6a56f4e6 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  flicvideo: fix invalid reads
  vorbis: Avoid some out-of-bounds reads
  vqf: add more known extensions
  cabac: remove unused function renorm_cabac_decoder
  h264: Only use symbols from the SVQ3 decoder under proper conditionals.
  add bytestream2_tell() and bytestream2_seek() functions
  parsers: initialize MpegEncContext.slice_context_count to 1
  spdifenc: use special alignment for DTS-HD length_code

Conflicts:
	libavcodec/flicvideo.c
	libavcodec/h264.c
	libavcodec/mpeg4video_parser.c
	libavcodec/vorbis.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 12d8340f b348c852
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
typedef struct { typedef struct {
const uint8_t *buffer, *buffer_end; const uint8_t *buffer, *buffer_end, *buffer_start;
} GetByteContext; } GetByteContext;
#define DEF_T(type, name, bytes, read, write) \ #define DEF_T(type, name, bytes, read, write) \
...@@ -79,6 +79,7 @@ static av_always_inline void bytestream2_init(GetByteContext *g, ...@@ -79,6 +79,7 @@ static av_always_inline void bytestream2_init(GetByteContext *g,
const uint8_t *buf, int buf_size) const uint8_t *buf, int buf_size)
{ {
g->buffer = buf; g->buffer = buf;
g->buffer_start = buf;
g->buffer_end = buf + buf_size; g->buffer_end = buf + buf_size;
} }
...@@ -93,6 +94,34 @@ static av_always_inline void bytestream2_skip(GetByteContext *g, ...@@ -93,6 +94,34 @@ static av_always_inline void bytestream2_skip(GetByteContext *g,
g->buffer += FFMIN(g->buffer_end - g->buffer, size); g->buffer += FFMIN(g->buffer_end - g->buffer, size);
} }
/* Return the current read position as an offset (in bytes) from the
 * start of the buffer set up by bytestream2_init(). */
static av_always_inline int bytestream2_tell(GetByteContext *g)
{
    ptrdiff_t pos = g->buffer - g->buffer_start;
    return (int)pos;
}
/* Reposition the read pointer, fseek-style.
 *
 * whence is SEEK_SET, SEEK_CUR or SEEK_END; offset is clamped so the
 * pointer always stays within [buffer_start, buffer_end].
 * Returns the new absolute position (as bytestream2_tell()), or
 * AVERROR(EINVAL) for an unknown whence value.
 */
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset,
                                             int whence)
{
    if (whence == SEEK_CUR) {
        /* Clamp relative motion to the valid span around the cursor. */
        int step = av_clip(offset, -(g->buffer - g->buffer_start),
                           g->buffer_end - g->buffer);
        g->buffer += step;
    } else if (whence == SEEK_END) {
        /* Only non-positive offsets make sense from the end. */
        g->buffer = g->buffer_end +
                    av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
    } else if (whence == SEEK_SET) {
        /* Absolute position, clamped to the buffer size. */
        g->buffer = g->buffer_start +
                    av_clip(offset, 0, g->buffer_end - g->buffer_start);
    } else {
        return AVERROR(EINVAL);
    }
    return bytestream2_tell(g);
}
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
uint8_t *dst, uint8_t *dst,
unsigned int size) unsigned int size)
......
...@@ -97,15 +97,6 @@ static void refill(CABACContext *c){ ...@@ -97,15 +97,6 @@ static void refill(CABACContext *c){
c->bytestream+= CABAC_BITS/8; c->bytestream+= CABAC_BITS/8;
} }
/* Renormalize the CABAC arithmetic-decoder state: double range (and low
 * in step) until range is back in [0x100, ...), refilling the bitstream
 * window whenever the low bits run dry. range < 0x100 on entry to each
 * iteration, so the shift cannot overflow. */
static inline void renorm_cabac_decoder(CABACContext *c)
{
    while (c->range < 0x100) {
        c->range <<= 1;
        c->low   <<= 1;
        if (!(c->low & CABAC_MASK))
            refill(c);
    }
}
static inline void renorm_cabac_decoder_once(CABACContext *c){ static inline void renorm_cabac_decoder_once(CABACContext *c){
int shift= (uint32_t)(c->range - 0x100)>>31; int shift= (uint32_t)(c->range - 0x100)>>31;
c->range<<= shift; c->range<<= shift;
......
This diff is collapsed.
...@@ -1818,7 +1818,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_ty ...@@ -1818,7 +1818,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_ty
idct_dc_add(ptr, h->mb + (i*16+p*256 << pixel_shift), linesize); idct_dc_add(ptr, h->mb + (i*16+p*256 << pixel_shift), linesize);
else else
idct_add (ptr, h->mb + (i*16+p*256 << pixel_shift), linesize); idct_add (ptr, h->mb + (i*16+p*256 << pixel_shift), linesize);
}else if(CONFIG_SVQ3_DECODER) } else if (CONFIG_SVQ3_DECODER)
ff_svq3_add_idct_c(ptr, h->mb + i*16+p*256, linesize, qscale, 0); ff_svq3_add_idct_c(ptr, h->mb + i*16+p*256, linesize, qscale, 0);
} }
} }
...@@ -1838,7 +1838,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_ty ...@@ -1838,7 +1838,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_ty
dctcoef_set(h->mb+(p*256 << pixel_shift), pixel_shift, dc_mapping[i], dctcoef_get(h->mb_luma_dc[p], pixel_shift, i)); dctcoef_set(h->mb+(p*256 << pixel_shift), pixel_shift, dc_mapping[i], dctcoef_get(h->mb_luma_dc[p], pixel_shift, i));
} }
} }
}else if(CONFIG_SVQ3_DECODER) } else if (CONFIG_SVQ3_DECODER)
ff_svq3_luma_dc_dequant_idct_c(h->mb+p*256, h->mb_luma_dc[p], qscale); ff_svq3_luma_dc_dequant_idct_c(h->mb+p*256, h->mb_luma_dc[p], qscale);
} }
} }
...@@ -1882,7 +1882,7 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, ...@@ -1882,7 +1882,7 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type,
} }
} }
} }
}else if(CONFIG_SVQ3_DECODER) { } else if (CONFIG_SVQ3_DECODER) {
for(i=0; i<16; i++){ for(i=0; i<16; i++){
if(h->non_zero_count_cache[ scan8[i+p*16] ] || h->mb[i*16+p*256]){ //FIXME benchmark weird rule, & below if(h->non_zero_count_cache[ scan8[i+p*16] ] || h->mb[i*16+p*256]){ //FIXME benchmark weird rule, & below
uint8_t * const ptr= dest_y + block_offset[i]; uint8_t * const ptr= dest_y + block_offset[i];
...@@ -2076,9 +2076,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i ...@@ -2076,9 +2076,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
h->h264dsp.h264_idct_add8(dest, block_offset, h->h264dsp.h264_idct_add8(dest, block_offset,
h->mb, uvlinesize, h->mb, uvlinesize,
h->non_zero_count_cache); h->non_zero_count_cache);
} } else if (CONFIG_SVQ3_DECODER) {
#if CONFIG_SVQ3_DECODER
else{
h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16*16*1, h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]); h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16*16*1, h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]);
h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16*16*2, h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]); h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16*16*2, h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]);
for(j=1; j<3; j++){ for(j=1; j<3; j++){
...@@ -2090,7 +2088,6 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i ...@@ -2090,7 +2088,6 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
} }
} }
} }
#endif
} }
} }
} }
......
...@@ -376,6 +376,7 @@ static int init(AVCodecParserContext *s) ...@@ -376,6 +376,7 @@ static int init(AVCodecParserContext *s)
{ {
H264Context *h = s->priv_data; H264Context *h = s->priv_data;
h->thread_context[0] = h; h->thread_context[0] = h;
h->s.slice_context_count = 1;
return 0; return 0;
} }
......
...@@ -102,6 +102,7 @@ static av_cold int mpeg4video_parse_init(AVCodecParserContext *s) ...@@ -102,6 +102,7 @@ static av_cold int mpeg4video_parse_init(AVCodecParserContext *s)
return -1; return -1;
pc->first_picture = 1; pc->first_picture = 1;
pc->enc->quant_precision=5; pc->enc->quant_precision=5;
pc->enc->slice_context_count = 1;
return 0; return 0;
} }
......
...@@ -184,9 +184,17 @@ static int vc1_split(AVCodecContext *avctx, ...@@ -184,9 +184,17 @@ static int vc1_split(AVCodecContext *avctx,
return 0; return 0;
} }
/* Parser init: parsers run single-threaded, so the shared MpegEncContext
 * must report exactly one slice context. Always succeeds. */
static int vc1_parse_init(AVCodecParserContext *s)
{
    VC1ParseContext *ctx = s->priv_data;

    ctx->v.s.slice_context_count = 1;
    return 0;
}
AVCodecParser ff_vc1_parser = { AVCodecParser ff_vc1_parser = {
.codec_ids = { CODEC_ID_VC1 }, .codec_ids = { CODEC_ID_VC1 },
.priv_data_size = sizeof(VC1ParseContext), .priv_data_size = sizeof(VC1ParseContext),
.parser_init = vc1_parse_init,
.parser_parse = vc1_parse, .parser_parse = vc1_parse,
.parser_close = ff_parse1_close, .parser_close = ff_parse1_close,
.split = vc1_split, .split = vc1_split,
......
...@@ -156,7 +156,7 @@ void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values) ...@@ -156,7 +156,7 @@ void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
} }
} }
static inline void render_line_unrolled(intptr_t x, unsigned char y, int x1, static inline void render_line_unrolled(intptr_t x, uint8_t y, int x1,
intptr_t sy, int ady, int adx, intptr_t sy, int ady, int adx,
float *buf) float *buf)
{ {
...@@ -179,7 +179,7 @@ static inline void render_line_unrolled(intptr_t x, unsigned char y, int x1, ...@@ -179,7 +179,7 @@ static inline void render_line_unrolled(intptr_t x, unsigned char y, int x1,
} }
} }
static void render_line(int x0, unsigned char y0, int x1, int y1, float *buf) static void render_line(int x0, uint8_t y0, int x1, int y1, float *buf)
{ {
int dy = y1 - y0; int dy = y1 - y0;
int adx = x1 - x0; int adx = x1 - x0;
...@@ -191,7 +191,7 @@ static void render_line(int x0, unsigned char y0, int x1, int y1, float *buf) ...@@ -191,7 +191,7 @@ static void render_line(int x0, unsigned char y0, int x1, int y1, float *buf)
} else { } else {
int base = dy / adx; int base = dy / adx;
int x = x0; int x = x0;
unsigned char y = y0; uint8_t y = y0;
int err = -adx; int err = -adx;
ady -= FFABS(base) * adx; ady -= FFABS(base) * adx;
while (++x < x1) { while (++x < x1) {
...@@ -210,7 +210,8 @@ void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values, ...@@ -210,7 +210,8 @@ void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values,
uint16_t *y_list, int *flag, uint16_t *y_list, int *flag,
int multiplier, float *out, int samples) int multiplier, float *out, int samples)
{ {
int lx, ly, i; int lx, i;
uint8_t ly;
lx = 0; lx = 0;
ly = y_list[0] * multiplier; ly = y_list[0] * multiplier;
for (i = 1; i < values; i++) { for (i = 1; i < values; i++) {
......
...@@ -265,5 +265,5 @@ AVInputFormat ff_vqf_demuxer = { ...@@ -265,5 +265,5 @@ AVInputFormat ff_vqf_demuxer = {
.read_header = vqf_read_header, .read_header = vqf_read_header,
.read_packet = vqf_read_packet, .read_packet = vqf_read_packet,
.read_seek = vqf_read_seek, .read_seek = vqf_read_seek,
.extensions = "vqf", .extensions = "vqf,vql,vqe",
}; };
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment