Commit 184fc600 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  mpegvideo_enc: only allocate output packet when we know there will be output
  Add names for more channel layouts to the channel layout map.
  sunrast: Add a sample request for RMP_RAW colormap.
  avcodec: do not override pts or duration from the audio encoder
  Add prores regression test.
  Enable already existing rso regression test.
  Add regression test for "sox" format muxer/demuxer.
  Add dpx encoding regression test.
  swscale: K&R formatting cosmetics for PowerPC code (part I/II)
  img2: Use ff_guess_image2_codec(filename) shorthand where appropriate.
  Clarify licensing information about files borrowed from libjpeg.
  Mark mutable static data const where appropriate.
  avplay: fix -threads option
  dvbsubdec: avoid undefined signed left shift in RGBA macro
  mlpdec: use av_log_ask_for_sample()
  gif: K&R formatting cosmetics
  png: make .long_name more descriptive
  movdec: Adjust keyframe flagging in fragmented files
  rv34: change most "int stride" into "ptrdiff_t stride".

Conflicts:
	avprobe.c
	ffplay.c
	libavcodec/mlpdec.c
	libavcodec/mpegvideo_enc.c
	libavcodec/pngenc.c
	libavcodec/x86/v210-init.c
	libavfilter/vf_boxblur.c
	libavfilter/vf_crop.c
	libavfilter/vf_drawtext.c
	libavfilter/vf_lut.c
	libavfilter/vf_overlay.c
	libavfilter/vf_pad.c
	libavfilter/vf_scale.c
	libavfilter/vf_select.c
	libavfilter/vf_setpts.c
	libavfilter/vf_settb.c
	libavformat/img2.c
	libavutil/audioconvert.c
	tests/codec-regression.sh
	tests/lavf-regression.sh
	tests/ref/lavf/dpx
	tests/ref/vsynth1/prores
	tests/ref/vsynth2/prores
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 92b5f71a 5d42ac7f
...@@ -20,8 +20,12 @@ Specifically, the GPL parts of FFmpeg are ...@@ -20,8 +20,12 @@ Specifically, the GPL parts of FFmpeg are
There are a handful of files under other licensing terms, namely: There are a handful of files under other licensing terms, namely:
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint.c, libavcodec/jrevdct.c * The files libavcodec/jfdctfst.c, libavcodec/jfdctint_template.c and
are taken from libjpeg, see the top of the files for licensing details. libavcodec/jrevdct.c are taken from libjpeg, see the top of the files for
licensing details. Specifically note that you must credit the IJG in the
documentation accompanying your program if you only distribute executables.
You must also indicate any changes including additions and deletions to
those three files in the documentation.
Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
the configure parameter --enable-version3 will activate this licensing option the configure parameter --enable-version3 will activate this licensing option
......
...@@ -68,13 +68,13 @@ static const OptionDef options[]; ...@@ -68,13 +68,13 @@ static const OptionDef options[];
static const char *input_filename; static const char *input_filename;
static AVInputFormat *iformat = NULL; static AVInputFormat *iformat = NULL;
static const char *binary_unit_prefixes [] = { "", "Ki", "Mi", "Gi", "Ti", "Pi" }; static const char *const binary_unit_prefixes [] = { "", "Ki", "Mi", "Gi", "Ti", "Pi" };
static const char *decimal_unit_prefixes[] = { "", "K" , "M" , "G" , "T" , "P" }; static const char *const decimal_unit_prefixes[] = { "", "K" , "M" , "G" , "T" , "P" };
static const char *unit_second_str = "s" ; static const char unit_second_str[] = "s" ;
static const char *unit_hertz_str = "Hz" ; static const char unit_hertz_str[] = "Hz" ;
static const char *unit_byte_str = "byte" ; static const char unit_byte_str[] = "byte" ;
static const char *unit_bit_per_second_str = "bit/s"; static const char unit_bit_per_second_str[] = "bit/s";
static uint64_t *nb_streams_packets; static uint64_t *nb_streams_packets;
static uint64_t *nb_streams_frames; static uint64_t *nb_streams_frames;
......
...@@ -1872,7 +1872,7 @@ static int http_parse_request(HTTPContext *c) ...@@ -1872,7 +1872,7 @@ static int http_parse_request(HTTPContext *c)
static void fmt_bytecount(AVIOContext *pb, int64_t count) static void fmt_bytecount(AVIOContext *pb, int64_t count)
{ {
static const char *suffix = " kMGTP"; static const char suffix[] = " kMGTP";
const char *s; const char *s;
for (s = suffix; count >= 100000 && s[1]; count /= 1000, s++); for (s = suffix; count >= 100000 && s[1]; count /= 1000, s++);
......
...@@ -1036,7 +1036,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb, ...@@ -1036,7 +1036,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
int offset[3] = { global_gain, global_gain - 90, 0 }; int offset[3] = { global_gain, global_gain - 90, 0 };
int clipped_offset; int clipped_offset;
int noise_flag = 1; int noise_flag = 1;
static const char *sf_str[3] = { "Global gain", "Noise gain", "Intensity stereo position" }; static const char *const sf_str[3] = { "Global gain", "Noise gain", "Intensity stereo position" };
for (g = 0; g < ics->num_window_groups; g++) { for (g = 0; g < ics->num_window_groups; g++) {
for (i = 0; i < ics->max_sfb;) { for (i = 0; i < ics->max_sfb;) {
int run_end = band_type_run_end[idx]; int run_end = band_type_run_end[idx];
......
...@@ -27,8 +27,8 @@ void ff_rv34_inv_transform_noround_neon(DCTELEM *block); ...@@ -27,8 +27,8 @@ void ff_rv34_inv_transform_noround_neon(DCTELEM *block);
void ff_rv34_inv_transform_noround_dc_neon(DCTELEM *block); void ff_rv34_inv_transform_noround_dc_neon(DCTELEM *block);
void ff_rv34_idct_add_neon(uint8_t *dst, int stride, DCTELEM *block); void ff_rv34_idct_add_neon(uint8_t *dst, ptrdiff_t stride, DCTELEM *block);
void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc); void ff_rv34_idct_dc_add_neon(uint8_t *dst, ptrdiff_t stride, int dc);
void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext* dsp) void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
{ {
......
...@@ -51,20 +51,20 @@ void ff_put_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int); ...@@ -51,20 +51,20 @@ void ff_put_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int); void ff_avg_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int); void ff_avg_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_rv40_weight_func_16_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int); void ff_rv40_weight_func_16_neon(uint8_t *, uint8_t *, uint8_t *, int, int, ptrdiff_t);
void ff_rv40_weight_func_8_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int); void ff_rv40_weight_func_8_neon(uint8_t *, uint8_t *, uint8_t *, int, int, ptrdiff_t);
int ff_rv40_h_loop_filter_strength_neon(uint8_t *src, int stride, int ff_rv40_h_loop_filter_strength_neon(uint8_t *src, ptrdiff_t stride,
int beta, int beta2, int edge, int beta, int beta2, int edge,
int *p1, int *q1); int *p1, int *q1);
int ff_rv40_v_loop_filter_strength_neon(uint8_t *src, int stride, int ff_rv40_v_loop_filter_strength_neon(uint8_t *src, ptrdiff_t stride,
int beta, int beta2, int edge, int beta, int beta2, int edge,
int *p1, int *q1); int *p1, int *q1);
void ff_rv40_h_weak_loop_filter_neon(uint8_t *src, int stride, int filter_p1, void ff_rv40_h_weak_loop_filter_neon(uint8_t *src, ptrdiff_t stride, int filter_p1,
int filter_q1, int alpha, int beta, int filter_q1, int alpha, int beta,
int lim_p0q0, int lim_q1, int lim_p1); int lim_p0q0, int lim_q1, int lim_p1);
void ff_rv40_v_weak_loop_filter_neon(uint8_t *src, int stride, int filter_p1, void ff_rv40_v_weak_loop_filter_neon(uint8_t *src, ptrdiff_t stride, int filter_p1,
int filter_q1, int alpha, int beta, int filter_q1, int alpha, int beta,
int lim_p0q0, int lim_q1, int lim_p1); int lim_p0q0, int lim_q1, int lim_p1);
......
...@@ -150,7 +150,7 @@ static void png_save2(const char *filename, uint32_t *bitmap, int w, int h) ...@@ -150,7 +150,7 @@ static void png_save2(const char *filename, uint32_t *bitmap, int w, int h)
} }
#endif #endif
#define RGBA(r,g,b,a) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b)) #define RGBA(r,g,b,a) (((unsigned)(a) << 24) | ((r) << 16) | ((g) << 8) | (b))
typedef struct DVBSubCLUT { typedef struct DVBSubCLUT {
int id; int id;
......
...@@ -77,7 +77,7 @@ typedef struct VP8EncoderContext { ...@@ -77,7 +77,7 @@ typedef struct VP8EncoderContext {
} VP8Context; } VP8Context;
/** String mappings for enum vp8e_enc_control_id */ /** String mappings for enum vp8e_enc_control_id */
static const char *ctlidstr[] = { static const char *const ctlidstr[] = {
[VP8E_UPD_ENTROPY] = "VP8E_UPD_ENTROPY", [VP8E_UPD_ENTROPY] = "VP8E_UPD_ENTROPY",
[VP8E_UPD_REFERENCE] = "VP8E_UPD_REFERENCE", [VP8E_UPD_REFERENCE] = "VP8E_UPD_REFERENCE",
[VP8E_USE_REFERENCE] = "VP8E_USE_REFERENCE", [VP8E_USE_REFERENCE] = "VP8E_USE_REFERENCE",
......
...@@ -48,7 +48,7 @@ int ff_xvid_rate_control_init(MpegEncContext *s){ ...@@ -48,7 +48,7 @@ int ff_xvid_rate_control_init(MpegEncContext *s){
} }
for(i=0; i<s->rc_context.num_entries; i++){ for(i=0; i<s->rc_context.num_entries; i++){
static const char *frame_types = " ipbs"; static const char frame_types[] = " ipbs";
char tmp[256]; char tmp[256];
RateControlEntry *rce; RateControlEntry *rce;
......
...@@ -38,12 +38,6 @@ ...@@ -38,12 +38,6 @@
/** number of bits used for VLC lookup - longest Huffman code is 9 */ /** number of bits used for VLC lookup - longest Huffman code is 9 */
#define VLC_BITS 9 #define VLC_BITS 9
static const char* sample_message =
"Please file a bug report following the instructions at "
"http://ffmpeg.org/bugreports.html and include "
"a sample of this file.";
typedef struct SubStream { typedef struct SubStream {
/// Set if a valid restart header has been read. Otherwise the substream cannot be decoded. /// Set if a valid restart header has been read. Otherwise the substream cannot be decoded.
uint8_t restart_seen; uint8_t restart_seen;
...@@ -308,10 +302,10 @@ static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb) ...@@ -308,10 +302,10 @@ static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
if (mh.num_substreams > MAX_SUBSTREAMS) { if (mh.num_substreams > MAX_SUBSTREAMS) {
av_log(m->avctx, AV_LOG_ERROR, av_log_ask_for_sample(m->avctx,
"Number of substreams %d is larger than the maximum supported " "Number of substreams %d is larger than the maximum supported "
"by the decoder. %s\n", mh.num_substreams, sample_message); "by the decoder.\n", mh.num_substreams);
return AVERROR_INVALIDDATA; return AVERROR_PATCHWELCOME;
} }
m->access_unit_size = mh.access_unit_size; m->access_unit_size = mh.access_unit_size;
...@@ -410,10 +404,10 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp, ...@@ -410,10 +404,10 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
/* This should happen for TrueHD streams with >6 channels and MLP's noise /* This should happen for TrueHD streams with >6 channels and MLP's noise
* type. It is not yet known if this is allowed. */ * type. It is not yet known if this is allowed. */
if (s->max_channel > MAX_MATRIX_CHANNEL_MLP && !s->noise_type) { if (s->max_channel > MAX_MATRIX_CHANNEL_MLP && !s->noise_type) {
av_log(m->avctx, AV_LOG_ERROR, av_log_ask_for_sample(m->avctx,
"Number of channels %d is larger than the maximum supported " "Number of channels %d is larger than the maximum supported "
"by the decoder. %s\n", s->max_channel+2, sample_message); "by the decoder.\n", s->max_channel + 2);
return AVERROR_INVALIDDATA; return AVERROR_PATCHWELCOME;
} }
if (s->min_channel > s->max_channel) { if (s->min_channel > s->max_channel) {
...@@ -455,10 +449,10 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp, ...@@ -455,10 +449,10 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
for (ch = 0; ch <= s->max_matrix_channel; ch++) { for (ch = 0; ch <= s->max_matrix_channel; ch++) {
int ch_assign = get_bits(gbp, 6); int ch_assign = get_bits(gbp, 6);
if (ch_assign > s->max_matrix_channel) { if (ch_assign > s->max_matrix_channel) {
av_log(m->avctx, AV_LOG_ERROR, av_log_ask_for_sample(m->avctx,
"Assignment of matrix channel %d to invalid output channel %d. %s\n", "Assignment of matrix channel %d to invalid output channel %d.\n",
ch, ch_assign, sample_message); ch, ch_assign);
return AVERROR_INVALIDDATA; return AVERROR_PATCHWELCOME;
} }
s->ch_assign[ch_assign] = ch; s->ch_assign[ch_assign] = ch;
} }
...@@ -813,8 +807,8 @@ static int read_block_data(MLPDecodeContext *m, GetBitContext *gbp, ...@@ -813,8 +807,8 @@ static int read_block_data(MLPDecodeContext *m, GetBitContext *gbp,
if (s->data_check_present) { if (s->data_check_present) {
expected_stream_pos = get_bits_count(gbp); expected_stream_pos = get_bits_count(gbp);
expected_stream_pos += get_bits(gbp, 16); expected_stream_pos += get_bits(gbp, 16);
av_log(m->avctx, AV_LOG_WARNING, "This file contains some features " av_log_ask_for_sample(m->avctx, "This file contains some features "
"we have not tested yet. %s\n", sample_message); "we have not tested yet.\n");
} }
if (s->blockpos + s->blocksize > m->access_unit_size) { if (s->blockpos + s->blocksize > m->access_unit_size) {
......
...@@ -1395,20 +1395,6 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt, ...@@ -1395,20 +1395,6 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
int i, stuffing_count, ret; int i, stuffing_count, ret;
int context_count = s->slice_context_count; int context_count = s->slice_context_count;
if (!pkt->data &&
(ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
return ret;
for (i = 0; i < context_count; i++) {
int start_y = s->thread_context[i]->start_mb_y;
int end_y = s->thread_context[i]-> end_mb_y;
int h = s->mb_height;
uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
init_put_bits(&s->thread_context[i]->pb, start, end - start);
}
s->picture_in_gop_number++; s->picture_in_gop_number++;
if (load_input_picture(s, pic_arg) < 0) if (load_input_picture(s, pic_arg) < 0)
...@@ -1420,6 +1406,20 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt, ...@@ -1420,6 +1406,20 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
/* output? */ /* output? */
if (s->new_picture.f.data[0]) { if (s->new_picture.f.data[0]) {
if (!pkt->data &&
(ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
return ret;
for (i = 0; i < context_count; i++) {
int start_y = s->thread_context[i]->start_mb_y;
int end_y = s->thread_context[i]-> end_mb_y;
int h = s->mb_height;
uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
init_put_bits(&s->thread_context[i]->pb, start, end - start);
}
s->pict_type = s->new_picture.f.pict_type; s->pict_type = s->new_picture.f.pict_type;
//emms_c(); //emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality, //printf("qs:%f %f %d\n", s->new_picture.quality,
......
...@@ -732,5 +732,5 @@ AVCodec ff_png_decoder = { ...@@ -732,5 +732,5 @@ AVCodec ff_png_decoder = {
.close = png_dec_end, .close = png_dec_end,
.decode = decode_frame, .decode = decode_frame,
.capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/, .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
.long_name = NULL_IF_CONFIG_SMALL("PNG image"), .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
}; };
...@@ -445,5 +445,5 @@ AVCodec ff_png_encoder = { ...@@ -445,5 +445,5 @@ AVCodec ff_png_encoder = {
PIX_FMT_GRAY8, PIX_FMT_GRAY8A, PIX_FMT_GRAY8, PIX_FMT_GRAY8A,
PIX_FMT_GRAY16BE, PIX_FMT_GRAY16BE,
PIX_FMT_MONOBLACK, PIX_FMT_NONE}, PIX_FMT_MONOBLACK, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("PNG image"), .long_name= NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
}; };
...@@ -53,7 +53,7 @@ static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block) ...@@ -53,7 +53,7 @@ static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block)
* Real Video 3.0/4.0 inverse transform + sample reconstruction * Real Video 3.0/4.0 inverse transform + sample reconstruction
* Code is almost the same as in SVQ3, only scaling is different. * Code is almost the same as in SVQ3, only scaling is different.
*/ */
static void rv34_idct_add_c(uint8_t *dst, int stride, DCTELEM *block){ static void rv34_idct_add_c(uint8_t *dst, ptrdiff_t stride, DCTELEM *block){
int temp[16]; int temp[16];
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i; int i;
...@@ -101,7 +101,7 @@ static void rv34_inv_transform_noround_c(DCTELEM *block){ ...@@ -101,7 +101,7 @@ static void rv34_inv_transform_noround_c(DCTELEM *block){
} }
} }
static void rv34_idct_dc_add_c(uint8_t *dst, int stride, int dc) static void rv34_idct_dc_add_c(uint8_t *dst, ptrdiff_t stride, int dc)
{ {
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i, j; int i, j;
......
...@@ -32,24 +32,24 @@ ...@@ -32,24 +32,24 @@
typedef void (*rv40_weight_func)(uint8_t *dst/*align width (8 or 16)*/, typedef void (*rv40_weight_func)(uint8_t *dst/*align width (8 or 16)*/,
uint8_t *src1/*align width (8 or 16)*/, uint8_t *src1/*align width (8 or 16)*/,
uint8_t *src2/*align width (8 or 16)*/, uint8_t *src2/*align width (8 or 16)*/,
int w1, int w2, int stride); int w1, int w2, ptrdiff_t stride);
typedef void (*rv34_inv_transform_func)(DCTELEM *block); typedef void (*rv34_inv_transform_func)(DCTELEM *block);
typedef void (*rv34_idct_add_func)(uint8_t *dst, int stride, DCTELEM *block); typedef void (*rv34_idct_add_func)(uint8_t *dst, ptrdiff_t stride, DCTELEM *block);
typedef void (*rv34_idct_dc_add_func)(uint8_t *dst, int stride, typedef void (*rv34_idct_dc_add_func)(uint8_t *dst, ptrdiff_t stride,
int dc); int dc);
typedef void (*rv40_weak_loop_filter_func)(uint8_t *src, int stride, typedef void (*rv40_weak_loop_filter_func)(uint8_t *src, ptrdiff_t stride,
int filter_p1, int filter_q1, int filter_p1, int filter_q1,
int alpha, int beta, int alpha, int beta,
int lims, int lim_q1, int lim_p1); int lims, int lim_q1, int lim_p1);
typedef void (*rv40_strong_loop_filter_func)(uint8_t *src, int stride, typedef void (*rv40_strong_loop_filter_func)(uint8_t *src, ptrdiff_t stride,
int alpha, int lims, int alpha, int lims,
int dmode, int chroma); int dmode, int chroma);
typedef int (*rv40_loop_filter_strength_func)(uint8_t *src, int stride, typedef int (*rv40_loop_filter_strength_func)(uint8_t *src, ptrdiff_t stride,
int beta, int beta2, int edge, int beta, int beta2, int edge,
int *p1, int *q1); int *p1, int *q1);
......
...@@ -278,7 +278,7 @@ RV40_CHROMA_MC(put_, op_put) ...@@ -278,7 +278,7 @@ RV40_CHROMA_MC(put_, op_put)
RV40_CHROMA_MC(avg_, op_avg) RV40_CHROMA_MC(avg_, op_avg)
#define RV40_WEIGHT_FUNC(size) \ #define RV40_WEIGHT_FUNC(size) \
static void rv40_weight_func_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, int stride)\ static void rv40_weight_func_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\ {\
int i, j;\ int i, j;\
\ \
...@@ -316,7 +316,7 @@ static const uint8_t rv40_dither_r[16] = { ...@@ -316,7 +316,7 @@ static const uint8_t rv40_dither_r[16] = {
*/ */
static av_always_inline void rv40_weak_loop_filter(uint8_t *src, static av_always_inline void rv40_weak_loop_filter(uint8_t *src,
const int step, const int step,
const int stride, const ptrdiff_t stride,
const int filter_p1, const int filter_p1,
const int filter_q1, const int filter_q1,
const int alpha, const int alpha,
...@@ -362,7 +362,7 @@ static av_always_inline void rv40_weak_loop_filter(uint8_t *src, ...@@ -362,7 +362,7 @@ static av_always_inline void rv40_weak_loop_filter(uint8_t *src,
} }
} }
static void rv40_h_weak_loop_filter(uint8_t *src, const int stride, static void rv40_h_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
const int filter_p1, const int filter_q1, const int filter_p1, const int filter_q1,
const int alpha, const int beta, const int alpha, const int beta,
const int lim_p0q0, const int lim_q1, const int lim_p0q0, const int lim_q1,
...@@ -372,7 +372,7 @@ static void rv40_h_weak_loop_filter(uint8_t *src, const int stride, ...@@ -372,7 +372,7 @@ static void rv40_h_weak_loop_filter(uint8_t *src, const int stride,
alpha, beta, lim_p0q0, lim_q1, lim_p1); alpha, beta, lim_p0q0, lim_q1, lim_p1);
} }
static void rv40_v_weak_loop_filter(uint8_t *src, const int stride, static void rv40_v_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
const int filter_p1, const int filter_q1, const int filter_p1, const int filter_q1,
const int alpha, const int beta, const int alpha, const int beta,
const int lim_p0q0, const int lim_q1, const int lim_p0q0, const int lim_q1,
...@@ -384,7 +384,7 @@ static void rv40_v_weak_loop_filter(uint8_t *src, const int stride, ...@@ -384,7 +384,7 @@ static void rv40_v_weak_loop_filter(uint8_t *src, const int stride,
static av_always_inline void rv40_strong_loop_filter(uint8_t *src, static av_always_inline void rv40_strong_loop_filter(uint8_t *src,
const int step, const int step,
const int stride, const ptrdiff_t stride,
const int alpha, const int alpha,
const int lims, const int lims,
const int dmode, const int dmode,
...@@ -440,14 +440,14 @@ static av_always_inline void rv40_strong_loop_filter(uint8_t *src, ...@@ -440,14 +440,14 @@ static av_always_inline void rv40_strong_loop_filter(uint8_t *src,
} }
} }
static void rv40_h_strong_loop_filter(uint8_t *src, const int stride, static void rv40_h_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
const int alpha, const int lims, const int alpha, const int lims,
const int dmode, const int chroma) const int dmode, const int chroma)
{ {
rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma); rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma);
} }
static void rv40_v_strong_loop_filter(uint8_t *src, const int stride, static void rv40_v_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
const int alpha, const int lims, const int alpha, const int lims,
const int dmode, const int chroma) const int dmode, const int chroma)
{ {
...@@ -455,7 +455,7 @@ static void rv40_v_strong_loop_filter(uint8_t *src, const int stride, ...@@ -455,7 +455,7 @@ static void rv40_v_strong_loop_filter(uint8_t *src, const int stride,
} }
static av_always_inline int rv40_loop_filter_strength(uint8_t *src, static av_always_inline int rv40_loop_filter_strength(uint8_t *src,
int step, int stride, int step, ptrdiff_t stride,
int beta, int beta2, int beta, int beta2,
int edge, int edge,
int *p1, int *q1) int *p1, int *q1)
...@@ -490,14 +490,14 @@ static av_always_inline int rv40_loop_filter_strength(uint8_t *src, ...@@ -490,14 +490,14 @@ static av_always_inline int rv40_loop_filter_strength(uint8_t *src,
return strong0 && strong1; return strong0 && strong1;
} }
static int rv40_h_loop_filter_strength(uint8_t *src, int stride, static int rv40_h_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
int beta, int beta2, int edge, int beta, int beta2, int edge,
int *p1, int *q1) int *p1, int *q1)
{ {
return rv40_loop_filter_strength(src, stride, 1, beta, beta2, edge, p1, q1); return rv40_loop_filter_strength(src, stride, 1, beta, beta2, edge, p1, q1);
} }
static int rv40_v_loop_filter_strength(uint8_t *src, int stride, static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
int beta, int beta2, int edge, int beta, int beta2, int edge,
int *p1, int *q1) int *p1, int *q1)
{ {
......
...@@ -77,7 +77,11 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data, ...@@ -77,7 +77,11 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
av_log(avctx, AV_LOG_ERROR, "invalid image size\n"); av_log(avctx, AV_LOG_ERROR, "invalid image size\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
if (maptype & ~1) { if (maptype == RMT_RAW) {
av_log_ask_for_sample(avctx, "unsupported colormap type\n");
return AVERROR_PATCHWELCOME;
}
if (maptype > RMT_RAW) {
av_log(avctx, AV_LOG_ERROR, "invalid colormap type\n"); av_log(avctx, AV_LOG_ERROR, "invalid colormap type\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
......
...@@ -992,9 +992,11 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, ...@@ -992,9 +992,11 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret && *got_packet_ptr) { if (!ret && *got_packet_ptr) {
if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) { if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
avpkt->pts = frame->pts; if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->duration = ff_samples_to_time_base(avctx, avpkt->pts = frame->pts;
frame->nb_samples); if (!avpkt->duration)
avpkt->duration = ff_samples_to_time_base(avctx,
frame->nb_samples);
} }
avpkt->dts = avpkt->pts; avpkt->dts = avpkt->pts;
} else { } else {
......
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
void ff_rv34_idct_dc_mmx2(DCTELEM *block); void ff_rv34_idct_dc_mmx2(DCTELEM *block);
void ff_rv34_idct_dc_noround_mmx2(DCTELEM *block); void ff_rv34_idct_dc_noround_mmx2(DCTELEM *block);
void ff_rv34_idct_dc_add_mmx(uint8_t *dst, int stride, int dc); void ff_rv34_idct_dc_add_mmx(uint8_t *dst, ptrdiff_t stride, int dc);
void ff_rv34_idct_dc_add_sse4(uint8_t *dst, int stride, int dc); void ff_rv34_idct_dc_add_sse4(uint8_t *dst, ptrdiff_t stride, int dc);
av_cold void ff_rv34dsp_init_x86(RV34DSPContext* c, DSPContext *dsp) av_cold void ff_rv34dsp_init_x86(RV34DSPContext* c, DSPContext *dsp)
{ {
......
...@@ -42,9 +42,9 @@ void ff_avg_rv40_chroma_mc4_3dnow(uint8_t *dst, uint8_t *src, ...@@ -42,9 +42,9 @@ void ff_avg_rv40_chroma_mc4_3dnow(uint8_t *dst, uint8_t *src,
#define DECLARE_WEIGHT(opt) \ #define DECLARE_WEIGHT(opt) \
void ff_rv40_weight_func_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \ void ff_rv40_weight_func_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
int w1, int w2, int stride); \ int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \ void ff_rv40_weight_func_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
int w1, int w2, int stride); int w1, int w2, ptrdiff_t stride);
DECLARE_WEIGHT(mmx) DECLARE_WEIGHT(mmx)
DECLARE_WEIGHT(sse2) DECLARE_WEIGHT(sse2)
DECLARE_WEIGHT(ssse3) DECLARE_WEIGHT(ssse3)
......
/* /*
* This file is part of Libav. * This file is part of FFmpeg.
* *
* Libav is free software; you can redistribute it and/or * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public * modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either * License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version. * version 2.1 of the License, or (at your option) any later version.
* *
* Libav is distributed in the hope that it will be useful, * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details. * Lesser General Public License for more details.
* *
* You should have received a copy of the GNU Lesser General Public * You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/ */
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"w", "w",
"h", "h",
"cw", "cw",
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"in_w", "iw", ///< width of the input video "in_w", "iw", ///< width of the input video
"in_h", "ih", ///< height of the input video "in_h", "ih", ///< height of the input video
"out_w", "ow", ///< width of the cropped video "out_w", "ow", ///< width of the cropped video
......
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
#include FT_FREETYPE_H #include FT_FREETYPE_H
#include FT_GLYPH_H #include FT_GLYPH_H
static const char * const var_names[] = { static const char *const var_names[] = {
"main_w", "w", "W", ///< width of the input video "main_w", "w", "W", ///< width of the input video
"main_h", "h", "H", ///< height of the input video "main_h", "h", "H", ///< height of the input video
"tw", "text_w", ///< width of the rendered text "tw", "text_w", ///< width of the rendered text
...@@ -72,8 +72,8 @@ static const char * const var_names[] = { ...@@ -72,8 +72,8 @@ static const char * const var_names[] = {
NULL NULL
}; };
static const char *fun2_names[] = { static const char *const fun2_names[] = {
"rand", "rand"
}; };
static double drand(void *opaque, double min, double max) static double drand(void *opaque, double min, double max)
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include "avfilter.h" #include "avfilter.h"
#include "internal.h" #include "internal.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"w", ///< width of the input video "w", ///< width of the input video
"h", ///< height of the input video "h", ///< height of the input video
"val", ///< input value for the pixel "val", ///< input value for the pixel
......
...@@ -38,7 +38,7 @@ ...@@ -38,7 +38,7 @@
#include "internal.h" #include "internal.h"
#include "drawutils.h" #include "drawutils.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"main_w", "W", ///< width of the main video "main_w", "W", ///< width of the main video
"main_h", "H", ///< height of the main video "main_h", "H", ///< height of the main video
"overlay_w", "w", ///< width of the overlay video "overlay_w", "w", ///< width of the overlay video
......
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
#include "drawutils.h" #include "drawutils.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"in_w", "iw", "in_w", "iw",
"in_h", "ih", "in_h", "ih",
"out_w", "ow", "out_w", "ow",
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
#include "libswscale/swscale.h" #include "libswscale/swscale.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"in_w", "iw", "in_w", "iw",
"in_h", "ih", "in_h", "ih",
"out_w", "ow", "out_w", "ow",
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include "libavutil/fifo.h" #include "libavutil/fifo.h"
#include "avfilter.h" #include "avfilter.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"TB", ///< timebase "TB", ///< timebase
"pts", ///< original pts in the file of the frame "pts", ///< original pts in the file of the frame
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
#include "avfilter.h" #include "avfilter.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"INTERLACED", ///< tell if the current frame is interlaced "INTERLACED", ///< tell if the current frame is interlaced
"N", ///< frame number (starting at zero) "N", ///< frame number (starting at zero)
"POS", ///< original position in the file of the frame "POS", ///< original position in the file of the frame
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include "avfilter.h" #include "avfilter.h"
#include "internal.h" #include "internal.h"
static const char * const var_names[] = { static const char *const var_names[] = {
"AVTB", /* default timebase 1/AV_TIME_BASE */ "AVTB", /* default timebase 1/AV_TIME_BASE */
"intb", /* input timebase */ "intb", /* input timebase */
NULL NULL
......
This diff is collapsed.
...@@ -138,6 +138,11 @@ static enum CodecID av_str2id(const IdStrMap *tags, const char *str) ...@@ -138,6 +138,11 @@ static enum CodecID av_str2id(const IdStrMap *tags, const char *str)
return CODEC_ID_NONE; return CODEC_ID_NONE;
} }
enum CodecID ff_guess_image2_codec(const char *filename)
{
return av_str2id(img_tags, filename);
}
/* return -1 if no image found */ /* return -1 if no image found */
static int find_image_range(int *pfirst_index, int *plast_index, static int find_image_range(int *pfirst_index, int *plast_index,
const char *path) const char *path)
...@@ -194,7 +199,7 @@ static int find_image_range(int *pfirst_index, int *plast_index, ...@@ -194,7 +199,7 @@ static int find_image_range(int *pfirst_index, int *plast_index,
static int read_probe(AVProbeData *p) static int read_probe(AVProbeData *p)
{ {
if (p->filename && av_str2id(img_tags, p->filename)) { if (p->filename && ff_guess_image2_codec(p->filename)) {
if (av_filename_number_test(p->filename)) if (av_filename_number_test(p->filename))
return AVPROBE_SCORE_MAX; return AVPROBE_SCORE_MAX;
else else
...@@ -203,11 +208,6 @@ static int read_probe(AVProbeData *p) ...@@ -203,11 +208,6 @@ static int read_probe(AVProbeData *p)
return 0; return 0;
} }
enum CodecID ff_guess_image2_codec(const char *filename)
{
return av_str2id(img_tags, filename);
}
static int read_header(AVFormatContext *s1) static int read_header(AVFormatContext *s1)
{ {
VideoData *s = s1->priv_data; VideoData *s = s1->priv_data;
...@@ -277,7 +277,7 @@ static int read_header(AVFormatContext *s1) ...@@ -277,7 +277,7 @@ static int read_header(AVFormatContext *s1)
const char *str= strrchr(s->path, '.'); const char *str= strrchr(s->path, '.');
s->split_planes = str && !av_strcasecmp(str + 1, "y"); s->split_planes = str && !av_strcasecmp(str + 1, "y");
st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = av_str2id(img_tags, s->path); st->codec->codec_id = ff_guess_image2_codec(s->path);
if (st->codec->codec_id == CODEC_ID_LJPEG) if (st->codec->codec_id == CODEC_ID_LJPEG)
st->codec->codec_id = CODEC_ID_MJPEG; st->codec->codec_id = CODEC_ID_MJPEG;
} }
...@@ -419,7 +419,7 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt) ...@@ -419,7 +419,7 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt)
avio_close(pb[1]); avio_close(pb[1]);
avio_close(pb[2]); avio_close(pb[2]);
}else{ }else{
if(av_str2id(img_tags, s->filename) == CODEC_ID_JPEG2000){ if (ff_guess_image2_codec(s->filename) == CODEC_ID_JPEG2000) {
AVStream *st = s->streams[0]; AVStream *st = s->streams[0];
if(st->codec->extradata_size > 8 && if(st->codec->extradata_size > 8 &&
AV_RL32(st->codec->extradata+4) == MKTAG('j','p','2','h')){ AV_RL32(st->codec->extradata+4) == MKTAG('j','p','2','h')){
......
...@@ -542,7 +542,7 @@ static EbmlSyntax matroska_clusters[] = { ...@@ -542,7 +542,7 @@ static EbmlSyntax matroska_clusters[] = {
{ 0 } { 0 }
}; };
static const char *matroska_doctypes[] = { "matroska", "webm" }; static const char *const matroska_doctypes[] = { "matroska", "webm" };
static int matroska_resync(MatroskaDemuxContext *matroska, int64_t last_pos) static int matroska_resync(MatroskaDemuxContext *matroska, int64_t last_pos)
{ {
......
...@@ -2311,7 +2311,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom) ...@@ -2311,7 +2311,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
int64_t dts; int64_t dts;
int data_offset = 0; int data_offset = 0;
unsigned entries, first_sample_flags = frag->flags; unsigned entries, first_sample_flags = frag->flags;
int flags, distance, i; int flags, distance, i, found_keyframe = 0;
for (i = 0; i < c->fc->nb_streams; i++) { for (i = 0; i < c->fc->nb_streams; i++) {
if (c->fc->streams[i]->id == frag->track_id) { if (c->fc->streams[i]->id == frag->track_id) {
...@@ -2365,7 +2365,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom) ...@@ -2365,7 +2365,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
unsigned sample_size = frag->size; unsigned sample_size = frag->size;
int sample_flags = i ? frag->flags : first_sample_flags; int sample_flags = i ? frag->flags : first_sample_flags;
unsigned sample_duration = frag->duration; unsigned sample_duration = frag->duration;
int keyframe; int keyframe = 0;
if (flags & MOV_TRUN_SAMPLE_DURATION) sample_duration = avio_rb32(pb); if (flags & MOV_TRUN_SAMPLE_DURATION) sample_duration = avio_rb32(pb);
if (flags & MOV_TRUN_SAMPLE_SIZE) sample_size = avio_rb32(pb); if (flags & MOV_TRUN_SAMPLE_SIZE) sample_size = avio_rb32(pb);
...@@ -2374,8 +2374,13 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom) ...@@ -2374,8 +2374,13 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
sc->ctts_data[sc->ctts_count].duration = (flags & MOV_TRUN_SAMPLE_CTS) ? sc->ctts_data[sc->ctts_count].duration = (flags & MOV_TRUN_SAMPLE_CTS) ?
avio_rb32(pb) : 0; avio_rb32(pb) : 0;
sc->ctts_count++; sc->ctts_count++;
if ((keyframe = st->codec->codec_type == AVMEDIA_TYPE_AUDIO || if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
(flags & MOV_TRUN_FIRST_SAMPLE_FLAGS && !i && !(sample_flags & ~MOV_FRAG_SAMPLE_FLAG_DEGRADATION_PRIORITY_MASK)) || sample_flags & MOV_FRAG_SAMPLE_FLAG_DEPENDS_NO)) keyframe = 1;
else if (!found_keyframe)
keyframe = found_keyframe =
!(sample_flags & (MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC |
MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES));
if (keyframe)
distance = 0; distance = 0;
av_add_index_entry(st, offset, dts, sample_size, distance, av_add_index_entry(st, offset, dts, sample_size, distance,
keyframe ? AVINDEX_KEYFRAME : 0); keyframe ? AVINDEX_KEYFRAME : 0);
......
...@@ -65,14 +65,31 @@ static const struct { ...@@ -65,14 +65,31 @@ static const struct {
{ "mono", 1, AV_CH_LAYOUT_MONO }, { "mono", 1, AV_CH_LAYOUT_MONO },
{ "stereo", 2, AV_CH_LAYOUT_STEREO }, { "stereo", 2, AV_CH_LAYOUT_STEREO },
{ "2.1", 3, AV_CH_LAYOUT_2POINT1 }, { "2.1", 3, AV_CH_LAYOUT_2POINT1 },
{ "3.0", 3, AV_CH_LAYOUT_SURROUND },
{ "3.0(back)", 3, AV_CH_LAYOUT_2_1 },
{ "4.0", 4, AV_CH_LAYOUT_4POINT0 }, { "4.0", 4, AV_CH_LAYOUT_4POINT0 },
{ "quad", 4, AV_CH_LAYOUT_QUAD }, { "quad", 4, AV_CH_LAYOUT_QUAD },
{ "quad(side)", 4, AV_CH_LAYOUT_2_2 },
{ "3.1", 4, AV_CH_LAYOUT_3POINT1 },
{ "5.0", 5, AV_CH_LAYOUT_5POINT0_BACK }, { "5.0", 5, AV_CH_LAYOUT_5POINT0_BACK },
{ "5.0(side)", 5, AV_CH_LAYOUT_5POINT0 }, { "5.0(side)", 5, AV_CH_LAYOUT_5POINT0 },
{ "4.1", 5, AV_CH_LAYOUT_4POINT1 },
{ "5.1", 6, AV_CH_LAYOUT_5POINT1_BACK }, { "5.1", 6, AV_CH_LAYOUT_5POINT1_BACK },
{ "5.1(side)", 6, AV_CH_LAYOUT_5POINT1 }, { "5.1(side)", 6, AV_CH_LAYOUT_5POINT1 },
// { "5.1+downmix", 8, AV_CH_LAYOUT_5POINT1|AV_CH_LAYOUT_STEREO_DOWNMIX, },
{ "6.0", 6, AV_CH_LAYOUT_6POINT0 },
{ "6.0(front)", 6, AV_CH_LAYOUT_6POINT0_FRONT },
{ "hexagonal", 6, AV_CH_LAYOUT_HEXAGONAL },
{ "6.1", 7, AV_CH_LAYOUT_6POINT1 },
{ "6.1", 7, AV_CH_LAYOUT_6POINT1_BACK },
{ "6.1(front)", 7, AV_CH_LAYOUT_6POINT1_FRONT },
// { "6.1+downmix", 9, AV_CH_LAYOUT_6POINT1|AV_CH_LAYOUT_STEREO_DOWNMIX, },
{ "7.0", 7, AV_CH_LAYOUT_7POINT0 },
{ "7.0(front)", 7, AV_CH_LAYOUT_7POINT0_FRONT },
{ "7.1", 8, AV_CH_LAYOUT_7POINT1 }, { "7.1", 8, AV_CH_LAYOUT_7POINT1 },
{ "7.1(wide)", 8, AV_CH_LAYOUT_7POINT1_WIDE }, { "7.1(wide)", 8, AV_CH_LAYOUT_7POINT1_WIDE },
// { "7.1+downmix", 10, AV_CH_LAYOUT_7POINT1|AV_CH_LAYOUT_STEREO_DOWNMIX, },
{ "octagonal", 8, AV_CH_LAYOUT_OCTAGONAL },
{ "downmix", 2, AV_CH_LAYOUT_STEREO_DOWNMIX, }, { "downmix", 2, AV_CH_LAYOUT_STEREO_DOWNMIX, },
}; };
......
...@@ -622,13 +622,13 @@ void av_free_expr(AVExpr *e) ...@@ -622,13 +622,13 @@ void av_free_expr(AVExpr *e)
#undef printf #undef printf
#include <string.h> #include <string.h>
static double const_values[] = { static const double const_values[] = {
M_PI, M_PI,
M_E, M_E,
0 0
}; };
static const char *const_names[] = { static const char *const const_names[] = {
"PI", "PI",
"E", "E",
0 0
......
This diff is collapsed.
This diff is collapsed.
...@@ -283,6 +283,11 @@ do_video_encoding prores.mov "-vcodec prores" ...@@ -283,6 +283,11 @@ do_video_encoding prores.mov "-vcodec prores"
do_video_decoding "" "-pix_fmt yuv420p" do_video_decoding "" "-pix_fmt yuv420p"
fi fi
if [ -n "$do_prores_kostya" ] ; then
do_video_encoding prores_kostya.mov "-vcodec prores_kostya -profile hq"
do_video_decoding "" "-pix_fmt yuv420p"
fi
if [ -n "$do_svq1" ] ; then if [ -n "$do_svq1" ] ; then
do_video_encoding svq1.mov "-an -vcodec svq1 -qscale 3 -pix_fmt yuv410p" do_video_encoding svq1.mov "-an -vcodec svq1 -qscale 3 -pix_fmt yuv410p"
do_video_decoding "" "-pix_fmt yuv420p" do_video_decoding "" "-pix_fmt yuv420p"
......
40e7637e04991dbe9a23fe109f95bfc8 *./tests/data/vsynth1/prores_kostya.mov
3858901 ./tests/data/vsynth1/prores_kostya.mov
0a4153637d0cc0a88a8bcbf04cfaf8c6 *./tests/data/prores_kostya.vsynth1.out.yuv
stddev: 3.17 PSNR: 38.09 MAXDIFF: 39 bytes: 7603200/ 7603200
ed8b8a94da049518af8f95c5da736e57 *./tests/data/vsynth2/prores_kostya.mov
3884586 ./tests/data/vsynth2/prores_kostya.mov
ca2f6c1162635dedfa468c90f1fdc0ef *./tests/data/prores_kostya.vsynth2.out.yuv
stddev: 0.92 PSNR: 48.77 MAXDIFF: 10 bytes: 7603200/ 7603200
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment