Commit 4095fa90 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  dnxhddec: optimise dnxhd_decode_dct_block()
  rtp: remove disabled code
  eac3enc: use different numbers of blocks per frame to allow higher bitrates
  dnxhd: add regression test for 10-bit
  dnxhd: 10-bit support
  dsputil: update per-arch init funcs for non-h264 high bit depth
  dsputil: template get_pixels() for different bit depths
  dsputil: create 16/32-bit dctcoef versions of some functions
  jfdctint: add 10-bit version
  mov: add clcp type track as Subtitle stream.
  mpeg4: add Mpeg4 Profiles names.
  mpeg4: decode Level Profile for MPEG4 Part 2.
  ffprobe: display bitstream level.
  imgconvert: remove unused glue and xglue macros

Conflicts:
	libavcodec/dsputil_template.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 657eac04 bb32fded
......@@ -202,6 +202,7 @@ static void show_stream(AVFormatContext *fmt_ctx, int stream_idx)
}
printf("pix_fmt=%s\n", dec_ctx->pix_fmt != PIX_FMT_NONE ?
av_pix_fmt_descriptors[dec_ctx->pix_fmt].name : "unknown");
printf("level=%d\n", dec_ctx->level);
break;
case AVMEDIA_TYPE_AUDIO:
......
This diff is collapsed.
......@@ -152,6 +152,8 @@ typedef struct AC3EncodeContext {
int bit_rate; ///< target bit rate, in bits-per-second
int sample_rate; ///< sampling frequency, in Hz
int num_blks_code; ///< number of blocks code (numblkscod)
int num_blocks; ///< number of blocks per frame
int frame_size_min; ///< minimum frame size in case rounding is necessary
int frame_size; ///< current frame size in bytes
int frame_size_code; ///< frame size code (frmsizecod)
......
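Editor's note: the new num_blocks field is what lets the E-AC-3 encoder trade blocks per frame for bitrate. Each AC-3 audio block covers 256 samples, so a frame carrying fewer blocks spans less time and the same frame payload yields a higher bitrate. A minimal standalone sketch of that arithmetic (hypothetical helper, not code from this commit):

#include <stdio.h>

/* Illustration only: effective bitrate of an E-AC-3 frame given its size in
 * bytes and its number of 256-sample blocks (AC3_BLOCK_SIZE). */
static long long eac3_frame_bitrate(int frame_size_bytes, int num_blocks,
                                    int sample_rate)
{
    long long bits    = (long long)frame_size_bytes * 8;
    long long samples = (long long)num_blocks * 256;
    return bits * sample_rate / samples;
}

int main(void)
{
    /* same 768-byte frame at 48 kHz */
    printf("%lld\n", eac3_frame_bitrate(768, 6, 48000)); /* 192000 b/s  */
    printf("%lld\n", eac3_frame_bitrate(768, 1, 48000)); /* 1152000 b/s */
    return 0;
}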
......@@ -93,7 +93,7 @@ static void scale_coefficients(AC3EncodeContext *s)
{
int blk, ch;
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
for (ch = 1; ch <= s->channels; ch++) {
s->ac3dsp.ac3_rshift_int32(block->mdct_coef[ch], AC3_MAX_COEFS,
......
......@@ -103,7 +103,7 @@ static int normalize_samples(AC3EncodeContext *s)
*/
static void scale_coefficients(AC3EncodeContext *s)
{
int chan_size = AC3_MAX_COEFS * AC3_MAX_BLOCKS;
int chan_size = AC3_MAX_COEFS * s->num_blocks;
s->ac3dsp.float_to_fixed24(s->fixed_coef_buffer + chan_size,
s->mdct_coef_buffer + chan_size,
chan_size * s->channels);
......
......@@ -79,13 +79,13 @@ static void deinterleave_input_samples(AC3EncodeContext *s,
int sinc;
/* copy last 256 samples of previous frame to the start of the current frame */
memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][AC3_FRAME_SIZE],
memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][AC3_BLOCK_SIZE * s->num_blocks],
AC3_BLOCK_SIZE * sizeof(s->planar_samples[0][0]));
/* deinterleave */
sinc = s->channels;
sptr = samples + s->channel_map[ch];
for (i = AC3_BLOCK_SIZE; i < AC3_FRAME_SIZE+AC3_BLOCK_SIZE; i++) {
for (i = AC3_BLOCK_SIZE; i < AC3_BLOCK_SIZE * (s->num_blocks + 1); i++) {
s->planar_samples[ch][i] = *sptr;
sptr += sinc;
}
......@@ -103,7 +103,7 @@ static void apply_mdct(AC3EncodeContext *s)
int blk, ch;
for (ch = 0; ch < s->channels; ch++) {
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
const SampleType *input_samples = &s->planar_samples[ch][blk * AC3_BLOCK_SIZE];
......@@ -159,7 +159,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
cpl_start = FFMIN(256, cpl_start + num_cpl_coefs) - num_cpl_coefs;
/* calculate coupling channel from fbw channels */
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
CoefType *cpl_coef = &block->mdct_coef[CPL_CH][cpl_start];
if (!block->cpl_in_use)
......@@ -188,7 +188,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
while (i < s->cpl_end_freq) {
int band_size = s->cpl_band_sizes[bnd];
for (ch = CPL_CH; ch <= s->fbw_channels; ch++) {
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
if (!block->cpl_in_use || (ch > CPL_CH && !block->channel_in_cpl[ch]))
continue;
......@@ -203,7 +203,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
}
/* determine which blocks to send new coupling coordinates for */
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
AC3Block *block0 = blk ? &s->blocks[blk-1] : NULL;
int new_coords = 0;
......@@ -261,7 +261,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
coordinates in successive blocks */
for (bnd = 0; bnd < s->num_cpl_bands; bnd++) {
blk = 0;
while (blk < AC3_MAX_BLOCKS) {
while (blk < s->num_blocks) {
int blk1;
CoefSumType energy_cpl;
AC3Block *block = &s->blocks[blk];
......@@ -273,7 +273,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
energy_cpl = energy[blk][CPL_CH][bnd];
blk1 = blk+1;
while (!s->blocks[blk1].new_cpl_coords && blk1 < AC3_MAX_BLOCKS) {
while (!s->blocks[blk1].new_cpl_coords && blk1 < s->num_blocks) {
if (s->blocks[blk1].cpl_in_use)
energy_cpl += energy[blk1][CPL_CH][bnd];
blk1++;
......@@ -285,7 +285,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
continue;
energy_ch = energy[blk][ch][bnd];
blk1 = blk+1;
while (!s->blocks[blk1].new_cpl_coords && blk1 < AC3_MAX_BLOCKS) {
while (!s->blocks[blk1].new_cpl_coords && blk1 < s->num_blocks) {
if (s->blocks[blk1].cpl_in_use)
energy_ch += energy[blk1][ch][bnd];
blk1++;
......@@ -297,7 +297,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
}
/* calculate exponents/mantissas for coupling coordinates */
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
if (!block->cpl_in_use || !block->new_cpl_coords)
continue;
......@@ -362,7 +362,7 @@ static void compute_rematrixing_strategy(AC3EncodeContext *s)
if (s->channel_mode != AC3_CHMODE_STEREO)
return;
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
block = &s->blocks[blk];
block->new_rematrixing_strategy = !blk;
......@@ -440,7 +440,7 @@ int AC3_NAME(encode_frame)(AVCodecContext *avctx, unsigned char *frame,
scale_coefficients(s);
clip_coefficients(&s->dsp, s->blocks[0].mdct_coef[1],
AC3_MAX_COEFS * AC3_MAX_BLOCKS * s->channels);
AC3_MAX_COEFS * s->num_blocks * s->channels);
s->cpl_on = s->cpl_enabled;
ff_ac3_compute_coupling_strategy(s);
......
......@@ -270,7 +270,7 @@ static void put_pixels16_axp_asm(uint8_t *block, const uint8_t *pixels,
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_pixels16_axp_asm;
......@@ -321,7 +321,8 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
c->put_pixels_clamped = put_pixels_clamped_mvi_asm;
c->add_pixels_clamped = add_pixels_clamped_mvi_asm;
c->get_pixels = get_pixels_mvi;
if (!high_bit_depth)
c->get_pixels = get_pixels_mvi;
c->diff_pixels = diff_pixels_mvi;
c->sad[0] = pix_abs16x16_mvi_asm;
c->sad[1] = pix_abs8x8_mvi;
......
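Editor's note: the same two-part pattern recurs in the per-arch init hunks that follow. high_bit_depth no longer tests for H.264 specifically, and routines that assume 8-bit samples are installed only when the depth allows, so >8-bit content falls back to the bit-depth-templated C functions. Schematically (architecture and function names are placeholders, not real FFmpeg symbols):

#include "dsputil.h"

/* Schematic only; "somearch" stands in for alpha/arm/bfin/ppc/etc., and these
 * empty stubs stand in for that architecture's optimized routines. */
static void get_pixels_somearch(DCTELEM *block, const uint8_t *pixels,
                                int line_size) { /* 8-bit-only asm here */ }
static void diff_pixels_somearch(DCTELEM *block, const uint8_t *s1,
                                 const uint8_t *s2, int stride) { /* asm here */ }

static void dsputil_init_somearch(DSPContext *c, AVCodecContext *avctx)
{
    /* was: avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8 */
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth)
        c->get_pixels = get_pixels_somearch;   /* assumes 8-bit DCTELEM layout */

    c->diff_pixels = diff_pixels_somearch;     /* depth-independent, always set */
}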
......@@ -75,7 +75,7 @@ static void simple_idct_arm_add(uint8_t *dest, int line_size, DCTELEM *block)
void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
ff_put_pixels_clamped = c->put_pixels_clamped;
ff_add_pixels_clamped = c->add_pixels_clamped;
......
......@@ -72,7 +72,7 @@ int ff_pix_sum_armv6(uint8_t *pix, int line_size);
void av_cold ff_dsputil_init_armv6(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 &&
(avctx->idct_algo == FF_IDCT_AUTO ||
......@@ -106,8 +106,9 @@ void av_cold ff_dsputil_init_armv6(DSPContext* c, AVCodecContext *avctx)
c->avg_pixels_tab[1][0] = ff_avg_pixels8_armv6;
}
if (!high_bit_depth)
c->get_pixels = ff_get_pixels_armv6;
c->add_pixels_clamped = ff_add_pixels_clamped_armv6;
c->get_pixels = ff_get_pixels_armv6;
c->diff_pixels = ff_diff_pixels_armv6;
c->pix_abs[0][0] = ff_pix_abs16_armv6;
......
......@@ -175,7 +175,7 @@ void ff_apply_window_int16_neon(int16_t *dst, const int16_t *src,
void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (!avctx->lowres && avctx->bits_per_raw_sample <= 8) {
if (avctx->idct_algo == FF_IDCT_AUTO ||
......
......@@ -155,7 +155,7 @@ static void nop(uint8_t *block, const uint8_t *pixels, int line_size, int h)
void ff_dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx)
{
int mm_flags = AV_CPU_FLAG_IWMMXT; /* multimedia extension flags */
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (avctx->dsp_mask) {
if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
......
......@@ -2278,6 +2278,23 @@ typedef struct AVCodecContext {
#define FF_PROFILE_VC1_COMPLEX 2
#define FF_PROFILE_VC1_ADVANCED 3
#define FF_PROFILE_MPEG4_SIMPLE 0
#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
#define FF_PROFILE_MPEG4_CORE 2
#define FF_PROFILE_MPEG4_MAIN 3
#define FF_PROFILE_MPEG4_N_BIT 4
#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
#define FF_PROFILE_MPEG4_HYBRID 8
#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
/**
* level
* - encoding: Set by user.
......
......@@ -197,14 +197,14 @@ static int bfin_pix_abs8_xy2 (void *c, uint8_t *blk1, uint8_t *blk2, int line_si
void dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx )
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
c->get_pixels = ff_bfin_get_pixels;
c->diff_pixels = ff_bfin_diff_pixels;
c->put_pixels_clamped = ff_bfin_put_pixels_clamped;
c->add_pixels_clamped = ff_bfin_add_pixels_clamped;
if (!high_bit_depth)
c->get_pixels = ff_bfin_get_pixels;
c->clear_blocks = bfin_clear_blocks;
c->pix_sum = ff_bfin_pix_sum;
c->pix_norm1 = ff_bfin_pix_norm1;
......@@ -253,10 +253,10 @@ void dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx )
/* c->put_no_rnd_pixels_tab[0][3] = ff_bfin_put_pixels16_xy2_nornd; */
}
if (avctx->dct_algo == FF_DCT_AUTO)
c->fdct = ff_bfin_fdct;
if (avctx->bits_per_raw_sample <= 8) {
if (avctx->dct_algo == FF_DCT_AUTO)
c->fdct = ff_bfin_fdct;
if (avctx->idct_algo == FF_IDCT_VP3) {
c->idct_permutation_type = FF_NO_IDCT_PERM;
c->idct = ff_bfin_vp3_idct;
......
......@@ -88,7 +88,7 @@ static const struct algo fdct_tab[] = {
{ "REF-DBL", ff_ref_fdct, NO_PERM },
{ "FAAN", ff_faandct, FAAN_SCALE },
{ "IJG-AAN-INT", fdct_ifast, SCALE_PERM },
{ "IJG-LLM-INT", ff_jpeg_fdct_islow, NO_PERM },
{ "IJG-LLM-INT", ff_jpeg_fdct_islow_8, NO_PERM },
#if HAVE_MMX
{ "MMX", ff_fdct_mmx, NO_PERM, AV_CPU_FLAG_MMX },
......
This diff is collapsed.
......@@ -46,6 +46,6 @@ typedef struct {
extern const CIDEntry ff_dnxhd_cid_table[];
int ff_dnxhd_get_cid_table(int cid);
int ff_dnxhd_find_cid(AVCodecContext *avctx);
int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth);
#endif /* AVCODEC_DNXHDDATA_H */
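Editor's note: ff_dnxhd_find_cid() now also takes the encoding bit depth so the CID lookup can distinguish 8- and 10-bit profiles. The encoder-side caller is in one of the collapsed diffs; a plausible call site might look roughly like this (sketch only, pixel-format mapping assumed):

#include "avcodec.h"
#include "dnxhddata.h"

/* Sketch of a caller, not the actual encoder change from this commit: derive
 * the bit depth from the pixel format (assuming the yuv422p10 constant) and
 * let the CID lookup restrict itself to profiles of that depth. */
static int dnxhd_pick_cid(AVCodecContext *avctx)
{
    int bit_depth = (avctx->pix_fmt == PIX_FMT_YUV422P10) ? 10 : 8;
    return ff_dnxhd_find_cid(avctx, bit_depth);  /* 0 if no profile matches */
}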
This diff is collapsed.
This diff is collapsed.
......@@ -52,8 +52,12 @@ typedef struct DNXHDEncContext {
struct DNXHDEncContext *thread[MAX_THREADS];
// Because our samples are either 8 or 16 bits for 8-bit and 10-bit
// encoding respectively, these refer either to bytes or to two-byte words.
unsigned dct_y_offset;
unsigned dct_uv_offset;
unsigned block_width_l2;
int interlaced;
int cur_field;
......
......@@ -306,25 +306,6 @@ static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
return s;
}
static void get_pixels_c(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
int i;
/* read the pixels */
for(i=0;i<8;i++) {
block[0] = pixels[0];
block[1] = pixels[1];
block[2] = pixels[2];
block[3] = pixels[3];
block[4] = pixels[4];
block[5] = pixels[5];
block[6] = pixels[6];
block[7] = pixels[7];
pixels += line_size;
block += 8;
}
}
static void diff_pixels_c(DCTELEM *restrict block, const uint8_t *s1,
const uint8_t *s2, int stride){
int i;
......@@ -2836,17 +2817,22 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
ff_check_alignment();
#if CONFIG_ENCODERS
if(avctx->dct_algo==FF_DCT_FASTINT) {
c->fdct = fdct_ifast;
c->fdct248 = fdct_ifast248;
}
else if(avctx->dct_algo==FF_DCT_FAAN) {
c->fdct = ff_faandct;
c->fdct248 = ff_faandct248;
}
else {
c->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
c->fdct248 = ff_fdct248_islow;
if (avctx->bits_per_raw_sample == 10) {
c->fdct = ff_jpeg_fdct_islow_10;
c->fdct248 = ff_fdct248_islow_10;
} else {
if(avctx->dct_algo==FF_DCT_FASTINT) {
c->fdct = fdct_ifast;
c->fdct248 = fdct_ifast248;
}
else if(avctx->dct_algo==FF_DCT_FAAN) {
c->fdct = ff_faandct;
c->fdct248 = ff_faandct248;
}
else {
c->fdct = ff_jpeg_fdct_islow_8; //slow/accurate/default
c->fdct248 = ff_fdct248_islow_8;
}
}
#endif //CONFIG_ENCODERS
......@@ -2910,7 +2896,6 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
}
}
c->get_pixels = get_pixels_c;
c->diff_pixels = diff_pixels_c;
c->put_pixels_clamped = ff_put_pixels_clamped_c;
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_c;
......@@ -3138,13 +3123,14 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->PFX ## _pixels_tab[IDX][15] = FUNCC(PFX ## NUM ## _mc33, depth)
#define BIT_DEPTH_FUNCS(depth)\
#define BIT_DEPTH_FUNCS(depth, dct)\
c->get_pixels = FUNCC(get_pixels ## dct , depth);\
c->draw_edges = FUNCC(draw_edges , depth);\
c->emulated_edge_mc = FUNC (ff_emulated_edge_mc , depth);\
c->clear_block = FUNCC(clear_block , depth);\
c->clear_blocks = FUNCC(clear_blocks , depth);\
c->add_pixels8 = FUNCC(add_pixels8 , depth);\
c->add_pixels4 = FUNCC(add_pixels4 , depth);\
c->clear_block = FUNCC(clear_block ## dct , depth);\
c->clear_blocks = FUNCC(clear_blocks ## dct , depth);\
c->add_pixels8 = FUNCC(add_pixels8 ## dct , depth);\
c->add_pixels4 = FUNCC(add_pixels4 ## dct , depth);\
c->put_no_rnd_pixels_l2[0] = FUNCC(put_no_rnd_pixels16_l2, depth);\
c->put_no_rnd_pixels_l2[1] = FUNCC(put_no_rnd_pixels8_l2 , depth);\
\
......@@ -3178,15 +3164,23 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
switch (avctx->bits_per_raw_sample) {
case 9:
BIT_DEPTH_FUNCS(9);
if (c->dct_bits == 32) {
BIT_DEPTH_FUNCS(9, _32);
} else {
BIT_DEPTH_FUNCS(9, _16);
}
break;
case 10:
BIT_DEPTH_FUNCS(10);
if (c->dct_bits == 32) {
BIT_DEPTH_FUNCS(10, _32);
} else {
BIT_DEPTH_FUNCS(10, _16);
}
break;
default:
av_log(avctx, AV_LOG_DEBUG, "Unsupported bit depth: %d\n", avctx->bits_per_raw_sample);
case 8:
BIT_DEPTH_FUNCS(8);
BIT_DEPTH_FUNCS(8, _16);
break;
}
......
......@@ -40,8 +40,10 @@ typedef short DCTELEM;
void fdct_ifast (DCTELEM *data);
void fdct_ifast248 (DCTELEM *data);
void ff_jpeg_fdct_islow (DCTELEM *data);
void ff_fdct248_islow (DCTELEM *data);
void ff_jpeg_fdct_islow_8(DCTELEM *data);
void ff_jpeg_fdct_islow_10(DCTELEM *data);
void ff_fdct248_islow_8(DCTELEM *data);
void ff_fdct248_islow_10(DCTELEM *data);
void j_rev_dct (DCTELEM *data);
void j_rev_dct4 (DCTELEM *data);
......@@ -217,6 +219,11 @@ void ff_put_signed_pixels_clamped_c(const DCTELEM *block, uint8_t *dest, int lin
* DSPContext.
*/
typedef struct DSPContext {
/**
* Size of DCT coefficients.
*/
int dct_bits;
/* pixel ops : interface with DCT */
void (*get_pixels)(DCTELEM *block/*align 16*/, const uint8_t *pixels/*align 8*/, int line_size);
void (*diff_pixels)(DCTELEM *block/*align 16*/, const uint8_t *s1/*align 8*/, const uint8_t *s2/*align 8*/, int stride);
......
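Editor's note: the new dct_bits field is how a caller requests the 32-bit-coefficient (_32) template variants instead of the 16-bit ones; the h264 hunk later in this commit sets it from the stream's luma bit depth before re-running dsputil_init(). A minimal sketch of that call pattern (helper name is hypothetical):

#include "dsputil.h"

/* Hypothetical helper mirroring the h264.c hunk further down: decide the
 * coefficient width from the sample bit depth, then (re)initialise the DSP
 * context so the _16 or _32 templated functions get installed. */
static void reinit_dsp_for_bit_depth(DSPContext *dsp, AVCodecContext *avctx,
                                     int bit_depth)
{
    avctx->bits_per_raw_sample = bit_depth;
    dsp->dct_bits = bit_depth > 8 ? 32 : 16;
    dsputil_init(dsp, avctx);
}

The point is that dct_bits must be set before dsputil_init() runs, since that call is what installs the _16 or _32 function pointers.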
......@@ -192,43 +192,89 @@ void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, i
}
}
static void FUNCC(add_pixels8)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
int i;
pixel *restrict pixels = (pixel *restrict)p_pixels;
dctcoef *block = (dctcoef*)p_block;
line_size >>= sizeof(pixel)-1;
for(i=0;i<8;i++) {
pixels[0] += block[0];
pixels[1] += block[1];
pixels[2] += block[2];
pixels[3] += block[3];
pixels[4] += block[4];
pixels[5] += block[5];
pixels[6] += block[6];
pixels[7] += block[7];
pixels += line_size;
block += 8;
}
#define DCTELEM_FUNCS(dctcoef, suffix) \
static void FUNCC(get_pixels ## suffix)(DCTELEM *restrict _block, \
const uint8_t *_pixels, \
int line_size) \
{ \
const pixel *pixels = (const pixel *) _pixels; \
dctcoef *restrict block = (dctcoef *) _block; \
int i; \
\
/* read the pixels */ \
for(i=0;i<8;i++) { \
block[0] = pixels[0]; \
block[1] = pixels[1]; \
block[2] = pixels[2]; \
block[3] = pixels[3]; \
block[4] = pixels[4]; \
block[5] = pixels[5]; \
block[6] = pixels[6]; \
block[7] = pixels[7]; \
pixels += line_size / sizeof(pixel); \
block += 8; \
} \
} \
\
static void FUNCC(add_pixels8 ## suffix)(uint8_t *restrict _pixels, \
DCTELEM *_block, \
int line_size) \
{ \
int i; \
pixel *restrict pixels = (pixel *restrict)_pixels; \
dctcoef *block = (dctcoef*)_block; \
line_size /= sizeof(pixel); \
\
for(i=0;i<8;i++) { \
pixels[0] += block[0]; \
pixels[1] += block[1]; \
pixels[2] += block[2]; \
pixels[3] += block[3]; \
pixels[4] += block[4]; \
pixels[5] += block[5]; \
pixels[6] += block[6]; \
pixels[7] += block[7]; \
pixels += line_size; \
block += 8; \
} \
} \
\
static void FUNCC(add_pixels4 ## suffix)(uint8_t *restrict _pixels, \
DCTELEM *_block, \
int line_size) \
{ \
int i; \
pixel *restrict pixels = (pixel *restrict)_pixels; \
dctcoef *block = (dctcoef*)_block; \
line_size /= sizeof(pixel); \
\
for(i=0;i<4;i++) { \
pixels[0] += block[0]; \
pixels[1] += block[1]; \
pixels[2] += block[2]; \
pixels[3] += block[3]; \
pixels += line_size; \
block += 4; \
} \
} \
\
static void FUNCC(clear_block ## suffix)(DCTELEM *block) \
{ \
memset(block, 0, sizeof(dctcoef)*64); \
} \
\
/** \
* memset(blocks, 0, sizeof(DCTELEM)*6*64) \
*/ \
static void FUNCC(clear_blocks ## suffix)(DCTELEM *blocks) \
{ \
memset(blocks, 0, sizeof(dctcoef)*6*64); \
}
static void FUNCC(add_pixels4)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
int i;
pixel *restrict pixels = (pixel *restrict)p_pixels;
dctcoef *block = (dctcoef*)p_block;
line_size >>= sizeof(pixel)-1;
for(i=0;i<4;i++) {
pixels[0] += block[0];
pixels[1] += block[1];
pixels[2] += block[2];
pixels[3] += block[3];
pixels += line_size;
block += 4;
}
}
DCTELEM_FUNCS(DCTELEM, _16)
#if BIT_DEPTH > 8
DCTELEM_FUNCS(dctcoef, _32)
#endif
#define PIXOP2(OPNAME, OP) \
static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
......@@ -1232,16 +1278,3 @@ void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(avg_pixels16)(dst, src, stride, 16);
}
static void FUNCC(clear_block)(DCTELEM *block)
{
memset(block, 0, sizeof(dctcoef)*64);
}
/**
* memset(blocks, 0, sizeof(DCTELEM)*6*64)
*/
static void FUNCC(clear_blocks)(DCTELEM *blocks)
{
memset(blocks, 0, sizeof(dctcoef)*6*64);
}
......@@ -63,6 +63,11 @@ void ff_eac3_get_frame_exp_strategy(AC3EncodeContext *s)
{
int ch;
if (s->num_blocks < 6) {
s->use_frame_exp_strategy = 0;
return;
}
s->use_frame_exp_strategy = 1;
for (ch = !s->cpl_on; ch <= s->fbw_channels; ch++) {
int expstr = eac3_frame_expstr_index_tab[s->exp_strategy[ch][0]-1]
......@@ -89,7 +94,7 @@ void ff_eac3_set_cpl_states(AC3EncodeContext *s)
/* set first cpl coords */
for (ch = 1; ch <= s->fbw_channels; ch++)
first_cpl_coords[ch] = 1;
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
for (ch = 1; ch <= s->fbw_channels; ch++) {
if (block->channel_in_cpl[ch]) {
......@@ -104,7 +109,7 @@ void ff_eac3_set_cpl_states(AC3EncodeContext *s)
}
/* set first cpl leak */
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 0; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
if (block->cpl_in_use) {
block->new_cpl_leak = 2;
......@@ -130,7 +135,7 @@ void ff_eac3_output_frame_header(AC3EncodeContext *s)
put_bits(&s->pb, 2, s->bit_alloc.sr_code); /* sample rate code */
} else {
put_bits(&s->pb, 2, s->bit_alloc.sr_code); /* sample rate code */
put_bits(&s->pb, 2, 0x3); /* number of blocks = 6 */
put_bits(&s->pb, 2, s->num_blks_code); /* number of blocks */
}
put_bits(&s->pb, 3, s->channel_mode); /* audio coding mode */
put_bits(&s->pb, 1, s->lfe_on); /* LFE channel indicator */
......@@ -141,11 +146,15 @@ void ff_eac3_output_frame_header(AC3EncodeContext *s)
/* TODO: mixing metadata */
put_bits(&s->pb, 1, 0); /* no info metadata */
/* TODO: info metadata */
if (s->num_blocks != 6)
put_bits(&s->pb, 1, !(s->avctx->frame_number % 6)); /* converter sync flag */
put_bits(&s->pb, 1, 0); /* no additional bit stream info */
/* frame header */
if (s->num_blocks == 6) {
put_bits(&s->pb, 1, !s->use_frame_exp_strategy);/* exponent strategy syntax */
put_bits(&s->pb, 1, 0); /* aht enabled = no */
}
put_bits(&s->pb, 2, 0); /* snr offset strategy = 1 */
put_bits(&s->pb, 1, 0); /* transient pre-noise processing enabled = no */
put_bits(&s->pb, 1, 0); /* block switch syntax enabled = no */
......@@ -158,7 +167,7 @@ void ff_eac3_output_frame_header(AC3EncodeContext *s)
/* coupling strategy use flags */
if (s->channel_mode > AC3_CHMODE_MONO) {
put_bits(&s->pb, 1, s->blocks[0].cpl_in_use);
for (blk = 1; blk < AC3_MAX_BLOCKS; blk++) {
for (blk = 1; blk < s->num_blocks; blk++) {
AC3Block *block = &s->blocks[blk];
put_bits(&s->pb, 1, block->new_cpl_strategy);
if (block->new_cpl_strategy)
......@@ -170,26 +179,31 @@ void ff_eac3_output_frame_header(AC3EncodeContext *s)
for (ch = !s->cpl_on; ch <= s->fbw_channels; ch++)
put_bits(&s->pb, 5, s->frame_exp_strategy[ch]);
} else {
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++)
for (blk = 0; blk < s->num_blocks; blk++)
for (ch = !s->blocks[blk].cpl_in_use; ch <= s->fbw_channels; ch++)
put_bits(&s->pb, 2, s->exp_strategy[ch][blk]);
}
if (s->lfe_on) {
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++)
for (blk = 0; blk < s->num_blocks; blk++)
put_bits(&s->pb, 1, s->exp_strategy[s->lfe_channel][blk]);
}
/* E-AC-3 to AC-3 converter exponent strategy (unfortunately not optional...) */
/* E-AC-3 to AC-3 converter exponent strategy (not optional when num blocks == 6) */
if (s->num_blocks != 6) {
put_bits(&s->pb, 1, 0);
} else {
for (ch = 1; ch <= s->fbw_channels; ch++) {
if (s->use_frame_exp_strategy)
put_bits(&s->pb, 5, s->frame_exp_strategy[ch]);
else
put_bits(&s->pb, 5, 0);
}
}
/* snr offsets */
put_bits(&s->pb, 6, s->coarse_snr_offset);
put_bits(&s->pb, 4, s->fine_snr_offset[1]);
/* block start info */
put_bits(&s->pb, 1, 0);
if (s->num_blocks > 1)
put_bits(&s->pb, 1, 0);
}
......
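Editor's note: the num_blks_code (numblkscod) written in the header above indexes the standard E-AC-3 block-count table, so s->num_blocks is one of 1, 2, 3 or 6; the previously hard-coded value 0x3 corresponds to 6 blocks. As a sketch:

/* Reference only, not encoder code:
 * s->num_blocks == eac3_blocks_per_frame_tab[s->num_blks_code] */
static const int eac3_blocks_per_frame_tab[4] = { 1, 2, 3, 6 };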
......@@ -3707,6 +3707,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma);
ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma);
s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
dsputil_init(&s->dsp, s->avctx);
} else {
av_log(avctx, AV_LOG_DEBUG, "Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
......
......@@ -42,9 +42,6 @@
#include "x86/dsputil_mmx.h"
#endif
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define FF_COLOR_RGB 0 /**< RGB color space */
#define FF_COLOR_GRAY 1 /**< gray color space */
#define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
......
This diff is collapsed.
This diff is collapsed.
......@@ -421,13 +421,14 @@ static void ff_fdct_mlib(DCTELEM *data)
void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
c->get_pixels = get_pixels_mlib;
c->diff_pixels = diff_pixels_mlib;
c->add_pixels_clamped = add_pixels_clamped_mlib;
if (!high_bit_depth) {
c->get_pixels = get_pixels_mlib;
c->put_pixels_tab[0][0] = put_pixels16_mlib;
c->put_pixels_tab[0][1] = put_pixels16_x2_mlib;
c->put_pixels_tab[0][2] = put_pixels16_y2_mlib;
......
......@@ -1527,6 +1527,22 @@ static int mpeg4_decode_gop_header(MpegEncContext * s, GetBitContext *gb){
return 0;
}
static int mpeg4_decode_profile_level(MpegEncContext * s, GetBitContext *gb){
int profile_and_level_indication;
profile_and_level_indication = get_bits(gb, 8);
s->avctx->profile = (profile_and_level_indication & 0xf0) >> 4;
s->avctx->level = (profile_and_level_indication & 0x0f);
// for Simple profile, level 0
if (s->avctx->profile == 0 && s->avctx->level == 8) {
s->avctx->level = 0;
}
return 0;
}
static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){
int width, height, vo_ver_id;
......@@ -2181,6 +2197,9 @@ int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb)
else if(startcode == GOP_STARTCODE){
mpeg4_decode_gop_header(s, gb);
}
else if(startcode == VOS_STARTCODE){
mpeg4_decode_profile_level(s, gb);
}
else if(startcode == VOP_STARTCODE){
break;
}
......@@ -2241,6 +2260,25 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}
static const AVProfile mpeg4_video_profiles[] = {
{ FF_PROFILE_MPEG4_SIMPLE, "Simple Profile" },
{ FF_PROFILE_MPEG4_SIMPLE_SCALABLE, "Simple Scalable Profile" },
{ FF_PROFILE_MPEG4_CORE, "Core Profile" },
{ FF_PROFILE_MPEG4_MAIN, "Main Profile" },
{ FF_PROFILE_MPEG4_N_BIT, "N-bit Profile" },
{ FF_PROFILE_MPEG4_SCALABLE_TEXTURE, "Scalable Texture Profile" },
{ FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION, "Simple Face Animation Profile" },
{ FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE, "Basic Animated Texture Profile" },
{ FF_PROFILE_MPEG4_HYBRID, "Hybrid Profile" },
{ FF_PROFILE_MPEG4_ADVANCED_REAL_TIME, "Advanced Real Time Simple Profile" },
{ FF_PROFILE_MPEG4_CORE_SCALABLE, "Core Scalable Profile" },
{ FF_PROFILE_MPEG4_ADVANCED_CODING, "Advanced Coding Profile" },
{ FF_PROFILE_MPEG4_ADVANCED_CORE, "Advanced Core Profile" },
{ FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE, "Advanced Scalable Texture Profile" },
{ FF_PROFILE_MPEG4_SIMPLE_STUDIO, "Simple Studio Profile" },
{ FF_PROFILE_MPEG4_ADVANCED_SIMPLE, "Advanced Simple Profile" },
};
AVCodec ff_mpeg4_decoder = {
"mpeg4",
AVMEDIA_TYPE_VIDEO,
......@@ -2255,6 +2293,7 @@ AVCodec ff_mpeg4_decoder = {
.max_lowres= 3,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
.pix_fmts= ff_hwaccel_pixfmt_list_420,
.profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),
.update_thread_context= ONLY_IF_THREADS_ENABLED(ff_mpeg_update_thread_context)
};
......
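Editor's note: the profile/level parsing added above is a plain nibble split of the 8-bit profile_and_level_indication, with one special case for Simple Profile level 0. A standalone worked example of the same logic (sketch, not library code):

#include <stdio.h>

/* Standalone copy of the split performed by mpeg4_decode_profile_level():
 * high nibble = profile, low nibble = level, with the Simple Profile
 * "level 8" code remapped to level 0. */
static void split_profile_level(unsigned pli, int *profile, int *level)
{
    *profile = (pli & 0xf0) >> 4;
    *level   =  pli & 0x0f;
    if (*profile == 0 && *level == 8)
        *level = 0;
}

int main(void)
{
    int profile, level;
    split_profile_level(0xF5, &profile, &level);
    printf("%d %d\n", profile, level);  /* 15 5 -> Advanced Simple Profile, level 5 */
    split_profile_level(0x08, &profile, &level);
    printf("%d %d\n", profile, level);  /* 0 0  -> Simple Profile, level 0 */
    return 0;
}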
......@@ -69,7 +69,8 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][6
for(qscale=qmin; qscale<=qmax; qscale++){
int i;
if (dsp->fdct == ff_jpeg_fdct_islow
if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
dsp->fdct == ff_jpeg_fdct_islow_10
#ifdef FAAN_POSTSCALE
|| dsp->fdct == ff_faandct
#endif
......
......@@ -1373,7 +1373,7 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int l
void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
c->pix_abs[0][1] = sad16_x2_altivec;
c->pix_abs[0][2] = sad16_y2_altivec;
......@@ -1387,11 +1387,10 @@ void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
c->sse[0]= sse16_altivec;
c->pix_sum = pix_sum_altivec;
c->diff_pixels = diff_pixels_altivec;
c->get_pixels = get_pixels_altivec;
if (!high_bit_depth)
c->clear_block = clear_block_altivec;
c->add_bytes= add_bytes_altivec;
if (!high_bit_depth) {
c->get_pixels = get_pixels_altivec;
c->clear_block = clear_block_altivec;
c->put_pixels_tab[0][0] = put_pixels16_altivec;
/* the two functions do the same thing, so use the same code */
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
......
......@@ -145,7 +145,7 @@ static void prefetch_ppc(void *mem, int stride, int h)
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
// Common optimizations whether AltiVec is available or not
c->prefetch = prefetch_ppc;
......@@ -172,8 +172,9 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
c->gmc1 = gmc1_altivec;
#if CONFIG_ENCODERS
if (avctx->dct_algo == FF_DCT_AUTO ||
avctx->dct_algo == FF_DCT_ALTIVEC) {
if (avctx->bits_per_raw_sample <= 8 &&
(avctx->dct_algo == FF_DCT_AUTO ||
avctx->dct_algo == FF_DCT_ALTIVEC)) {
c->fdct = fdct_altivec;
}
#endif //CONFIG_ENCODERS
......
......@@ -967,7 +967,7 @@ H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
if (!high_bit_depth) {
......
......@@ -142,7 +142,7 @@ static void put_pixels16_mmi(uint8_t *block, const uint8_t *pixels, int line_siz
void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx)
{
const int idct_algo= avctx->idct_algo;
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (!high_bit_depth) {
c->clear_blocks = clear_blocks_mmi;
......@@ -152,9 +152,9 @@ void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx)
c->put_pixels_tab[0][0] = put_pixels16_mmi;
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmi;
}
c->get_pixels = get_pixels_mmi;
}
if (avctx->bits_per_raw_sample <= 8 &&
(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_PS2)) {
......
......@@ -333,7 +333,7 @@ DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK)
void dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_rnd_pixels16_o;
......
......@@ -92,7 +92,7 @@ static void idct_add(uint8_t *dest, int line_size, DCTELEM *block)
void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx)
{
const int idct_algo= avctx->idct_algo;
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
dsputil_init_align(c,avctx);
if (!high_bit_depth)
......
......@@ -3953,7 +3953,7 @@ void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx)
{
/* VIS-specific optimizations */
int accel = vis_level ();
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (accel & ACCEL_SPARC_VIS) {
if (avctx->bits_per_raw_sample <= 8 &&
......
......@@ -53,6 +53,7 @@ static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int l
void ff_dnxhd_init_mmx(DNXHDEncContext *ctx)
{
if (av_get_cpu_flags() & AV_CPU_FLAG_SSE2) {
ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2;
if (ctx->cid_table->bit_depth == 8)
ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2;
}
}
......@@ -2341,7 +2341,7 @@ void ff_vector_clip_int32_sse41 (int32_t *dst, const int32_t *src, int32_t min
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
int mm_flags = av_get_cpu_flags();
const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
const int bit_depth = avctx->bits_per_raw_sample;
if (avctx->dsp_mask) {
......
......@@ -1098,10 +1098,12 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si
void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
int mm_flags = av_get_cpu_flags();
int bit_depth = avctx->bits_per_raw_sample;
if (mm_flags & AV_CPU_FLAG_MMX) {
const int dct_algo = avctx->dct_algo;
if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
if (avctx->bits_per_raw_sample <= 8 &&
(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX)) {
if(mm_flags & AV_CPU_FLAG_SSE2){
c->fdct = ff_fdct_sse2;
}else if(mm_flags & AV_CPU_FLAG_MMX2){
......@@ -1111,7 +1113,8 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
}
c->get_pixels = get_pixels_mmx;
if (bit_depth <= 8)
c->get_pixels = get_pixels_mmx;
c->diff_pixels = diff_pixels_mmx;
c->pix_sum = pix_sum16_mmx;
......@@ -1158,7 +1161,8 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
if(mm_flags & AV_CPU_FLAG_SSE2){
c->get_pixels = get_pixels_sse2;
if (bit_depth <= 8)
c->get_pixels = get_pixels_sse2;
c->sum_abs_dctelem= sum_abs_dctelem_sse2;
#if HAVE_YASM && HAVE_ALIGNED_STACK
c->hadamard8_diff[0]= ff_hadamard8_diff16_sse2;
......
......@@ -456,7 +456,7 @@ static int mov_read_hdlr(MOVContext *c, AVIOContext *pb, MOVAtom atom)
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
else if(type == MKTAG('m','1','a',' '))
st->codec->codec_id = CODEC_ID_MP2;
else if(type == MKTAG('s','u','b','p'))
else if((type == MKTAG('s','u','b','p')) || (type == MKTAG('c','l','c','p')))
st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
avio_rb32(pb); /* component manufacture */
......
......@@ -218,22 +218,6 @@ static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq)
return 1;
}
#if 0
/**
* This function is currently unused; without a valid local ntp time, I don't see how we could calculate the
* difference between the arrival and sent timestamp. As a result, the jitter and transit statistics values
* never change. I left this in in case someone else can see a way. (rdm)
*/
static void rtcp_update_jitter(RTPStatistics *s, uint32_t sent_timestamp, uint32_t arrival_timestamp)
{
uint32_t transit= arrival_timestamp - sent_timestamp;
int d;
s->transit= transit;
d= FFABS(transit - s->transit);
s->jitter += d - ((s->jitter + 8)>>4);
}
#endif
int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
{
AVIOContext *pb;
......
......@@ -225,20 +225,6 @@ static int rtp_read(URLContext *h, uint8_t *buf, int size)
int len, n;
struct pollfd p[2] = {{s->rtp_fd, POLLIN, 0}, {s->rtcp_fd, POLLIN, 0}};
#if 0
for(;;) {
from_len = sizeof(from);
len = recvfrom (s->rtp_fd, buf, size, 0,
(struct sockaddr *)&from, &from_len);
if (len < 0) {
if (ff_neterrno() == AVERROR(EAGAIN) ||
ff_neterrno() == AVERROR(EINTR))
continue;
return AVERROR(EIO);
}
break;
}
#else
for(;;) {
if (url_interrupt_cb())
return AVERROR_EXIT;
......@@ -277,7 +263,6 @@ static int rtp_read(URLContext *h, uint8_t *buf, int size)
return AVERROR(EIO);
}
}
#endif
return len;
}
......@@ -296,14 +281,6 @@ static int rtp_write(URLContext *h, const uint8_t *buf, int size)
}
ret = ffurl_write(hd, buf, size);
#if 0
{
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = 10 * 1000000;
nanosleep(&ts, NULL);
}
#endif
return ret;
}
......
......@@ -235,6 +235,11 @@ do_video_encoding dnxhd-720p-rd.dnxhd "-threads 4 -mbd rd -s hd720 -b 90M -pix_f
do_video_decoding "" "-s cif -pix_fmt yuv420p"
fi
if [ -n "$do_dnxhd_720p_10bit" ] ; then
do_video_encoding dnxhd-720p-10bit.dnxhd "-s hd720 -b 90M -pix_fmt yuv422p10 -vframes 5 -an"
do_video_decoding "" "-s cif -pix_fmt yuv420p"
fi
if [ -n "$do_svq1" ] ; then
do_video_encoding svq1.mov "-an -vcodec svq1 -qscale 3 -pix_fmt yuv410p"
do_video_decoding "" "-pix_fmt yuv420p"
......
cb29b6ae4e1562d95f9311991fef98df *./tests/data/vsynth1/dnxhd-720p-10bit.dnxhd
2293760 ./tests/data/vsynth1/dnxhd-720p-10bit.dnxhd
2f45bb1af7da5dd3dca870ac87237b7d *./tests/data/dnxhd_720p_10bit.vsynth1.out.yuv
stddev: 6.27 PSNR: 32.18 MAXDIFF: 64 bytes: 760320/ 7603200
8648511257afb816b5b911706ca391db *./tests/data/vsynth2/dnxhd-720p-10bit.dnxhd
2293760 ./tests/data/vsynth2/dnxhd-720p-10bit.dnxhd
391b6f5aa7c7b488b479cb43d420b860 *./tests/data/dnxhd_720p_10bit.vsynth2.out.yuv
stddev: 1.35 PSNR: 45.46 MAXDIFF: 23 bytes: 760320/ 7603200