Commit 01590329 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master: (25 commits)
  rv40dsp x86: MMX/MMX2/3DNow/SSE2/SSSE3 implementations of MC
  ape: Use unsigned integer maths
  arm: dsputil: fix overreads in put/avg_pixels functions
  h264: K&R formatting cosmetics for header files (part II/II)
  h264: K&R formatting cosmetics for header files (part I/II)
  rtmp: Implement check bandwidth notification.
  rtmp: Support 'rtmp_swfurl', an option which specifies the URL of the SWF player.
  rtmp: Support 'rtmp_flashver', an option which overrides the version of the Flash plugin.
  rtmp: Support 'rtmp_tcurl', an option which overrides the URL of the target stream.
  cmdutils: Add fallback case to switch in check_stream_specifier().
  sctp: be consistent with socket option level
  configure: Add _XOPEN_SOURCE=600 to Solaris preprocessor flags.
  vcr1enc: drop pointless empty encode_init() wrapper function
  vcr1: drop pointless write-only AVCodecContext member from VCR1Context
  vcr1: group encoder code together to save #ifdefs
  vcr1: cosmetics: K&R prettyprinting, typos, parentheses, dead code, comments
  mov: make one comment slightly more specific
  lavr: replace the SSE version of ff_conv_fltp_to_flt_6ch() with SSE4 and AVX
  lavfi: move audio-related functions to a separate file.
  lavfi: remove some audio-related function from public API.
  ...

Conflicts:
	cmdutils.c
	libavcodec/h264.h
	libavcodec/h264_mvpred.h
	libavcodec/vcr1.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/defaults.c
	libavfilter/internal.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 2a793ff2 110d0cdc
......@@ -38,6 +38,7 @@
#if CONFIG_POSTPROC
#include "libpostproc/postprocess.h"
#endif
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
......@@ -1075,7 +1076,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
case 'd': type = AVMEDIA_TYPE_DATA; break;
case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
default: abort(); // never reached, silence warning
default: av_assert0(0);
}
if (type != st->codec->codec_type)
return 0;
......
......@@ -2662,7 +2662,7 @@ case $target_os in
SHFLAGS='-shared -Wl,-h,$$(@F)'
enabled x86 && SHFLAGS="-mimpure-text $SHFLAGS"
network_extralibs="-lsocket -lnsl"
add_cppflags -D__EXTENSIONS__
add_cppflags -D__EXTENSIONS__ -D_XOPEN_SOURCE=600
# When using suncc to build, the Solaris linker will mark
# an executable with each instruction set encountered by
# the Solaris assembler. As our libraries contain their own
......
......@@ -393,7 +393,7 @@ static inline int range_get_symbol(APEContext *ctx,
}
/** @} */ // group rangecoder
static inline void update_rice(APERice *rice, int x)
static inline void update_rice(APERice *rice, unsigned int x)
{
int lim = rice->k ? (1 << (rice->k + 4)) : 0;
rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
......@@ -406,7 +406,7 @@ static inline void update_rice(APERice *rice, int x)
static inline int ape_decode_value(APEContext *ctx, APERice *rice)
{
int x, overflow;
unsigned int x, overflow;
if (ctx->fileversion < 3990) {
int tmpk;
......
......@@ -95,6 +95,7 @@ endfunc
.endm
.macro pixels16_y2 rnd=1, avg=0
sub r3, r3, #2
vld1.64 {q0}, [r1], r2
vld1.64 {q1}, [r1], r2
1: subs r3, r3, #2
......@@ -114,10 +115,25 @@ endfunc
vst1.64 {q2}, [r0,:128], r2
vst1.64 {q3}, [r0,:128], r2
bne 1b
avg q2, q0, q1
vld1.64 {q0}, [r1], r2
avg q3, q0, q1
.if \avg
vld1.8 {q8}, [r0,:128], r2
vld1.8 {q9}, [r0,:128]
vrhadd.u8 q2, q2, q8
vrhadd.u8 q3, q3, q9
sub r0, r0, r2
.endif
vst1.64 {q2}, [r0,:128], r2
vst1.64 {q3}, [r0,:128], r2
bx lr
.endm
.macro pixels16_xy2 rnd=1, avg=0
sub r3, r3, #2
vld1.64 {d0-d2}, [r1], r2
vld1.64 {d4-d6}, [r1], r2
.ifeq \rnd
......@@ -173,6 +189,42 @@ endfunc
vaddl.u8 q11, d3, d5
vst1.64 {q15}, [r0,:128], r2
bgt 1b
vld1.64 {d0-d2}, [r1], r2
vadd.u16 q12, q8, q9
.ifeq \rnd
vadd.u16 q12, q12, q13
.endif
vext.8 q15, q0, q1, #1
vadd.u16 q1 , q10, q11
shrn d28, q12, #2
.ifeq \rnd
vadd.u16 q1, q1, q13
.endif
shrn d29, q1, #2
.if \avg
vld1.8 {q8}, [r0,:128]
vrhadd.u8 q14, q14, q8
.endif
vaddl.u8 q8, d0, d30
vaddl.u8 q10, d1, d31
vst1.64 {q14}, [r0,:128], r2
vadd.u16 q12, q8, q9
.ifeq \rnd
vadd.u16 q12, q12, q13
.endif
vadd.u16 q0, q10, q11
shrn d30, q12, #2
.ifeq \rnd
vadd.u16 q0, q0, q13
.endif
shrn d31, q0, #2
.if \avg
vld1.8 {q9}, [r0,:128]
vrhadd.u8 q15, q15, q9
.endif
vst1.64 {q15}, [r0,:128], r2
bx lr
.endm
......@@ -228,6 +280,7 @@ endfunc
.endm
.macro pixels8_y2 rnd=1, avg=0
sub r3, r3, #2
vld1.64 {d0}, [r1], r2
vld1.64 {d1}, [r1], r2
1: subs r3, r3, #2
......@@ -246,10 +299,24 @@ endfunc
vst1.64 {d4}, [r0,:64], r2
vst1.64 {d5}, [r0,:64], r2
bne 1b
avg d4, d0, d1
vld1.64 {d0}, [r1], r2
avg d5, d0, d1
.if \avg
vld1.8 {d2}, [r0,:64], r2
vld1.8 {d3}, [r0,:64]
vrhadd.u8 q2, q2, q1
sub r0, r0, r2
.endif
vst1.64 {d4}, [r0,:64], r2
vst1.64 {d5}, [r0,:64], r2
bx lr
.endm
.macro pixels8_xy2 rnd=1, avg=0
sub r3, r3, #2
vld1.64 {q0}, [r1], r2
vld1.64 {q1}, [r1], r2
.ifeq \rnd
......@@ -291,6 +358,31 @@ endfunc
vaddl.u8 q9, d2, d6
vst1.64 {d7}, [r0,:64], r2
bgt 1b
vld1.64 {q0}, [r1], r2
vadd.u16 q10, q8, q9
vext.8 d4, d0, d1, #1
.ifeq \rnd
vadd.u16 q10, q10, q11
.endif
vaddl.u8 q8, d0, d4
shrn d5, q10, #2
vadd.u16 q10, q8, q9
.if \avg
vld1.8 {d7}, [r0,:64]
vrhadd.u8 d5, d5, d7
.endif
.ifeq \rnd
vadd.u16 q10, q10, q11
.endif
vst1.64 {d5}, [r0,:64], r2
shrn d7, q10, #2
.if \avg
vld1.8 {d5}, [r0,:64]
vrhadd.u8 d7, d7, d5
.endif
vst1.64 {d7}, [r0,:64], r2
bx lr
.endm
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -28,56 +28,90 @@
#define AVCODEC_H264DSP_H
#include <stdint.h>
#include "dsputil.h"
typedef void (*h264_weight_func)(uint8_t *block, int stride, int height,
int log2_denom, int weight, int offset);
typedef void (*h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int height,
int log2_denom, int weightd, int weights, int offset);
typedef void (*h264_biweight_func)(uint8_t *dst, uint8_t *src,
int stride, int height, int log2_denom,
int weightd, int weights, int offset);
/**
* Context for storing H.264 DSP functions
*/
typedef struct H264DSPContext{
typedef struct H264DSPContext {
/* weighted MC */
h264_weight_func weight_h264_pixels_tab[4];
h264_biweight_func biweight_h264_pixels_tab[4];
/* loop filter */
void (*h264_v_loop_filter_luma)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_luma)(uint8_t *pix/*align 4 */, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_luma_mbaff)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_v_loop_filter_luma)(uint8_t *pix /*align 16*/, int stride,
int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_luma)(uint8_t *pix /*align 4 */, int stride,
int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_luma_mbaff)(uint8_t *pix /*align 16*/, int stride,
int alpha, int beta, int8_t *tc0);
/* v/h_loop_filter_luma_intra: align 16 */
void (*h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
void (*h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
void (*h264_h_loop_filter_luma_mbaff_intra)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta);
void (*h264_v_loop_filter_chroma)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_chroma)(uint8_t *pix/*align 4*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_chroma_mbaff)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
void (*h264_h_loop_filter_chroma_mbaff_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
void (*h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride,
int alpha, int beta);
void (*h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride,
int alpha, int beta);
void (*h264_h_loop_filter_luma_mbaff_intra)(uint8_t *pix /*align 16*/,
int stride, int alpha, int beta);
void (*h264_v_loop_filter_chroma)(uint8_t *pix /*align 8*/, int stride,
int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_chroma)(uint8_t *pix /*align 4*/, int stride,
int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_chroma_mbaff)(uint8_t *pix /*align 8*/,
int stride, int alpha, int beta,
int8_t *tc0);
void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix /*align 8*/,
int stride, int alpha, int beta);
void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix /*align 8*/,
int stride, int alpha, int beta);
void (*h264_h_loop_filter_chroma_mbaff_intra)(uint8_t *pix /*align 8*/,
int stride, int alpha, int beta);
// h264_loop_filter_strength: simd only. the C version is inlined in h264.c
void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field);
void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40],
int8_t ref[2][40], int16_t mv[2][40][2],
int bidir, int edges, int step,
int mask_mv0, int mask_mv1, int field);
/* IDCT */
void (*h264_idct_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
void (*h264_idct8_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
void (*h264_idct_dc_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
void (*h264_idct8_dc_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
void (*h264_idct_add)(uint8_t *dst /*align 4*/,
DCTELEM *block /*align 16*/, int stride);
void (*h264_idct8_add)(uint8_t *dst /*align 8*/,
DCTELEM *block /*align 16*/, int stride);
void (*h264_idct_dc_add)(uint8_t *dst /*align 4*/,
DCTELEM *block /*align 16*/, int stride);
void (*h264_idct8_dc_add)(uint8_t *dst /*align 8*/,
DCTELEM *block /*align 16*/, int stride);
void (*h264_idct_add16)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[15*8]);
void (*h264_idct8_add4)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[15*8]);
void (*h264_idct_add8)(uint8_t **dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[15*8]);
void (*h264_idct_add16intra)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[15*8]);
void (*h264_luma_dc_dequant_idct)(DCTELEM *output, DCTELEM *input/*align 16*/, int qmul);
void (*h264_idct_add16)(uint8_t *dst /*align 16*/, const int *blockoffset,
DCTELEM *block /*align 16*/, int stride,
const uint8_t nnzc[15 * 8]);
void (*h264_idct8_add4)(uint8_t *dst /*align 16*/, const int *blockoffset,
DCTELEM *block /*align 16*/, int stride,
const uint8_t nnzc[15 * 8]);
void (*h264_idct_add8)(uint8_t **dst /*align 16*/, const int *blockoffset,
DCTELEM *block /*align 16*/, int stride,
const uint8_t nnzc[15 * 8]);
void (*h264_idct_add16intra)(uint8_t *dst /*align 16*/, const int *blockoffset,
DCTELEM *block /*align 16*/,
int stride, const uint8_t nnzc[15 * 8]);
void (*h264_luma_dc_dequant_idct)(DCTELEM *output,
DCTELEM *input /*align 16*/, int qmul);
void (*h264_chroma_dc_dequant_idct)(DCTELEM *block, int qmul);
}H264DSPContext;
} H264DSPContext;
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc);
void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth, const int chroma_format_idc);
void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth, const int chroma_format_idc);
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chroma_format_idc);
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
#endif /* AVCODEC_H264DSP_H */
......@@ -35,18 +35,18 @@
* Prediction types
*/
//@{
#define VERT_PRED 0
#define HOR_PRED 1
#define DC_PRED 2
#define DIAG_DOWN_LEFT_PRED 3
#define DIAG_DOWN_RIGHT_PRED 4
#define VERT_RIGHT_PRED 5
#define HOR_DOWN_PRED 6
#define VERT_LEFT_PRED 7
#define HOR_UP_PRED 8
#define VERT_PRED 0
#define HOR_PRED 1
#define DC_PRED 2
#define DIAG_DOWN_LEFT_PRED 3
#define DIAG_DOWN_RIGHT_PRED 4
#define VERT_RIGHT_PRED 5
#define HOR_DOWN_PRED 6
#define VERT_LEFT_PRED 7
#define HOR_UP_PRED 8
// DC edge (not for VP8)
#define LEFT_DC_PRED 9
#define LEFT_DC_PRED 9
#define TOP_DC_PRED 10
#define DC_128_PRED 11
......@@ -56,7 +56,7 @@
#define VERT_LEFT_PRED_RV40_NODOWN 14
// VP8 specific
#define TM_VP8_PRED 9 ///< "True Motion", used instead of plane
#define TM_VP8_PRED 9 ///< "True Motion", used instead of plane
#define VERT_VP8_PRED 10 ///< for VP8, #VERT_PRED is the average of
///< (left col+cur col x2+right col) / 4;
///< this is the "unaveraged" one
......@@ -65,44 +65,53 @@
#define DC_127_PRED 12
#define DC_129_PRED 13
#define DC_PRED8x8 0
#define HOR_PRED8x8 1
#define VERT_PRED8x8 2
#define PLANE_PRED8x8 3
#define DC_PRED8x8 0
#define HOR_PRED8x8 1
#define VERT_PRED8x8 2
#define PLANE_PRED8x8 3
// DC edge
#define LEFT_DC_PRED8x8 4
#define TOP_DC_PRED8x8 5
#define DC_128_PRED8x8 6
#define LEFT_DC_PRED8x8 4
#define TOP_DC_PRED8x8 5
#define DC_128_PRED8x8 6
// H264/SVQ3 (8x8) specific
#define ALZHEIMER_DC_L0T_PRED8x8 7
#define ALZHEIMER_DC_0LT_PRED8x8 8
#define ALZHEIMER_DC_L00_PRED8x8 9
#define ALZHEIMER_DC_L0T_PRED8x8 7
#define ALZHEIMER_DC_0LT_PRED8x8 8
#define ALZHEIMER_DC_L00_PRED8x8 9
#define ALZHEIMER_DC_0L0_PRED8x8 10
// VP8 specific
#define DC_127_PRED8x8 7
#define DC_129_PRED8x8 8
#define DC_127_PRED8x8 7
#define DC_129_PRED8x8 8
//@}
/**
* Context for storing H.264 prediction functions
*/
typedef struct H264PredContext{
void (*pred4x4 [9+3+3])(uint8_t *src, const uint8_t *topright, int stride);//FIXME move to dsp?
void (*pred8x8l [9+3])(uint8_t *src, int topleft, int topright, int stride);
void (*pred8x8 [4+3+4])(uint8_t *src, int stride);
void (*pred16x16[4+3+2])(uint8_t *src, int stride);
typedef struct H264PredContext {
void(*pred4x4[9 + 3 + 3])(uint8_t *src, const uint8_t *topright, int stride); //FIXME move to dsp?
void(*pred8x8l[9 + 3])(uint8_t *src, int topleft, int topright, int stride);
void(*pred8x8[4 + 3 + 4])(uint8_t *src, int stride);
void(*pred16x16[4 + 3 + 2])(uint8_t *src, int stride);
void (*pred4x4_add [2])(uint8_t *pix/*align 4*/, const DCTELEM *block/*align 16*/, int stride);
void (*pred8x8l_add [2])(uint8_t *pix/*align 8*/, const DCTELEM *block/*align 16*/, int stride);
void (*pred8x8_add [3])(uint8_t *pix/*align 8*/, const int *block_offset, const DCTELEM *block/*align 16*/, int stride);
void (*pred16x16_add[3])(uint8_t *pix/*align 16*/, const int *block_offset, const DCTELEM *block/*align 16*/, int stride);
}H264PredContext;
void(*pred4x4_add[2])(uint8_t *pix /*align 4*/,
const DCTELEM *block /*align 16*/, int stride);
void(*pred8x8l_add[2])(uint8_t *pix /*align 8*/,
const DCTELEM *block /*align 16*/, int stride);
void(*pred8x8_add[3])(uint8_t *pix /*align 8*/,
const int *block_offset,
const DCTELEM *block /*align 16*/, int stride);
void(*pred16x16_add[3])(uint8_t *pix /*align 16*/,
const int *block_offset,
const DCTELEM *block /*align 16*/, int stride);
} H264PredContext;
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_arm(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_arm(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
#endif /* AVCODEC_H264PRED_H */
......@@ -21,39 +21,57 @@
/**
* @file
* ati vcr1 codec.
* ATI VCR1 codec
*/
#include "avcodec.h"
#include "dsputil.h"
//#undef NDEBUG
//#include <assert.h>
/* Disable the encoder. */
#undef CONFIG_VCR1_ENCODER
#define CONFIG_VCR1_ENCODER 0
typedef struct VCR1Context{
AVCodecContext *avctx;
typedef struct VCR1Context {
AVFrame picture;
int delta[16];
int offset[4];
} VCR1Context;
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
static av_cold void common_init(AVCodecContext *avctx)
{
VCR1Context *const a = avctx->priv_data;
avctx->coded_frame = &a->picture;
avcodec_get_frame_defaults(&a->picture);
}
static av_cold int decode_init(AVCodecContext *avctx)
{
common_init(avctx);
avctx->pix_fmt = PIX_FMT_YUV410P;
return 0;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
VCR1Context *s = avctx->priv_data;
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
VCR1Context * const a = avctx->priv_data;
AVFrame *picture = data;
AVFrame * const p = &a->picture;
const uint8_t *bytestream= buf;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
VCR1Context *const a = avctx->priv_data;
AVFrame *picture = data;
AVFrame *const p = &a->picture;
const uint8_t *bytestream = buf;
int i, x, y;
if(p->data[0])
if (p->data[0])
avctx->release_buffer(avctx, p);
if(buf_size < 16 + avctx->height + avctx->width*avctx->height*5/8){
......@@ -61,57 +79,57 @@ static int decode_frame(AVCodecContext *avctx,
return AVERROR(EINVAL);
}
p->reference= 0;
if(avctx->get_buffer(avctx, p) < 0){
p->reference = 0;
if (avctx->get_buffer(avctx, p) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
for(i=0; i<16; i++){
a->delta[i]= *(bytestream++);
for (i = 0; i < 16; i++) {
a->delta[i] = *bytestream++;
bytestream++;
}
for(y=0; y<avctx->height; y++){
for (y = 0; y < avctx->height; y++) {
int offset;
uint8_t *luma= &a->picture.data[0][ y*a->picture.linesize[0] ];
uint8_t *luma = &a->picture.data[0][y * a->picture.linesize[0]];
if((y&3) == 0){
uint8_t *cb= &a->picture.data[1][ (y>>2)*a->picture.linesize[1] ];
uint8_t *cr= &a->picture.data[2][ (y>>2)*a->picture.linesize[2] ];
if ((y & 3) == 0) {
uint8_t *cb = &a->picture.data[1][(y >> 2) * a->picture.linesize[1]];
uint8_t *cr = &a->picture.data[2][(y >> 2) * a->picture.linesize[2]];
for(i=0; i<4; i++)
a->offset[i]= *(bytestream++);
for (i = 0; i < 4; i++)
a->offset[i] = *bytestream++;
offset= a->offset[0] - a->delta[ bytestream[2]&0xF ];
for(x=0; x<avctx->width; x+=4){
luma[0]=( offset += a->delta[ bytestream[2]&0xF ]);
luma[1]=( offset += a->delta[ bytestream[2]>>4 ]);
luma[2]=( offset += a->delta[ bytestream[0]&0xF ]);
luma[3]=( offset += a->delta[ bytestream[0]>>4 ]);
luma += 4;
offset = a->offset[0] - a->delta[bytestream[2] & 0xF];
for (x = 0; x < avctx->width; x += 4) {
luma[0] = offset += a->delta[bytestream[2] & 0xF];
luma[1] = offset += a->delta[bytestream[2] >> 4];
luma[2] = offset += a->delta[bytestream[0] & 0xF];
luma[3] = offset += a->delta[bytestream[0] >> 4];
luma += 4;
*(cb++) = bytestream[3];
*(cr++) = bytestream[1];
*cb++ = bytestream[3];
*cr++ = bytestream[1];
bytestream+= 4;
bytestream += 4;
}
}else{
offset= a->offset[y&3] - a->delta[ bytestream[2]&0xF ];
for(x=0; x<avctx->width; x+=8){
luma[0]=( offset += a->delta[ bytestream[2]&0xF ]);
luma[1]=( offset += a->delta[ bytestream[2]>>4 ]);
luma[2]=( offset += a->delta[ bytestream[3]&0xF ]);
luma[3]=( offset += a->delta[ bytestream[3]>>4 ]);
luma[4]=( offset += a->delta[ bytestream[0]&0xF ]);
luma[5]=( offset += a->delta[ bytestream[0]>>4 ]);
luma[6]=( offset += a->delta[ bytestream[1]&0xF ]);
luma[7]=( offset += a->delta[ bytestream[1]>>4 ]);
luma += 8;
bytestream+= 4;
} else {
offset = a->offset[y & 3] - a->delta[bytestream[2] & 0xF];
for (x = 0; x < avctx->width; x += 8) {
luma[0] = offset += a->delta[bytestream[2] & 0xF];
luma[1] = offset += a->delta[bytestream[2] >> 4];
luma[2] = offset += a->delta[bytestream[3] & 0xF];
luma[3] = offset += a->delta[bytestream[3] >> 4];
luma[4] = offset += a->delta[bytestream[0] & 0xF];
luma[5] = offset += a->delta[bytestream[0] >> 4];
luma[6] = offset += a->delta[bytestream[1] & 0xF];
luma[7] = offset += a->delta[bytestream[1] >> 4];
luma += 8;
bytestream += 4;
}
}
}
......@@ -122,62 +140,6 @@ static int decode_frame(AVCodecContext *avctx,
return buf_size;
}
#if CONFIG_VCR1_ENCODER
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
VCR1Context * const a = avctx->priv_data;
AVFrame *pict = data;
AVFrame * const p = &a->picture;
int size;
*p = *pict;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
avpriv_align_put_bits(&a->pb);
while(get_bit_count(&a->pb)&31)
put_bits(&a->pb, 8, 0);
size= get_bit_count(&a->pb)/32;
return size*4;
}
#endif
static av_cold void common_init(AVCodecContext *avctx){
VCR1Context * const a = avctx->priv_data;
avctx->coded_frame = &a->picture;
avcodec_get_frame_defaults(&a->picture);
a->avctx= avctx;
}
static av_cold int decode_init(AVCodecContext *avctx){
common_init(avctx);
avctx->pix_fmt= PIX_FMT_YUV410P;
return 0;
}
static av_cold int decode_end(AVCodecContext *avctx){
VCR1Context *s = avctx->priv_data;
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
return 0;
}
#if CONFIG_VCR1_ENCODER
static av_cold int encode_init(AVCodecContext *avctx){
common_init(avctx);
return 0;
}
#endif
AVCodec ff_vcr1_decoder = {
.name = "vcr1",
.type = AVMEDIA_TYPE_VIDEO,
......@@ -190,14 +152,39 @@ AVCodec ff_vcr1_decoder = {
.long_name = NULL_IF_CONFIG_SMALL("ATI VCR1"),
};
/* Disable the encoder. */
#undef CONFIG_VCR1_ENCODER
#define CONFIG_VCR1_ENCODER 0
#if CONFIG_VCR1_ENCODER
static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
int buf_size, void *data)
{
VCR1Context *const a = avctx->priv_data;
AVFrame *pict = data;
AVFrame *const p = &a->picture;
int size;
*p = *pict;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
avpriv_align_put_bits(&a->pb);
while (get_bit_count(&a->pb) & 31)
put_bits(&a->pb, 8, 0);
size = get_bit_count(&a->pb) / 32;
return size * 4;
}
AVCodec ff_vcr1_encoder = {
.name = "vcr1",
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_VCR1,
.priv_data_size = sizeof(VCR1Context),
.init = encode_init,
.init = common_init,
.encode = encode_frame,
.long_name = NULL_IF_CONFIG_SMALL("ATI VCR1"),
};
#endif
#endif /* CONFIG_VCR1_ENCODER */
......@@ -1792,6 +1792,22 @@ QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_, 8, 3dnow)
QPEL_2TAP(avg_, 8, 3dnow)
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
{
put_pixels8_xy2_mmx(dst, src, stride, 8);
}
void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
{
put_pixels16_xy2_mmx(dst, src, stride, 16);
}
void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
{
avg_pixels8_xy2_mmx(dst, src, stride, 8);
}
void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
{
avg_pixels16_xy2_mmx(dst, src, stride, 16);
}
#if HAVE_YASM
typedef void emu_edge_core_func(uint8_t *buf, const uint8_t *src,
......
......@@ -199,6 +199,11 @@ void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd);
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd);
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *block, uint8_t *pixels, int line_size);
void ff_put_rv40_qpel16_mc33_mmx(uint8_t *block, uint8_t *pixels, int line_size);
void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *block, uint8_t *pixels, int line_size);
void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *block, uint8_t *pixels, int line_size);
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);
......
;******************************************************************************
;* MMX/SSE2-optimized functions for the RV40 decoder
;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
;* Copyright (C) 2012 Christophe Gisquet <christophe.gisquet@gmail.com>
;*
;* This file is part of Libav.
......@@ -25,11 +27,319 @@
SECTION_RODATA
align 16
shift_round: times 8 dw 1 << (16 - 6)
cextern pw_16
pw_1024: times 8 dw 1 << (16 - 6) ; pw_1024
sixtap_filter_hb_m: times 8 db 1, -5
times 8 db 52, 20
; multiplied by 2 to have the same shift
times 8 db 2, -10
times 8 db 40, 40
; back to normal
times 8 db 1, -5
times 8 db 20, 52
sixtap_filter_v_m: times 8 dw 1
times 8 dw -5
times 8 dw 52
times 8 dw 20
; multiplied by 2 to have the same shift
times 8 dw 2
times 8 dw -10
times 8 dw 40
times 8 dw 40
; back to normal
times 8 dw 1
times 8 dw -5
times 8 dw 20
times 8 dw 52
%ifdef PIC
%define sixtap_filter_hw picregq
%define sixtap_filter_hb picregq
%define sixtap_filter_v picregq
%define npicregs 1
%else
%define sixtap_filter_hw sixtap_filter_hw_m
%define sixtap_filter_hb sixtap_filter_hb_m
%define sixtap_filter_v sixtap_filter_v_m
%define npicregs 0
%endif
filter_h6_shuf1: db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h6_shuf2: db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
filter_h6_shuf3: db 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 10, 9, 11, 10, 12, 11
cextern pw_32
cextern pw_16
cextern pw_512
SECTION .text
;-----------------------------------------------------------------------------
; subpel MC functions:
;
; void [put|rv40]_rv40_qpel_[h|v]_<opt>(uint8_t *dst, int deststride,
; uint8_t *src, int srcstride,
; int len, int m);
;----------------------------------------------------------------------
%macro LOAD 2
%if WIN64
movsxd %1q, %1d
%endif
%ifdef PIC
add %1q, picregq
%else
add %1q, %2
%endif
%endmacro
%macro STORE 3
%ifidn %3, avg
movh %2, [dstq]
%endif
packuswb %1, %1
%ifidn %3, avg
%if cpuflag(3dnow)
pavgusb %1, %2
%else
pavgb %1, %2
%endif
%endif
movh [dstq], %1
%endmacro
%macro FILTER_V 1
cglobal %1_rv40_qpel_v, 6,6+npicregs,12, dst, dststride, src, srcstride, height, my, picreg
%ifdef PIC
lea picregq, [sixtap_filter_v_m]
%endif
pxor m7, m7
LOAD my, sixtap_filter_v
; read 5 lines
sub srcq, srcstrideq
sub srcq, srcstrideq
movh m0, [srcq]
movh m1, [srcq+srcstrideq]
movh m2, [srcq+srcstrideq*2]
lea srcq, [srcq+srcstrideq*2]
add srcq, srcstrideq
movh m3, [srcq]
movh m4, [srcq+srcstrideq]
punpcklbw m0, m7
punpcklbw m1, m7
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m4, m7
%ifdef m8
mova m8, [myq+ 0]
mova m9, [myq+16]
mova m10, [myq+32]
mova m11, [myq+48]
%define COEFF05 m8
%define COEFF14 m9
%define COEFF2 m10
%define COEFF3 m11
%else
%define COEFF05 [myq+ 0]
%define COEFF14 [myq+16]
%define COEFF2 [myq+32]
%define COEFF3 [myq+48]
%endif
.nextrow:
mova m6, m1
movh m5, [srcq+2*srcstrideq] ; read new row
paddw m6, m4
punpcklbw m5, m7
pmullw m6, COEFF14
paddw m0, m5
pmullw m0, COEFF05
paddw m6, m0
mova m0, m1
paddw m6, [pw_32]
mova m1, m2
pmullw m2, COEFF2
paddw m6, m2
mova m2, m3
pmullw m3, COEFF3
paddw m6, m3
; round/clip/store
mova m3, m4
psraw m6, 6
mova m4, m5
STORE m6, m5, %1
; go to next line
add dstq, dststrideq
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
REP_RET
%endmacro
%macro FILTER_H 1
cglobal %1_rv40_qpel_h, 6, 6+npicregs, 12, dst, dststride, src, srcstride, height, mx, picreg
%ifdef PIC
lea picregq, [sixtap_filter_v_m]
%endif
pxor m7, m7
LOAD mx, sixtap_filter_v
mova m6, [pw_32]
%ifdef m8
mova m8, [mxq+ 0]
mova m9, [mxq+16]
mova m10, [mxq+32]
mova m11, [mxq+48]
%define COEFF05 m8
%define COEFF14 m9
%define COEFF2 m10
%define COEFF3 m11
%else
%define COEFF05 [mxq+ 0]
%define COEFF14 [mxq+16]
%define COEFF2 [mxq+32]
%define COEFF3 [mxq+48]
%endif
.nextrow:
movq m0, [srcq-2]
movq m5, [srcq+3]
movq m1, [srcq-1]
movq m4, [srcq+2]
punpcklbw m0, m7
punpcklbw m5, m7
punpcklbw m1, m7
punpcklbw m4, m7
movq m2, [srcq-0]
movq m3, [srcq+1]
paddw m0, m5
paddw m1, m4
punpcklbw m2, m7
punpcklbw m3, m7
pmullw m0, COEFF05
pmullw m1, COEFF14
pmullw m2, COEFF2
pmullw m3, COEFF3
paddw m0, m6
paddw m1, m2
paddw m0, m3
paddw m0, m1
psraw m0, 6
STORE m0, m1, %1
; go to next line
add dstq, dststrideq
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
REP_RET
%endmacro
%if ARCH_X86_32
INIT_MMX mmx
FILTER_V put
FILTER_H put
INIT_MMX mmx2
FILTER_V avg
FILTER_H avg
INIT_MMX 3dnow
FILTER_V avg
FILTER_H avg
%endif
INIT_XMM sse2
FILTER_H put
FILTER_H avg
FILTER_V put
FILTER_V avg
%macro FILTER_SSSE3 1
cglobal %1_rv40_qpel_v, 6,6+npicregs,8, dst, dststride, src, srcstride, height, my, picreg
%ifdef PIC
lea picregq, [sixtap_filter_hb_m]
%endif
; read 5 lines
sub srcq, srcstrideq
LOAD my, sixtap_filter_hb
sub srcq, srcstrideq
movh m0, [srcq]
movh m1, [srcq+srcstrideq]
movh m2, [srcq+srcstrideq*2]
lea srcq, [srcq+srcstrideq*2]
add srcq, srcstrideq
mova m5, [myq]
movh m3, [srcq]
movh m4, [srcq+srcstrideq]
lea srcq, [srcq+2*srcstrideq]
.nextrow:
mova m6, m2
punpcklbw m0, m1
punpcklbw m6, m3
pmaddubsw m0, m5
pmaddubsw m6, [myq+16]
movh m7, [srcq] ; read new row
paddw m6, m0
mova m0, m1
mova m1, m2
mova m2, m3
mova m3, m4
mova m4, m7
punpcklbw m7, m3
pmaddubsw m7, m5
paddw m6, m7
pmulhrsw m6, [pw_512]
STORE m6, m7, %1
; go to next line
add dstq, dststrideq
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
REP_RET
cglobal %1_rv40_qpel_h, 6,6+npicregs,8, dst, dststride, src, srcstride, height, mx, picreg
%ifdef PIC
lea picregq, [sixtap_filter_hb_m]
%endif
mova m3, [filter_h6_shuf2]
mova m4, [filter_h6_shuf3]
LOAD mx, sixtap_filter_hb
mova m5, [mxq] ; set up 6tap filter in bytes
mova m6, [mxq+16]
mova m7, [filter_h6_shuf1]
.nextrow:
movu m0, [srcq-2]
mova m1, m0
mova m2, m0
pshufb m0, m7
pshufb m1, m3
pshufb m2, m4
pmaddubsw m0, m5
pmaddubsw m1, m6
pmaddubsw m2, m5
paddw m0, m1
paddw m0, m2
pmulhrsw m0, [pw_512]
STORE m0, m1, %1
; go to next line
add dstq, dststrideq
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
REP_RET
%endmacro
INIT_XMM ssse3
FILTER_SSSE3 put
FILTER_SSSE3 avg
; %1=5bits weights?, %2=dst %3=src1 %4=src3 %5=stride if sse2
%macro RV40_WCORE 4-5
movh m4, [%3 + r6 + 0]
......@@ -143,7 +453,7 @@ SECTION .text
%macro RV40_WEIGHT 3
cglobal rv40_weight_func_%1_%2, 6, 7, 8
%if cpuflag(ssse3)
mova m1, [shift_round]
mova m1, [pw_1024]
%else
mova m1, [pw_16]
%endif
......
......@@ -22,8 +22,11 @@
/**
* @file
* RV40 decoder motion compensation functions x86-optimised
* 2,0 and 0,2 have h264 equivalents.
* 3,3 is bugged in the rv40 format and maps to _xy2 version
*/
#include "libavcodec/x86/dsputil_mmx.h"
#include "libavcodec/rv34dsp.h"
void ff_put_rv40_chroma_mc8_mmx (uint8_t *dst, uint8_t *src,
......@@ -53,6 +56,132 @@ DECLARE_WEIGHT(mmx)
DECLARE_WEIGHT(sse2)
DECLARE_WEIGHT(ssse3)
/** @{ */
/**
* Define one qpel function.
* LOOPSIZE must be already set to the number of pixels processed per
* iteration in the inner loop of the called functions.
* COFF(x) must be already defined so as to provide the offset into any
* array of coeffs used by the called function for the qpel position x.
*/
#define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT) \
static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst, \
uint8_t *src, \
int stride) \
{ \
int i; \
if (PH && PV) { \
DECLARE_ALIGNED(16, uint8_t, tmp)[SIZE * (SIZE + 5)]; \
uint8_t *tmpptr = tmp + SIZE * 2; \
src -= stride * 2; \
\
for (i = 0; i < SIZE; i += LOOPSIZE) \
ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride, \
SIZE + 5, HCOFF(PH)); \
for (i = 0; i < SIZE; i += LOOPSIZE) \
ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i, \
SIZE, SIZE, VCOFF(PV)); \
} else if (PV) { \
for (i = 0; i < SIZE; i += LOOPSIZE) \
ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i, \
stride, SIZE, VCOFF(PV)); \
} else { \
for (i = 0; i < SIZE; i += LOOPSIZE) \
ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i, \
stride, SIZE, HCOFF(PH)); \
} \
};
/** Declare functions for sizes 8 and 16 and given operations
* and qpel position. */
#define QPEL_FUNCS_DECL(OP, PH, PV, OPT) \
QPEL_FUNC_DECL(OP, 8, PH, PV, OPT) \
QPEL_FUNC_DECL(OP, 16, PH, PV, OPT)
/** Declare all functions for all sizes and qpel positions */
#define QPEL_MC_DECL(OP, OPT) \
void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
const uint8_t *src, \
ptrdiff_t srcStride, \
int len, int m); \
void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
const uint8_t *src, \
ptrdiff_t srcStride, \
int len, int m); \
QPEL_FUNCS_DECL(OP, 0, 1, OPT) \
QPEL_FUNCS_DECL(OP, 0, 3, OPT) \
QPEL_FUNCS_DECL(OP, 1, 0, OPT) \
QPEL_FUNCS_DECL(OP, 1, 1, OPT) \
QPEL_FUNCS_DECL(OP, 1, 2, OPT) \
QPEL_FUNCS_DECL(OP, 1, 3, OPT) \
QPEL_FUNCS_DECL(OP, 2, 1, OPT) \
QPEL_FUNCS_DECL(OP, 2, 2, OPT) \
QPEL_FUNCS_DECL(OP, 2, 3, OPT) \
QPEL_FUNCS_DECL(OP, 3, 0, OPT) \
QPEL_FUNCS_DECL(OP, 3, 1, OPT) \
QPEL_FUNCS_DECL(OP, 3, 2, OPT)
/** @} */
/* SSSE3 flavour: 8 pixels per inner-loop iteration; filter tables are
 * indexed with a 32-byte stride per qpel position (presumably packed
 * byte coefficients for pmaddubsw -- TODO confirm against the .asm tables). */
#define LOOPSIZE 8
#define HCOFF(x) (32 * (x - 1))
#define VCOFF(x) (32 * (x - 1))
QPEL_MC_DECL(put_, _ssse3)
QPEL_MC_DECL(avg_, _ssse3)
/* SSE2 flavour: same loop width, but a 64-byte table stride
 * (presumably wider, word-sized coefficients -- TODO confirm). */
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE 8
#define HCOFF(x) (64 * (x - 1))
#define VCOFF(x) (64 * (x - 1))
QPEL_MC_DECL(put_, _sse2)
QPEL_MC_DECL(avg_, _sse2)
#if ARCH_X86_32
/* MMX-family flavours exist only on x86-32: 4 pixels per iteration.
 * The MMX2 and 3DNow "avg" declarations alias the MMX "put" row filters
 * via #define, so only the averaging store differs per extension. */
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE 4
#define HCOFF(x) (64 * (x - 1))
#define VCOFF(x) (64 * (x - 1))
QPEL_MC_DECL(put_, _mmx)
#define ff_put_rv40_qpel_h_mmx2 ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_mmx2 ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _mmx2)
#define ff_put_rv40_qpel_h_3dnow ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_3dnow ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _3dnow)
#endif
/** @{ */
/** Set one function */
#define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT) \
c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT;
/** Set functions put and avg for sizes 8 and 16 and a given qpel position */
#define QPEL_FUNCS_SET(OP, PH, PV, OPT) \
QPEL_FUNC_SET(OP, 8, PH, PV, OPT) \
QPEL_FUNC_SET(OP, 16, PH, PV, OPT)
/** Set all functions for all sizes and qpel positions */
#define QPEL_MC_SET(OP, OPT) \
QPEL_FUNCS_SET (OP, 0, 1, OPT) \
QPEL_FUNCS_SET (OP, 0, 3, OPT) \
QPEL_FUNCS_SET (OP, 1, 0, OPT) \
QPEL_FUNCS_SET (OP, 1, 1, OPT) \
QPEL_FUNCS_SET (OP, 1, 2, OPT) \
QPEL_FUNCS_SET (OP, 1, 3, OPT) \
QPEL_FUNCS_SET (OP, 2, 1, OPT) \
QPEL_FUNCS_SET (OP, 2, 2, OPT) \
QPEL_FUNCS_SET (OP, 2, 3, OPT) \
QPEL_FUNCS_SET (OP, 3, 0, OPT) \
QPEL_FUNCS_SET (OP, 3, 1, OPT) \
QPEL_FUNCS_SET (OP, 3, 2, OPT)
/** @} */
void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
{
#if HAVE_YASM
......@@ -65,25 +194,42 @@ void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmx;
c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmx;
c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmx;
c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_mmx;
c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_mmx;
c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_mmx;
c->avg_pixels_tab[1][15] = ff_avg_rv40_qpel8_mc33_mmx;
#if ARCH_X86_32
QPEL_MC_SET(put_, _mmx)
#endif
}
if (mm_flags & AV_CPU_FLAG_MMX2) {
c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_mmx2;
c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_mmx2;
#if ARCH_X86_32
QPEL_MC_SET(avg_, _mmx2)
#endif
} else if (mm_flags & AV_CPU_FLAG_3DNOW) {
c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow;
c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
#if ARCH_X86_32
QPEL_MC_SET(avg_, _3dnow)
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE2) {
c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2;
c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2;
c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2;
c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2;
QPEL_MC_SET(put_, _sse2)
QPEL_MC_SET(avg_, _sse2)
}
if (mm_flags & AV_CPU_FLAG_SSSE3) {
c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3;
c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3;
c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3;
c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3;
QPEL_MC_SET(put_, _ssse3)
QPEL_MC_SET(avg_, _ssse3)
}
#endif
}
......@@ -21,6 +21,7 @@ HEADERS = asrc_abuffer.h \
vsrc_buffer.h \
OBJS = allfilters.o \
audio.o \
avfilter.o \
avfiltergraph.o \
buffersink.o \
......
......@@ -144,7 +144,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref
AConvertContext *aconvert = inlink->dst->priv;
const int n = insamplesref->audio->nb_samples;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFilterBufferRef *outsamplesref = avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, n);
AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
swr_convert(aconvert->swr, outsamplesref->data, n,
(void *)insamplesref->data, n);
......@@ -153,7 +153,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref
outsamplesref->audio->channel_layout = outlink->channel_layout;
outsamplesref->audio->planar = outlink->planar;
avfilter_filter_samples(outlink, outsamplesref);
ff_filter_samples(outlink, outsamplesref);
avfilter_unref_buffer(insamplesref);
}
......
......@@ -89,7 +89,7 @@ static int query_formats(AVFilterContext *ctx)
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
{
avfilter_filter_samples(inlink->dst->outputs[0], insamplesref);
ff_filter_samples(inlink->dst->outputs[0], insamplesref);
}
AVFilter avfilter_af_aformat = {
......
......@@ -208,7 +208,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
nb_samples = FFMIN(am->queue[0].nb_samples,
am->queue[1].nb_samples);
outbuf = avfilter_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE,
outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE,
nb_samples);
outs = outbuf->data[0];
for (i = 0; i < 2; i++) {
......@@ -264,7 +264,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
am->queue[i].nb_buf * sizeof(**inbuf));
}
}
avfilter_filter_samples(ctx->outputs[0], outbuf);
ff_filter_samples(ctx->outputs[0], outbuf);
}
AVFilter avfilter_af_amerge = {
......
......@@ -21,6 +21,7 @@
* null audio filter
*/
#include "audio.h"
#include "avfilter.h"
AVFilter avfilter_af_anull = {
......@@ -31,8 +32,8 @@ AVFilter avfilter_af_anull = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = avfilter_null_get_audio_buffer,
.filter_samples = avfilter_null_filter_samples },
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_samples = ff_null_filter_samples },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",
......
......@@ -92,7 +92,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref
const int n_in = insamplesref->audio->nb_samples;
int n_out = n_in * aresample->ratio;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFilterBufferRef *outsamplesref = avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
n_out = swr_convert(aresample->swr, outsamplesref->data, n_out,
(void *)insamplesref->data, n_in);
......@@ -103,7 +103,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref
outsamplesref->pts = insamplesref->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
av_rescale(outlink->sample_rate, insamplesref->pts, inlink ->sample_rate);
avfilter_filter_samples(outlink, outsamplesref);
ff_filter_samples(outlink, outsamplesref);
avfilter_unref_buffer(insamplesref);
}
......
......@@ -83,7 +83,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
av_log(ctx, AV_LOG_INFO, "]\n");
showinfo->frame++;
avfilter_filter_samples(inlink->dst->outputs[0], samplesref);
ff_filter_samples(inlink->dst->outputs[0], samplesref);
}
AVFilter avfilter_af_ashowinfo = {
......@@ -95,7 +95,7 @@ AVFilter avfilter_af_ashowinfo = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = avfilter_null_get_audio_buffer,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_samples = filter_samples,
.min_perms = AV_PERM_READ, },
{ .name = NULL}},
......
......@@ -27,9 +27,9 @@
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
avfilter_filter_samples(inlink->dst->outputs[0],
ff_filter_samples(inlink->dst->outputs[0],
avfilter_ref_buffer(insamples, ~AV_PERM_WRITE));
avfilter_filter_samples(inlink->dst->outputs[1],
ff_filter_samples(inlink->dst->outputs[1],
avfilter_ref_buffer(insamples, ~AV_PERM_WRITE));
avfilter_unref_buffer(insamples);
}
......@@ -41,7 +41,7 @@ AVFilter avfilter_af_asplit = {
.inputs = (const AVFilterPad[]) {
{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = avfilter_null_get_audio_buffer,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_samples = filter_samples, },
{ .name = NULL}
},
......
......@@ -119,7 +119,7 @@ static void send_out(AVFilterContext *ctx, int out_id)
av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
(double)ctx->inputs[out_id]->sample_rate;
avfilter_filter_samples(ctx->outputs[out_id], buf);
ff_filter_samples(ctx->outputs[out_id], buf);
queue->nb--;
queue->tail = (queue->tail + 1) % QUEUE_SIZE;
if (as->req[out_id])
......
......@@ -122,7 +122,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
AVFilterLink *outlink = inlink->dst->outputs[0];
int16_t *taps, *endin, *in, *out;
AVFilterBufferRef *outsamples =
avfilter_get_audio_buffer(inlink, AV_PERM_WRITE,
ff_get_audio_buffer(inlink, AV_PERM_WRITE,
insamples->audio->nb_samples);
avfilter_copy_buffer_ref_props(outsamples, insamples);
......@@ -141,7 +141,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
// save part of input for next round
memcpy(taps, endin, NUMTAPS * sizeof(*taps));
avfilter_filter_samples(outlink, outsamples);
ff_filter_samples(outlink, outsamples);
avfilter_unref_buffer(insamples);
}
......
......@@ -340,7 +340,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
int n = insamples->audio->nb_samples;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFilterBufferRef *outsamples = avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, n);
AVFilterBufferRef *outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
PanContext *pan = inlink->dst->priv;
swr_convert(pan->swr, outsamples->data, n, (void *)insamples->data, n);
......@@ -348,7 +348,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
outsamples->audio->channel_layout = outlink->channel_layout;
outsamples->audio->planar = outlink->planar;
avfilter_filter_samples(outlink, outsamples);
ff_filter_samples(outlink, outsamples);
avfilter_unref_buffer(insamples);
}
......
......@@ -123,7 +123,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
}
}
avfilter_filter_samples(inlink->dst->outputs[0], insamples);
ff_filter_samples(inlink->dst->outputs[0], insamples);
}
static int query_formats(AVFilterContext *ctx)
......@@ -163,7 +163,7 @@ AVFilter avfilter_af_silencedetect = {
.inputs = (const AVFilterPad[]) {
{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = avfilter_null_get_audio_buffer,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_samples = filter_samples, },
{ .name = NULL }
},
......
......@@ -167,7 +167,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
}
}
}
avfilter_filter_samples(outlink, insamples);
ff_filter_samples(outlink, insamples);
}
AVFilter avfilter_af_volume = {
......
......@@ -205,7 +205,7 @@ static int request_frame(AVFilterLink *outlink)
if (eval->duration >= 0 && t > eval->duration)
return AVERROR_EOF;
samplesref = avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, eval->nb_samples);
samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, eval->nb_samples);
/* evaluate expression for each single sample and for each channel */
for (i = 0; i < eval->nb_samples; i++, eval->n++) {
......@@ -223,7 +223,7 @@ static int request_frame(AVFilterLink *outlink)
samplesref->audio->sample_rate = eval->sample_rate;
eval->pts += eval->nb_samples;
avfilter_filter_samples(outlink, samplesref);
ff_filter_samples(outlink, samplesref);
return 0;
}
......
......@@ -108,13 +108,13 @@ static int request_frame(AVFilterLink *outlink)
AVFilterBufferRef *samplesref;
samplesref =
avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, null->nb_samples);
ff_get_audio_buffer(outlink, AV_PERM_WRITE, null->nb_samples);
samplesref->pts = null->pts;
samplesref->pos = -1;
samplesref->audio->channel_layout = null->channel_layout;
samplesref->audio->sample_rate = outlink->sample_rate;
avfilter_filter_samples(outlink, avfilter_ref_buffer(samplesref, ~0));
ff_filter_samples(outlink, avfilter_ref_buffer(samplesref, ~0));
avfilter_unref_buffer(samplesref);
null->pts += null->nb_samples;
......
This diff is collapsed.
/*
* Copyright (c) Stefano Sabatini | stefasab at gmail.com
* Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_AUDIO_H
#define AVFILTER_AUDIO_H
#include "avfilter.h"
/** default handler for get_audio_buffer() for audio inputs */
AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
/** get_audio_buffer() handler for filters which simply pass audio along */
AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
/**
* Request an audio samples buffer with a specific set of permissions.
*
* @param link the output link to the filter from which the buffer will
* be requested
* @param perms the required access permissions
* @param nb_samples the number of samples per channel
* @return A reference to the samples. This must be unreferenced with
* avfilter_unref_buffer when you are finished with it.
*/
AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
/** default handler for filter_samples() for audio inputs */
void ff_default_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
/** filter_samples() handler for filters which simply pass audio along */
void ff_null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
/**
* Send a buffer of audio samples to the next filter.
*
* @param link the output link over which the audio samples are being sent
* @param samplesref a reference to the buffer of audio samples being sent. The
* receiving filter will free this reference when it no longer
* needs it or pass it on to the next filter.
*/
void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
#endif /* AVFILTER_AUDIO_H */
......@@ -27,6 +27,7 @@
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "avfilter.h"
#include "internal.h"
......@@ -410,7 +411,7 @@ static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
av_dlog(ctx, "]%s", end ? "\n" : "");
}
static void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
{
if (link->type == AVMEDIA_TYPE_VIDEO) {
av_dlog(ctx,
......@@ -434,8 +435,6 @@ static void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
}
}
#define FF_DPRINTF_START(ctx, func) av_dlog(NULL, "%-16s: ", #func)
AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
AVFilterBufferRef *ret = NULL;
......@@ -501,133 +500,6 @@ fail:
return NULL;
}
/**
 * Request an audio samples buffer on a link.
 *
 * Dispatches to the destination pad's get_audio_buffer() callback when one
 * is set, and falls back to the default allocator otherwise.
 *
 * @param link       the output link to request the buffer from
 * @param perms      required access permissions for the buffer
 * @param nb_samples number of samples per channel
 * @return a new reference tagged AVMEDIA_TYPE_AUDIO, or NULL on failure
 */
AVFilterBufferRef *avfilter_get_audio_buffer(AVFilterLink *link,
                                             int perms, int nb_samples)
{
    AVFilterBufferRef *ret = NULL;

    /* let the filter provide its own buffer if it wants to */
    if (link->dstpad->get_audio_buffer)
        ret = link->dstpad->get_audio_buffer(link, perms, nb_samples);

    if (!ret)
        ret = avfilter_default_get_audio_buffer(link, perms, nb_samples);

    if (ret)
        ret->type = AVMEDIA_TYPE_AUDIO;

    return ret;
}
/**
 * Wrap already allocated sample data in an AVFilterBufferRef.
 *
 * The caller supplies per-plane data pointers and linesizes; this function
 * allocates the AVFilterBuffer, the reference, and the audio properties
 * struct, and copies the pointer arrays into both the buffer and the ref.
 * The sample data itself is NOT copied and remains owned per the buffer's
 * free() callback (ff_avfilter_default_free_buffer).
 *
 * @return the new reference, or NULL on allocation failure (all partially
 *         allocated structures are freed on the fail path)
 */
AVFilterBufferRef *
avfilter_get_audio_buffer_ref_from_arrays(uint8_t *data[8], int linesize[8], int perms,
                                          int nb_samples, enum AVSampleFormat sample_fmt,
                                          uint64_t channel_layout, int planar)
{
    AVFilterBuffer *samples = av_mallocz(sizeof(AVFilterBuffer));
    AVFilterBufferRef *samplesref = av_mallocz(sizeof(AVFilterBufferRef));

    if (!samples || !samplesref)
        goto fail;

    samplesref->buf = samples;
    samplesref->buf->free = ff_avfilter_default_free_buffer;
    if (!(samplesref->audio = av_mallocz(sizeof(AVFilterBufferRefAudioProps))))
        goto fail;

    samplesref->audio->nb_samples = nb_samples;
    samplesref->audio->channel_layout = channel_layout;
    samplesref->audio->planar = planar;

    /* make sure the buffer gets read permission or it's useless for output */
    samplesref->perms = perms | AV_PERM_READ;

    samples->refcount = 1;
    samplesref->type = AVMEDIA_TYPE_AUDIO;
    samplesref->format = sample_fmt;

    /* both the buffer and the reference carry copies of the plane arrays */
    memcpy(samples->data, data, sizeof(samples->data));
    memcpy(samples->linesize, linesize, sizeof(samples->linesize));
    memcpy(samplesref->data, data, sizeof(samplesref->data));
    memcpy(samplesref->linesize, linesize, sizeof(samplesref->linesize));

    return samplesref;

fail:
    /* av_freep(NULL-field) is safe; free in reverse allocation order */
    if (samplesref && samplesref->audio)
        av_freep(&samplesref->audio);
    av_freep(&samplesref);
    av_freep(&samples);
    return NULL;
}
/**
 * Wrap already allocated sample data in an AVFilterBufferRef (alternative
 * variant).
 *
 * Unlike avfilter_get_audio_buffer_ref_from_arrays(), this takes a single
 * linesize shared by all planes, derives planarity from the sample format,
 * and supports channel layouts with more planes than the fixed-size data[]
 * array by allocating extended_data.
 *
 * @return the new reference, or NULL on allocation failure (all partially
 *         allocated structures, including extended_data, are freed)
 */
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_alt(uint8_t **data,
                                                          int linesize, int perms,
                                                          int nb_samples,
                                                          enum AVSampleFormat sample_fmt,
                                                          uint64_t channel_layout)
{
    int planes;
    AVFilterBuffer *samples = av_mallocz(sizeof(*samples));
    AVFilterBufferRef *samplesref = av_mallocz(sizeof(*samplesref));

    if (!samples || !samplesref)
        goto fail;

    samplesref->buf = samples;
    samplesref->buf->free = ff_avfilter_default_free_buffer;
    if (!(samplesref->audio = av_mallocz(sizeof(*samplesref->audio))))
        goto fail;

    samplesref->audio->nb_samples = nb_samples;
    samplesref->audio->channel_layout = channel_layout;
    samplesref->audio->planar = av_sample_fmt_is_planar(sample_fmt);

    /* planar formats use one plane per channel; packed formats use one plane */
    planes = samplesref->audio->planar ? av_get_channel_layout_nb_channels(channel_layout) : 1;

    /* make sure the buffer gets read permission or it's useless for output */
    samplesref->perms = perms | AV_PERM_READ;

    samples->refcount = 1;
    samplesref->type = AVMEDIA_TYPE_AUDIO;
    samplesref->format = sample_fmt;

    /* copy only as many plane pointers as fit in the fixed array */
    memcpy(samples->data, data,
           FFMIN(FF_ARRAY_ELEMS(samples->data), planes)*sizeof(samples->data[0]));
    memcpy(samplesref->data, samples->data, sizeof(samples->data));

    samples->linesize[0] = samplesref->linesize[0] = linesize;

    if (planes > FF_ARRAY_ELEMS(samples->data)) {
        /* more planes than the fixed arrays can hold: spill into extended_data */
        samples-> extended_data = av_mallocz(sizeof(*samples->extended_data) *
                                             planes);
        samplesref->extended_data = av_mallocz(sizeof(*samplesref->extended_data) *
                                               planes);

        if (!samples->extended_data || !samplesref->extended_data)
            goto fail;

        memcpy(samples-> extended_data, data, sizeof(*data)*planes);
        memcpy(samplesref->extended_data, data, sizeof(*data)*planes);
    } else {
        /* common case: extended_data simply aliases the fixed arrays */
        samples->extended_data = samples->data;
        samplesref->extended_data = samplesref->data;
    }

    return samplesref;

fail:
    /* free extended_data only when it was separately allocated */
    if (samples && samples->extended_data != samples->data)
        av_freep(&samples->extended_data);
    if (samplesref) {
        av_freep(&samplesref->audio);
        if (samplesref->extended_data != samplesref->data)
            av_freep(&samplesref->extended_data);
    }
    av_freep(&samplesref);
    av_freep(&samples);
    return NULL;
}
int avfilter_request_frame(AVFilterLink *link)
{
FF_DPRINTF_START(NULL, request_frame); ff_dlog_link(NULL, link, 1);
......@@ -657,7 +529,7 @@ int avfilter_poll_frame(AVFilterLink *link)
return min;
}
static void update_link_current_pts(AVFilterLink *link, int64_t pts)
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
{
if (pts == AV_NOPTS_VALUE)
return;
......@@ -706,7 +578,7 @@ void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
}
start_frame(link, link->cur_buf);
update_link_current_pts(link, link->cur_buf->pts);
ff_update_link_current_pts(link, link->cur_buf->pts);
}
void avfilter_end_frame(AVFilterLink *link)
......@@ -778,44 +650,6 @@ int avfilter_process_command(AVFilterContext *filter, const char *cmd, const cha
return AVERROR(ENOSYS);
}
/**
 * Send a buffer of audio samples to the next filter.
 *
 * If the destination pad requires permissions the incoming buffer does not
 * grant (or rejects permissions it has), the samples are first copied into
 * a freshly allocated buffer with the required permissions.
 *
 * @param link       the output link over which the samples are sent
 * @param samplesref reference to the samples buffer; ownership passes to
 *                   this function (forwarded downstream, or unreferenced
 *                   after the permission copy)
 */
void avfilter_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    void (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
    AVFilterPad *dst = link->dstpad;
    int i;
    int64_t pts;

    FF_DPRINTF_START(NULL, filter_samples); ff_dlog_link(NULL, link, 1);

    /* fall back to the default handler when the pad provides none */
    if (!(filter_samples = dst->filter_samples))
        filter_samples = avfilter_default_filter_samples;

    /* prepare to copy the samples if the buffer has insufficient permissions */
    if ((dst->min_perms & samplesref->perms) != dst->min_perms ||
        dst->rej_perms & samplesref->perms) {
        av_log(link->dst, AV_LOG_DEBUG,
               "Copying audio data in avfilter (have perms %x, need %x, reject %x)\n",
               samplesref->perms, link->dstpad->min_perms, link->dstpad->rej_perms);

        link->cur_buf = avfilter_default_get_audio_buffer(link, dst->min_perms,
                                                          samplesref->audio->nb_samples);
        link->cur_buf->pts = samplesref->pts;
        link->cur_buf->audio->sample_rate = samplesref->audio->sample_rate;

        /* Copy actual data into new samples buffer.
         * The bounds check must come before the pointer test: evaluating
         * samplesref->data[i] with i == 8 reads one element past the end
         * of the 8-entry data[] array (undefined behavior when all eight
         * planes are in use). */
        for (i = 0; i < 8 && samplesref->data[i]; i++)
            memcpy(link->cur_buf->data[i], samplesref->data[i], samplesref->linesize[0]);
        /* NOTE(review): linesize[0] is assumed to be the byte count of every
         * plane -- confirm against the audio buffer allocator. */

        avfilter_unref_buffer(samplesref);
    } else
        link->cur_buf = samplesref;

    /* save pts before the callback, which may consume/replace cur_buf */
    pts = link->cur_buf->pts;
    filter_samples(link, link->cur_buf);
    update_link_current_pts(link, pts);
}
#define MAX_REGISTERED_AVFILTERS_NB 128
static AVFilter *registered_avfilters[MAX_REGISTERED_AVFILTERS_NB + 1];
......
......@@ -430,7 +430,8 @@ struct AVFilterPad {
*
* Input audio pads only.
*/
AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms, int nb_samples);
AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms,
int nb_samples);
/**
* Callback called after the slices of a frame are completely sent. If
......@@ -508,16 +509,10 @@ void avfilter_default_draw_slice(AVFilterLink *link, int y, int h, int slice_dir
/** default handler for end_frame() for video inputs */
void avfilter_default_end_frame(AVFilterLink *link);
/** default handler for filter_samples() for audio inputs */
void avfilter_default_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
/** default handler for get_video_buffer() for video inputs */
AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
/** default handler for get_audio_buffer() for audio inputs */
AVFilterBufferRef *avfilter_default_get_audio_buffer(AVFilterLink *link,
int perms, int nb_samples);
/**
* Helpers for query_formats() which set all links to the same list of
......@@ -541,17 +536,10 @@ void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
/** end_frame() handler for filters which simply pass video along */
void avfilter_null_end_frame(AVFilterLink *link);
/** filter_samples() handler for filters which simply pass audio along */
void avfilter_null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
/** get_video_buffer() handler for filters which simply pass video along */
AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
/** get_audio_buffer() handler for filters which simply pass audio along */
AVFilterBufferRef *avfilter_null_get_audio_buffer(AVFilterLink *link,
int perms, int nb_samples);
/**
* Filter definition. This defines the pads a filter contains, and all the
* callback functions used to interact with the filter.
......@@ -665,7 +653,7 @@ struct AVFilterLink {
AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
/* These parameters apply only to audio */
uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/audioconvert.h)
#if LIBAVFILTER_VERSION_MAJOR < 3
#if FF_API_SAMPLERATE64
int64_t sample_rate; ///< samples per second
#else
int sample_rate; ///< samples per second
......@@ -790,19 +778,6 @@ AVFilterBufferRef *
avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
int w, int h, enum PixelFormat format);
/**
* Request an audio samples buffer with a specific set of permissions.
*
* @param link the output link to the filter from which the buffer will
* be requested
* @param perms the required access permissions
* @param nb_samples the number of samples per channel
* @return A reference to the samples. This must be unreferenced with
* avfilter_unref_buffer when you are finished with it.
*/
AVFilterBufferRef *avfilter_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
/**
* Create an audio buffer reference wrapped around an already
* allocated samples buffer.
......@@ -904,17 +879,7 @@ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
*/
int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags);
/**
* Send a buffer of audio samples to the next filter.
*
* @param link the output link over which the audio samples are being sent
* @param samplesref a reference to the buffer of audio samples being sent. The
* receiving filter will free this reference when it no longer
* needs it or pass it on to the next filter.
*/
void avfilter_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
/** Initialize the filter system. Register all built-in filters. */
/** Initialize the filter system. Register all builtin filters. */
void avfilter_register_all(void);
/** Uninitialize the filter system. Unregister all filters. */
......@@ -1024,4 +989,6 @@ static inline void avfilter_insert_outpad(AVFilterContext *f, unsigned index,
&f->output_pads, &f->outputs, p);
}
#include "audio.h"
#endif /* AVFILTER_AVFILTER_H */
......@@ -23,6 +23,7 @@
#include "libavutil/audioconvert.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "internal.h"
......@@ -87,38 +88,6 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
return picref;
}
/**
 * Default get_audio_buffer() handler for audio inputs.
 *
 * Allocates sample data sized for the link's format, channel layout and
 * nb_samples, then wraps it in an AVFilterBufferRef.
 *
 * @param link       the link whose format/layout/planarity describe the buffer
 * @param perms      required access permissions
 * @param nb_samples number of samples per channel
 * @return the new reference, or NULL on allocation failure
 */
AVFilterBufferRef *avfilter_default_get_audio_buffer(AVFilterLink *link, int perms,
                                                     int nb_samples)
{
    AVFilterBufferRef *samplesref = NULL;
    int linesize[8] = {0};
    uint8_t *data[8] = {0};
    int ch, nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

    /* right now we don't support more than 8 channels */
    av_assert0(nb_channels <= 8);

    /* Calculate total buffer size, round to multiple of 16 to be SIMD friendly */
    if (av_samples_alloc(data, linesize,
                         nb_channels, nb_samples,
                         av_get_alt_sample_fmt(link->format, link->planar),
                         16) < 0)
        return NULL;

    /* planar layouts: every channel plane shares the first plane's linesize */
    for (ch = 1; link->planar && ch < nb_channels; ch++)
        linesize[ch] = linesize[0];

    samplesref =
        avfilter_get_audio_buffer_ref_from_arrays(data, linesize, perms,
                                                  nb_samples, link->format,
                                                  link->channel_layout, link->planar);
    if (!samplesref) {
        /* av_samples_alloc() put all planes in one allocation at data[0] */
        av_free(data[0]);
        return NULL;
    }

    return samplesref;
}
void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
AVFilterLink *outlink = NULL;
......@@ -163,27 +132,6 @@ void avfilter_default_end_frame(AVFilterLink *inlink)
}
}
/* FIXME: samplesref is same as link->cur_buf. Need to consider removing the redundant parameter. */
/**
 * Default filter_samples() handler for audio inputs.
 *
 * Allocates an output buffer on the filter's first output (if any), copies
 * the pts and sample_rate onto it, forwards a fresh reference downstream,
 * then drops both the local output reference and the input reference.
 * Note: the sample data itself is not copied here.
 */
void avfilter_default_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterLink *outlink = NULL;

    /* only the first output (when present) is fed by the default handler */
    if (inlink->dst->output_count)
        outlink = inlink->dst->outputs[0];

    if (outlink) {
        /* NOTE(review): the buffer is requested on inlink, not outlink --
         * presumably fine because formats agree across a configured link,
         * but worth confirming; the allocation is also not NULL-checked. */
        outlink->out_buf = avfilter_default_get_audio_buffer(inlink, AV_PERM_WRITE,
                                                             samplesref->audio->nb_samples);
        outlink->out_buf->pts = samplesref->pts;
        outlink->out_buf->audio->sample_rate = samplesref->audio->sample_rate;
        /* hand a new reference downstream; release ours afterwards */
        avfilter_filter_samples(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
        avfilter_unref_buffer(outlink->out_buf);
        outlink->out_buf = NULL;
    }
    avfilter_unref_buffer(samplesref);
    inlink->cur_buf = NULL;
}
static void set_common_formats(AVFilterContext *ctx, AVFilterFormats *fmts,
enum AVMediaType type, int offin, int offout)
{
......@@ -258,18 +206,7 @@ void avfilter_null_end_frame(AVFilterLink *link)
avfilter_end_frame(link->dst->outputs[0]);
}
/** filter_samples() handler for pass-through filters: forward the buffer
 *  unchanged to the first output of the destination filter. */
void avfilter_null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    avfilter_filter_samples(link->dst->outputs[0], samplesref);
}
/** get_video_buffer() handler for pass-through filters: delegate the
 *  allocation to the next filter in the chain. */
AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
    return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
}
/** get_audio_buffer() handler for pass-through filters: delegate the
 *  allocation to the next filter in the chain. */
AVFilterBufferRef *avfilter_null_get_audio_buffer(AVFilterLink *link, int perms,
                                                  int nb_samples)
{
    return avfilter_get_audio_buffer(link->dst->outputs[0], perms, nb_samples);
}
......@@ -149,4 +149,10 @@ static inline void ff_null_start_frame_keep_ref(AVFilterLink *inlink,
avfilter_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(picref, ~0));
}
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);
#define FF_DPRINTF_START(ctx, func) av_dlog(NULL, "%-16s: ", #func)
void ff_dlog_link(void *ctx, AVFilterLink *link, int end);
#endif /* AVFILTER_INTERNAL_H */
......@@ -297,7 +297,7 @@ static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
break;
case AVMEDIA_TYPE_AUDIO:
buf = avfilter_get_audio_buffer(outlink, AV_PERM_WRITE,
buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
ref->audio->nb_samples);
channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
data_size = av_samples_get_buffer_size(NULL, channels,
......@@ -562,7 +562,7 @@ static int request_frame(AVFilterLink *link)
avfilter_unref_buffer(buf);
break;
case AVMEDIA_TYPE_AUDIO:
avfilter_filter_samples(link, avfilter_ref_buffer(buf, ~0));
ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
avfilter_unref_buffer(buf);
break;
default:
......
......@@ -437,7 +437,7 @@ static int amovie_get_samples(AVFilterLink *outlink)
if (data_size < 0)
return data_size;
movie->samplesref =
avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
memcpy(movie->samplesref->data[0], movie->frame->data[0], data_size);
movie->samplesref->pts = movie->pkt.pts;
movie->samplesref->pos = movie->pkt.pos;
......@@ -463,7 +463,7 @@ static int amovie_request_frame(AVFilterLink *outlink)
return ret;
} while (!movie->samplesref);
avfilter_filter_samples(outlink, avfilter_ref_buffer(movie->samplesref, ~0));
ff_filter_samples(outlink, avfilter_ref_buffer(movie->samplesref, ~0));
avfilter_unref_buffer(movie->samplesref);
movie->samplesref = NULL;
......
......@@ -47,5 +47,8 @@
#ifndef FF_API_GRAPH_AVCLASS
#define FF_API_GRAPH_AVCLASS (LIBAVFILTER_VERSION_MAJOR > 2)
#endif
#ifndef FF_API_SAMPLERATE64
#define FF_API_SAMPLERATE64 (LIBAVFILTER_VERSION_MAJOR < 3)
#endif
#endif // AVFILTER_VERSION_H
......@@ -1051,7 +1051,7 @@ static int mov_read_glbl(MOVContext *c, AVIOContext *pb, MOVAtom atom)
return AVERROR_INVALIDDATA;
if (atom.size >= 10) {
// Broken files created by legacy versions of Libav and FFmpeg will
// Broken files created by legacy versions of libavformat will
// wrap a whole fiel atom inside of a glbl atom.
unsigned size = avio_rb32(pb);
unsigned type = avio_rl32(pb);
......
......@@ -44,6 +44,8 @@
#define APP_MAX_LENGTH 128
#define PLAYPATH_MAX_LENGTH 256
#define TCURL_MAX_LENGTH 512
#define FLASHVER_MAX_LENGTH 64
/** RTMP protocol handler state */
typedef enum {
......@@ -82,6 +84,9 @@ typedef struct RTMPContext {
int flv_header_bytes; ///< number of initialized bytes in flv_header
int nb_invokes; ///< keeps track of invoke messages
int create_stream_invoke; ///< invoke id for the create stream command
char* tcurl; ///< url of the target stream
char* flashver; ///< version of the flash plugin
char* swfurl; ///< url of the swf player
} RTMPContext;
#define PLAYER_KEY_OPEN_PART_LEN 30 ///< length of partial key used for first client digest signing
......@@ -110,35 +115,34 @@ static const uint8_t rtmp_server_key[] = {
/**
* Generate 'connect' call and send it to the server.
*/
static void gen_connect(URLContext *s, RTMPContext *rt, const char *proto,
const char *host, int port)
static void gen_connect(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t ver[64], *p;
char tcurl[512];
uint8_t *p;
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 4096);
p = pkt.data;
ff_url_join(tcurl, sizeof(tcurl), proto, NULL, host, port, "/%s", rt->app);
ff_amf_write_string(&p, "connect");
ff_amf_write_number(&p, ++rt->nb_invokes);
ff_amf_write_object_start(&p);
ff_amf_write_field_name(&p, "app");
ff_amf_write_string(&p, rt->app);
if (rt->is_input) {
snprintf(ver, sizeof(ver), "%s %d,%d,%d,%d", RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1,
RTMP_CLIENT_VER2, RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
} else {
snprintf(ver, sizeof(ver), "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
if (!rt->is_input) {
ff_amf_write_field_name(&p, "type");
ff_amf_write_string(&p, "nonprivate");
}
ff_amf_write_field_name(&p, "flashVer");
ff_amf_write_string(&p, ver);
ff_amf_write_string(&p, rt->flashver);
if (rt->swfurl) {
ff_amf_write_field_name(&p, "swfUrl");
ff_amf_write_string(&p, rt->swfurl);
}
ff_amf_write_field_name(&p, "tcUrl");
ff_amf_write_string(&p, tcurl);
ff_amf_write_string(&p, rt->tcurl);
if (rt->is_input) {
ff_amf_write_field_name(&p, "fpad");
ff_amf_write_bool(&p, 0);
......@@ -368,6 +372,25 @@ static void gen_server_bw(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_destroy(&pkt);
}
/**
* Generate check bandwidth message and send it to the server.
*/
static void gen_check_bw(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 21);
p = pkt.data;
ff_amf_write_string(&p, "_checkbw");
ff_amf_write_number(&p, ++rt->nb_invokes);
ff_amf_write_null(&p);
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
}
/**
* Generate report on bytes read so far and send it to the server.
*/
......@@ -687,6 +710,8 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
if (!t && !strcmp(tmpstr, "NetStream.Play.Stop")) rt->state = STATE_STOPPED;
if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
if (!t && !strcmp(tmpstr, "NetStream.Publish.Start")) rt->state = STATE_PUBLISHING;
} else if (!memcmp(pkt->data, "\002\000\010onBWDone", 11)) {
gen_check_bw(s, rt);
}
break;
}
......@@ -910,13 +935,31 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
strncat(rt->playpath, fname, PLAYPATH_MAX_LENGTH - 5);
}
if (!rt->tcurl) {
rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
port, "/%s", rt->app);
}
if (!rt->flashver) {
rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
if (rt->is_input) {
snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
} else {
snprintf(rt->flashver, FLASHVER_MAX_LENGTH,
"FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
}
}
rt->client_report_size = 1048576;
rt->bytes_read = 0;
rt->last_bytes_read = 0;
av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
proto, path, rt->app, rt->playpath);
gen_connect(s, rt, proto, hostname, port);
gen_connect(s, rt);
do {
ret = get_packet(s, 1);
......@@ -1052,11 +1095,14 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
static const AVOption rtmp_options[] = {
{"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
{"any", "both", 0, AV_OPT_TYPE_CONST, {-2}, 0, 0, DEC, "rtmp_live"},
{"live", "live stream", 0, AV_OPT_TYPE_CONST, {-1}, 0, 0, DEC, "rtmp_live"},
{"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {0}, 0, 0, DEC, "rtmp_live"},
{"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_tcurl", "URL of the target stream. Defaults to rtmp://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{ NULL },
};
......
......@@ -227,7 +227,7 @@ static int sctp_open(URLContext *h, const char *uri, int flags)
if (s->max_streams) {
initparams.sinit_max_instreams = s->max_streams;
initparams.sinit_num_ostreams = s->max_streams;
if (setsockopt(fd, SOL_SCTP, SCTP_INITMSG, &initparams,
if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &initparams,
sizeof(initparams)) < 0)
av_log(h, AV_LOG_ERROR,
"SCTP ERROR: Unable to initialize socket max streams %d\n",
......
......@@ -54,26 +54,24 @@ cglobal conv_fltp_to_flt_6ch, 2,8,7, dst, src, src1, src2, src3, src4, src5, len
mova m3, [srcq+src3q]
mova m4, [srcq+src4q]
mova m5, [srcq+src5q]
%if cpuflag(sse)
%if cpuflag(sse4)
SBUTTERFLYPS 0, 1, 6
SBUTTERFLYPS 2, 3, 6
SBUTTERFLYPS 4, 5, 6
movaps m6, m4
shufps m4, m0, q3210
blendps m6, m4, m0, 1100b
movlhps m0, m2
movhlps m6, m2
movaps [dstq ], m0
movaps [dstq+16], m4
movaps [dstq+32], m6
movaps m6, m5
shufps m5, m1, q3210
movhlps m4, m2
blendps m2, m5, m1, 1100b
movlhps m1, m3
movhlps m6, m3
movhlps m5, m3
movaps [dstq ], m0
movaps [dstq+16], m6
movaps [dstq+32], m4
movaps [dstq+48], m1
movaps [dstq+64], m5
movaps [dstq+80], m6
movaps [dstq+64], m2
movaps [dstq+80], m5
%else ; mmx
SBUTTERFLY dq, 0, 1, 6
SBUTTERFLY dq, 2, 3, 6
......@@ -100,5 +98,9 @@ cglobal conv_fltp_to_flt_6ch, 2,8,7, dst, src, src1, src2, src3, src4, src5, len
INIT_MMX mmx
CONV_FLTP_TO_FLT_6CH
INIT_XMM sse
INIT_XMM sse4
CONV_FLTP_TO_FLT_6CH
%if HAVE_AVX
INIT_XMM avx
CONV_FLTP_TO_FLT_6CH
%endif
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment