Commit 67f5650a authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  rv34: add NEON rv34_idct_add
  rv34: 1-pass inter MB reconstruction
  add SMJPEG muxer
  avformat: split out common SMJPEG code
  pictordec: Use bytestream2 functions
  avconv: use avcodec_encode_audio2()
  pcmenc: use AVCodec.encode2()
  avcodec: bump minor version and add APIChanges for the new audio encoding API
  avcodec: Add avcodec_encode_audio2() as replacement for avcodec_encode_audio()
  avcodec: add a public function, avcodec_fill_audio_frame().
  rv34: Intra 16x16 handling
  rv34: Inter/intra MB code split

Conflicts:
	Changelog
	libavcodec/avcodec.h
	libavcodec/pictordec.c
	libavcodec/utils.c
	libavcodec/version.h
	libavcodec/x86/rv34dsp.asm
	libavformat/version.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 905c4dc2 9e12002f
@@ -14,6 +14,7 @@ version next:
 - astreamsync audio filter
 - amerge audio filter
 - GSM audio parser
+- SMJPEG muxer
 - Automatic thread count based on detection number of (available) CPU cores
 - y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
 - ffprobe -show_error option
......
@@ -13,6 +13,14 @@ libavutil: 2011-04-18
 API changes, most recent first:
+2012-xx-xx - lavc 53.34.0
+  New audio encoding API:
+  xxxxxxx Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
+          encoders.
+  xxxxxxx Add avcodec_fill_audio_frame() as a convenience function.
+  xxxxxxx Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
+          Add AVCodec.encode2().
 2012-01-xx - xxxxxxx - lavfi 2.15.0
   Add a new installed header -- libavfilter/version.h -- with version macros.
......
@@ -317,7 +317,7 @@ library:
     @tab Used in Sierra CD-ROM games.
 @item Smacker @tab @tab X
     @tab Multimedia format used by many games.
-@item SMJPEG @tab @tab X
+@item SMJPEG @tab X @tab X
     @tab Used in certain Loki game ports.
 @item Sony OpenMG (OMA) @tab X @tab X
     @tab Audio format used in Sony Sonic Stage and Sony Vegas.
......
@@ -23,16 +23,18 @@
 #include "libavcodec/avcodec.h"
 #include "libavcodec/rv34dsp.h"

-void ff_rv34_inv_transform_neon(DCTELEM *block);
 void ff_rv34_inv_transform_noround_neon(DCTELEM *block);
-void ff_rv34_inv_transform_dc_neon(DCTELEM *block);
 void ff_rv34_inv_transform_noround_dc_neon(DCTELEM *block);
+void ff_rv34_idct_add_neon(uint8_t *dst, int stride, DCTELEM *block);
+void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc);

 void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
 {
-    c->rv34_inv_transform_tab[0]    = ff_rv34_inv_transform_neon;
-    c->rv34_inv_transform_tab[1]    = ff_rv34_inv_transform_noround_neon;
-    c->rv34_inv_transform_dc_tab[0] = ff_rv34_inv_transform_dc_neon;
-    c->rv34_inv_transform_dc_tab[1] = ff_rv34_inv_transform_noround_dc_neon;
+    c->rv34_inv_transform    = ff_rv34_inv_transform_noround_neon;
+    c->rv34_inv_transform_dc = ff_rv34_inv_transform_noround_dc_neon;
+
+    c->rv34_idct_add    = ff_rv34_idct_add_neon;
+    c->rv34_idct_dc_add = ff_rv34_idct_dc_add_neon;
 }
@@ -19,13 +19,10 @@
  */
 #include "asm.S"
+#include "neon.S"

-.macro rv34_inv_transform
-        mov             r1,  #16
-        vld1.16         {d28}, [r0,:64], r1  @ block[i+8*0]
-        vld1.16         {d29}, [r0,:64], r1  @ block[i+8*1]
-        vld1.16         {d30}, [r0,:64], r1  @ block[i+8*2]
-        vld1.16         {d31}, [r0,:64], r1  @ block[i+8*3]
+.macro rv34_inv_transform r0
+        vld1.16         {q14-q15}, [\r0,:128]
         vmov.s16        d0,  #13
         vshll.s16       q12, d29, #3
         vshll.s16       q13, d29, #4
@@ -35,12 +32,12 @@
         vmlal.s16       q10, d30, d0
         vmull.s16       q11, d28, d0
         vmlsl.s16       q11, d30, d0
-        vsubw.s16       q12, q12, d29        @ z2 = block[i+8*1]*7
-        vaddw.s16       q13, q13, d29        @ z3 = block[i+8*1]*17
+        vsubw.s16       q12, q12, d29        @ z2 = block[i+4*1]*7
+        vaddw.s16       q13, q13, d29        @ z3 = block[i+4*1]*17
         vsubw.s16       q9,  q9,  d31
         vaddw.s16       q1,  q1,  d31
-        vadd.s32        q13, q13, q9         @ z3 = 17*block[i+8*1] + 7*block[i+8*3]
-        vsub.s32        q12, q12, q1         @ z2 = 7*block[i+8*1] - 17*block[i+8*3]
+        vadd.s32        q13, q13, q9         @ z3 = 17*block[i+4*1] + 7*block[i+4*3]
+        vsub.s32        q12, q12, q1         @ z2 = 7*block[i+4*1] - 17*block[i+4*3]
         vadd.s32        q1,  q10, q13        @ z0 + z3
         vadd.s32        q2,  q11, q12        @ z1 + z2
         vsub.s32        q8,  q10, q13        @ z0 - z3
@@ -70,25 +67,39 @@
vsub.s32 q15, q14, q9 @ z0 - z3 vsub.s32 q15, q14, q9 @ z0 - z3
.endm .endm
/* void ff_rv34_inv_transform_neon(DCTELEM *block); */ /* void rv34_idct_add_c(uint8_t *dst, int stride, DCTELEM *block) */
function ff_rv34_inv_transform_neon, export=1 function ff_rv34_idct_add_neon, export=1
mov r2, r0 mov r3, r0
rv34_inv_transform rv34_inv_transform r2
vrshrn.s32 d1, q2, #10 @ (z1 + z2) >> 10 vmov.i16 q12, #0
vrshrn.s32 d0, q1, #10 @ (z0 + z3) >> 10 vrshrn.s32 d16, q1, #10 @ (z0 + z3) >> 10
vrshrn.s32 d2, q3, #10 @ (z1 - z2) >> 10 vrshrn.s32 d17, q2, #10 @ (z1 + z2) >> 10
vrshrn.s32 d3, q15, #10 @ (z0 - z3) >> 10 vrshrn.s32 d18, q3, #10 @ (z1 - z2) >> 10
vst4.16 {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1 vrshrn.s32 d19, q15, #10 @ (z0 - z3) >> 10
vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1 vld1.32 {d28[]}, [r0,:32], r1
vst4.16 {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1 vld1.32 {d29[]}, [r0,:32], r1
vst4.16 {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1 vtrn.32 q8, q9
vld1.32 {d28[1]}, [r0,:32], r1
vld1.32 {d29[1]}, [r0,:32], r1
vst1.16 {q12}, [r2,:128]! @ memset(block, 0, 16)
vst1.16 {q12}, [r2,:128] @ memset(block+16, 0, 16)
vtrn.16 d16, d17
vtrn.32 d28, d29
vtrn.16 d18, d19
vaddw.u8 q0, q8, d28
vaddw.u8 q1, q9, d29
vqmovun.s16 d28, q0
vqmovun.s16 d29, q1
vst1.32 {d28[0]}, [r3,:32], r1
vst1.32 {d28[1]}, [r3,:32], r1
vst1.32 {d29[0]}, [r3,:32], r1
vst1.32 {d29[1]}, [r3,:32], r1
bx lr bx lr
endfunc endfunc
/* void rv34_inv_transform_noround_neon(DCTELEM *block); */ /* void rv34_inv_transform_noround_neon(DCTELEM *block); */
function ff_rv34_inv_transform_noround_neon, export=1 function ff_rv34_inv_transform_noround_neon, export=1
mov r2, r0 rv34_inv_transform r0
rv34_inv_transform
vshl.s32 q11, q2, #1 vshl.s32 q11, q2, #1
vshl.s32 q10, q1, #1 vshl.s32 q10, q1, #1
vshl.s32 q12, q3, #1 vshl.s32 q12, q3, #1
@@ -101,24 +112,33 @@ function ff_rv34_inv_transform_noround_neon, export=1
vshrn.s32 d1, q11, #11 @ (z1 + z2)*3 >> 11 vshrn.s32 d1, q11, #11 @ (z1 + z2)*3 >> 11
vshrn.s32 d2, q12, #11 @ (z1 - z2)*3 >> 11 vshrn.s32 d2, q12, #11 @ (z1 - z2)*3 >> 11
vshrn.s32 d3, q13, #11 @ (z0 - z3)*3 >> 11 vshrn.s32 d3, q13, #11 @ (z0 - z3)*3 >> 11
vst4.16 {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1 vst4.16 {d0[0], d1[0], d2[0], d3[0]}, [r0,:64]!
vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1 vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r0,:64]!
vst4.16 {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1 vst4.16 {d0[2], d1[2], d2[2], d3[2]}, [r0,:64]!
vst4.16 {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1 vst4.16 {d0[3], d1[3], d2[3], d3[3]}, [r0,:64]!
bx lr bx lr
endfunc endfunc
/* void rv34_inv_transform_dc_c(DCTELEM *block) */ /* void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc) */
function ff_rv34_inv_transform_dc_neon, export=1 function ff_rv34_idct_dc_add_neon, export=1
vld1.16 {d28[]}, [r0,:16] @ block[0] mov r3, r0
vmov.i16 d4, #169 vld1.32 {d28[]}, [r0,:32], r1
mov r1, #16 vld1.32 {d29[]}, [r0,:32], r1
vmull.s16 q3, d28, d4 vdup.16 d0, r2
vrshrn.s32 d0, q3, #10 vmov.s16 d1, #169
vst1.16 {d0}, [r0,:64], r1 vld1.32 {d28[1]}, [r0,:32], r1
vst1.16 {d0}, [r0,:64], r1 vmull.s16 q1, d0, d1 @ dc * 13 * 13
vst1.16 {d0}, [r0,:64], r1 vld1.32 {d29[1]}, [r0,:32], r1
vst1.16 {d0}, [r0,:64], r1 vrshrn.s32 d0, q1, #10 @ (dc * 13 * 13 + 0x200) >> 10
vmov d1, d0
vaddw.u8 q2, q0, d28
vaddw.u8 q3, q0, d29
vqmovun.s16 d28, q2
vqmovun.s16 d29, q3
vst1.32 {d28[0]}, [r3,:32], r1
vst1.32 {d29[0]}, [r3,:32], r1
vst1.32 {d28[1]}, [r3,:32], r1
vst1.32 {d29[1]}, [r3,:32], r1
bx lr bx lr
endfunc endfunc
@@ -127,12 +147,10 @@ function ff_rv34_inv_transform_noround_dc_neon, export=1
vld1.16 {d28[]}, [r0,:16] @ block[0] vld1.16 {d28[]}, [r0,:16] @ block[0]
vmov.i16 d4, #251 vmov.i16 d4, #251
vorr.s16 d4, #256 @ 13^2 * 3 vorr.s16 d4, #256 @ 13^2 * 3
mov r1, #16
vmull.s16 q3, d28, d4 vmull.s16 q3, d28, d4
vshrn.s32 d0, q3, #11 vshrn.s32 d0, q3, #11
vst1.64 {d0}, [r0,:64], r1 vmov.i16 d1, d0
vst1.64 {d0}, [r0,:64], r1 vst1.64 {q0}, [r0,:128]!
vst1.64 {d0}, [r0,:64], r1 vst1.64 {q0}, [r0,:128]!
vst1.64 {d0}, [r0,:64], r1
bx lr bx lr
endfunc endfunc
@@ -761,6 +761,11 @@ typedef struct RcOverride{
  * Encoders:
  * The encoder needs to be fed with NULL data at the end of encoding until the
  * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ *       flag also means that the encoder must set the pts and duration for
+ *       each output packet. If this flag is not set, the pts and duration will
+ *       be determined by libavcodec from the input frame.
  */
 #define CODEC_CAP_DELAY 0x0020
 /**
@@ -815,6 +820,10 @@ typedef struct RcOverride{
  * Codec supports avctx->thread_count == 0 (auto).
  */
 #define CODEC_CAP_AUTO_THREADS 0x8000
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
 /**
  * Codec is lossless.
  */
@@ -3314,6 +3323,19 @@ typedef struct AVCodec {
      * Initialize codec static data, called from avcodec_register().
      */
     void (*init_static_data)(struct AVCodec *codec);
+
+    /**
+     * Encode data to an AVPacket.
+     *
+     * @param      avctx          codec context
+     * @param      avpkt          output AVPacket (may contain a user-provided buffer)
+     * @param[in]  frame          AVFrame containing the raw data to be encoded
+     * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+     *                            non-empty packet was returned in avpkt.
+     * @return 0 on success, negative error code on failure
+     */
+    int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
+                   int *got_packet_ptr);
 } AVCodec;
 /**
@@ -4331,9 +4353,12 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
  */
 void avsubtitle_free(AVSubtitle *sub);

+#if FF_API_OLD_ENCODE_AUDIO
 /**
  * Encode an audio frame from samples into buf.
  *
+ * @deprecated Use avcodec_encode_audio2 instead.
+ *
  * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
  * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
  * will know how much space is needed because it depends on the value passed
@@ -4353,8 +4378,71 @@ void avsubtitle_free(AVSubtitle *sub);
  * @return On error a negative value is returned, on success zero or the number
  * of bytes used to encode the data read from the input buffer.
  */
-int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
-                         const short *samples);
+int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
+                                              uint8_t *buf, int buf_size,
+                                              const short *samples);
+#endif
/**
* Encode a frame of audio.
*
* Takes input samples from frame and writes the next output packet, if
* available, to avpkt. The output packet does not necessarily contain data for
* the most recent frame, as encoders can delay, split, and combine input frames
* internally as needed.
*
* @param avctx codec context
* @param avpkt output AVPacket.
* The user can supply an output buffer by setting
* avpkt->data and avpkt->size prior to calling the
* function, but if the size of the user-provided data is not
* large enough, encoding will fail. All other AVPacket fields
* will be reset by the encoder using av_init_packet(). If
* avpkt->data is NULL, the encoder will allocate it.
* The encoder will set avpkt->size to the size of the
* output packet.
* @param[in] frame AVFrame containing the raw audio data to be encoded.
* May be NULL when flushing an encoder that has the
* CODEC_CAP_DELAY capability set.
* There are 2 codec capabilities that affect the allowed
* values of frame->nb_samples.
* If CODEC_CAP_SMALL_LAST_FRAME is set, then only the final
* frame may be smaller than avctx->frame_size, and all other
* frames must be equal to avctx->frame_size.
* If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
* can have any number of samples.
* If neither is set, frame->nb_samples must be equal to
* avctx->frame_size for all frames.
* @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
* output packet is non-empty, and to 0 if it is
* empty. If the function returns an error, the
* packet can be assumed to be invalid, and the
* value of got_packet_ptr is undefined and should
* not be used.
* @return 0 on success, negative error code on failure
*/
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr);
/**
* Fill audio frame data and linesize.
* AVFrame extended_data channel pointers are allocated if necessary for
* planar audio.
*
* @param frame the AVFrame
* frame->nb_samples must be set prior to calling the
* function. This function fills in frame->data,
* frame->extended_data, frame->linesize[0].
* @param nb_channels channel count
* @param sample_fmt sample format
* @param buf buffer to use for frame data
* @param buf_size size of buffer
* @param align plane size sample alignment
* @return 0 on success, negative error code on failure
*/
int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
enum AVSampleFormat sample_fmt, const uint8_t *buf,
int buf_size, int align);
 /**
  * Encode a video frame from pict into buf.
......
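For orientation, a minimal caller of the audio encoding API added to avcodec.h above might look like the sketch below. This is not part of the commit; the codec context setup, the pcm_buf input buffer and the write_packet() output callback are assumptions made purely for illustration.

    /* Hypothetical encode step for the new API (illustrative only). */
    AVFrame  frame;
    AVPacket pkt;
    int got_packet = 0, ret;

    avcodec_get_frame_defaults(&frame);
    frame.nb_samples = avctx->frame_size;
    ret = avcodec_fill_audio_frame(&frame, avctx->channels, avctx->sample_fmt,
                                   pcm_buf, pcm_buf_size, 1);
    if (ret < 0)
        return ret;

    av_init_packet(&pkt);
    pkt.data = NULL;                 /* let the encoder allocate the output buffer */
    pkt.size = 0;
    ret = avcodec_encode_audio2(avctx, &pkt, &frame, &got_packet);
    if (ret < 0)
        return ret;
    if (got_packet)
        write_packet(&pkt);          /* assumed output callback */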
@@ -61,6 +61,14 @@ typedef struct AVCodecInternal {
      * should be freed from the original context only.
      */
     int is_copy;
#if FF_API_OLD_DECODE_AUDIO
/**
* Internal sample count used by avcodec_encode_audio() to fabricate pts.
* Can be removed along with avcodec_encode_audio().
*/
int sample_count;
#endif
 } AVCodecInternal;

 struct AVCodecDefault {
@@ -111,4 +119,21 @@ int avpriv_unlock_avformat(void);
  */
 #define FF_MAX_EXTRADATA_SIZE ((1 << 28) - FF_INPUT_BUFFER_PADDING_SIZE)
/**
* Check AVPacket size and/or allocate data.
*
* Encoders supporting AVCodec.encode2() can use this as a convenience to
* ensure the output packet data is large enough, whether provided by the user
* or allocated in this function.
*
* @param avpkt the AVPacket
* If avpkt->data is already set, avpkt->size is checked
* to ensure it is large enough.
* If avpkt->data is NULL, a new buffer is allocated.
* All other AVPacket fields will be reset with av_init_packet().
* @param size the minimum required packet size
* @return 0 on success, negative error code on failure
*/
int ff_alloc_packet(AVPacket *avpkt, int size);
 #endif /* AVCODEC_INTERNAL_H */
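The pcm encoder conversion that follows is the first in-tree user of ff_alloc_packet(); the general shape of an AVCodec.encode2() implementation built on it is sketched below. This is a hypothetical codec for illustration only, and max_packet_size() is an assumed helper, not an existing function.

    static int foo_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                                const AVFrame *frame, int *got_packet_ptr)
    {
        int ret, out_size = max_packet_size(avctx, frame); /* assumed helper */

        if ((ret = ff_alloc_packet(avpkt, out_size)) < 0)
            return ret;
        /* ... compress frame into avpkt->data ... */
        avpkt->size     = out_size;   /* bytes actually written */
        *got_packet_ptr = 1;
        return 0;
    }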
@@ -27,6 +27,7 @@
 #include "avcodec.h"
 #include "libavutil/common.h" /* for av_reverse */
 #include "bytestream.h"
+#include "internal.h"
 #include "pcm_tablegen.h"

 #define MAX_CHANNELS 64
@@ -77,10 +78,10 @@ static av_cold int pcm_encode_close(AVCodecContext *avctx)
         bytestream_put_##endian(&dst, v); \
     }

-static int pcm_encode_frame(AVCodecContext *avctx,
-                            unsigned char *frame, int buf_size, void *data)
+static int pcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+                            const AVFrame *frame, int *got_packet_ptr)
 {
-    int n, sample_size, v;
+    int n, sample_size, v, ret;
     const short *samples;
     unsigned char *dst;
     const uint8_t *srcu8;
@@ -91,9 +92,14 @@ static int pcm_encode_frame(AVCodecContext *avctx,
     const uint32_t *samples_uint32_t;

     sample_size = av_get_bits_per_sample(avctx->codec->id)/8;
-    n       = buf_size / sample_size;
-    samples = data;
-    dst     = frame;
+    n       = frame->nb_samples * avctx->channels;
+    samples = (const short *)frame->data[0];
+
+    if ((ret = ff_alloc_packet(avpkt, n * sample_size))) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
+        return ret;
+    }
+    dst = avpkt->data;

     switch(avctx->codec->id) {
     case CODEC_ID_PCM_U32LE:
@@ -130,7 +136,7 @@ static int pcm_encode_frame(AVCodecContext *avctx,
         ENCODE(uint16_t, be16, samples, dst, n, 0, 0x8000)
         break;
     case CODEC_ID_PCM_S8:
-        srcu8= data;
+        srcu8 = frame->data[0];
         for(;n>0;n--) {
             v = *srcu8++;
             *dst++ = v - 128;
@@ -186,9 +192,10 @@ static int pcm_encode_frame(AVCodecContext *avctx,
     default:
         return -1;
     }
-    //avctx->frame_size = (dst - frame) / (sample_size * avctx->channels);

-    return dst - frame;
+    avpkt->size = frame->nb_samples * avctx->channels * sample_size;
+    *got_packet_ptr = 1;
+    return 0;
 }

 typedef struct PCMDecode {
@@ -474,8 +481,9 @@ AVCodec ff_ ## name_ ## _encoder = { \
     .type         = AVMEDIA_TYPE_AUDIO, \
     .id           = id_, \
     .init         = pcm_encode_init, \
-    .encode       = pcm_encode_frame, \
+    .encode2      = pcm_encode_frame, \
     .close        = pcm_encode_close, \
+    .capabilities = CODEC_CAP_VARIABLE_FRAME_SIZE, \
     .sample_fmts  = (const enum AVSampleFormat[]){sample_fmt_,AV_SAMPLE_FMT_NONE}, \
     .long_name    = NULL_IF_CONFIG_SMALL(long_name_), \
 }
......
@@ -33,6 +33,7 @@ typedef struct PicContext {
     AVFrame frame;
     int width, height;
     int nb_planes;
+    GetByteContext g;
 } PicContext;

 static void picmemset_8bpp(PicContext *s, int value, int run, int *x, int *y)
@@ -55,7 +56,8 @@ static void picmemset_8bpp(PicContext *s, int value, int run, int *x, int *y)
     }
 }

-static void picmemset(PicContext *s, int value, int run, int *x, int *y, int *plane, int bits_per_plane)
+static void picmemset(PicContext *s, int value, int run,
+                      int *x, int *y, int *plane, int bits_per_plane)
 {
     uint8_t *d;
     int shift = *plane * bits_per_plane;
@@ -107,34 +109,35 @@ static int decode_frame(AVCodecContext *avctx,
AVPacket *avpkt) AVPacket *avpkt)
{ {
PicContext *s = avctx->priv_data; PicContext *s = avctx->priv_data;
int buf_size = avpkt->size;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + buf_size;
uint32_t *palette; uint32_t *palette;
int bits_per_plane, bpp, etype, esize, npal; int bits_per_plane, bpp, etype, esize, npal, pos_after_pal;
int i, x, y, plane; int i, x, y, plane, tmp;
if (buf_size < 11) bytestream2_init(&s->g, avpkt->data, avpkt->size);
if (bytestream2_get_bytes_left(&s->g) < 11)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
if (bytestream_get_le16(&buf) != 0x1234) if (bytestream2_get_le16u(&s->g) != 0x1234)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
s->width = bytestream_get_le16(&buf);
s->height = bytestream_get_le16(&buf); s->width = bytestream2_get_le16u(&s->g);
buf += 4; s->height = bytestream2_get_le16u(&s->g);
bits_per_plane = *buf & 0xF; bytestream2_skip(&s->g, 4);
s->nb_planes = (*buf++ >> 4) + 1; tmp = bytestream2_get_byteu(&s->g);
bpp = s->nb_planes ? bits_per_plane*s->nb_planes : bits_per_plane; bits_per_plane = tmp & 0xF;
s->nb_planes = (tmp >> 4) + 1;
bpp = bits_per_plane * s->nb_planes;
if (bits_per_plane > 8 || bpp < 1 || bpp > 32) { if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
av_log_ask_for_sample(avctx, "unsupported bit depth\n"); av_log_ask_for_sample(avctx, "unsupported bit depth\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
if (*buf == 0xFF || bpp == 8) { if (bytestream2_peek_byte(&s->g) == 0xFF || bpp == 8) {
buf += 2; bytestream2_skip(&s->g, 2);
etype = bytestream_get_le16(&buf); etype = bytestream2_get_le16(&s->g);
esize = bytestream_get_le16(&buf); esize = bytestream2_get_le16(&s->g);
if (buf_end - buf < esize) if (bytestream2_get_bytes_left(&s->g) < esize)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} else { } else {
etype = -1; etype = -1;
@@ -159,25 +162,30 @@ static int decode_frame(AVCodecContext *avctx,
s->frame.pict_type = AV_PICTURE_TYPE_I; s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1; s->frame.palette_has_changed = 1;
pos_after_pal = bytestream2_tell(&s->g) + esize;
palette = (uint32_t*)s->frame.data[1]; palette = (uint32_t*)s->frame.data[1];
if (etype == 1 && esize > 1 && *buf < 6) { if (etype == 1 && esize > 1 && bytestream2_peek_byte(&s->g) < 6) {
int idx = *buf; int idx = bytestream2_get_byte(&s->g);
npal = 4; npal = 4;
for (i = 0; i < npal; i++) for (i = 0; i < npal; i++)
palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ]; palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ];
} else if (etype == 2) { } else if (etype == 2) {
npal = FFMIN(esize, 16); npal = FFMIN(esize, 16);
for (i = 0; i < npal; i++) for (i = 0; i < npal; i++) {
palette[i] = ff_cga_palette[ FFMIN(buf[i], 16)]; int pal_idx = bytestream2_get_byte(&s->g);
palette[i] = ff_cga_palette[FFMIN(pal_idx, 16)];
}
} else if (etype == 3) { } else if (etype == 3) {
npal = FFMIN(esize, 16); npal = FFMIN(esize, 16);
for (i = 0; i < npal; i++) for (i = 0; i < npal; i++) {
palette[i] = ff_ega_palette[ FFMIN(buf[i], 63)]; int pal_idx = bytestream2_get_byte(&s->g);
palette[i] = ff_ega_palette[FFMIN(pal_idx, 63)];
}
} else if (etype == 4 || etype == 5) { } else if (etype == 4 || etype == 5) {
npal = FFMIN(esize / 3, 256); npal = FFMIN(esize / 3, 256);
for (i = 0; i < npal; i++) { for (i = 0; i < npal; i++) {
palette[i] = AV_RB24(buf + i*3) << 2; palette[i] = bytestream2_get_be24(&s->g) << 2;
palette[i] |= 0xFF << 24 | palette[i] >> 6 & 0x30303; palette[i] |= 0xFFU << 24 | palette[i] >> 6 & 0x30303;
} }
} else { } else {
if (bpp == 1) { if (bpp == 1) {
@@ -195,29 +203,34 @@ static int decode_frame(AVCodecContext *avctx,
} }
// fill remaining palette entries // fill remaining palette entries
memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4); memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4);
buf += esize; // skip remaining palette bytes
bytestream2_seek(&s->g, pos_after_pal, SEEK_SET);
y = s->height - 1; y = s->height - 1;
if (bytestream_get_le16(&buf)) { if (bytestream2_get_le16(&s->g)) {
x = 0; x = 0;
plane = 0; plane = 0;
while (y >= 0 && buf_end - buf >= 6) { while (y >= 0 && bytestream2_get_bytes_left(&s->g) >= 6) {
const uint8_t *buf_pend = buf + FFMIN(AV_RL16(buf), buf_end - buf); int stop_size, marker, t1, t2;
//ignore uncompressed block size reported at buf[2]
int marker = buf[4]; t1 = bytestream2_get_bytes_left(&s->g);
buf += 5; t2 = bytestream2_get_le16(&s->g);
stop_size = t1 - FFMIN(t1, t2);
// ignore uncompressed block size
bytestream2_skip(&s->g, 2);
marker = bytestream2_get_byte(&s->g);
while (plane < s->nb_planes && y >= 0 && buf_pend - buf >= 1) { while (plane < s->nb_planes && y >= 0 &&
bytestream2_get_bytes_left(&s->g) > stop_size) {
int run = 1; int run = 1;
int val = *buf++; int val = bytestream2_get_byte(&s->g);
if (val == marker) { if (val == marker) {
run = *buf++; run = bytestream2_get_byte(&s->g);
if (run == 0) if (run == 0)
run = bytestream_get_le16(&buf); run = bytestream2_get_le16(&s->g);
val = *buf++; val = bytestream2_get_byte(&s->g);
} }
if (buf > buf_end) if (!bytestream2_get_bytes_left(&s->g))
break; break;
if (bits_per_plane == 8) { if (bits_per_plane == 8) {
@@ -228,16 +241,16 @@ static int decode_frame(AVCodecContext *avctx,
} }
} }
} else { } else {
while (y >= 0 && buf < buf_end) { while (y >= 0 && bytestream2_get_bytes_left(&s->g) > 0) {
memcpy(s->frame.data[0] + y * s->frame.linesize[0], buf, FFMIN(avctx->width, buf_end - buf)); memcpy(s->frame.data[0] + y * s->frame.linesize[0], s->g.buffer, FFMIN(avctx->width, bytestream2_get_bytes_left(&s->g)));
buf += avctx->width; bytestream2_skip(&s->g, avctx->width);
y--; y--;
} }
} }
*data_size = sizeof(AVFrame); *data_size = sizeof(AVFrame);
*(AVFrame*)data = s->frame; *(AVFrame*)data = s->frame;
return buf_size; return avpkt->size;
} }
static av_cold int decode_end(AVCodecContext *avctx) static av_cold int decode_end(AVCodecContext *avctx)
......
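The pictordec changes above replace raw buf/buf_end pointer arithmetic with the bounds-checked bytestream2 reader. The general reading pattern, shown on a hypothetical header (not part of this commit; avpkt, width and height are assumed to exist in the caller), is:

    GetByteContext gb;

    bytestream2_init(&gb, avpkt->data, avpkt->size);
    if (bytestream2_get_bytes_left(&gb) < 4)   /* explicit size check            */
        return AVERROR_INVALIDDATA;
    width  = bytestream2_get_le16(&gb);        /* checked reads cannot run past  */
    height = bytestream2_get_le16(&gb);        /* the end of the packet          */
    bytestream2_skip(&gb, 4);                  /* skip fields we do not need     */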
@@ -37,10 +37,10 @@ static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block)
     int i;

     for(i = 0; i < 4; i++){
-        const int z0 = 13*(block[i+8*0] +    block[i+8*2]);
-        const int z1 = 13*(block[i+8*0] -    block[i+8*2]);
-        const int z2 =  7* block[i+8*1] - 17*block[i+8*3];
-        const int z3 = 17* block[i+8*1] +  7*block[i+8*3];
+        const int z0 = 13*(block[i+4*0] +    block[i+4*2]);
+        const int z1 = 13*(block[i+4*0] -    block[i+4*2]);
+        const int z2 =  7* block[i+4*1] - 17*block[i+4*3];
+        const int z3 = 17* block[i+4*1] +  7*block[i+4*3];

         temp[4*i+0] = z0 + z3;
         temp[4*i+1] = z1 + z2;
@@ -50,14 +50,16 @@ static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block)
 }

 /**
- * Real Video 3.0/4.0 inverse transform
+ * Real Video 3.0/4.0 inverse transform + sample reconstruction
  * Code is almost the same as in SVQ3, only scaling is different.
  */
-static void rv34_inv_transform_c(DCTELEM *block){
+static void rv34_idct_add_c(uint8_t *dst, int stride, DCTELEM *block){
     int temp[16];
-    int i;
+    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+    int i;

     rv34_row_transform(temp, block);
+    memset(block, 0, 16*sizeof(DCTELEM));

     for(i = 0; i < 4; i++){
         const int z0 = 13*(temp[4*0+i] + temp[4*2+i]) + 0x200;
@@ -65,10 +67,12 @@ static void rv34_inv_transform_c(DCTELEM *block){
         const int z2 =  7* temp[4*1+i] - 17*temp[4*3+i];
         const int z3 = 17* temp[4*1+i] +  7*temp[4*3+i];

-        block[i*8+0] = (z0 + z3) >> 10;
-        block[i*8+1] = (z1 + z2) >> 10;
-        block[i*8+2] = (z1 - z2) >> 10;
-        block[i*8+3] = (z0 - z3) >> 10;
+        dst[0] = cm[ dst[0] + ( (z0 + z3) >> 10 ) ];
+        dst[1] = cm[ dst[1] + ( (z1 + z2) >> 10 ) ];
+        dst[2] = cm[ dst[2] + ( (z1 - z2) >> 10 ) ];
+        dst[3] = cm[ dst[3] + ( (z0 - z3) >> 10 ) ];
+
+        dst += stride;
     }
 }
@@ -90,21 +94,27 @@ static void rv34_inv_transform_noround_c(DCTELEM *block){
         const int z2 =  7* temp[4*1+i] - 17*temp[4*3+i];
         const int z3 = 17* temp[4*1+i] +  7*temp[4*3+i];

-        block[i*8+0] = ((z0 + z3) * 3) >> 11;
-        block[i*8+1] = ((z1 + z2) * 3) >> 11;
-        block[i*8+2] = ((z1 - z2) * 3) >> 11;
-        block[i*8+3] = ((z0 - z3) * 3) >> 11;
+        block[i*4+0] = ((z0 + z3) * 3) >> 11;
+        block[i*4+1] = ((z1 + z2) * 3) >> 11;
+        block[i*4+2] = ((z1 - z2) * 3) >> 11;
+        block[i*4+3] = ((z0 - z3) * 3) >> 11;
     }
 }

-static void rv34_inv_transform_dc_c(DCTELEM *block)
+static void rv34_idct_dc_add_c(uint8_t *dst, int stride, int dc)
 {
-    DCTELEM dc = (13 * 13 * block[0] + 0x200) >> 10;
+    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
     int i, j;

-    for (i = 0; i < 4; i++, block += 8)
+    cm += (13*13*dc + 0x200) >> 10;
+
+    for (i = 0; i < 4; i++)
+    {
         for (j = 0; j < 4; j++)
-            block[j] = dc;
+            dst[j] = cm[ dst[j] ];
+
+        dst += stride;
+    }
 }

 static void rv34_inv_transform_dc_noround_c(DCTELEM *block)
@@ -112,7 +122,7 @@ static void rv34_inv_transform_dc_noround_c(DCTELEM *block)
     DCTELEM dc = (13 * 13 * 3 * block[0]) >> 11;
     int i, j;

-    for (i = 0; i < 4; i++, block += 8)
+    for (i = 0; i < 4; i++, block += 4)
         for (j = 0; j < 4; j++)
             block[j] = dc;
 }
@@ -121,10 +131,11 @@ static void rv34_inv_transform_dc_noround_c(DCTELEM *block)
 av_cold void ff_rv34dsp_init(RV34DSPContext *c, DSPContext* dsp) {
-    c->rv34_inv_transform_tab[0]    = rv34_inv_transform_c;
-    c->rv34_inv_transform_tab[1]    = rv34_inv_transform_noround_c;
-    c->rv34_inv_transform_dc_tab[0] = rv34_inv_transform_dc_c;
-    c->rv34_inv_transform_dc_tab[1] = rv34_inv_transform_dc_noround_c;
+    c->rv34_inv_transform    = rv34_inv_transform_noround_c;
+    c->rv34_inv_transform_dc = rv34_inv_transform_dc_noround_c;
+
+    c->rv34_idct_add    = rv34_idct_add_c;
+    c->rv34_idct_dc_add = rv34_idct_dc_add_c;

     if (HAVE_NEON)
         ff_rv34dsp_init_neon(c, dsp);
......
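A note on the 13*13 constant used by the DC shortcut above (and by the NEON and x86 DC paths elsewhere in this commit): when only block[0] is nonzero, every term of rv34_row_transform() collapses to 13*block[0], and the column pass multiplies by 13 again before the same >>10 rounding. The sketch below is an illustrative check of that reduction, not part of the commit:

    /* DC-only input through the full RV34 transform (rounding variant). */
    static int rv34_dc_reference(int dc)
    {
        int row = 13 * dc;                  /* row pass: z0 == z1 == 13*dc      */
        return (13 * row + 0x200) >> 10;    /* column pass with rounding bias   */
    }                                       /* == (13*13*dc + 0x200) >> 10      */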
@@ -36,6 +36,10 @@ typedef void (*rv40_weight_func)(uint8_t *dst/*align width (8 or 16)*/,
 typedef void (*rv34_inv_transform_func)(DCTELEM *block);

+typedef void (*rv34_idct_add_func)(uint8_t *dst, int stride, DCTELEM *block);
+typedef void (*rv34_idct_dc_add_func)(uint8_t *dst, int stride,
+                                      int dc);
+
 typedef void (*rv40_weak_loop_filter_func)(uint8_t *src, int stride,
                                            int filter_p1, int filter_q1,
                                            int alpha, int beta,
@@ -55,8 +59,10 @@ typedef struct RV34DSPContext {
     h264_chroma_mc_func put_chroma_pixels_tab[3];
     h264_chroma_mc_func avg_chroma_pixels_tab[3];
     rv40_weight_func rv40_weight_pixels_tab[2];
-    rv34_inv_transform_func rv34_inv_transform_tab[2];
-    void (*rv34_inv_transform_dc_tab[2])(DCTELEM *block);
+    rv34_inv_transform_func rv34_inv_transform;
+    rv34_inv_transform_func rv34_inv_transform_dc;
+    rv34_idct_add_func rv34_idct_add;
+    rv34_idct_dc_add_func rv34_idct_dc_add;
     rv40_weak_loop_filter_func rv40_weak_loop_filter[2];
     rv40_strong_loop_filter_func rv40_strong_loop_filter[2];
     rv40_loop_filter_strength_func rv40_loop_filter_strength[2];
......
@@ -21,7 +21,7 @@
 #define AVCODEC_VERSION_H

 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 55
+#define LIBAVCODEC_VERSION_MINOR 56
 #define LIBAVCODEC_VERSION_MICRO 105

 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -123,5 +123,8 @@
 #ifndef FF_API_AVFRAME_AGE
 #define FF_API_AVFRAME_AGE (LIBAVCODEC_VERSION_MAJOR < 54)
 #endif
+#ifndef FF_API_OLD_ENCODE_AUDIO
+#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif

 #endif /* AVCODEC_VERSION_H */
@@ -35,21 +35,84 @@ SECTION .text
     sar         %1, 10
 %endmacro

-%macro rv34_idct_dequant4x4_dc 1
-cglobal rv34_idct_dequant4x4_%1_mmx2, 1, 2, 0
+%macro rv34_idct 1
+cglobal rv34_idct_%1_mmx2, 1, 2, 0
     movsx       r1, word [r0]
     IDCT_DC     r1
-    movd        mm0, r1d
-    pshufw      mm0, mm0, 0
-    movq        [r0+ 0], mm0
-    movq        [r0+16], mm0
-    movq        [r0+32], mm0
-    movq        [r0+48], mm0
+    movd        m0, r1d
+    pshufw      m0, m0, 0
+    movq        [r0+ 0], m0
+    movq        [r0+ 8], m0
+    movq        [r0+16], m0
+    movq        [r0+24], m0
     REP_RET
 %endmacro

 INIT_MMX
 %define IDCT_DC IDCT_DC_ROUND
-rv34_idct_dequant4x4_dc dc
+rv34_idct dc
 %define IDCT_DC IDCT_DC_NOROUND
-rv34_idct_dequant4x4_dc dc_noround
+rv34_idct dc_noround
; ff_rv34_idct_dc_add_mmx(uint8_t *dst, int stride, int dc);
cglobal rv34_idct_dc_add_mmx, 3, 3
; calculate DC
IDCT_DC_ROUND r2
pxor m1, m1
movd m0, r2
psubw m1, m0
packuswb m0, m0
packuswb m1, m1
punpcklbw m0, m0
punpcklbw m1, m1
punpcklwd m0, m0
punpcklwd m1, m1
; add DC
lea r2, [r0+r1*2]
movh m2, [r0]
movh m3, [r0+r1]
movh m4, [r2]
movh m5, [r2+r1]
paddusb m2, m0
paddusb m3, m0
paddusb m4, m0
paddusb m5, m0
psubusb m2, m1
psubusb m3, m1
psubusb m4, m1
psubusb m5, m1
movh [r0], m2
movh [r0+r1], m3
movh [r2], m4
movh [r2+r1], m5
RET
; ff_rv34_idct_dc_add_sse4(uint8_t *dst, int stride, int dc);
INIT_XMM
cglobal rv34_idct_dc_add_sse4, 3, 3, 6
; load data
IDCT_DC_ROUND r2
pxor m1, m1
; calculate DC
movd m0, r2
lea r2, [r0+r1*2]
movd m2, [r0]
movd m3, [r0+r1]
pshuflw m0, m0, 0
movd m4, [r2]
movd m5, [r2+r1]
punpcklqdq m0, m0
punpckldq m2, m3
punpckldq m4, m5
punpcklbw m2, m1
punpcklbw m4, m1
paddw m2, m0
paddw m4, m0
packuswb m2, m4
movd [r0], m2
pextrd [r0+r1], m2, 1
pextrd [r2], m2, 2
pextrd [r2+r1], m2, 3
RET
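The MMX version of rv34_idct_dc_add above only has unsigned saturating byte arithmetic available, so it splits the signed DC value into a non-negative part (broadcast in m0) and a non-positive part (broadcast in m1) and applies paddusb followed by psubusb. A scalar C equivalent of that trick is sketched below; it is illustrative only, not part of the commit, and it omits the extra clamp to 255 that packuswb applies to the broadcast values.

    #include <stdint.h>

    static uint8_t add_dc_sat(uint8_t pixel, int dc)
    {
        int pos = dc > 0 ?  dc : 0;   /* broadcast in m0 */
        int neg = dc < 0 ? -dc : 0;   /* broadcast in m1 */
        int v   = pixel + pos;
        if (v > 255) v = 255;         /* paddusb saturation */
        v -= neg;
        if (v < 0)   v = 0;           /* psubusb saturation */
        return v;
    }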
@@ -24,17 +24,22 @@
 #include "libavcodec/dsputil.h"
 #include "libavcodec/rv34dsp.h"

-void ff_rv34_idct_dequant4x4_dc_mmx2(DCTELEM *block);
-void ff_rv34_idct_dequant4x4_dc_noround_mmx2(DCTELEM *block);
+void ff_rv34_idct_dc_mmx2(DCTELEM *block);
+void ff_rv34_idct_dc_noround_mmx2(DCTELEM *block);
+void ff_rv34_idct_dc_add_mmx(uint8_t *dst, int stride, int dc);
+void ff_rv34_idct_dc_add_sse4(uint8_t *dst, int stride, int dc);

 av_cold void ff_rv34dsp_init_x86(RV34DSPContext* c, DSPContext *dsp)
 {
 #if HAVE_YASM
     int mm_flags = av_get_cpu_flags();

+    if (mm_flags & AV_CPU_FLAG_MMX)
+        c->rv34_idct_dc_add = ff_rv34_idct_dc_add_mmx;
     if (mm_flags & AV_CPU_FLAG_MMX2) {
-        c->rv34_inv_transform_dc_tab[0] = ff_rv34_idct_dequant4x4_dc_mmx2;
-        c->rv34_inv_transform_dc_tab[1] = ff_rv34_idct_dequant4x4_dc_noround_mmx2;
+        c->rv34_inv_transform_dc = ff_rv34_idct_dc_noround_mmx2;
     }
+    if (mm_flags & AV_CPU_FLAG_SSE4)
+        c->rv34_idct_dc_add = ff_rv34_idct_dc_add_sse4;
 #endif
 }
@@ -291,7 +291,8 @@ OBJS-$(CONFIG_SEGMENT_MUXER) += segment.o
 OBJS-$(CONFIG_SHORTEN_DEMUXER) += rawdec.o
 OBJS-$(CONFIG_SIFF_DEMUXER) += siff.o
 OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o
-OBJS-$(CONFIG_SMJPEG_DEMUXER) += smjpeg.o
+OBJS-$(CONFIG_SMJPEG_DEMUXER) += smjpegdec.o smjpeg.o
+OBJS-$(CONFIG_SMJPEG_MUXER) += smjpegenc.o smjpeg.o
 OBJS-$(CONFIG_SOL_DEMUXER) += sol.o pcm.o
 OBJS-$(CONFIG_SOX_DEMUXER) += soxdec.o pcm.o
 OBJS-$(CONFIG_SOX_MUXER) += soxenc.o
......
@@ -212,7 +212,7 @@ void av_register_all(void)
     REGISTER_DEMUXER  (SHORTEN, shorten);
     REGISTER_DEMUXER  (SIFF, siff);
     REGISTER_DEMUXER  (SMACKER, smacker);
-    REGISTER_DEMUXER  (SMJPEG, smjpeg);
+    REGISTER_MUXDEMUX (SMJPEG, smjpeg);
     REGISTER_DEMUXER  (SOL, sol);
     REGISTER_MUXDEMUX (SOX, sox);
     REGISTER_MUXDEMUX (SPDIF, spdif);
......
 /*
- * SMJPEG demuxer
- * Copyright (c) 2011 Paul B Mahol
+ * SMJPEG common code
+ * Copyright (c) 2011-2012 Paul B Mahol
  *
  * This file is part of FFmpeg.
  *
@@ -21,167 +21,20 @@
 /**
  * @file
- * This is a demuxer for Loki SDL Motion JPEG files
+ * SMJPEG common code
  */

 #include "avformat.h"
 #include "internal.h"
-#include "riff.h"
+#include "smjpeg.h"

-static const AVCodecTag codec_smjpeg_video_tags[] = {
+const AVCodecTag ff_codec_smjpeg_video_tags[] = {
     { CODEC_ID_MJPEG, MKTAG('J', 'F', 'I', 'F') },
     { CODEC_ID_NONE, 0 },
 };

-static const AVCodecTag codec_smjpeg_audio_tags[] = {
+const AVCodecTag ff_codec_smjpeg_audio_tags[] = {
     { CODEC_ID_ADPCM_IMA_SMJPEG, MKTAG('A', 'P', 'C', 'M') },
     { CODEC_ID_PCM_S16LE, MKTAG('N', 'O', 'N', 'E') },
     { CODEC_ID_NONE, 0 },
 };
typedef struct SMJPEGContext {
int audio_stream_index;
int video_stream_index;
} SMJPEGContext;
static int smjpeg_probe(AVProbeData *p)
{
if (!memcmp(p->buf, "\x0\xaSMJPEG", 8))
return AVPROBE_SCORE_MAX;
return 0;
}
static int smjpeg_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
SMJPEGContext *sc = s->priv_data;
AVStream *ast = NULL, *vst = NULL;
AVIOContext *pb = s->pb;
uint32_t version, htype, hlength, duration;
char *comment;
avio_skip(pb, 8); // magic
version = avio_rb32(pb);
if (version)
av_log_ask_for_sample(s, "unknown version %d\n", version);
duration = avio_rb32(pb); // in msec
while (!pb->eof_reached) {
htype = avio_rl32(pb);
switch (htype) {
case MKTAG('_', 'T', 'X', 'T'):
hlength = avio_rb32(pb);
if (!hlength || hlength > 512)
return AVERROR_INVALIDDATA;
comment = av_malloc(hlength + 1);
if (!comment)
return AVERROR(ENOMEM);
if (avio_read(pb, comment, hlength) != hlength) {
av_freep(&comment);
av_log(s, AV_LOG_ERROR, "error when reading comment\n");
return AVERROR_INVALIDDATA;
}
comment[hlength] = 0;
av_dict_set(&s->metadata, "comment", comment,
AV_DICT_DONT_STRDUP_VAL);
break;
case MKTAG('_', 'S', 'N', 'D'):
if (ast) {
av_log_ask_for_sample(s, "multiple audio streams not supported\n");
return AVERROR_INVALIDDATA;
}
hlength = avio_rb32(pb);
if (hlength < 8)
return AVERROR_INVALIDDATA;
ast = avformat_new_stream(s, 0);
if (!ast)
return AVERROR(ENOMEM);
ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
ast->codec->sample_rate = avio_rb16(pb);
ast->codec->bits_per_coded_sample = avio_r8(pb);
ast->codec->channels = avio_r8(pb);
ast->codec->codec_tag = avio_rl32(pb);
ast->codec->codec_id = ff_codec_get_id(codec_smjpeg_audio_tags,
ast->codec->codec_tag);
ast->duration = duration;
sc->audio_stream_index = ast->index;
avpriv_set_pts_info(ast, 32, 1, 1000);
avio_skip(pb, hlength - 8);
break;
case MKTAG('_', 'V', 'I', 'D'):
if (vst) {
av_log_ask_for_sample(s, "multiple video streams not supported\n");
return AVERROR_INVALIDDATA;
}
hlength = avio_rb32(pb);
if (hlength < 12)
return AVERROR_INVALIDDATA;
avio_skip(pb, 4); // number of frames
vst = avformat_new_stream(s, 0);
if (!vst)
return AVERROR(ENOMEM);
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->width = avio_rb16(pb);
vst->codec->height = avio_rb16(pb);
vst->codec->codec_tag = avio_rl32(pb);
vst->codec->codec_id = ff_codec_get_id(codec_smjpeg_video_tags,
vst->codec->codec_tag);
vst->duration = duration;
sc->video_stream_index = vst->index;
avpriv_set_pts_info(vst, 32, 1, 1000);
avio_skip(pb, hlength - 12);
break;
case MKTAG('H', 'E', 'N', 'D'):
return 0;
default:
av_log(s, AV_LOG_ERROR, "unknown header %x\n", htype);
return AVERROR_INVALIDDATA;
}
}
return AVERROR_EOF;
}
static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
SMJPEGContext *sc = s->priv_data;
uint32_t dtype, ret, size, timestamp;
if (s->pb->eof_reached)
return AVERROR_EOF;
dtype = avio_rl32(s->pb);
switch (dtype) {
case MKTAG('s', 'n', 'd', 'D'):
timestamp = avio_rb32(s->pb);
size = avio_rb32(s->pb);
ret = av_get_packet(s->pb, pkt, size);
pkt->stream_index = sc->audio_stream_index;
pkt->pts = timestamp;
break;
case MKTAG('v', 'i', 'd', 'D'):
timestamp = avio_rb32(s->pb);
size = avio_rb32(s->pb);
ret = av_get_packet(s->pb, pkt, size);
pkt->stream_index = sc->video_stream_index;
pkt->pts = timestamp;
break;
case MKTAG('D', 'O', 'N', 'E'):
ret = AVERROR_EOF;
break;
default:
av_log(s, AV_LOG_ERROR, "unknown chunk %x\n", dtype);
ret = AVERROR_INVALIDDATA;
break;
}
return ret;
}
AVInputFormat ff_smjpeg_demuxer = {
.name = "smjpeg",
.long_name = NULL_IF_CONFIG_SMALL("Loki SDL MJPEG"),
.priv_data_size = sizeof(SMJPEGContext),
.read_probe = smjpeg_probe,
.read_header = smjpeg_read_header,
.read_packet = smjpeg_read_packet,
.extensions = "mjpg",
};
/*
* SMJPEG common code
* Copyright (c) 2011-2012 Paul B Mahol
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* SMJPEG common code
*/
#ifndef AVFORMAT_SMJPEG_H
#define AVFORMAT_SMJPEG_H
#include "internal.h"
#define SMJPEG_MAGIC "\x0\xaSMJPEG"
#define SMJPEG_DONE MKTAG('D', 'O', 'N', 'E')
#define SMJPEG_HEND MKTAG('H', 'E', 'N', 'D')
#define SMJPEG_SND MKTAG('_', 'S', 'N', 'D')
#define SMJPEG_SNDD MKTAG('s', 'n', 'd', 'D')
#define SMJPEG_TXT MKTAG('_', 'T', 'X', 'T')
#define SMJPEG_VID MKTAG('_', 'V', 'I', 'D')
#define SMJPEG_VIDD MKTAG('v', 'i', 'd', 'D')
extern const AVCodecTag ff_codec_smjpeg_video_tags[];
extern const AVCodecTag ff_codec_smjpeg_audio_tags[];
#endif /* AVFORMAT_SMJPEG_H */
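For orientation, the container layout implied by these tags, as read by the demuxer and written by the muxer that follow, is roughly (field widths taken from the code below):

    "\x0\xaSMJPEG"                    8-byte magic
    be32 version, be32 duration (ms)
    header chunks, each: fourcc tag + be32 length
        _TXT   "key = value" comment text
        _SND   be16 sample rate, u8 bits, u8 channels, fourcc codec tag
        _VID   be32 frame count, be16 width, be16 height, fourcc codec tag
    HEND                              end of the header section
    data chunks, each: fourcc tag + be32 timestamp (ms) + be32 size + payload
        sndD   audio packet
        vidD   video packet
    DONE                              end of stream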
/*
* SMJPEG demuxer
* Copyright (c) 2011 Paul B Mahol
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* This is a demuxer for Loki SDL Motion JPEG files
*/
#include "avformat.h"
#include "internal.h"
#include "riff.h"
#include "smjpeg.h"
typedef struct SMJPEGContext {
int audio_stream_index;
int video_stream_index;
} SMJPEGContext;
static int smjpeg_probe(AVProbeData *p)
{
if (!memcmp(p->buf, SMJPEG_MAGIC, 8))
return AVPROBE_SCORE_MAX;
return 0;
}
static int smjpeg_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
SMJPEGContext *sc = s->priv_data;
AVStream *ast = NULL, *vst = NULL;
AVIOContext *pb = s->pb;
uint32_t version, htype, hlength, duration;
char *comment;
avio_skip(pb, 8); // magic
version = avio_rb32(pb);
if (version)
av_log_ask_for_sample(s, "unknown version %d\n", version);
duration = avio_rb32(pb); // in msec
while (!pb->eof_reached) {
htype = avio_rl32(pb);
switch (htype) {
case SMJPEG_TXT:
hlength = avio_rb32(pb);
if (!hlength || hlength > 512)
return AVERROR_INVALIDDATA;
comment = av_malloc(hlength + 1);
if (!comment)
return AVERROR(ENOMEM);
if (avio_read(pb, comment, hlength) != hlength) {
av_freep(&comment);
av_log(s, AV_LOG_ERROR, "error when reading comment\n");
return AVERROR_INVALIDDATA;
}
comment[hlength] = 0;
av_dict_set(&s->metadata, "comment", comment,
AV_DICT_DONT_STRDUP_VAL);
break;
case SMJPEG_SND:
if (ast) {
av_log_ask_for_sample(s, "multiple audio streams not supported\n");
return AVERROR_INVALIDDATA;
}
hlength = avio_rb32(pb);
if (hlength < 8)
return AVERROR_INVALIDDATA;
ast = avformat_new_stream(s, 0);
if (!ast)
return AVERROR(ENOMEM);
ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
ast->codec->sample_rate = avio_rb16(pb);
ast->codec->bits_per_coded_sample = avio_r8(pb);
ast->codec->channels = avio_r8(pb);
ast->codec->codec_tag = avio_rl32(pb);
ast->codec->codec_id = ff_codec_get_id(ff_codec_smjpeg_audio_tags,
ast->codec->codec_tag);
ast->duration = duration;
sc->audio_stream_index = ast->index;
avpriv_set_pts_info(ast, 32, 1, 1000);
avio_skip(pb, hlength - 8);
break;
case SMJPEG_VID:
if (vst) {
av_log_ask_for_sample(s, "multiple video streams not supported\n");
return AVERROR_INVALIDDATA;
}
hlength = avio_rb32(pb);
if (hlength < 12)
return AVERROR_INVALIDDATA;
avio_skip(pb, 4); // number of frames
vst = avformat_new_stream(s, 0);
if (!vst)
return AVERROR(ENOMEM);
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->width = avio_rb16(pb);
vst->codec->height = avio_rb16(pb);
vst->codec->codec_tag = avio_rl32(pb);
vst->codec->codec_id = ff_codec_get_id(ff_codec_smjpeg_video_tags,
vst->codec->codec_tag);
vst->duration = duration;
sc->video_stream_index = vst->index;
avpriv_set_pts_info(vst, 32, 1, 1000);
avio_skip(pb, hlength - 12);
break;
case SMJPEG_HEND:
return 0;
default:
av_log(s, AV_LOG_ERROR, "unknown header %x\n", htype);
return AVERROR_INVALIDDATA;
}
}
return AVERROR_EOF;
}
static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
SMJPEGContext *sc = s->priv_data;
uint32_t dtype, ret, size, timestamp;
if (s->pb->eof_reached)
return AVERROR_EOF;
dtype = avio_rl32(s->pb);
switch (dtype) {
case SMJPEG_SNDD:
timestamp = avio_rb32(s->pb);
size = avio_rb32(s->pb);
ret = av_get_packet(s->pb, pkt, size);
pkt->stream_index = sc->audio_stream_index;
pkt->pts = timestamp;
break;
case SMJPEG_VIDD:
timestamp = avio_rb32(s->pb);
size = avio_rb32(s->pb);
ret = av_get_packet(s->pb, pkt, size);
pkt->stream_index = sc->video_stream_index;
pkt->pts = timestamp;
break;
case SMJPEG_DONE:
ret = AVERROR_EOF;
break;
default:
av_log(s, AV_LOG_ERROR, "unknown chunk %x\n", dtype);
ret = AVERROR_INVALIDDATA;
break;
}
return ret;
}
AVInputFormat ff_smjpeg_demuxer = {
.name = "smjpeg",
.long_name = NULL_IF_CONFIG_SMALL("Loki SDL MJPEG"),
.priv_data_size = sizeof(SMJPEGContext),
.read_probe = smjpeg_probe,
.read_header = smjpeg_read_header,
.read_packet = smjpeg_read_packet,
.extensions = "mjpg",
};
/*
* SMJPEG muxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* This is a muxer for Loki SDL Motion JPEG files
*/
#include "avformat.h"
#include "internal.h"
#include "riff.h"
#include "smjpeg.h"
typedef struct SMJPEGMuxContext {
uint32_t duration;
} SMJPEGMuxContext;
static int smjpeg_write_header(AVFormatContext *s)
{
AVDictionaryEntry *t = NULL;
AVIOContext *pb = s->pb;
int n, tag;
if (s->nb_streams > 2) {
av_log(s, AV_LOG_ERROR, "more than >2 streams are not supported\n");
return AVERROR(EINVAL);
}
avio_write(pb, SMJPEG_MAGIC, 8);
avio_wb32(pb, 0);
avio_wb32(pb, 0);
while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
avio_wl32(pb, SMJPEG_TXT);
avio_wb32(pb, strlen(t->key) + strlen(t->value) + 3);
avio_write(pb, t->key, strlen(t->key));
avio_write(pb, " = ", 3);
avio_write(pb, t->value, strlen(t->value));
}
for (n = 0; n < s->nb_streams; n++) {
AVStream *st = s->streams[n];
AVCodecContext *codec = st->codec;
if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
tag = ff_codec_get_tag(ff_codec_smjpeg_audio_tags, codec->codec_id);
if (!tag) {
av_log(s, AV_LOG_ERROR, "unsupported audio codec\n");
return AVERROR(EINVAL);
}
avio_wl32(pb, SMJPEG_SND);
avio_wb32(pb, 8);
avio_wb16(pb, codec->sample_rate);
avio_w8(pb, av_get_bits_per_sample(codec->codec_id));
avio_w8(pb, codec->channels);
avio_wl32(pb, tag);
avpriv_set_pts_info(st, 32, 1, 1000);
} else if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
tag = ff_codec_get_tag(ff_codec_smjpeg_video_tags, codec->codec_id);
if (!tag) {
av_log(s, AV_LOG_ERROR, "unsupported video codec\n");
return AVERROR(EINVAL);
}
avio_wl32(pb, SMJPEG_VID);
avio_wb32(pb, 12);
avio_wb32(pb, 0);
avio_wb16(pb, codec->width);
avio_wb16(pb, codec->height);
avio_wl32(pb, tag);
avpriv_set_pts_info(st, 32, 1, 1000);
}
}
avio_wl32(pb, SMJPEG_HEND);
avio_flush(pb);
return 0;
}
static int smjpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
{
SMJPEGMuxContext *smc = s->priv_data;
AVIOContext *pb = s->pb;
AVStream *st = s->streams[pkt->stream_index];
AVCodecContext *codec = st->codec;
if (codec->codec_type == AVMEDIA_TYPE_AUDIO)
avio_wl32(pb, SMJPEG_SNDD);
else if (codec->codec_type == AVMEDIA_TYPE_VIDEO)
avio_wl32(pb, SMJPEG_VIDD);
else
return 0;
avio_wb32(pb, pkt->pts);
avio_wb32(pb, pkt->size);
avio_write(pb, pkt->data, pkt->size);
avio_flush(pb);
smc->duration = FFMAX(smc->duration, pkt->pts + pkt->duration);
return 0;
}
static int smjpeg_write_trailer(AVFormatContext *s)
{
SMJPEGMuxContext *smc = s->priv_data;
AVIOContext *pb = s->pb;
int64_t currentpos;
if (pb->seekable) {
currentpos = avio_tell(pb);
avio_seek(pb, 12, SEEK_SET);
avio_wb32(pb, smc->duration);
avio_seek(pb, currentpos, SEEK_SET);
}
avio_wl32(pb, SMJPEG_DONE);
avio_flush(pb);
return 0;
}
AVOutputFormat ff_smjpeg_muxer = {
.name = "smjpeg",
.long_name = NULL_IF_CONFIG_SMALL("Loki SDL MJPEG"),
.priv_data_size = sizeof(SMJPEGMuxContext),
.audio_codec = CODEC_ID_PCM_S16LE,
.video_codec = CODEC_ID_MJPEG,
.write_header = smjpeg_write_header,
.write_packet = smjpeg_write_packet,
.write_trailer = smjpeg_write_trailer,
.flags = AVFMT_GLOBALHEADER,
.codec_tag = (const AVCodecTag *const []){ ff_codec_smjpeg_video_tags, ff_codec_smjpeg_audio_tags, 0 },
};
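With the muxer registered, a quick end-to-end check could be an invocation such as ffmpeg -i input.avi -c:v mjpeg -c:a pcm_s16le output.mjpg (illustrative only; exact option spelling depends on the ffmpeg build in use). MJPEG video and pcm_s16le audio match the codec tags the muxer accepts.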
@@ -30,7 +30,7 @@
 #include "libavutil/avutil.h"

 #define LIBAVFORMAT_VERSION_MAJOR 53
-#define LIBAVFORMAT_VERSION_MINOR 29
+#define LIBAVFORMAT_VERSION_MINOR 30
 #define LIBAVFORMAT_VERSION_MICRO 100

 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
......