Commit ec7ecb88 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  dca: Switch dca_sample_rates to avpriv_ prefix; it is used across libs
  ARM: use =const syntax instead of explicit literal pools
  ARM: use standard syntax for all LDRD/STRD instructions
  fft: port FFT/IMDCT 3dnow functions to yasm, and disable on x86-64.
  dct-test: allow to compile without HAVE_INLINE_ASM.
  x86/dsputilenc: bury inline asm under HAVE_INLINE_ASM.
  dca: Move tables used outside of dcadec.c to a separate file.
  dca: Rename dca.c ---> dcadec.c
  x86: h264dsp: Remove unused variable ff_pb_3_1
  apetag: change a forgotten return to return 0

Conflicts:
	libavcodec/Makefile
	libavcodec/dca.c
	libavcodec/x86/fft_3dn.c
	libavcodec/x86/fft_3dn2.c
	libavcodec/x86/fft_mmx.asm
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 4c8fc6a2 19cf7163
...@@ -134,8 +134,8 @@ OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o ...@@ -134,8 +134,8 @@ OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
OBJS-$(CONFIG_COOK_DECODER) += cook.o OBJS-$(CONFIG_COOK_DECODER) += cook.o
OBJS-$(CONFIG_CSCD_DECODER) += cscd.o OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o \ OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadsp.o \
dca_parser.o dca_parser.o synth_filter.o
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \ OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \
dirac_arith.o mpeg12data.o dwt.o dirac_arith.o mpeg12data.o dwt.o
...@@ -660,6 +660,7 @@ OBJS-$(CONFIG_OGG_MUXER) += xiph.o flac.o flacdata.o \ ...@@ -660,6 +660,7 @@ OBJS-$(CONFIG_OGG_MUXER) += xiph.o flac.o flacdata.o \
vorbis_data.o vorbis_data.o
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o mpegvideo.o xiph.o OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o mpegvideo.o xiph.o
OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o
OBJS-$(CONFIG_SPDIF_MUXER) += dca.o
OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o mpegaudiodata.o \ OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o mpegaudiodata.o \
xiph.o flac.o flacdata.o \ xiph.o flac.o flacdata.o \
vorbis_data.o vorbis_data.o
...@@ -716,7 +717,7 @@ OBJS-$(CONFIG_AC3_PARSER) += ac3_parser.o ac3tab.o \ ...@@ -716,7 +717,7 @@ OBJS-$(CONFIG_AC3_PARSER) += ac3_parser.o ac3tab.o \
OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o
OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o dca.o
OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o
OBJS-$(CONFIG_DVBSUB_PARSER) += dvbsub_parser.o OBJS-$(CONFIG_DVBSUB_PARSER) += dvbsub_parser.o
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
.macro h264_chroma_mc8 type, codec=h264 .macro h264_chroma_mc8 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc8_neon, export=1 function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
push {r4-r7, lr} push {r4-r7, lr}
ldrd r4, [sp, #20] ldrd r4, r5, [sp, #20]
.ifc \type,avg .ifc \type,avg
mov lr, r0 mov lr, r0
.endif .endif
...@@ -182,7 +182,7 @@ endfunc ...@@ -182,7 +182,7 @@ endfunc
.macro h264_chroma_mc4 type, codec=h264 .macro h264_chroma_mc4 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc4_neon, export=1 function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
push {r4-r7, lr} push {r4-r7, lr}
ldrd r4, [sp, #20] ldrd r4, r5, [sp, #20]
.ifc \type,avg .ifc \type,avg
mov lr, r0 mov lr, r0
.endif .endif
......
...@@ -886,7 +886,7 @@ T mov sp, r0 ...@@ -886,7 +886,7 @@ T mov sp, r0
mov r12, #8 mov r12, #8
vpush {d8-d15} vpush {d8-d15}
bl put_h264_qpel8_h_lowpass_neon bl put_h264_qpel8_h_lowpass_neon
ldrd r0, [r11], #8 ldrd r0, r1, [r11], #8
mov r3, r2 mov r3, r2
add r12, sp, #64 add r12, sp, #64
sub r1, r1, r2, lsl #1 sub r1, r1, r2, lsl #1
...@@ -913,7 +913,7 @@ T mov sp, r0 ...@@ -913,7 +913,7 @@ T mov sp, r0
vpush {d8-d15} vpush {d8-d15}
bl put_h264_qpel8_h_lowpass_neon bl put_h264_qpel8_h_lowpass_neon
mov r4, r0 mov r4, r0
ldrd r0, [r11], #8 ldrd r0, r1, [r11], #8
sub r1, r1, r2, lsl #1 sub r1, r1, r2, lsl #1
sub r1, r1, #2 sub r1, r1, #2
mov r3, r2 mov r3, r2
...@@ -958,7 +958,7 @@ T mov sp, r0 ...@@ -958,7 +958,7 @@ T mov sp, r0
vpush {d8-d15} vpush {d8-d15}
bl put_h264_qpel8_v_lowpass_neon bl put_h264_qpel8_v_lowpass_neon
mov r4, r0 mov r4, r0
ldrd r0, [r11], #8 ldrd r0, r1, [r11], #8
sub r1, r1, r3, lsl #1 sub r1, r1, r3, lsl #1
sub r1, r1, #2 sub r1, r1, #2
sub r2, r4, #64 sub r2, r4, #64
...@@ -1071,7 +1071,7 @@ T mov sp, r0 ...@@ -1071,7 +1071,7 @@ T mov sp, r0
mov r3, #16 mov r3, #16
vpush {d8-d15} vpush {d8-d15}
bl put_h264_qpel16_h_lowpass_neon bl put_h264_qpel16_h_lowpass_neon
ldrd r0, [r11], #8 ldrd r0, r1, [r11], #8
mov r3, r2 mov r3, r2
add r12, sp, #64 add r12, sp, #64
sub r1, r1, r2, lsl #1 sub r1, r1, r2, lsl #1
...@@ -1096,7 +1096,7 @@ T mov sp, r0 ...@@ -1096,7 +1096,7 @@ T mov sp, r0
vpush {d8-d15} vpush {d8-d15}
bl put_h264_qpel16_h_lowpass_neon_packed bl put_h264_qpel16_h_lowpass_neon_packed
mov r4, r0 mov r4, r0
ldrd r0, [r11], #8 ldrd r0, r1, [r11], #8
sub r1, r1, r2, lsl #1 sub r1, r1, r2, lsl #1
sub r1, r1, #2 sub r1, r1, #2
mov r3, r2 mov r3, r2
...@@ -1139,7 +1139,7 @@ T mov sp, r0 ...@@ -1139,7 +1139,7 @@ T mov sp, r0
vpush {d8-d15} vpush {d8-d15}
bl put_h264_qpel16_v_lowpass_neon_packed bl put_h264_qpel16_v_lowpass_neon_packed
mov r4, r0 mov r4, r0
ldrd r0, [r11], #8 ldrd r0, r1, [r11], #8
sub r1, r1, r3, lsl #1 sub r1, r1, r3, lsl #1
sub r1, r1, #2 sub r1, r1, #2
mov r2, r3 mov r2, r3
......
...@@ -61,9 +61,9 @@ function ff_dct_unquantize_h263_armv5te, export=1 ...@@ -61,9 +61,9 @@ function ff_dct_unquantize_h263_armv5te, export=1
mov ip, #0 mov ip, #0
subs r3, r3, #2 subs r3, r3, #2
ble 2f ble 2f
ldrd r4, [r0, #0] ldrd r4, r5, [r0, #0]
1: 1:
ldrd r6, [r0, #8] ldrd r6, r7, [r0, #8]
dequant_t r9, r4, r1, r2, r9 dequant_t r9, r4, r1, r2, r9
dequant_t lr, r5, r1, r2, lr dequant_t lr, r5, r1, r2, lr
...@@ -87,7 +87,7 @@ function ff_dct_unquantize_h263_armv5te, export=1 ...@@ -87,7 +87,7 @@ function ff_dct_unquantize_h263_armv5te, export=1
subs r3, r3, #8 subs r3, r3, #8
it gt it gt
ldrdgt r4, [r0, #0] /* load data early to avoid load/use pipeline stall */ ldrdgt r4, r5, [r0, #0] /* load data early to avoid load/use pipeline stall */
bgt 1b bgt 1b
adds r3, r3, #2 adds r3, r3, #2
......
...@@ -25,8 +25,7 @@ ...@@ -25,8 +25,7 @@
#include "libavutil/arm/asm.S" #include "libavutil/arm/asm.S"
/* useful constants for the algorithm, they are save in __constant_ptr__ at */ /* useful constants for the algorithm */
/* the end of the source code.*/
#define W1 22725 #define W1 22725
#define W2 21407 #define W2 21407
#define W3 19266 #define W3 19266
...@@ -36,16 +35,6 @@ ...@@ -36,16 +35,6 @@
#define W7 4520 #define W7 4520
#define MASK_MSHW 0xFFFF0000 #define MASK_MSHW 0xFFFF0000
/* offsets of the constants in the vector */
#define offW1 0
#define offW2 4
#define offW3 8
#define offW4 12
#define offW5 16
#define offW6 20
#define offW7 24
#define offMASK_MSHW 28
#define ROW_SHIFT 11 #define ROW_SHIFT 11
#define ROW_SHIFT2MSHW (16-11) #define ROW_SHIFT2MSHW (16-11)
#define COL_SHIFT 20 #define COL_SHIFT 20
...@@ -63,7 +52,6 @@ function ff_simple_idct_arm, export=1 ...@@ -63,7 +52,6 @@ function ff_simple_idct_arm, export=1
stmfd sp!, {r4-r11, r14} @ R14 is also called LR stmfd sp!, {r4-r11, r14} @ R14 is also called LR
@@ at this point, R0=block, other registers are free. @@ at this point, R0=block, other registers are free.
add r14, r0, #112 @ R14=&block[8*7], better start from the last row, and decrease the value until row=0, i.e. R12=block. add r14, r0, #112 @ R14=&block[8*7], better start from the last row, and decrease the value until row=0, i.e. R12=block.
adr r12, __constant_ptr__ @ R12=__constant_ptr__, the vector containing the constants, probably not necessary to reserve a register for it
@@ add 2 temporary variables in the stack: R0 and R14 @@ add 2 temporary variables in the stack: R0 and R14
sub sp, sp, #8 @ allow 2 local variables sub sp, sp, #8 @ allow 2 local variables
str r0, [sp, #0] @ save block in sp[0] str r0, [sp, #0] @ save block in sp[0]
...@@ -109,13 +97,13 @@ __b_evaluation: ...@@ -109,13 +97,13 @@ __b_evaluation:
@@ MAC16(b1, -W7, row[3]); @@ MAC16(b1, -W7, row[3]);
@@ MAC16(b2, -W1, row[3]); @@ MAC16(b2, -W1, row[3]);
@@ MAC16(b3, -W5, row[3]); @@ MAC16(b3, -W5, row[3]);
ldr r8, [r12, #offW1] @ R8=W1 ldr r8, =W1 @ R8=W1
mov r2, r2, asr #16 @ R2=ROWr16[3] mov r2, r2, asr #16 @ R2=ROWr16[3]
mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
ldr r9, [r12, #offW3] @ R9=W3 ldr r9, =W3 @ R9=W3
ldr r10, [r12, #offW5] @ R10=W5 ldr r10, =W5 @ R10=W5
mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
ldr r11, [r12, #offW7] @ R11=W7 ldr r11, =W7 @ R11=W7
mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
teq r2, #0 @ if null avoid muls teq r2, #0 @ if null avoid muls
...@@ -177,14 +165,14 @@ __a_evaluation: ...@@ -177,14 +165,14 @@ __a_evaluation:
@@ a2 = a0 - W6 * row[2]; @@ a2 = a0 - W6 * row[2];
@@ a3 = a0 - W2 * row[2]; @@ a3 = a0 - W2 * row[2];
@@ a0 = a0 + W2 * row[2]; @@ a0 = a0 + W2 * row[2];
ldr r9, [r12, #offW4] @ R9=W4 ldr r9, =W4 @ R9=W4
mul r6, r9, r6 @ R6=W4*ROWr16[0] mul r6, r9, r6 @ R6=W4*ROWr16[0]
ldr r10, [r12, #offW6] @ R10=W6 ldr r10, =W6 @ R10=W6
ldrsh r4, [r14, #4] @ R4=ROWr16[2] (a3 not defined yet) ldrsh r4, [r14, #4] @ R4=ROWr16[2] (a3 not defined yet)
add r6, r6, #ROW_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(ROW_SHIFT-1) (a0) add r6, r6, #ROW_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(ROW_SHIFT-1) (a0)
mul r11, r10, r4 @ R11=W6*ROWr16[2] mul r11, r10, r4 @ R11=W6*ROWr16[2]
ldr r8, [r12, #offW2] @ R8=W2 ldr r8, =W2 @ R8=W2
sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2) sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2)
@@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3]; @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3];
@@ if (temp != 0) {} @@ if (temp != 0) {}
...@@ -248,7 +236,7 @@ __end_a_evaluation: ...@@ -248,7 +236,7 @@ __end_a_evaluation:
add r9, r2, r1 @ R9=a1+b1 add r9, r2, r1 @ R9=a1+b1
@@ put 2 16 bits half-words in a 32bits word @@ put 2 16 bits half-words in a 32bits word
@@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only Little Endian compliant then!!!) @@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only Little Endian compliant then!!!)
ldr r10, [r12, #offMASK_MSHW] @ R10=0xFFFF0000 ldr r10, =MASK_MSHW @ R10=0xFFFF0000
and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a1+b1)<<5) and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a1+b1)<<5)
mvn r11, r10 @ R11= NOT R10= 0x0000FFFF mvn r11, r10 @ R11= NOT R10= 0x0000FFFF
and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a0+b0)>>11) and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a0+b0)>>11)
...@@ -319,13 +307,13 @@ __b_evaluation2: ...@@ -319,13 +307,13 @@ __b_evaluation2:
@@ MAC16(b1, -W7, col[8x3]); @@ MAC16(b1, -W7, col[8x3]);
@@ MAC16(b2, -W1, col[8x3]); @@ MAC16(b2, -W1, col[8x3]);
@@ MAC16(b3, -W5, col[8x3]); @@ MAC16(b3, -W5, col[8x3]);
ldr r8, [r12, #offW1] @ R8=W1 ldr r8, =W1 @ R8=W1
ldrsh r7, [r14, #16] ldrsh r7, [r14, #16]
mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
ldr r9, [r12, #offW3] @ R9=W3 ldr r9, =W3 @ R9=W3
ldr r10, [r12, #offW5] @ R10=W5 ldr r10, =W5 @ R10=W5
mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
ldr r11, [r12, #offW7] @ R11=W7 ldr r11, =W7 @ R11=W7
mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
ldrsh r2, [r14, #48] ldrsh r2, [r14, #48]
mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
...@@ -381,13 +369,13 @@ __a_evaluation2: ...@@ -381,13 +369,13 @@ __a_evaluation2:
@@ a3 = a0 - W2 * row[2]; @@ a3 = a0 - W2 * row[2];
@@ a0 = a0 + W2 * row[2]; @@ a0 = a0 + W2 * row[2];
ldrsh r6, [r14, #0] ldrsh r6, [r14, #0]
ldr r9, [r12, #offW4] @ R9=W4 ldr r9, =W4 @ R9=W4
mul r6, r9, r6 @ R6=W4*ROWr16[0] mul r6, r9, r6 @ R6=W4*ROWr16[0]
ldr r10, [r12, #offW6] @ R10=W6 ldr r10, =W6 @ R10=W6
ldrsh r4, [r14, #32] @ R4=ROWr16[2] (a3 not defined yet) ldrsh r4, [r14, #32] @ R4=ROWr16[2] (a3 not defined yet)
add r6, r6, #COL_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(COL_SHIFT-1) (a0) add r6, r6, #COL_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(COL_SHIFT-1) (a0)
mul r11, r10, r4 @ R11=W6*ROWr16[2] mul r11, r10, r4 @ R11=W6*ROWr16[2]
ldr r8, [r12, #offW2] @ R8=W2 ldr r8, =W2 @ R8=W2
add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1) add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1)
sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2) sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2)
mul r11, r8, r4 @ R11=W2*ROWr16[2] mul r11, r8, r4 @ R11=W2*ROWr16[2]
...@@ -489,15 +477,3 @@ __end_bef_a_evaluation: ...@@ -489,15 +477,3 @@ __end_bef_a_evaluation:
sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3) sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3)
add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0) add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0)
bal __end_a_evaluation bal __end_a_evaluation
.align
__constant_ptr__: @@ see #defines at the beginning of the source code for values.
.word W1
.word W2
.word W3
.word W4
.word W5
.word W6
.word W7
.word MASK_MSHW
...@@ -37,17 +37,11 @@ ...@@ -37,17 +37,11 @@
#define W26 (W2 | (W6 << 16)) #define W26 (W2 | (W6 << 16))
#define W57 (W5 | (W7 << 16)) #define W57 (W5 | (W7 << 16))
.text
.align
w13: .long W13
w26: .long W26
w57: .long W57
function idct_row_armv5te function idct_row_armv5te
str lr, [sp, #-4]! str lr, [sp, #-4]!
ldrd v1, [a1, #8] ldrd v1, v2, [a1, #8]
ldrd a3, [a1] /* a3 = row[1:0], a4 = row[3:2] */ ldrd a3, a4, [a1] /* a3 = row[1:0], a4 = row[3:2] */
orrs v1, v1, v2 orrs v1, v1, v2
itt eq itt eq
cmpeq v1, a4 cmpeq v1, a4
...@@ -58,7 +52,7 @@ function idct_row_armv5te ...@@ -58,7 +52,7 @@ function idct_row_armv5te
mov ip, #16384 mov ip, #16384
sub ip, ip, #1 /* ip = W4 */ sub ip, ip, #1 /* ip = W4 */
smlabb v1, ip, a3, v1 /* v1 = W4*row[0]+(1<<(RS-1)) */ smlabb v1, ip, a3, v1 /* v1 = W4*row[0]+(1<<(RS-1)) */
ldr ip, w26 /* ip = W2 | (W6 << 16) */ ldr ip, =W26 /* ip = W2 | (W6 << 16) */
smultb a2, ip, a4 smultb a2, ip, a4
smulbb lr, ip, a4 smulbb lr, ip, a4
add v2, v1, a2 add v2, v1, a2
...@@ -66,8 +60,8 @@ function idct_row_armv5te ...@@ -66,8 +60,8 @@ function idct_row_armv5te
sub v4, v1, lr sub v4, v1, lr
add v1, v1, lr add v1, v1, lr
ldr ip, w13 /* ip = W1 | (W3 << 16) */ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
ldr lr, w57 /* lr = W5 | (W7 << 16) */ ldr lr, =W57 /* lr = W5 | (W7 << 16) */
smulbt v5, ip, a3 smulbt v5, ip, a3
smultt v6, lr, a4 smultt v6, lr, a4
smlatt v5, ip, a4, v5 smlatt v5, ip, a4, v5
...@@ -78,7 +72,7 @@ function idct_row_armv5te ...@@ -78,7 +72,7 @@ function idct_row_armv5te
smultt fp, lr, a3 smultt fp, lr, a3
sub v7, v7, a2 sub v7, v7, a2
smulbt a2, lr, a4 smulbt a2, lr, a4
ldrd a3, [a1, #8] /* a3=row[5:4] a4=row[7:6] */ ldrd a3, a4, [a1, #8] /* a3=row[5:4] a4=row[7:6] */
sub fp, fp, a2 sub fp, fp, a2
orrs a2, a3, a4 orrs a2, a3, a4
...@@ -94,7 +88,7 @@ function idct_row_armv5te ...@@ -94,7 +88,7 @@ function idct_row_armv5te
smlatt v7, ip, a4, v7 smlatt v7, ip, a4, v7
sub fp, fp, a2 sub fp, fp, a2
ldr ip, w26 /* ip = W2 | (W6 << 16) */ ldr ip, =W26 /* ip = W2 | (W6 << 16) */
mov a2, #16384 mov a2, #16384
sub a2, a2, #1 /* a2 = W4 */ sub a2, a2, #1 /* a2 = W4 */
smulbb a2, a2, a3 /* a2 = W4*row[4] */ smulbb a2, a2, a3 /* a2 = W4*row[4] */
...@@ -121,7 +115,7 @@ function idct_row_armv5te ...@@ -121,7 +115,7 @@ function idct_row_armv5te
add a2, v4, fp add a2, v4, fp
mov a2, a2, lsr #11 mov a2, a2, lsr #11
add a4, a4, a2, lsl #16 add a4, a4, a2, lsl #16
strd a3, [a1] strd a3, a4, [a1]
sub a2, v4, fp sub a2, v4, fp
mov a3, a2, lsr #11 mov a3, a2, lsr #11
...@@ -135,7 +129,7 @@ function idct_row_armv5te ...@@ -135,7 +129,7 @@ function idct_row_armv5te
sub a2, v1, v5 sub a2, v1, v5
mov a2, a2, lsr #11 mov a2, a2, lsr #11
add a4, a4, a2, lsl #16 add a4, a4, a2, lsl #16
strd a3, [a1, #8] strd a3, a4, [a1, #8]
ldr pc, [sp], #4 ldr pc, [sp], #4
...@@ -144,8 +138,8 @@ row_dc_only: ...@@ -144,8 +138,8 @@ row_dc_only:
bic a3, a3, #0xe000 bic a3, a3, #0xe000
mov a3, a3, lsl #3 mov a3, a3, lsl #3
mov a4, a3 mov a4, a3
strd a3, [a1] strd a3, a4, [a1]
strd a3, [a1, #8] strd a3, a4, [a1, #8]
ldr pc, [sp], #4 ldr pc, [sp], #4
endfunc endfunc
...@@ -178,7 +172,7 @@ endfunc ...@@ -178,7 +172,7 @@ endfunc
sub v4, v2, a3 sub v4, v2, a3
sub v6, v2, a3 sub v6, v2, a3
add fp, v2, a3 add fp, v2, a3
ldr ip, w26 ldr ip, =W26
ldr a4, [a1, #(16*2)] ldr a4, [a1, #(16*2)]
add v2, v2, a3 add v2, v2, a3
...@@ -211,9 +205,9 @@ endfunc ...@@ -211,9 +205,9 @@ endfunc
stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp} stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp}
ldr ip, w13 ldr ip, =W13
ldr a4, [a1, #(16*1)] ldr a4, [a1, #(16*1)]
ldr lr, w57 ldr lr, =W57
smulbb v1, ip, a4 smulbb v1, ip, a4
smultb v3, ip, a4 smultb v3, ip, a4
smulbb v5, lr, a4 smulbb v5, lr, a4
......
...@@ -40,15 +40,6 @@ ...@@ -40,15 +40,6 @@
#define W46 (W4 | (W6 << 16)) #define W46 (W4 | (W6 << 16))
#define W57 (W5 | (W7 << 16)) #define W57 (W5 | (W7 << 16))
.text
.align
w13: .long W13
w26: .long W26
w42: .long W42
w42n: .long W42n
w46: .long W46
w57: .long W57
/* /*
Compute partial IDCT of single row. Compute partial IDCT of single row.
shift = left-shift amount shift = left-shift amount
...@@ -60,12 +51,12 @@ w57: .long W57 ...@@ -60,12 +51,12 @@ w57: .long W57
Output in registers r4--r11 Output in registers r4--r11
*/ */
.macro idct_row shift .macro idct_row shift
ldr lr, w46 /* lr = W4 | (W6 << 16) */ ldr lr, =W46 /* lr = W4 | (W6 << 16) */
mov r1, #(1<<(\shift-1)) mov r1, #(1<<(\shift-1))
smlad r4, r2, ip, r1 smlad r4, r2, ip, r1
smlsd r7, r2, ip, r1 smlsd r7, r2, ip, r1
ldr ip, w13 /* ip = W1 | (W3 << 16) */ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
ldr r10,w57 /* r10 = W5 | (W7 << 16) */ ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
smlad r5, r2, lr, r1 smlad r5, r2, lr, r1
smlsd r6, r2, lr, r1 smlsd r6, r2, lr, r1
...@@ -78,11 +69,11 @@ w57: .long W57 ...@@ -78,11 +69,11 @@ w57: .long W57
smlad r8, lr, r10,r8 /* B0 += W5*row[5] + W7*row[7] */ smlad r8, lr, r10,r8 /* B0 += W5*row[5] + W7*row[7] */
smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */ smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */
ldr r3, w42n /* r3 = -W4 | (-W2 << 16) */ ldr r3, =W42n /* r3 = -W4 | (-W2 << 16) */
smlad r10,lr, r2, r10 /* B2 += W7*row[5] + W3*row[7] */ smlad r10,lr, r2, r10 /* B2 += W7*row[5] + W3*row[7] */
ldr r2, [r0, #4] /* r2 = row[6,4] */ ldr r2, [r0, #4] /* r2 = row[6,4] */
smlsdx r11,lr, ip, r11 /* B3 += W3*row[5] - W1*row[7] */ smlsdx r11,lr, ip, r11 /* B3 += W3*row[5] - W1*row[7] */
ldr ip, w46 /* ip = W4 | (W6 << 16) */ ldr ip, =W46 /* ip = W4 | (W6 << 16) */
smlad r9, lr, r1, r9 /* B1 -= W1*row[5] + W5*row[7] */ smlad r9, lr, r1, r9 /* B1 -= W1*row[5] + W5*row[7] */
smlad r5, r2, r3, r5 /* A1 += -W4*row[4] - W2*row[6] */ smlad r5, r2, r3, r5 /* A1 += -W4*row[4] - W2*row[6] */
...@@ -101,12 +92,12 @@ w57: .long W57 ...@@ -101,12 +92,12 @@ w57: .long W57
Output in registers r4--r11 Output in registers r4--r11
*/ */
.macro idct_row4 shift .macro idct_row4 shift
ldr lr, w46 /* lr = W4 | (W6 << 16) */ ldr lr, =W46 /* lr = W4 | (W6 << 16) */
ldr r10,w57 /* r10 = W5 | (W7 << 16) */ ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
mov r1, #(1<<(\shift-1)) mov r1, #(1<<(\shift-1))
smlad r4, r2, ip, r1 smlad r4, r2, ip, r1
smlsd r7, r2, ip, r1 smlsd r7, r2, ip, r1
ldr ip, w13 /* ip = W1 | (W3 << 16) */ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
smlad r5, r2, lr, r1 smlad r5, r2, lr, r1
smlsd r6, r2, lr, r1 smlsd r6, r2, lr, r1
smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */ smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */
...@@ -205,7 +196,7 @@ function idct_row_armv6 ...@@ -205,7 +196,7 @@ function idct_row_armv6
cmpeq lr, r2, lsr #16 cmpeq lr, r2, lsr #16
beq 1f beq 1f
push {r1} push {r1}
ldr ip, w42 /* ip = W4 | (W2 << 16) */ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
cmp lr, #0 cmp lr, #0
beq 2f beq 2f
...@@ -249,7 +240,7 @@ function idct_col_armv6 ...@@ -249,7 +240,7 @@ function idct_col_armv6
push {r1, lr} push {r1, lr}
ldr r2, [r0] /* r2 = row[2,0] */ ldr r2, [r0] /* r2 = row[2,0] */
ldr ip, w42 /* ip = W4 | (W2 << 16) */ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */ ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT idct_row COL_SHIFT
pop {r1} pop {r1}
...@@ -277,7 +268,7 @@ function idct_col_put_armv6 ...@@ -277,7 +268,7 @@ function idct_col_put_armv6
push {r1, r2, lr} push {r1, r2, lr}
ldr r2, [r0] /* r2 = row[2,0] */ ldr r2, [r0] /* r2 = row[2,0] */
ldr ip, w42 /* ip = W4 | (W2 << 16) */ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */ ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT idct_row COL_SHIFT
pop {r1, r2} pop {r1, r2}
...@@ -307,7 +298,7 @@ function idct_col_add_armv6 ...@@ -307,7 +298,7 @@ function idct_col_add_armv6
push {r1, r2, lr} push {r1, r2, lr}
ldr r2, [r0] /* r2 = row[2,0] */ ldr r2, [r0] /* r2 = row[2,0] */
ldr ip, w42 /* ip = W4 | (W2 << 16) */ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */ ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT idct_row COL_SHIFT
pop {r1, r2} pop {r1, r2}
......
...@@ -159,8 +159,8 @@ function idct_col4_neon ...@@ -159,8 +159,8 @@ function idct_col4_neon
vmull.s16 q15, d30, w4 /* q15 = W4*(col[0]+(1<<COL_SHIFT-1)/W4)*/ vmull.s16 q15, d30, w4 /* q15 = W4*(col[0]+(1<<COL_SHIFT-1)/W4)*/
vld1.64 {d8}, [r2,:64], ip /* d5 = col[3] */ vld1.64 {d8}, [r2,:64], ip /* d5 = col[3] */
ldrd r4, [r2] ldrd r4, r5, [r2]
ldrd r6, [r2, #16] ldrd r6, r7, [r2, #16]
orrs r4, r4, r5 orrs r4, r4, r5
idct_col4_top idct_col4_top
...@@ -176,7 +176,7 @@ function idct_col4_neon ...@@ -176,7 +176,7 @@ function idct_col4_neon
vadd.i32 q14, q14, q7 vadd.i32 q14, q14, q7
1: orrs r6, r6, r7 1: orrs r6, r6, r7
ldrd r4, [r2, #16] ldrd r4, r5, [r2, #16]
it eq it eq
addeq r2, r2, #16 addeq r2, r2, #16
beq 2f beq 2f
...@@ -188,7 +188,7 @@ function idct_col4_neon ...@@ -188,7 +188,7 @@ function idct_col4_neon
vmlal.s16 q6, d5, w3 /* q6 += W3 * col[5] */ vmlal.s16 q6, d5, w3 /* q6 += W3 * col[5] */
2: orrs r4, r4, r5 2: orrs r4, r4, r5
ldrd r4, [r2, #16] ldrd r4, r5, [r2, #16]
it eq it eq
addeq r2, r2, #16 addeq r2, r2, #16
beq 3f beq 3f
......
This diff is collapsed.
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
#ifndef AVCODEC_DCA_H #ifndef AVCODEC_DCA_H
#define AVCODEC_DCA_H #define AVCODEC_DCA_H
#include <stdint.h>
/** DCA syncwords, also used for bitstream type detection */ /** DCA syncwords, also used for bitstream type detection */
#define DCA_MARKER_RAW_BE 0x7FFE8001 #define DCA_MARKER_RAW_BE 0x7FFE8001
#define DCA_MARKER_RAW_LE 0xFE7F0180 #define DCA_MARKER_RAW_LE 0xFE7F0180
...@@ -34,4 +36,6 @@ ...@@ -34,4 +36,6 @@
/** DCA-HD specific block starts with this marker. */ /** DCA-HD specific block starts with this marker. */
#define DCA_HD_MARKER 0x64582025 #define DCA_HD_MARKER 0x64582025
extern const uint32_t avpriv_dca_sample_rates[16];
#endif /* AVCODEC_DCA_H */ #endif /* AVCODEC_DCA_H */
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include "parser.h" #include "parser.h"
#include "dca.h" #include "dca.h"
#include "dcadata.h"
#include "dca_parser.h" #include "dca_parser.h"
#include "get_bits.h" #include "get_bits.h"
#include "put_bits.h" #include "put_bits.h"
...@@ -160,7 +159,7 @@ static int dca_parse_params(const uint8_t *buf, int buf_size, int *duration, ...@@ -160,7 +159,7 @@ static int dca_parse_params(const uint8_t *buf, int buf_size, int *duration,
skip_bits(&gb, 20); skip_bits(&gb, 20);
sr_code = get_bits(&gb, 4); sr_code = get_bits(&gb, 4);
*sample_rate = dca_sample_rates[sr_code]; *sample_rate = avpriv_dca_sample_rates[sr_code];
if (*sample_rate == 0) if (*sample_rate == 0)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
......
...@@ -28,12 +28,6 @@ ...@@ -28,12 +28,6 @@
/* Generic tables */ /* Generic tables */
static const uint32_t dca_sample_rates[16] =
{
0, 8000, 16000, 32000, 0, 0, 11025, 22050, 44100, 0, 0,
12000, 24000, 48000, 96000, 192000
};
static const uint32_t dca_bit_rates[32] = static const uint32_t dca_bit_rates[32] =
{ {
32000, 56000, 64000, 96000, 112000, 128000, 32000, 56000, 64000, 96000, 112000, 128000,
......
This diff is collapsed.
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include "put_bits.h" #include "put_bits.h"
#include "dcaenc.h" #include "dcaenc.h"
#include "dcadata.h" #include "dcadata.h"
#include "dca.h"
#undef NDEBUG #undef NDEBUG
...@@ -569,13 +570,13 @@ static int encode_init(AVCodecContext *avctx) ...@@ -569,13 +570,13 @@ static int encode_init(AVCodecContext *avctx)
} }
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
if (dca_sample_rates[i] && (dca_sample_rates[i] == avctx->sample_rate)) if (avpriv_dca_sample_rates[i] && (avpriv_dca_sample_rates[i] == avctx->sample_rate))
break; break;
} }
if (i == 16) { if (i == 16) {
av_log(avctx, AV_LOG_ERROR, "Sample rate %iHz not supported, only ", avctx->sample_rate); av_log(avctx, AV_LOG_ERROR, "Sample rate %iHz not supported, only ", avctx->sample_rate);
for (i = 0; i < 16; i++) for (i = 0; i < 16; i++)
av_log(avctx, AV_LOG_ERROR, "%d, ", dca_sample_rates[i]); av_log(avctx, AV_LOG_ERROR, "%d, ", avpriv_dca_sample_rates[i]);
av_log(avctx, AV_LOG_ERROR, "supported.\n"); av_log(avctx, AV_LOG_ERROR, "supported.\n");
return -1; return -1;
} }
......
...@@ -85,7 +85,7 @@ static const struct algo fdct_tab[] = { ...@@ -85,7 +85,7 @@ static const struct algo fdct_tab[] = {
{ "IJG-AAN-INT", ff_fdct_ifast, SCALE_PERM }, { "IJG-AAN-INT", ff_fdct_ifast, SCALE_PERM },
{ "IJG-LLM-INT", ff_jpeg_fdct_islow_8, NO_PERM }, { "IJG-LLM-INT", ff_jpeg_fdct_islow_8, NO_PERM },
#if HAVE_MMX #if HAVE_MMX && HAVE_INLINE_ASM
{ "MMX", ff_fdct_mmx, NO_PERM, AV_CPU_FLAG_MMX }, { "MMX", ff_fdct_mmx, NO_PERM, AV_CPU_FLAG_MMX },
{ "MMX2", ff_fdct_mmx2, NO_PERM, AV_CPU_FLAG_MMX2 }, { "MMX2", ff_fdct_mmx2, NO_PERM, AV_CPU_FLAG_MMX2 },
{ "SSE2", ff_fdct_sse2, NO_PERM, AV_CPU_FLAG_SSE2 }, { "SSE2", ff_fdct_sse2, NO_PERM, AV_CPU_FLAG_SSE2 },
......
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
#include "dsputil_mmx.h" #include "dsputil_mmx.h"
#if HAVE_INLINE_ASM
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size) static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{ {
__asm__ volatile( __asm__ volatile(
...@@ -323,8 +325,6 @@ static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int ...@@ -323,8 +325,6 @@ static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int
return tmp; return tmp;
} }
int ff_sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) { static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
int tmp; int tmp;
__asm__ volatile ( __asm__ volatile (
...@@ -926,17 +926,6 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, c ...@@ -926,17 +926,6 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, c
"paddusw "#t", "#a" \n\t"\ "paddusw "#t", "#a" \n\t"\
"movd "#a", "#dst" \n\t"\ "movd "#a", "#dst" \n\t"\
#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu (void *s, uint8_t *src1, uint8_t *src2, \
int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
int stride, int h);
hadamard_func(mmx)
hadamard_func(mmx2)
hadamard_func(sse2)
hadamard_func(ssse3)
#define DCT_SAD4(m,mm,o)\ #define DCT_SAD4(m,mm,o)\
"mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\ "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
"mov"#m" "#o"+16(%1), "#mm"3 \n\t"\ "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
...@@ -1095,10 +1084,26 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si ...@@ -1095,10 +1084,26 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si
#undef PHADDD #undef PHADDD
#endif //HAVE_SSSE3 #endif //HAVE_SSSE3
#endif /* HAVE_INLINE_ASM */
int ff_sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);
#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu (void *s, uint8_t *src1, uint8_t *src2, \
int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
int stride, int h);
hadamard_func(mmx)
hadamard_func(mmx2)
hadamard_func(sse2)
hadamard_func(ssse3)
void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
{ {
int mm_flags = av_get_cpu_flags(); int mm_flags = av_get_cpu_flags();
#if HAVE_INLINE_ASM
int bit_depth = avctx->bits_per_raw_sample; int bit_depth = avctx->bits_per_raw_sample;
if (mm_flags & AV_CPU_FLAG_MMX) { if (mm_flags & AV_CPU_FLAG_MMX) {
...@@ -1122,11 +1127,6 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) ...@@ -1122,11 +1127,6 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->diff_bytes= diff_bytes_mmx; c->diff_bytes= diff_bytes_mmx;
c->sum_abs_dctelem= sum_abs_dctelem_mmx; c->sum_abs_dctelem= sum_abs_dctelem_mmx;
#if HAVE_YASM
c->hadamard8_diff[0]= ff_hadamard8_diff16_mmx;
c->hadamard8_diff[1]= ff_hadamard8_diff_mmx;
#endif
c->pix_norm1 = pix_norm1_mmx; c->pix_norm1 = pix_norm1_mmx;
c->sse[0] = sse16_mmx; c->sse[0] = sse16_mmx;
c->sse[1] = sse8_mmx; c->sse[1] = sse8_mmx;
...@@ -1147,10 +1147,6 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) ...@@ -1147,10 +1147,6 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
if (mm_flags & AV_CPU_FLAG_MMX2) { if (mm_flags & AV_CPU_FLAG_MMX2) {
#if HAVE_YASM
c->hadamard8_diff[0]= ff_hadamard8_diff16_mmx2;
c->hadamard8_diff[1]= ff_hadamard8_diff_mmx2;
#endif
c->sum_abs_dctelem= sum_abs_dctelem_mmx2; c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
c->vsad[4]= vsad_intra16_mmx2; c->vsad[4]= vsad_intra16_mmx2;
...@@ -1165,13 +1161,6 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) ...@@ -1165,13 +1161,6 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
if (bit_depth <= 8) if (bit_depth <= 8)
c->get_pixels = get_pixels_sse2; c->get_pixels = get_pixels_sse2;
c->sum_abs_dctelem= sum_abs_dctelem_sse2; c->sum_abs_dctelem= sum_abs_dctelem_sse2;
#if HAVE_YASM
c->sse[0] = ff_sse16_sse2;
#if HAVE_ALIGNED_STACK
c->hadamard8_diff[0]= ff_hadamard8_diff16_sse2;
c->hadamard8_diff[1]= ff_hadamard8_diff_sse2;
#endif
#endif
} }
#if HAVE_SSSE3 #if HAVE_SSSE3
...@@ -1181,10 +1170,6 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) ...@@ -1181,10 +1170,6 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
} }
c->add_8x8basis= add_8x8basis_ssse3; c->add_8x8basis= add_8x8basis_ssse3;
c->sum_abs_dctelem= sum_abs_dctelem_ssse3; c->sum_abs_dctelem= sum_abs_dctelem_ssse3;
#if HAVE_YASM && HAVE_ALIGNED_STACK
c->hadamard8_diff[0]= ff_hadamard8_diff16_ssse3;
c->hadamard8_diff[1]= ff_hadamard8_diff_ssse3;
#endif
} }
#endif #endif
...@@ -1195,6 +1180,35 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) ...@@ -1195,6 +1180,35 @@ void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->add_8x8basis= add_8x8basis_3dnow; c->add_8x8basis= add_8x8basis_3dnow;
} }
} }
#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM
if (mm_flags & AV_CPU_FLAG_MMX) {
c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
if (mm_flags & AV_CPU_FLAG_MMX2) {
c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx2;
c->hadamard8_diff[1] = ff_hadamard8_diff_mmx2;
}
if (mm_flags & AV_CPU_FLAG_SSE2){
c->sse[0] = ff_sse16_sse2;
#if HAVE_ALIGNED_STACK
c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
}
#if HAVE_SSSE3 && HAVE_ALIGNED_STACK
if (mm_flags & AV_CPU_FLAG_SSSE3) {
c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
}
#endif
}
#endif /* HAVE_YASM */
ff_dsputil_init_pix_mmx(c, avctx); ff_dsputil_init_pix_mmx(c, avctx);
} }
...@@ -34,6 +34,8 @@ ...@@ -34,6 +34,8 @@
#include "libavutil/x86_cpu.h" #include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h" #include "libavcodec/dsputil.h"
#if HAVE_INLINE_ASM
////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////
// //
// constants for the forward DCT // constants for the forward DCT
...@@ -579,3 +581,5 @@ void ff_fdct_sse2(int16_t *block) ...@@ -579,3 +581,5 @@ void ff_fdct_sse2(int16_t *block)
fdct_col_sse2(block, block1, 0); fdct_col_sse2(block, block1, 0);
fdct_row_sse2(block1, block); fdct_row_sse2(block1, block);
} }
#endif /* HAVE_INLINE_ASM */
...@@ -25,6 +25,7 @@ av_cold void ff_fft_init_mmx(FFTContext *s) ...@@ -25,6 +25,7 @@ av_cold void ff_fft_init_mmx(FFTContext *s)
{ {
#if HAVE_YASM #if HAVE_YASM
int has_vectors = av_get_cpu_flags(); int has_vectors = av_get_cpu_flags();
#if ARCH_X86_32
if (has_vectors & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW) { if (has_vectors & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW) {
/* 3DNow! for K6-2/3 */ /* 3DNow! for K6-2/3 */
s->imdct_calc = ff_imdct_calc_3dnow; s->imdct_calc = ff_imdct_calc_3dnow;
...@@ -37,6 +38,7 @@ av_cold void ff_fft_init_mmx(FFTContext *s) ...@@ -37,6 +38,7 @@ av_cold void ff_fft_init_mmx(FFTContext *s)
s->imdct_half = ff_imdct_half_3dnow2; s->imdct_half = ff_imdct_half_3dnow2;
s->fft_calc = ff_fft_calc_3dnow2; s->fft_calc = ff_fft_calc_3dnow2;
} }
#endif
if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE) { if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE) {
/* SSE for P3/P4/K8 */ /* SSE for P3/P4/K8 */
s->imdct_calc = ff_imdct_calc_sse; s->imdct_calc = ff_imdct_calc_sse;
......
...@@ -23,8 +23,6 @@ ...@@ -23,8 +23,6 @@
#include "libavcodec/h264dsp.h" #include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h" #include "dsputil_mmx.h"
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
/***********************************/ /***********************************/
/* IDCT */ /* IDCT */
#define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \ #define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
......
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
#include "libavcodec/dsputil.h" #include "libavcodec/dsputil.h"
#include "dsputil_mmx.h" #include "dsputil_mmx.h"
#if HAVE_INLINE_ASM
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3]={ DECLARE_ASM_CONST(8, uint64_t, round_tab)[3]={
0x0000000000000000ULL, 0x0000000000000000ULL,
0x0001000100010001ULL, 0x0001000100010001ULL,
...@@ -422,8 +424,11 @@ static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, ...@@ -422,8 +424,11 @@ static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride,
PIX_SAD(mmx) PIX_SAD(mmx)
PIX_SAD(mmx2) PIX_SAD(mmx2)
#endif /* HAVE_INLINE_ASM */
void ff_dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx) void ff_dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{ {
#if HAVE_INLINE_ASM
int mm_flags = av_get_cpu_flags(); int mm_flags = av_get_cpu_flags();
if (mm_flags & AV_CPU_FLAG_MMX) { if (mm_flags & AV_CPU_FLAG_MMX) {
...@@ -458,4 +463,5 @@ void ff_dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx) ...@@ -458,4 +463,5 @@ void ff_dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) { if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) {
c->sad[0]= sad16_sse2; c->sad[0]= sad16_sse2;
} }
#endif /* HAVE_INLINE_ASM */
} }
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#include "libavcodec/mpegvideo.h" #include "libavcodec/mpegvideo.h"
#include "dsputil_mmx.h" #include "dsputil_mmx.h"
#if HAVE_INLINE_ASM
extern uint16_t ff_inv_zigzag_direct16[64]; extern uint16_t ff_inv_zigzag_direct16[64];
...@@ -626,8 +628,11 @@ static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){ ...@@ -626,8 +628,11 @@ static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){
#include "mpegvideo_mmx_template.c" #include "mpegvideo_mmx_template.c"
#endif #endif
#endif /* HAVE_INLINE_ASM */
void ff_MPV_common_init_mmx(MpegEncContext *s) void ff_MPV_common_init_mmx(MpegEncContext *s)
{ {
#if HAVE_INLINE_ASM
int mm_flags = av_get_cpu_flags(); int mm_flags = av_get_cpu_flags();
if (mm_flags & AV_CPU_FLAG_MMX) { if (mm_flags & AV_CPU_FLAG_MMX) {
...@@ -662,4 +667,5 @@ void ff_MPV_common_init_mmx(MpegEncContext *s) ...@@ -662,4 +667,5 @@ void ff_MPV_common_init_mmx(MpegEncContext *s)
} }
} }
} }
#endif /* HAVE_INLINE_ASM */
} }
...@@ -49,7 +49,6 @@ ...@@ -49,7 +49,6 @@
#include "spdif.h" #include "spdif.h"
#include "libavcodec/ac3.h" #include "libavcodec/ac3.h"
#include "libavcodec/dca.h" #include "libavcodec/dca.h"
#include "libavcodec/dcadata.h"
#include "libavcodec/aacadtsdec.h" #include "libavcodec/aacadtsdec.h"
#include "libavutil/opt.h" #include "libavutil/opt.h"
...@@ -253,7 +252,7 @@ static int spdif_header_dts(AVFormatContext *s, AVPacket *pkt) ...@@ -253,7 +252,7 @@ static int spdif_header_dts(AVFormatContext *s, AVPacket *pkt)
case DCA_MARKER_RAW_BE: case DCA_MARKER_RAW_BE:
blocks = (AV_RB16(pkt->data + 4) >> 2) & 0x7f; blocks = (AV_RB16(pkt->data + 4) >> 2) & 0x7f;
core_size = ((AV_RB24(pkt->data + 5) >> 4) & 0x3fff) + 1; core_size = ((AV_RB24(pkt->data + 5) >> 4) & 0x3fff) + 1;
sample_rate = dca_sample_rates[(pkt->data[8] >> 2) & 0x0f]; sample_rate = avpriv_dca_sample_rates[(pkt->data[8] >> 2) & 0x0f];
break; break;
case DCA_MARKER_RAW_LE: case DCA_MARKER_RAW_LE:
blocks = (AV_RL16(pkt->data + 4) >> 2) & 0x7f; blocks = (AV_RL16(pkt->data + 4) >> 2) & 0x7f;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment