Commit b315042c authored by Diego Biurrun's avatar Diego Biurrun

Remove libpostproc.

This library does not fit into Libav as a whole and its code is just a
maintenance burden.  Furthermore, it is now available as an external project,
which obviates any need to keep it around.

URL: http://git.videolan.org/?p=libpostproc.git
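For downstream projects, postprocessing support is still available from the repository above; a minimal, hypothetical sketch of building against a system-installed copy (assuming it keeps installing the postprocess.h header and the libpostproc pkg-config file that this tree used to provide):

/* check_pp.c -- hypothetical consumer of the external libpostproc.
 * Build (assumption): cc check_pp.c $(pkg-config --cflags --libs libpostproc) */
#include <stdio.h>
#include <libpostproc/postprocess.h>

int main(void)
{
    /* postproc_version() and postproc_license() are part of the public API */
    printf("libpostproc %u\nlicense: %s\n", postproc_version(), postproc_license());
    return 0;
}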
parent 01cb62ab
......@@ -24,7 +24,6 @@ libavfilter/libavfilter*
libavformat/libavformat*
libavutil/avconfig.h
libavutil/libavutil*
libpostproc/libpostproc*
libswscale/libswscale*
tests/audiogen
tests/base64
......
......@@ -9,6 +9,7 @@ version <next>:
- CDXL demuxer and decoder
- Apple ProRes encoder
- Sun Rasterfile Encoder
- remove libpostproc
version 0.8:
......
......@@ -13,7 +13,6 @@ configure to activate them. In this case, Libav's license changes to GPL v2+.
Specifically, the GPL parts of Libav are
- libpostproc
- optional x86 optimizations in the files
libavcodec/x86/idct_mmx.c
- the X11 grabber in libavdevice/x11grab.c
......
......@@ -20,7 +20,7 @@ $(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR))))
$(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_PATH)/%=%)); $(INSTALL))
endif
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale
ALLFFLIBS = avcodec avdevice avfilter avformat avutil swscale
IFLAGS := -I. -I$(SRC_PATH)
CPPFLAGS := $(IFLAGS) $(CPPFLAGS)
......@@ -72,7 +72,6 @@ FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat
FFLIBS-$(CONFIG_AVCODEC) += avcodec
FFLIBS-$(CONFIG_POSTPROC) += postproc
FFLIBS-$(CONFIG_SWSCALE) += swscale
FFLIBS := avutil
......
......@@ -33,9 +33,6 @@
#include "libavfilter/avfilter.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#if CONFIG_POSTPROC
#include "libpostproc/postprocess.h"
#endif
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
......@@ -496,9 +493,6 @@ static void print_all_libs_info(int flags, int level)
PRINT_LIB_INFO(avdevice, AVDEVICE, flags, level);
PRINT_LIB_INFO(avfilter, AVFILTER, flags, level);
PRINT_LIB_INFO(swscale, SWSCALE, flags, level);
#if CONFIG_POSTPROC
PRINT_LIB_INFO(postproc, POSTPROC, flags, level);
#endif
}
void show_banner(void)
......
......@@ -88,7 +88,6 @@ Configuration options:
--disable-avcodec disable libavcodec build
--disable-avformat disable libavformat build
--disable-swscale disable libswscale build
--enable-postproc enable libpostproc build (deprecated) [no]
--disable-avfilter disable video filter support [no]
--disable-pthreads disable pthreads [auto]
--disable-w32threads disable Win32 threads [auto]
......@@ -974,7 +973,6 @@ CONFIG_LIST="
nonfree
openssl
pic
postproc
rdft
rtpdec
runtime_cpudetect
......@@ -1532,7 +1530,6 @@ yadif_filter_deps="gpl"
# libraries
avdevice_deps="avcodec avformat"
avformat_deps="avcodec"
postproc_deps="gpl"
# programs
avconv_deps="avcodec avformat swscale"
......@@ -3066,7 +3063,7 @@ enabled extra_warnings && check_cflags -Winline
# add some linker flags
check_ldflags -Wl,--warn-common
check_ldflags -Wl,-rpath-link=libpostproc:libswscale:libavfilter:libavdevice:libavformat:libavcodec:libavutil
check_ldflags -Wl,-rpath-link=libswscale:libavfilter:libavdevice:libavformat:libavcodec:libavutil
test_ldflags -Wl,-Bsymbolic && append SHFLAGS -Wl,-Bsymbolic
enabled xmm_clobber_test && \
......@@ -3222,7 +3219,6 @@ echo "optimize for size ${small-no}"
echo "optimizations ${optimizations-no}"
echo "static ${static-no}"
echo "shared ${shared-no}"
echo "postprocessing support ${postproc-no}"
echo "new filter support ${avfilter-no}"
echo "network support ${network-no}"
echo "threading support ${thread_type-no}"
......@@ -3374,7 +3370,6 @@ get_version LIBAVDEVICE libavdevice/avdevice.h
get_version LIBAVFILTER libavfilter/version.h
get_version LIBAVFORMAT libavformat/version.h
get_version LIBAVUTIL libavutil/avutil.h
get_version LIBPOSTPROC libpostproc/postprocess.h
get_version LIBSWSCALE libswscale/swscale.h
cat > $TMPH <<EOF
......@@ -3493,5 +3488,4 @@ pkgconfig_generate libavcodec "Libav codec library" "$LIBAVCODEC_VERSION" "$extr
pkgconfig_generate libavformat "Libav container format library" "$LIBAVFORMAT_VERSION" "$extralibs" "libavcodec = $LIBAVCODEC_VERSION"
pkgconfig_generate libavdevice "Libav device handling library" "$LIBAVDEVICE_VERSION" "$extralibs" "libavformat = $LIBAVFORMAT_VERSION"
pkgconfig_generate libavfilter "Libav video filtering library" "$LIBAVFILTER_VERSION" "$extralibs"
pkgconfig_generate libpostproc "Libav postprocessing library" "$LIBPOSTPROC_VERSION" "" "libavutil = $LIBAVUTIL_VERSION"
pkgconfig_generate libswscale "Libav image rescaling library" "$LIBSWSCALE_VERSION" "$LIBM" "libavutil = $LIBAVUTIL_VERSION"
......@@ -6,7 +6,6 @@ libavcodec: 2012-01-27
libavdevice: 2011-04-18
libavfilter: 2011-04-18
libavformat: 2012-01-27
libpostproc: 2011-04-18 (deprecated, to be removed later)
libswscale: 2011-06-20
libavutil: 2011-04-18
......
......@@ -39,7 +39,6 @@
* @li @ref libavf "libavformat" I/O and muxing/demuxing library
* @li @ref lavd "libavdevice" special devices muxing/demuxing library
* @li @ref lavu "libavutil" common utility library
* @li @subpage libpostproc post processing library
* @li @subpage libswscale color conversion and scaling library
*
*/
......
NAME = postproc
FFLIBS = avutil
HEADERS = postprocess.h
OBJS = postprocess.o
LIBPOSTPROC_$MAJOR {
global: postproc_*; pp_*;
local: *;
};
/*
* Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
*
* AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* postprocessing.
*/
/*
C MMX MMX2 3DNow AltiVec
isVertDC Ec Ec Ec
isVertMinMaxOk Ec Ec Ec
doVertLowPass E e e Ec
doVertDefFilter Ec Ec e e Ec
isHorizDC Ec Ec Ec
isHorizMinMaxOk a E Ec
doHorizLowPass E e e Ec
doHorizDefFilter Ec Ec e e Ec
do_a_deblock Ec E Ec E
deRing E e e* Ecp
Vertical RKAlgo1 E a a
Horizontal RKAlgo1 a a
Vertical X1# a E E
Horizontal X1# a E E
LinIpolDeinterlace e E E*
CubicIpolDeinterlace a e e*
LinBlendDeinterlace e E E*
MedianDeinterlace# E Ec Ec
TempDeNoiser# E e e Ec
* I do not have a 3DNow! CPU -> it is untested, but no one said it does not work so it seems to work
# more or less self-invented filters, so the exactness is not too meaningful
E = Exact implementation
e = almost exact implementation (slightly different rounding,...)
a = alternative / approximate impl
c = checked against the other implementations (-vo md5)
p = partially optimized, still some work to do
*/
/*
TODO:
reduce the time wasted on the mem transfer
unroll stuff if instructions depend too much on the prior one
move YScale thing to the end instead of fixing QP
write a faster and higher quality deblocking filter :)
make the mainloop more flexible (variable number of blocks at once;
the if/else stuff per block is slowing things down)
compare the quality & speed of all filters
split this huge file
optimize c versions
try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
...
*/
//Changelog: use git log
#include "config.h"
#include "libavutil/avutil.h"
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#undef HAVE_MMX2
//#define HAVE_AMD3DNOW
//#undef HAVE_MMX
//#undef ARCH_X86
//#define DEBUG_BRIGHTNESS
#include "postprocess.h"
#include "postprocess_internal.h"
#include "libavutil/avstring.h"
unsigned postproc_version(void)
{
return LIBPOSTPROC_VERSION_INT;
}
const char *postproc_configuration(void)
{
return LIBAV_CONFIGURATION;
}
const char *postproc_license(void)
{
#define LICENSE_PREFIX "libpostproc license: "
return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#define GET_MODE_BUFFER_SIZE 500
#define OPTIONS_ARRAY_SIZE 10
#define BLOCK_SIZE 8
#define TEMP_STRIDE 8
//#define NUM_BLOCKS_AT_ONCE 16 //not used yet
#if ARCH_X86
DECLARE_ASM_CONST(8, uint64_t, w05)= 0x0005000500050005LL;
DECLARE_ASM_CONST(8, uint64_t, w04)= 0x0004000400040004LL;
DECLARE_ASM_CONST(8, uint64_t, w20)= 0x0020002000200020LL;
DECLARE_ASM_CONST(8, uint64_t, b00)= 0x0000000000000000LL;
DECLARE_ASM_CONST(8, uint64_t, b01)= 0x0101010101010101LL;
DECLARE_ASM_CONST(8, uint64_t, b02)= 0x0202020202020202LL;
DECLARE_ASM_CONST(8, uint64_t, b08)= 0x0808080808080808LL;
DECLARE_ASM_CONST(8, uint64_t, b80)= 0x8080808080808080LL;
#endif
DECLARE_ASM_CONST(8, int, deringThreshold)= 20;
static struct PPFilter filters[]=
{
{"hb", "hdeblock", 1, 1, 3, H_DEBLOCK},
{"vb", "vdeblock", 1, 2, 4, V_DEBLOCK},
/* {"hr", "rkhdeblock", 1, 1, 3, H_RK1_FILTER},
{"vr", "rkvdeblock", 1, 2, 4, V_RK1_FILTER},*/
{"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
{"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
{"ha", "ahdeblock", 1, 1, 3, H_A_DEBLOCK},
{"va", "avdeblock", 1, 2, 4, V_A_DEBLOCK},
{"dr", "dering", 1, 5, 6, DERING},
{"al", "autolevels", 0, 1, 2, LEVEL_FIX},
{"lb", "linblenddeint", 1, 1, 4, LINEAR_BLEND_DEINT_FILTER},
{"li", "linipoldeint", 1, 1, 4, LINEAR_IPOL_DEINT_FILTER},
{"ci", "cubicipoldeint", 1, 1, 4, CUBIC_IPOL_DEINT_FILTER},
{"md", "mediandeint", 1, 1, 4, MEDIAN_DEINT_FILTER},
{"fd", "ffmpegdeint", 1, 1, 4, FFMPEG_DEINT_FILTER},
{"l5", "lowpass5", 1, 1, 4, LOWPASS5_DEINT_FILTER},
{"tn", "tmpnoise", 1, 7, 8, TEMP_NOISE_FILTER},
{"fq", "forcequant", 1, 0, 0, FORCE_QUANT},
{NULL, NULL,0,0,0,0} //End Marker
};
static const char *replaceTable[]=
{
"default", "hb:a,vb:a,dr:a",
"de", "hb:a,vb:a,dr:a",
"fast", "h1:a,v1:a,dr:a",
"fa", "h1:a,v1:a,dr:a",
"ac", "ha:a:128:7,va:a,dr:a",
NULL //End Marker
};
#if ARCH_X86
static inline void prefetchnta(void *p)
{
__asm__ volatile( "prefetchnta (%0)\n\t"
: : "r" (p)
);
}
static inline void prefetcht0(void *p)
{
__asm__ volatile( "prefetcht0 (%0)\n\t"
: : "r" (p)
);
}
static inline void prefetcht1(void *p)
{
__asm__ volatile( "prefetcht1 (%0)\n\t"
: : "r" (p)
);
}
static inline void prefetcht2(void *p)
{
__asm__ volatile( "prefetcht2 (%0)\n\t"
: : "r" (p)
);
}
#endif
/* The horizontal functions exist only in C because the MMX
* code is faster with vertical filters and transposing. */
/**
* Check if the given 8x8 Block is mostly "flat"
*/
static inline int isHorizDC_C(uint8_t src[], int stride, PPContext *c)
{
int numEq= 0;
int y;
const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
const int dcThreshold= dcOffset*2 + 1;
for(y=0; y<BLOCK_SIZE; y++){
if(((unsigned)(src[0] - src[1] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[1] - src[2] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[2] - src[3] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[3] - src[4] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[4] - src[5] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[5] - src[6] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[6] - src[7] + dcOffset)) < dcThreshold) numEq++;
src+= stride;
}
return numEq > c->ppMode.flatnessThreshold;
}
/**
* Check if the middle 8x8 Block in the given 8x16 block is flat
*/
static inline int isVertDC_C(uint8_t src[], int stride, PPContext *c)
{
int numEq= 0;
int y;
const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
const int dcThreshold= dcOffset*2 + 1;
src+= stride*4; // src points to the beginning of the 8x8 Block
for(y=0; y<BLOCK_SIZE-1; y++){
if(((unsigned)(src[0] - src[0+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[1] - src[1+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[2] - src[2+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[3] - src[3+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[4] - src[4+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[5] - src[5+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[6] - src[6+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[7] - src[7+stride] + dcOffset)) < dcThreshold) numEq++;
src+= stride;
}
return numEq > c->ppMode.flatnessThreshold;
}
static inline int isHorizMinMaxOk_C(uint8_t src[], int stride, int QP)
{
int i;
for(i=0; i<2; i++){
if((unsigned)(src[0] - src[5] + 2*QP) > 4*QP) return 0;
src += stride;
if((unsigned)(src[2] - src[7] + 2*QP) > 4*QP) return 0;
src += stride;
if((unsigned)(src[4] - src[1] + 2*QP) > 4*QP) return 0;
src += stride;
if((unsigned)(src[6] - src[3] + 2*QP) > 4*QP) return 0;
src += stride;
}
return 1;
}
static inline int isVertMinMaxOk_C(uint8_t src[], int stride, int QP)
{
int x;
src+= stride*4;
for(x=0; x<BLOCK_SIZE; x+=4){
if((unsigned)(src[ x + 0*stride] - src[ x + 5*stride] + 2*QP) > 4*QP) return 0;
if((unsigned)(src[1+x + 2*stride] - src[1+x + 7*stride] + 2*QP) > 4*QP) return 0;
if((unsigned)(src[2+x + 4*stride] - src[2+x + 1*stride] + 2*QP) > 4*QP) return 0;
if((unsigned)(src[3+x + 6*stride] - src[3+x + 3*stride] + 2*QP) > 4*QP) return 0;
}
return 1;
}
static inline int horizClassify_C(uint8_t src[], int stride, PPContext *c)
{
if( isHorizDC_C(src, stride, c) ){
if( isHorizMinMaxOk_C(src, stride, c->QP) )
return 1;
else
return 0;
}else{
return 2;
}
}
static inline int vertClassify_C(uint8_t src[], int stride, PPContext *c)
{
if( isVertDC_C(src, stride, c) ){
if( isVertMinMaxOk_C(src, stride, c->QP) )
return 1;
else
return 0;
}else{
return 2;
}
}
static inline void doHorizDefFilter_C(uint8_t dst[], int stride, PPContext *c)
{
int y;
for(y=0; y<BLOCK_SIZE; y++){
const int middleEnergy= 5*(dst[4] - dst[3]) + 2*(dst[2] - dst[5]);
if(FFABS(middleEnergy) < 8*c->QP){
const int q=(dst[3] - dst[4])/2;
const int leftEnergy= 5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);
int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
d= FFMAX(d, 0);
d= (5*d + 32) >> 6;
d*= FFSIGN(-middleEnergy);
if(q>0)
{
d= d<0 ? 0 : d;
d= d>q ? q : d;
}
else
{
d= d>0 ? 0 : d;
d= d<q ? q : d;
}
dst[3]-= d;
dst[4]+= d;
}
dst+= stride;
}
}
/**
* Do a horizontal low pass filter on the 10x8 block (dst points to the middle 8x8 block)
* using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
*/
static inline void doHorizLowPass_C(uint8_t dst[], int stride, PPContext *c)
{
int y;
for(y=0; y<BLOCK_SIZE; y++){
const int first= FFABS(dst[-1] - dst[0]) < c->QP ? dst[-1] : dst[0];
const int last= FFABS(dst[8] - dst[7]) < c->QP ? dst[8] : dst[7];
int sums[10];
sums[0] = 4*first + dst[0] + dst[1] + dst[2] + 4;
sums[1] = sums[0] - first + dst[3];
sums[2] = sums[1] - first + dst[4];
sums[3] = sums[2] - first + dst[5];
sums[4] = sums[3] - first + dst[6];
sums[5] = sums[4] - dst[0] + dst[7];
sums[6] = sums[5] - dst[1] + last;
sums[7] = sums[6] - dst[2] + last;
sums[8] = sums[7] - dst[3] + last;
sums[9] = sums[8] - dst[4] + last;
dst[0]= (sums[0] + sums[2] + 2*dst[0])>>4;
dst[1]= (sums[1] + sums[3] + 2*dst[1])>>4;
dst[2]= (sums[2] + sums[4] + 2*dst[2])>>4;
dst[3]= (sums[3] + sums[5] + 2*dst[3])>>4;
dst[4]= (sums[4] + sums[6] + 2*dst[4])>>4;
dst[5]= (sums[5] + sums[7] + 2*dst[5])>>4;
dst[6]= (sums[6] + sums[8] + 2*dst[6])>>4;
dst[7]= (sums[7] + sums[9] + 2*dst[7])>>4;
dst+= stride;
}
}
/**
* Experimental Filter 1 (Horizontal)
* will not damage linear gradients
* Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
* can only smooth blocks at the expected locations (it cannot smooth them if they did move)
* the MMX2 version does correct clipping, the C version does not
* not identical with the vertical one
*/
static inline void horizX1Filter(uint8_t *src, int stride, int QP)
{
int y;
static uint64_t *lut= NULL;
if(lut==NULL)
{
int i;
lut = av_malloc(256*8);
for(i=0; i<256; i++)
{
int v= i < 128 ? 2*i : 2*(i-256);
/*
//Simulate 112242211 9-Tap filter
uint64_t a= (v/16) & 0xFF;
uint64_t b= (v/8) & 0xFF;
uint64_t c= (v/4) & 0xFF;
uint64_t d= (3*v/8) & 0xFF;
*/
//Simulate piecewise linear interpolation
uint64_t a= (v/16) & 0xFF;
uint64_t b= (v*3/16) & 0xFF;
uint64_t c= (v*5/16) & 0xFF;
uint64_t d= (7*v/16) & 0xFF;
uint64_t A= (0x100 - a)&0xFF;
uint64_t B= (0x100 - b)&0xFF;
uint64_t C= (0x100 - c)&0xFF;
uint64_t D= (0x100 - c)&0xFF;
lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
(D<<24) | (C<<16) | (B<<8) | (A);
//lut[i] = (v<<32) | (v<<24);
}
}
for(y=0; y<BLOCK_SIZE; y++){
int a= src[1] - src[2];
int b= src[3] - src[4];
int c= src[5] - src[6];
int d= FFMAX(FFABS(b) - (FFABS(a) + FFABS(c))/2, 0);
if(d < QP){
int v = d * FFSIGN(-b);
src[1] +=v/8;
src[2] +=v/4;
src[3] +=3*v/8;
src[4] -=3*v/8;
src[5] -=v/4;
src[6] -=v/8;
}
src+=stride;
}
}
/**
* accurate deblock filter
*/
static av_always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPContext *c){
int y;
const int QP= c->QP;
const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
const int dcThreshold= dcOffset*2 + 1;
//START_TIMER
src+= step*4; // src points to the beginning of the 8x8 Block
for(y=0; y<8; y++){
int numEq= 0;
if(((unsigned)(src[-1*step] - src[0*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 0*step] - src[1*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 1*step] - src[2*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 2*step] - src[3*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 3*step] - src[4*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 4*step] - src[5*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 5*step] - src[6*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 6*step] - src[7*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 7*step] - src[8*step] + dcOffset)) < dcThreshold) numEq++;
if(numEq > c->ppMode.flatnessThreshold){
int min, max, x;
if(src[0] > src[step]){
max= src[0];
min= src[step];
}else{
max= src[step];
min= src[0];
}
for(x=2; x<8; x+=2){
if(src[x*step] > src[(x+1)*step]){
if(src[x *step] > max) max= src[ x *step];
if(src[(x+1)*step] < min) min= src[(x+1)*step];
}else{
if(src[(x+1)*step] > max) max= src[(x+1)*step];
if(src[ x *step] < min) min= src[ x *step];
}
}
if(max-min < 2*QP){
const int first= FFABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0];
const int last= FFABS(src[8*step] - src[7*step]) < QP ? src[8*step] : src[7*step];
int sums[10];
sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4;
sums[1] = sums[0] - first + src[3*step];
sums[2] = sums[1] - first + src[4*step];
sums[3] = sums[2] - first + src[5*step];
sums[4] = sums[3] - first + src[6*step];
sums[5] = sums[4] - src[0*step] + src[7*step];
sums[6] = sums[5] - src[1*step] + last;
sums[7] = sums[6] - src[2*step] + last;
sums[8] = sums[7] - src[3*step] + last;
sums[9] = sums[8] - src[4*step] + last;
src[0*step]= (sums[0] + sums[2] + 2*src[0*step])>>4;
src[1*step]= (sums[1] + sums[3] + 2*src[1*step])>>4;
src[2*step]= (sums[2] + sums[4] + 2*src[2*step])>>4;
src[3*step]= (sums[3] + sums[5] + 2*src[3*step])>>4;
src[4*step]= (sums[4] + sums[6] + 2*src[4*step])>>4;
src[5*step]= (sums[5] + sums[7] + 2*src[5*step])>>4;
src[6*step]= (sums[6] + sums[8] + 2*src[6*step])>>4;
src[7*step]= (sums[7] + sums[9] + 2*src[7*step])>>4;
}
}else{
const int middleEnergy= 5*(src[4*step] - src[3*step]) + 2*(src[2*step] - src[5*step]);
if(FFABS(middleEnergy) < 8*QP){
const int q=(src[3*step] - src[4*step])/2;
const int leftEnergy= 5*(src[2*step] - src[1*step]) + 2*(src[0*step] - src[3*step]);
const int rightEnergy= 5*(src[6*step] - src[5*step]) + 2*(src[4*step] - src[7*step]);
int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
d= FFMAX(d, 0);
d= (5*d + 32) >> 6;
d*= FFSIGN(-middleEnergy);
if(q>0){
d= d<0 ? 0 : d;
d= d>q ? q : d;
}else{
d= d>0 ? 0 : d;
d= d<q ? q : d;
}
src[3*step]-= d;
src[4*step]+= d;
}
}
src += stride;
}
/*if(step==16){
STOP_TIMER("step16")
}else{
STOP_TIMER("stepX")
}*/
}
//Note: we have C, MMX, MMX2 and 3DNOW versions; there is no 3DNOW+MMX2 one
//Plain C versions
#if !(HAVE_MMX || HAVE_ALTIVEC) || CONFIG_RUNTIME_CPUDETECT
#define COMPILE_C
#endif
#if HAVE_ALTIVEC
#define COMPILE_ALTIVEC
#endif //HAVE_ALTIVEC
#if ARCH_X86
#if (HAVE_MMX && !HAVE_AMD3DNOW && !HAVE_MMX2) || CONFIG_RUNTIME_CPUDETECT
#define COMPILE_MMX
#endif
#if HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT
#define COMPILE_MMX2
#endif
#if (HAVE_AMD3DNOW && !HAVE_MMX2) || CONFIG_RUNTIME_CPUDETECT
#define COMPILE_3DNOW
#endif
#endif /* ARCH_X86 */
#undef HAVE_MMX
#define HAVE_MMX 0
#undef HAVE_MMX2
#define HAVE_MMX2 0
#undef HAVE_AMD3DNOW
#define HAVE_AMD3DNOW 0
#undef HAVE_ALTIVEC
#define HAVE_ALTIVEC 0
#ifdef COMPILE_C
#define RENAME(a) a ## _C
#include "postprocess_template.c"
#endif
#ifdef COMPILE_ALTIVEC
#undef RENAME
#undef HAVE_ALTIVEC
#define HAVE_ALTIVEC 1
#define RENAME(a) a ## _altivec
#include "postprocess_altivec_template.c"
#include "postprocess_template.c"
#endif
//MMX versions
#ifdef COMPILE_MMX
#undef RENAME
#undef HAVE_MMX
#define HAVE_MMX 1
#define RENAME(a) a ## _MMX
#include "postprocess_template.c"
#endif
//MMX2 versions
#ifdef COMPILE_MMX2
#undef RENAME
#undef HAVE_MMX
#undef HAVE_MMX2
#define HAVE_MMX 1
#define HAVE_MMX2 1
#define RENAME(a) a ## _MMX2
#include "postprocess_template.c"
#endif
//3DNOW versions
#ifdef COMPILE_3DNOW
#undef RENAME
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_AMD3DNOW
#define HAVE_MMX 1
#define HAVE_MMX2 0
#define HAVE_AMD3DNOW 1
#define RENAME(a) a ## _3DNow
#include "postprocess_template.c"
#endif
// minor note: the HAVE_xyz macros have been redefined above, so do not use them below this point.
static inline void postProcess(const uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
const QP_STORE_T QPs[], int QPStride, int isColor, pp_mode *vm, pp_context *vc)
{
PPContext *c= (PPContext *)vc;
PPMode *ppMode= (PPMode *)vm;
c->ppMode= *ppMode; //FIXME
// Using ifs here as they are faster than function pointers, although the
// difference would not be measurable here; it is much better anyway because
// someone might exchange the CPU without restarting MPlayer ;)
#if CONFIG_RUNTIME_CPUDETECT
#if ARCH_X86
// ordered per speed fastest first
if(c->cpuCaps & PP_CPU_CAPS_MMX2)
postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
else if(c->cpuCaps & PP_CPU_CAPS_3DNOW)
postProcess_3DNow(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
else if(c->cpuCaps & PP_CPU_CAPS_MMX)
postProcess_MMX(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
else
postProcess_C(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
#else
#if HAVE_ALTIVEC
if(c->cpuCaps & PP_CPU_CAPS_ALTIVEC)
postProcess_altivec(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
else
#endif
postProcess_C(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
#endif
#else /* CONFIG_RUNTIME_CPUDETECT */
#if HAVE_MMX2
postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
#elif HAVE_AMD3DNOW
postProcess_3DNow(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
#elif HAVE_MMX
postProcess_MMX(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
#elif HAVE_ALTIVEC
postProcess_altivec(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
#else
postProcess_C(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
#endif
#endif /* !CONFIG_RUNTIME_CPUDETECT */
}
//static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
// QP_STORE_T QPs[], int QPStride, int isColor, struct PPMode *ppMode);
/* -pp Command line Help
*/
const char pp_help[] =
"Available postprocessing filters:\n"
"Filters Options\n"
"short long name short long option Description\n"
"* * a autoq CPU power dependent enabler\n"
" c chrom chrominance filtering enabled\n"
" y nochrom chrominance filtering disabled\n"
" n noluma luma filtering disabled\n"
"hb hdeblock (2 threshold) horizontal deblocking filter\n"
" 1. difference factor: default=32, higher -> more deblocking\n"
" 2. flatness threshold: default=39, lower -> more deblocking\n"
" the h & v deblocking filters share these\n"
" so you can't set different thresholds for h / v\n"
"vb vdeblock (2 threshold) vertical deblocking filter\n"
"ha hadeblock (2 threshold) horizontal deblocking filter\n"
"va vadeblock (2 threshold) vertical deblocking filter\n"
"h1 x1hdeblock experimental h deblock filter 1\n"
"v1 x1vdeblock experimental v deblock filter 1\n"
"dr dering deringing filter\n"
"al autolevels automatic brightness / contrast\n"
" f fullyrange stretch luminance to (0..255)\n"
"lb linblenddeint linear blend deinterlacer\n"
"li linipoldeint linear interpolating deinterlace\n"
"ci cubicipoldeint cubic interpolating deinterlacer\n"
"md mediandeint median deinterlacer\n"
"fd ffmpegdeint ffmpeg deinterlacer\n"
"l5 lowpass5 FIR lowpass deinterlacer\n"
"de default hb:a,vb:a,dr:a\n"
"fa fast h1:a,v1:a,dr:a\n"
"ac ha:a:128:7,va:a,dr:a\n"
"tn tmpnoise (3 threshold) temporal noise reducer\n"
" 1. <= 2. <= 3. larger -> stronger filtering\n"
"fq forceQuant <quantizer> force quantizer\n"
"Usage:\n"
"<filterName>[:<option>[:<option>...]][[,|/][-]<filterName>[:<option>...]]...\n"
"long form example:\n"
"vdeblock:autoq/hdeblock:autoq/linblenddeint default,-vdeblock\n"
"short form example:\n"
"vb:a/hb:a/lb de,-vb\n"
"more examples:\n"
"tn:64:128:256\n"
"\n"
;
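/* Illustration, not part of the original file: the option-string syntax
 * documented above is what pp_get_mode_by_name_and_quality() below parses.
 * A hypothetical caller using the short-form example "vb:a/hb:a/lb" from the
 * help text (the returned mode must later be released with pp_free_mode()): */
static pp_mode *pp_example_mode(int quality)
{
    pp_mode *mode = pp_get_mode_by_name_and_quality("vb:a/hb:a/lb", quality);
    if (!mode)
        av_log(NULL, AV_LOG_ERROR, "invalid postprocessing option string\n");
    return mode;
}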
pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality)
{
char temp[GET_MODE_BUFFER_SIZE];
char *p= temp;
static const char filterDelimiters[] = ",/";
static const char optionDelimiters[] = ":";
struct PPMode *ppMode;
char *filterToken;
ppMode= av_malloc(sizeof(PPMode));
ppMode->lumMode= 0;
ppMode->chromMode= 0;
ppMode->maxTmpNoise[0]= 700;
ppMode->maxTmpNoise[1]= 1500;
ppMode->maxTmpNoise[2]= 3000;
ppMode->maxAllowedY= 234;
ppMode->minAllowedY= 16;
ppMode->baseDcDiff= 256/8;
ppMode->flatnessThreshold= 56-16-1;
ppMode->maxClippedThreshold= 0.01;
ppMode->error=0;
memset(temp, 0, GET_MODE_BUFFER_SIZE);
av_strlcpy(temp, name, GET_MODE_BUFFER_SIZE - 1);
av_log(NULL, AV_LOG_DEBUG, "pp: %s\n", name);
for(;;){
char *filterName;
int q= 1000000; //PP_QUALITY_MAX;
int chrom=-1;
int luma=-1;
char *option;
char *options[OPTIONS_ARRAY_SIZE];
int i;
int filterNameOk=0;
int numOfUnknownOptions=0;
int enable=1; //does the user want us to enable or disable the filter
filterToken= strtok(p, filterDelimiters);
if(filterToken == NULL) break;
p+= strlen(filterToken) + 1; // p points to next filterToken
filterName= strtok(filterToken, optionDelimiters);
av_log(NULL, AV_LOG_DEBUG, "pp: %s::%s\n", filterToken, filterName);
if(*filterName == '-'){
enable=0;
filterName++;
}
for(;;){ //for all options
option= strtok(NULL, optionDelimiters);
if(option == NULL) break;
av_log(NULL, AV_LOG_DEBUG, "pp: option: %s\n", option);
if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
else if(!strcmp("noluma", option) || !strcmp("n", option)) luma=0;
else{
options[numOfUnknownOptions] = option;
numOfUnknownOptions++;
}
if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break;
}
options[numOfUnknownOptions] = NULL;
/* replace stuff from the replace Table */
for(i=0; replaceTable[2*i]!=NULL; i++){
if(!strcmp(replaceTable[2*i], filterName)){
int newlen= strlen(replaceTable[2*i + 1]);
int plen;
int spaceLeft;
if(p==NULL) p= temp, *p=0; //last filter
else p--, *p=','; //not last filter
plen= strlen(p);
spaceLeft= p - temp + plen;
if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE - 1){
ppMode->error++;
break;
}
memmove(p + newlen, p, plen+1);
memcpy(p, replaceTable[2*i + 1], newlen);
filterNameOk=1;
}
}
for(i=0; filters[i].shortName!=NULL; i++){
if( !strcmp(filters[i].longName, filterName)
|| !strcmp(filters[i].shortName, filterName)){
ppMode->lumMode &= ~filters[i].mask;
ppMode->chromMode &= ~filters[i].mask;
filterNameOk=1;
if(!enable) break; // user wants to disable it
if(q >= filters[i].minLumQuality && luma)
ppMode->lumMode|= filters[i].mask;
if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
if(q >= filters[i].minChromQuality)
ppMode->chromMode|= filters[i].mask;
if(filters[i].mask == LEVEL_FIX){
int o;
ppMode->minAllowedY= 16;
ppMode->maxAllowedY= 234;
for(o=0; options[o]!=NULL; o++){
if( !strcmp(options[o],"fullyrange")
||!strcmp(options[o],"f")){
ppMode->minAllowedY= 0;
ppMode->maxAllowedY= 255;
numOfUnknownOptions--;
}
}
}
else if(filters[i].mask == TEMP_NOISE_FILTER)
{
int o;
int numOfNoises=0;
for(o=0; options[o]!=NULL; o++){
char *tail;
ppMode->maxTmpNoise[numOfNoises]=
strtol(options[o], &tail, 0);
if(tail!=options[o]){
numOfNoises++;
numOfUnknownOptions--;
if(numOfNoises >= 3) break;
}
}
}
else if(filters[i].mask == V_DEBLOCK || filters[i].mask == H_DEBLOCK
|| filters[i].mask == V_A_DEBLOCK || filters[i].mask == H_A_DEBLOCK){
int o;
for(o=0; options[o]!=NULL && o<2; o++){
char *tail;
int val= strtol(options[o], &tail, 0);
if(tail==options[o]) break;
numOfUnknownOptions--;
if(o==0) ppMode->baseDcDiff= val;
else ppMode->flatnessThreshold= val;
}
}
else if(filters[i].mask == FORCE_QUANT){
int o;
ppMode->forcedQuant= 15;
for(o=0; options[o]!=NULL && o<1; o++){
char *tail;
int val= strtol(options[o], &tail, 0);
if(tail==options[o]) break;
numOfUnknownOptions--;
ppMode->forcedQuant= val;
}
}
}
}
if(!filterNameOk) ppMode->error++;
ppMode->error += numOfUnknownOptions;
}
av_log(NULL, AV_LOG_DEBUG, "pp: lumMode=%X, chromMode=%X\n", ppMode->lumMode, ppMode->chromMode);
if(ppMode->error){
av_log(NULL, AV_LOG_ERROR, "%d errors in postprocess string \"%s\"\n", ppMode->error, name);
av_free(ppMode);
return NULL;
}
return ppMode;
}
void pp_free_mode(pp_mode *mode){
av_free(mode);
}
static void reallocAlign(void **p, int alignment, int size){
av_free(*p);
*p= av_mallocz(size);
}
static void reallocBuffers(PPContext *c, int width, int height, int stride, int qpStride){
int mbWidth = (width+15)>>4;
int mbHeight= (height+15)>>4;
int i;
c->stride= stride;
c->qpStride= qpStride;
reallocAlign((void **)&c->tempDst, 8, stride*24);
reallocAlign((void **)&c->tempSrc, 8, stride*24);
reallocAlign((void **)&c->tempBlocks, 8, 2*16*8);
reallocAlign((void **)&c->yHistogram, 8, 256*sizeof(uint64_t));
for(i=0; i<256; i++)
c->yHistogram[i]= width*height/64*15/256;
for(i=0; i<3; i++){
//Note: The +17*1024 is just there so I do not have to worry about r/w over the end.
reallocAlign((void **)&c->tempBlurred[i], 8, stride*mbHeight*16 + 17*1024);
reallocAlign((void **)&c->tempBlurredPast[i], 8, 256*((height+7)&(~7))/2 + 17*1024);//FIXME size
}
reallocAlign((void **)&c->deintTemp, 8, 2*width+32);
reallocAlign((void **)&c->nonBQPTable, 8, qpStride*mbHeight*sizeof(QP_STORE_T));
reallocAlign((void **)&c->stdQPTable, 8, qpStride*mbHeight*sizeof(QP_STORE_T));
reallocAlign((void **)&c->forcedQPTable, 8, mbWidth*sizeof(QP_STORE_T));
}
static const char * context_to_name(void * ptr) {
return "postproc";
}
static const AVClass av_codec_context_class = { "Postproc", context_to_name, NULL };
pp_context *pp_get_context(int width, int height, int cpuCaps){
PPContext *c= av_malloc(sizeof(PPContext));
int stride= FFALIGN(width, 16); //assumed / will realloc if needed
int qpStride= (width+15)/16 + 2; //assumed / will realloc if needed
memset(c, 0, sizeof(PPContext));
c->av_class = &av_codec_context_class;
c->cpuCaps= cpuCaps;
if(cpuCaps&PP_FORMAT){
c->hChromaSubSample= cpuCaps&0x3;
c->vChromaSubSample= (cpuCaps>>4)&0x3;
}else{
c->hChromaSubSample= 1;
c->vChromaSubSample= 1;
}
reallocBuffers(c, width, height, stride, qpStride);
c->frameNum=-1;
return c;
}
void pp_free_context(void *vc){
PPContext *c = (PPContext*)vc;
int i;
for(i=0; i<3; i++) av_free(c->tempBlurred[i]);
for(i=0; i<3; i++) av_free(c->tempBlurredPast[i]);
av_free(c->tempBlocks);
av_free(c->yHistogram);
av_free(c->tempDst);
av_free(c->tempSrc);
av_free(c->deintTemp);
av_free(c->stdQPTable);
av_free(c->nonBQPTable);
av_free(c->forcedQPTable);
memset(c, 0, sizeof(PPContext));
av_free(c);
}
void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
uint8_t * dst[3], const int dstStride[3],
int width, int height,
const QP_STORE_T *QP_store, int QPStride,
pp_mode *vm, void *vc, int pict_type)
{
int mbWidth = (width+15)>>4;
int mbHeight= (height+15)>>4;
PPMode *mode = (PPMode*)vm;
PPContext *c = (PPContext*)vc;
int minStride= FFMAX(FFABS(srcStride[0]), FFABS(dstStride[0]));
int absQPStride = FFABS(QPStride);
// c->stride and c->QPStride are always positive
if(c->stride < minStride || c->qpStride < absQPStride)
reallocBuffers(c, width, height,
FFMAX(minStride, c->stride),
FFMAX(c->qpStride, absQPStride));
if(QP_store==NULL || (mode->lumMode & FORCE_QUANT)){
int i;
QP_store= c->forcedQPTable;
absQPStride = QPStride = 0;
if(mode->lumMode & FORCE_QUANT)
for(i=0; i<mbWidth; i++) c->forcedQPTable[i]= mode->forcedQuant;
else
for(i=0; i<mbWidth; i++) c->forcedQPTable[i]= 1;
}
if(pict_type & PP_PICT_TYPE_QP2){
int i;
const int count= mbHeight * absQPStride;
for(i=0; i<(count>>2); i++){
((uint32_t*)c->stdQPTable)[i] = (((const uint32_t*)QP_store)[i]>>1) & 0x7F7F7F7F;
}
for(i<<=2; i<count; i++){
c->stdQPTable[i] = QP_store[i]>>1;
}
QP_store= c->stdQPTable;
QPStride= absQPStride;
}
if(0){
int x,y;
for(y=0; y<mbHeight; y++){
for(x=0; x<mbWidth; x++){
av_log(c, AV_LOG_INFO, "%2d ", QP_store[x + y*QPStride]);
}
av_log(c, AV_LOG_INFO, "\n");
}
av_log(c, AV_LOG_INFO, "\n");
}
if((pict_type&7)!=3){
if (QPStride >= 0){
int i;
const int count= mbHeight * QPStride;
for(i=0; i<(count>>2); i++){
((uint32_t*)c->nonBQPTable)[i] = ((const uint32_t*)QP_store)[i] & 0x3F3F3F3F;
}
for(i<<=2; i<count; i++){
c->nonBQPTable[i] = QP_store[i] & 0x3F;
}
} else {
int i,j;
for(i=0; i<mbHeight; i++) {
for(j=0; j<absQPStride; j++) {
c->nonBQPTable[i*absQPStride+j] = QP_store[i*QPStride+j] & 0x3F;
}
}
}
}
av_log(c, AV_LOG_DEBUG, "using npp filters 0x%X/0x%X\n",
mode->lumMode, mode->chromMode);
postProcess(src[0], srcStride[0], dst[0], dstStride[0],
width, height, QP_store, QPStride, 0, mode, c);
width = (width )>>c->hChromaSubSample;
height = (height)>>c->vChromaSubSample;
if(mode->chromMode){
postProcess(src[1], srcStride[1], dst[1], dstStride[1],
width, height, QP_store, QPStride, 1, mode, c);
postProcess(src[2], srcStride[2], dst[2], dstStride[2],
width, height, QP_store, QPStride, 2, mode, c);
}
else if(srcStride[1] == dstStride[1] && srcStride[2] == dstStride[2]){
linecpy(dst[1], src[1], height, srcStride[1]);
linecpy(dst[2], src[2], height, srcStride[2]);
}else{
int y;
for(y=0; y<height; y++){
memcpy(&(dst[1][y*dstStride[1]]), &(src[1][y*srcStride[1]]), width);
memcpy(&(dst[2][y*dstStride[2]]), &(src[2][y*srcStride[2]]), width);
}
}
}
/*
* Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef POSTPROC_POSTPROCESS_H
#define POSTPROC_POSTPROCESS_H
/**
* @file
* @brief
* external postprocessing API
*/
#include "libavutil/avutil.h"
#define LIBPOSTPROC_VERSION_MAJOR 52
#define LIBPOSTPROC_VERSION_MINOR 0
#define LIBPOSTPROC_VERSION_MICRO 0
#define LIBPOSTPROC_VERSION_INT AV_VERSION_INT(LIBPOSTPROC_VERSION_MAJOR, \
LIBPOSTPROC_VERSION_MINOR, \
LIBPOSTPROC_VERSION_MICRO)
#define LIBPOSTPROC_VERSION AV_VERSION(LIBPOSTPROC_VERSION_MAJOR, \
LIBPOSTPROC_VERSION_MINOR, \
LIBPOSTPROC_VERSION_MICRO)
#define LIBPOSTPROC_BUILD LIBPOSTPROC_VERSION_INT
#define LIBPOSTPROC_IDENT "postproc" AV_STRINGIFY(LIBPOSTPROC_VERSION)
/**
* Return the LIBPOSTPROC_VERSION_INT constant.
*/
unsigned postproc_version(void);
/**
* Return the libpostproc build-time configuration.
*/
const char *postproc_configuration(void);
/**
* Return the libpostproc license.
*/
const char *postproc_license(void);
#define PP_QUALITY_MAX 6
#define QP_STORE_T int8_t
#include <inttypes.h>
typedef void pp_context;
typedef void pp_mode;
extern const char pp_help[]; ///< a simple help text
void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
uint8_t * dst[3], const int dstStride[3],
int horizontalSize, int verticalSize,
const QP_STORE_T *QP_store, int QP_stride,
pp_mode *mode, pp_context *ppContext, int pict_type);
/**
* Return a pp_mode or NULL if an error occurred.
*
* @param name the string after "-pp" on the command line
* @param quality a number from 0 to PP_QUALITY_MAX
*/
pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality);
void pp_free_mode(pp_mode *mode);
pp_context *pp_get_context(int width, int height, int flags);
void pp_free_context(pp_context *ppContext);
#define PP_CPU_CAPS_MMX 0x80000000
#define PP_CPU_CAPS_MMX2 0x20000000
#define PP_CPU_CAPS_3DNOW 0x40000000
#define PP_CPU_CAPS_ALTIVEC 0x10000000
#define PP_FORMAT 0x00000008
#define PP_FORMAT_420 (0x00000011|PP_FORMAT)
#define PP_FORMAT_422 (0x00000001|PP_FORMAT)
#define PP_FORMAT_411 (0x00000002|PP_FORMAT)
#define PP_FORMAT_444 (0x00000000|PP_FORMAT)
#define PP_PICT_TYPE_QP2 0x00000010 ///< MPEG2 style QScale
#endif /* POSTPROC_POSTPROCESS_H */
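The header above is the complete public API that now lives in the external project; a minimal, hypothetical sketch of the intended call sequence for a single YUV 4:2:0 frame (frame pointers, strides and the per-macroblock QP table come from the caller, e.g. a decoder; none of this is part of the original tree):

#include <stdint.h>
#include <libpostproc/postprocess.h>  /* assumed install path of the header above */

/* Hypothetical helper: run the "default" filter chain (hb:a,vb:a,dr:a) on one frame. */
static int postprocess_frame(uint8_t *dst[3], const int dst_stride[3],
                             const uint8_t *src[3], const int src_stride[3],
                             int width, int height,
                             const int8_t *qp_table, int qp_stride, int pict_type)
{
    pp_mode *mode = pp_get_mode_by_name_and_quality("default", PP_QUALITY_MAX);
    pp_context *ctx;

    if (!mode)
        return -1;  /* bad option string; the parser logs the individual errors */

    /* real callers OR PP_CPU_CAPS_* flags for the host CPU into the last argument */
    ctx = pp_get_context(width, height, PP_FORMAT_420);

    pp_postprocess(src, src_stride, dst, dst_stride, width, height,
                   qp_table, qp_stride, mode, ctx, pict_type);

    pp_free_mode(mode);
    pp_free_context(ctx);
    return 0;
}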
/*
* AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
*
* based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avutil.h"
#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
do { \
__typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
__typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
__typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
__typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
tempA1 = vec_mergeh (src_a, src_e); \
tempB1 = vec_mergel (src_a, src_e); \
tempC1 = vec_mergeh (src_b, src_f); \
tempD1 = vec_mergel (src_b, src_f); \
tempE1 = vec_mergeh (src_c, src_g); \
tempF1 = vec_mergel (src_c, src_g); \
tempG1 = vec_mergeh (src_d, src_h); \
tempH1 = vec_mergel (src_d, src_h); \
tempA2 = vec_mergeh (tempA1, tempE1); \
tempB2 = vec_mergel (tempA1, tempE1); \
tempC2 = vec_mergeh (tempB1, tempF1); \
tempD2 = vec_mergel (tempB1, tempF1); \
tempE2 = vec_mergeh (tempC1, tempG1); \
tempF2 = vec_mergel (tempC1, tempG1); \
tempG2 = vec_mergeh (tempD1, tempH1); \
tempH2 = vec_mergel (tempD1, tempH1); \
src_a = vec_mergeh (tempA2, tempE2); \
src_b = vec_mergel (tempA2, tempE2); \
src_c = vec_mergeh (tempB2, tempF2); \
src_d = vec_mergel (tempB2, tempF2); \
src_e = vec_mergeh (tempC2, tempG2); \
src_f = vec_mergel (tempC2, tempG2); \
src_g = vec_mergeh (tempD2, tempH2); \
src_h = vec_mergel (tempD2, tempH2); \
} while (0)
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true.
*/
short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
DECLARE_ALIGNED(16, short, data)[8] =
{
data_0,
data_0 * 2 + 1,
c->QP * 2,
c->QP * 4
};
int numEq;
uint8_t *src2 = src;
vector signed short v_dcOffset;
vector signed short v2QP;
vector unsigned short v4QP;
vector unsigned short v_dcThreshold;
const int properStride = (stride % 16);
const int srcAlign = ((unsigned long)src2 % 16);
const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
const vector signed int zero = vec_splat_s32(0);
const vector signed short mask = vec_splat_s16(1);
vector signed int v_numEq = vec_splat_s32(0);
vector signed short v_data = vec_ld(0, data);
vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
//FIXME avoid this mess if possible
register int j0 = 0,
j1 = stride,
j2 = 2 * stride,
j3 = 3 * stride,
j4 = 4 * stride,
j5 = 5 * stride,
j6 = 6 * stride,
j7 = 7 * stride;
vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
v_srcA4, v_srcA5, v_srcA6, v_srcA7;
v_dcOffset = vec_splat(v_data, 0);
v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
v2QP = vec_splat(v_data, 2);
v4QP = (vector unsigned short)vec_splat(v_data, 3);
src2 += stride * 4;
#define LOAD_LINE(i) \
{ \
vector unsigned char perm##i = vec_lvsl(j##i, src2); \
vector unsigned char v_srcA2##i; \
vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \
if (two_vectors) \
v_srcA2##i = vec_ld(j##i + 16, src2); \
v_srcA##i = \
vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i); }
#define LOAD_LINE_ALIGNED(i) \
v_srcA##i = vec_ld(j##i, src2); \
v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i)
/* Special-casing the aligned case is worthwhile, as all calls from
* the (transposed) horizontal deblocks will be aligned, in addition
* to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
LOAD_LINE_ALIGNED(0);
LOAD_LINE_ALIGNED(1);
LOAD_LINE_ALIGNED(2);
LOAD_LINE_ALIGNED(3);
LOAD_LINE_ALIGNED(4);
LOAD_LINE_ALIGNED(5);
LOAD_LINE_ALIGNED(6);
LOAD_LINE_ALIGNED(7);
} else {
LOAD_LINE(0);
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
}
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
#define ITER(i, j) \
const vector signed short v_diff##i = \
vec_sub(v_srcAss##i, v_srcAss##j); \
const vector signed short v_sum##i = \
vec_add(v_diff##i, v_dcOffset); \
const vector signed short v_comp##i = \
(vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
v_dcThreshold); \
const vector signed short v_part##i = vec_and(mask, v_comp##i);
{
ITER(0, 1)
ITER(1, 2)
ITER(2, 3)
ITER(3, 4)
ITER(4, 5)
ITER(5, 6)
ITER(6, 7)
v_numEq = vec_sum4s(v_part0, v_numEq);
v_numEq = vec_sum4s(v_part1, v_numEq);
v_numEq = vec_sum4s(v_part2, v_numEq);
v_numEq = vec_sum4s(v_part3, v_numEq);
v_numEq = vec_sum4s(v_part4, v_numEq);
v_numEq = vec_sum4s(v_part5, v_numEq);
v_numEq = vec_sum4s(v_part6, v_numEq);
}
#undef ITER
v_numEq = vec_sums(v_numEq, zero);
v_numEq = vec_splat(v_numEq, 3);
vec_ste(v_numEq, 0, &numEq);
if (numEq > c->ppMode.flatnessThreshold){
const vector unsigned char mmoP1 = (const vector unsigned char)
{0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
const vector unsigned char mmoP2 = (const vector unsigned char)
{0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
const vector unsigned char mmoP = (const vector unsigned char)
vec_lvsl(8, (unsigned char*)0);
vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
vector signed short mmoDiff = vec_sub(mmoL, mmoR);
vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);
if (vec_any_gt(mmoSum, v4QP))
return 0;
else
return 1;
}
else return 2;
}
static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *src2 = src;
const vector signed int zero = vec_splat_s32(0);
const int properStride = (stride % 16);
const int srcAlign = ((unsigned long)src2 % 16);
DECLARE_ALIGNED(16, short, qp)[8] = {c->QP};
vector signed short vqp = vec_ld(0, qp);
vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3), av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6), av_uninit(vbA7), av_uninit(vbA8), vbA9;
vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3), av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6), av_uninit(vbB7), av_uninit(vbB8), vbB9;
vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
vector unsigned char perml0, perml1, perml2, perml3, perml4,
perml5, perml6, perml7, perml8, perml9;
register int j0 = 0,
j1 = stride,
j2 = 2 * stride,
j3 = 3 * stride,
j4 = 4 * stride,
j5 = 5 * stride,
j6 = 6 * stride,
j7 = 7 * stride,
j8 = 8 * stride,
j9 = 9 * stride;
vqp = vec_splat(vqp, 0);
src2 += stride*3;
#define LOAD_LINE(i) \
perml##i = vec_lvsl(i * stride, src2); \
vbA##i = vec_ld(i * stride, src2); \
vbB##i = vec_ld(i * stride + 16, src2); \
vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \
vb##i = \
(vector signed short)vec_mergeh((vector unsigned char)zero, \
(vector unsigned char)vbT##i)
#define LOAD_LINE_ALIGNED(i) \
vbT##i = vec_ld(j##i, src2); \
vb##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)vbT##i)
/* Special-casing the aligned case is worthwhile, as all calls from
* the (transposed) horizontal deblocks will be aligned, in addition
* to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
LOAD_LINE_ALIGNED(0);
LOAD_LINE_ALIGNED(1);
LOAD_LINE_ALIGNED(2);
LOAD_LINE_ALIGNED(3);
LOAD_LINE_ALIGNED(4);
LOAD_LINE_ALIGNED(5);
LOAD_LINE_ALIGNED(6);
LOAD_LINE_ALIGNED(7);
LOAD_LINE_ALIGNED(8);
LOAD_LINE_ALIGNED(9);
} else {
LOAD_LINE(0);
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
LOAD_LINE(8);
LOAD_LINE(9);
}
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
{
const vector unsigned short v_2 = vec_splat_u16(2);
const vector unsigned short v_4 = vec_splat_u16(4);
const vector signed short v_diff01 = vec_sub(vb0, vb1);
const vector unsigned short v_cmp01 =
(const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
const vector signed short v_diff89 = vec_sub(vb8, vb9);
const vector unsigned short v_cmp89 =
(const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);
const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
const vector signed short temp02 = vec_add(vb2, vb3);
const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
const vector signed short v_sumsB0 = vec_add(temp02, temp03);
const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
const vector signed short v_sumsB1 = vec_add(temp11, vb4);
const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
const vector signed short v_sumsB2 = vec_add(temp21, vb5);
const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
const vector signed short v_sumsB3 = vec_add(temp31, vb6);
const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
const vector signed short v_sumsB4 = vec_add(temp41, vb7);
const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
const vector signed short v_sumsB5 = vec_add(temp51, vb8);
const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
const vector signed short v_sumsB6 = vec_add(temp61, v_last);
const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
const vector signed short v_sumsB7 = vec_add(temp71, v_last);
const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
const vector signed short v_sumsB8 = vec_add(temp81, v_last);
const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
const vector signed short v_sumsB9 = vec_add(temp91, v_last);
#define COMPUTE_VR(i, j, k) \
const vector signed short temps1##i = \
vec_add(v_sumsB##i, v_sumsB##k); \
const vector signed short temps2##i = \
vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \
const vector signed short vr##j = vec_sra(temps2##i, v_4)
COMPUTE_VR(0, 1, 2);
COMPUTE_VR(1, 2, 3);
COMPUTE_VR(2, 3, 4);
COMPUTE_VR(3, 4, 5);
COMPUTE_VR(4, 5, 6);
COMPUTE_VR(5, 6, 7);
COMPUTE_VR(6, 7, 8);
COMPUTE_VR(7, 8, 9);
const vector signed char neg1 = vec_splat_s8(-1);
const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
#define PACK_AND_STORE(i) \
{ const vector unsigned char perms##i = \
vec_lvsr(i * stride, src2); \
const vector unsigned char vf##i = \
vec_packsu(vr##i, (vector signed short)zero); \
const vector unsigned char vg##i = \
vec_perm(vf##i, vbT##i, permHH); \
const vector unsigned char mask##i = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
const vector unsigned char vg2##i = \
vec_perm(vg##i, vg##i, perms##i); \
const vector unsigned char svA##i = \
vec_sel(vbA##i, vg2##i, mask##i); \
const vector unsigned char svB##i = \
vec_sel(vg2##i, vbB##i, mask##i); \
vec_st(svA##i, i * stride, src2); \
vec_st(svB##i, i * stride + 16, src2);}
#define PACK_AND_STORE_ALIGNED(i) \
{ const vector unsigned char vf##i = \
vec_packsu(vr##i, (vector signed short)zero); \
const vector unsigned char vg##i = \
vec_perm(vf##i, vbT##i, permHH); \
vec_st(vg##i, i * stride, src2);}
/* Special-casing the aligned case is worthwhile, as all calls from
* the (transposed) horizontal deblocks will be aligned, in addition
* to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
PACK_AND_STORE_ALIGNED(1)
PACK_AND_STORE_ALIGNED(2)
PACK_AND_STORE_ALIGNED(3)
PACK_AND_STORE_ALIGNED(4)
PACK_AND_STORE_ALIGNED(5)
PACK_AND_STORE_ALIGNED(6)
PACK_AND_STORE_ALIGNED(7)
PACK_AND_STORE_ALIGNED(8)
} else {
PACK_AND_STORE(1)
PACK_AND_STORE(2)
PACK_AND_STORE(3)
PACK_AND_STORE(4)
PACK_AND_STORE(5)
PACK_AND_STORE(6)
PACK_AND_STORE(7)
PACK_AND_STORE(8)
}
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
}
}
static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *src2 = src + stride*3;
const vector signed int zero = vec_splat_s32(0);
DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP};
vector signed short vqp = vec_splat(
(vector signed short)vec_ld(0, qp), 0);
#define LOAD_LINE(i) \
const vector unsigned char perm##i = \
vec_lvsl(i * stride, src2); \
const vector unsigned char vbA##i = \
vec_ld(i * stride, src2); \
const vector unsigned char vbB##i = \
vec_ld(i * stride + 16, src2); \
const vector unsigned char vbT##i = \
vec_perm(vbA##i, vbB##i, perm##i); \
const vector signed short vb##i = \
(vector signed short)vec_mergeh((vector unsigned char)zero, \
(vector unsigned char)vbT##i)
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
LOAD_LINE(8);
#undef LOAD_LINE
const vector signed short v_1 = vec_splat_s16(1);
const vector signed short v_2 = vec_splat_s16(2);
const vector signed short v_5 = vec_splat_s16(5);
const vector signed short v_32 = vec_sl(v_1,
(vector unsigned short)v_5);
/* middle energy */
const vector signed short l3minusl6 = vec_sub(vb3, vb6);
const vector signed short l5minusl4 = vec_sub(vb5, vb4);
const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
const vector signed short absmE = vec_abs(mE);
/* left & right energy */
const vector signed short l1minusl4 = vec_sub(vb1, vb4);
const vector signed short l3minusl2 = vec_sub(vb3, vb2);
const vector signed short l5minusl8 = vec_sub(vb5, vb8);
const vector signed short l7minusl6 = vec_sub(vb7, vb6);
const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
/* d */
const vector signed short ddiff = vec_sub(absmE,
vec_min(vec_abs(lE),
vec_abs(rE)));
const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
const vector signed short minusd = vec_sub((vector signed short)zero, d);
const vector signed short finald = vec_sel(minusd,
d,
vec_cmpgt(vec_sub((vector signed short)zero, mE),
(vector signed short)zero));
/* q */
const vector signed short qtimes2 = vec_sub(vb4, vb5);
/* for a shift right to behave like /2, we need to add one
to all negative integers */
const vector signed short rounddown = vec_sel((vector signed short)zero,
v_1,
vec_cmplt(qtimes2, (vector signed short)zero));
const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
/* clamp */
const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
const vector signed short dclamp_P = vec_min(dclamp_P1, q);
const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
const vector signed short dclamp_N = vec_max(dclamp_N1, q);
const vector signed short dclampedfinal = vec_sel(dclamp_N,
dclamp_P,
vec_cmpgt(q, (vector signed short)zero));
const vector signed short dornotd = vec_sel((vector signed short)zero,
dclampedfinal,
vec_cmplt(absmE, vqp));
/* add/subtract to l4 and l5 */
const vector signed short vb4minusd = vec_sub(vb4, dornotd);
const vector signed short vb5plusd = vec_add(vb5, dornotd);
/* finally, stores */
const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);
const vector signed char neg1 = vec_splat_s8(-1);
const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
#define STORE(i) \
{ const vector unsigned char perms##i = \
vec_lvsr(i * stride, src2); \
const vector unsigned char vg##i = \
vec_perm(st##i, vbT##i, permHH); \
const vector unsigned char mask##i = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
const vector unsigned char vg2##i = \
vec_perm(vg##i, vg##i, perms##i); \
const vector unsigned char svA##i = \
vec_sel(vbA##i, vg2##i, mask##i); \
const vector unsigned char svB##i = \
vec_sel(vg2##i, vbB##i, mask##i); \
vec_st(svA##i, i * stride, src2); \
vec_st(svB##i, i * stride + 16, src2);}
STORE(4)
STORE(5)
}
static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
const vector signed int vsint32_8 = vec_splat_s32(8);
const vector unsigned int vuint32_4 = vec_splat_u32(4);
const vector signed char neg1 = vec_splat_s8(-1);
const vector unsigned char permA1 = (vector unsigned char)
{0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
const vector unsigned char permA2 = (vector unsigned char)
{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
const vector unsigned char permA1inc = (vector unsigned char)
{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const vector unsigned char permA2inc = (vector unsigned char)
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const vector unsigned char magic = (vector unsigned char)
{0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const vector unsigned char extractPerm = (vector unsigned char)
{0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01};
const vector unsigned char extractPermInc = (vector unsigned char)
{0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01};
const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
const vector unsigned char tenRight = (vector unsigned char)
{0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const vector unsigned char eightLeft = (vector unsigned char)
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08};
/*
This code makes no assumptions about src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, but unfortunately
this is not always true. Quite a lot of loads/stores
could be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *srcCopy = src;
DECLARE_ALIGNED(16, uint8_t, dt)[16] = { deringThreshold };
const vector signed int zero = vec_splat_s32(0);
vector unsigned char v_dt = vec_splat(vec_ld(0, dt), 0);
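/* Unaligned load of one line: vec_lvsl computes the realignment permute and
   two aligned 16-byte loads are combined with vec_perm. */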
#define LOAD_LINE(i) \
const vector unsigned char perm##i = \
vec_lvsl(i * stride, srcCopy); \
vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \
vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \
vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)
LOAD_LINE(0);
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
LOAD_LINE(8);
LOAD_LINE(9);
#undef LOAD_LINE
vector unsigned char v_avg;
DECLARE_ALIGNED(16, signed int, S)[8];
DECLARE_ALIGNED(16, int, tQP2)[4] = { c->QP/2 + 1 };
vector signed int vQP2 = vec_ld(0, tQP2);
vQP2 = vec_splat(vQP2, 0);
{
const vector unsigned char trunc_perm = (vector unsigned char)
{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18};
const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);
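/* EXTRACT(op) reduces the 8x8 block (columns 1-8 of lines 1-8, gathered by
   trunc_perm above) to a single min or max byte replicated across all lanes. */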
#define EXTRACT(op) do { \
const vector unsigned char s_1 = vec_##op(trunc_src12, trunc_src34); \
const vector unsigned char s_2 = vec_##op(trunc_src56, trunc_src78); \
const vector unsigned char s_6 = vec_##op(s_1, s_2); \
const vector unsigned char s_8h = vec_mergeh(s_6, s_6); \
const vector unsigned char s_8l = vec_mergel(s_6, s_6); \
const vector unsigned char s_9 = vec_##op(s_8h, s_8l); \
const vector unsigned char s_9h = vec_mergeh(s_9, s_9); \
const vector unsigned char s_9l = vec_mergel(s_9, s_9); \
const vector unsigned char s_10 = vec_##op(s_9h, s_9l); \
const vector unsigned char s_10h = vec_mergeh(s_10, s_10); \
const vector unsigned char s_10l = vec_mergel(s_10, s_10); \
const vector unsigned char s_11 = vec_##op(s_10h, s_10l); \
const vector unsigned char s_11h = vec_mergeh(s_11, s_11); \
const vector unsigned char s_11l = vec_mergel(s_11, s_11); \
v_##op = vec_##op(s_11h, s_11l); \
} while (0)
vector unsigned char v_min;
vector unsigned char v_max;
EXTRACT(min);
EXTRACT(max);
#undef EXTRACT
if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
return;
v_avg = vec_avg(v_min, v_max);
}
{
const vector unsigned short mask1 = (vector unsigned short)
{0x0001, 0x0002, 0x0004, 0x0008,
0x0010, 0x0020, 0x0040, 0x0080};
const vector unsigned short mask2 = (vector unsigned short)
{0x0100, 0x0200, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000};
const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
const vector unsigned int vuint32_1 = vec_splat_u32(1);
vector signed int sumA2;
vector signed int sumB2;
vector signed int sum0, sum1, sum2, sum3, sum4;
vector signed int sum5, sum6, sum7, sum8, sum9;
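/* COMPARE(i) builds a 10-bit mask of the pixels of line i that lie above the
   block average (one bit per pixel, via mask1/mask2) and leaves the result in
   the last element of sum##i. */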
#define COMPARE(i) \
do { \
const vector unsigned char cmp = \
(vector unsigned char)vec_cmpgt(src##i, v_avg); \
const vector unsigned short cmpHi = \
(vector unsigned short)vec_mergeh(cmp, cmp); \
const vector unsigned short cmpLi = \
(vector unsigned short)vec_mergel(cmp, cmp); \
const vector signed short cmpHf = \
(vector signed short)vec_and(cmpHi, mask1); \
const vector signed short cmpLf = \
(vector signed short)vec_and(cmpLi, mask2); \
const vector signed int sump = vec_sum4s(cmpHf, zero); \
const vector signed int sumq = vec_sum4s(cmpLf, sump); \
sum##i = vec_sums(sumq, zero); \
} while (0)
COMPARE(0);
COMPARE(1);
COMPARE(2);
COMPARE(3);
COMPARE(4);
COMPARE(5);
COMPARE(6);
COMPARE(7);
COMPARE(8);
COMPARE(9);
#undef COMPARE
{
const vector signed int sump02 = vec_mergel(sum0, sum2);
const vector signed int sump13 = vec_mergel(sum1, sum3);
const vector signed int sumA = vec_mergel(sump02, sump13);
const vector signed int sump46 = vec_mergel(sum4, sum6);
const vector signed int sump57 = vec_mergel(sum5, sum7);
const vector signed int sumB = vec_mergel(sump46, sump57);
const vector signed int sump8A = vec_mergel(sum8, zero);
const vector signed int sump9B = vec_mergel(sum9, zero);
const vector signed int sumC = vec_mergel(sump8A, sump9B);
const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
const vector signed int t2A = vec_or(sumA, tA);
const vector signed int t2B = vec_or(sumB, tB);
const vector signed int t2C = vec_or(sumC, tC);
const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
vec_sl(t2A, vuint32_1));
const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
vec_sl(t2B, vuint32_1));
const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
vec_sl(t2C, vuint32_1));
const vector signed int yA = vec_and(t2A, t3A);
const vector signed int yB = vec_and(t2B, t3B);
const vector signed int yC = vec_and(t2C, t3C);
const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
const vector signed int sumAp = vec_and(yA,
vec_and(sumAd4,sumAd8));
const vector signed int sumBp = vec_and(yB,
vec_and(sumBd4,sumBd8));
sumA2 = vec_or(sumAp,
vec_sra(sumAp,
vuint32_16));
sumB2 = vec_or(sumBp,
vec_sra(sumBp,
vuint32_16));
}
vec_st(sumA2, 0, S);
vec_st(sumB2, 16, S);
}
/* I'm not sure the following is actually faster
than straight, unvectorized C code :-( */
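/* For every pixel flagged in S[], F2 computes a 3x3 weighted average of its
   neighbourhood (weights 1-2-1 / 2-4-2 / 1-2-1 via the 'magic' vector) and
   moves the pixel towards that average, clamped to at most QP/2+1; the perm
   vectors are incremented after each step to slide the window one pixel to
   the right. */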
#define F_INIT() \
vector unsigned char tenRightM = tenRight; \
vector unsigned char permA1M = permA1; \
vector unsigned char permA2M = permA2; \
vector unsigned char extractPermM = extractPerm
#define F2(i, j, k, l) \
if (S[i] & (1 << (l+1))) { \
const vector unsigned char a_A = vec_perm(src##i, src##j, permA1M); \
const vector unsigned char a_B = vec_perm(a_A, src##k, permA2M); \
const vector signed int a_sump = \
(vector signed int)vec_msum(a_B, magic, (vector unsigned int)zero);\
vector signed int F = vec_sr(vec_sums(a_sump, vsint32_8), vuint32_4); \
const vector signed int p = \
(vector signed int)vec_perm(src##j, (vector unsigned char)zero, \
extractPermM); \
const vector signed int sum = vec_add(p, vQP2); \
const vector signed int diff = vec_sub(p, vQP2); \
vector signed int newpm; \
vector unsigned char newpm2, mask; \
F = vec_splat(F, 3); \
if (vec_all_lt(sum, F)) \
newpm = sum; \
else if (vec_all_gt(diff, F)) \
newpm = diff; \
else newpm = F; \
newpm2 = vec_splat((vector unsigned char)newpm, 15); \
mask = vec_add(identity, tenRightM); \
src##j = vec_perm(src##j, newpm2, mask); \
} \
permA1M = vec_add(permA1M, permA1inc); \
permA2M = vec_add(permA2M, permA2inc); \
tenRightM = vec_sro(tenRightM, eightLeft); \
extractPermM = vec_add(extractPermM, extractPermInc)
#define ITER(i, j, k) do { \
F_INIT(); \
F2(i, j, k, 0); \
F2(i, j, k, 1); \
F2(i, j, k, 2); \
F2(i, j, k, 3); \
F2(i, j, k, 4); \
F2(i, j, k, 5); \
F2(i, j, k, 6); \
F2(i, j, k, 7); \
} while (0)
ITER(0, 1, 2);
ITER(1, 2, 3);
ITER(2, 3, 4);
ITER(3, 4, 5);
ITER(4, 5, 6);
ITER(5, 6, 7);
ITER(6, 7, 8);
ITER(7, 8, 9);
#define STORE_LINE(i) do { \
const vector unsigned char permST = \
vec_lvsr(i * stride, srcCopy); \
const vector unsigned char maskST = \
vec_perm((vector unsigned char)zero, \
(vector unsigned char)neg1, permST); \
src##i = vec_perm(src##i, src##i, permST); \
sA##i = vec_sel(sA##i, src##i, maskST); \
sB##i = vec_sel(src##i, sB##i, maskST); \
vec_st(sA##i, i * stride, srcCopy); \
vec_st(sB##i, i * stride + 16, srcCopy); \
} while (0)
STORE_LINE(1);
STORE_LINE(2);
STORE_LINE(3);
STORE_LINE(4);
STORE_LINE(5);
STORE_LINE(6);
STORE_LINE(7);
STORE_LINE(8);
#undef STORE_LINE
#undef ITER
#undef F2
}
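/* No AltiVec versions of the horizontal filters exist; fall back to the C
   implementations. */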
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)
static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
const vector signed char neg1 = vec_splat_s8(-1);
const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
const vector signed int zero = vec_splat_s32(0);
const vector signed short vsint16_1 = vec_splat_s16(1);
vector signed int v_dp = zero;
vector signed int v_sysdp = zero;
int d, sysd, i;
#define LOAD_LINE(src, i) \
register int j##src##i = i * stride; \
vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \
const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
const vector unsigned char v_##src##A##i = \
vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \
vector signed short v_##src##Ass##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_##src##A##i)
LOAD_LINE(src, 0);
LOAD_LINE(src, 1);
LOAD_LINE(src, 2);
LOAD_LINE(src, 3);
LOAD_LINE(src, 4);
LOAD_LINE(src, 5);
LOAD_LINE(src, 6);
LOAD_LINE(src, 7);
LOAD_LINE(tempBlurred, 0);
LOAD_LINE(tempBlurred, 1);
LOAD_LINE(tempBlurred, 2);
LOAD_LINE(tempBlurred, 3);
LOAD_LINE(tempBlurred, 4);
LOAD_LINE(tempBlurred, 5);
LOAD_LINE(tempBlurred, 6);
LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE
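/* Accumulate the sum of squared differences (v_dp) and the plain sum of
   differences (v_sysdp) between the source and the temporally blurred block. */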
#define ACCUMULATE_DIFFS(i) do { \
vector signed short v_d = vec_sub(v_tempBlurredAss##i, \
v_srcAss##i); \
v_dp = vec_msums(v_d, v_d, v_dp); \
v_sysdp = vec_msums(v_d, vsint16_1, v_sysdp); \
} while (0)
ACCUMULATE_DIFFS(0);
ACCUMULATE_DIFFS(1);
ACCUMULATE_DIFFS(2);
ACCUMULATE_DIFFS(3);
ACCUMULATE_DIFFS(4);
ACCUMULATE_DIFFS(5);
ACCUMULATE_DIFFS(6);
ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS
tempBlurredPast[127]= maxNoise[0];
tempBlurredPast[128]= maxNoise[1];
tempBlurredPast[129]= maxNoise[2];
v_dp = vec_sums(v_dp, zero);
v_sysdp = vec_sums(v_sysdp, zero);
v_dp = vec_splat(v_dp, 3);
v_sysdp = vec_splat(v_sysdp, 3);
vec_ste(v_dp, 0, &d);
vec_ste(v_sysdp, 0, &sysd);
i = d;
d = (4*d
+(*(tempBlurredPast-256))
+(*(tempBlurredPast-1))+ (*(tempBlurredPast+1))
+(*(tempBlurredPast+256))
+4)>>3;
*tempBlurredPast=i;
if (d > maxNoise[1]) {
if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);
OP(0);
OP(1);
OP(2);
OP(3);
OP(4);
OP(5);
OP(6);
OP(7);
#undef OP
} else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;
OP(0);
OP(1);
OP(2);
OP(3);
OP(4);
OP(5);
OP(6);
OP(7);
#undef OP
}
} else {
if (d < maxNoise[0]) {
const vector signed short vsint16_7 = vec_splat_s16(7);
const vector signed short vsint16_4 = vec_splat_s16(4);
const vector unsigned short vuint16_3 = vec_splat_u16(3);
#define OP(i) do { \
const vector signed short v_temp = \
vec_mladd(v_tempBlurredAss##i, vsint16_7, v_srcAss##i); \
const vector signed short v_temp2 = vec_add(v_temp, vsint16_4); \
v_tempBlurredAss##i = vec_sr(v_temp2, vuint16_3); \
} while (0)
OP(0);
OP(1);
OP(2);
OP(3);
OP(4);
OP(5);
OP(6);
OP(7);
#undef OP
} else {
const vector signed short vsint16_3 = vec_splat_s16(3);
const vector signed short vsint16_2 = vec_splat_s16(2);
#define OP(i) do { \
const vector signed short v_temp = \
vec_mladd(v_tempBlurredAss##i, vsint16_3, v_srcAss##i); \
const vector signed short v_temp2 = vec_add(v_temp, vsint16_2); \
v_tempBlurredAss##i = \
vec_sr(v_temp2, (vector unsigned short)vsint16_2); \
} while (0)
OP(0);
OP(1);
OP(2);
OP(3);
OP(4);
OP(5);
OP(6);
OP(7);
#undef OP
}
}
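/* Pack the filtered 16-bit lines back to bytes and write them out with the
   usual unaligned partial store (lvsr + select mask). */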
#define PACK_AND_STORE(src, i) do { \
const vector unsigned char perms = vec_lvsr(i * stride, src); \
const vector unsigned char vf = \
vec_packsu(v_tempBlurredAss##i, (vector signed short)zero); \
const vector unsigned char vg = vec_perm(vf, v_##src##A##i, permHH); \
const vector unsigned char mask = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms); \
const vector unsigned char vg2 = vec_perm(vg, vg, perms); \
const vector unsigned char svA = vec_sel(v_##src##A1##i, vg2, mask); \
const vector unsigned char svB = vec_sel(vg2, v_##src##A2##i, mask); \
vec_st(svA, i * stride, src); \
vec_st(svB, i * stride + 16, src); \
} while (0)
PACK_AND_STORE(src, 0);
PACK_AND_STORE(src, 1);
PACK_AND_STORE(src, 2);
PACK_AND_STORE(src, 3);
PACK_AND_STORE(src, 4);
PACK_AND_STORE(src, 5);
PACK_AND_STORE(src, 6);
PACK_AND_STORE(src, 7);
PACK_AND_STORE(tempBlurred, 0);
PACK_AND_STORE(tempBlurred, 1);
PACK_AND_STORE(tempBlurred, 2);
PACK_AND_STORE(tempBlurred, 3);
PACK_AND_STORE(tempBlurred, 4);
PACK_AND_STORE(tempBlurred, 5);
PACK_AND_STORE(tempBlurred, 6);
PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}
static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
const vector unsigned char zero = vec_splat_u8(0);
#define LOAD_DOUBLE_LINE(i, j) \
vector unsigned char perm1##i = vec_lvsl(i * stride, src); \
vector unsigned char perm2##i = vec_lvsl(j * stride, src); \
vector unsigned char srcA##i = vec_ld(i * stride, src); \
vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \
vector unsigned char srcC##i = vec_ld(j * stride, src); \
vector unsigned char srcD##i = vec_ld(j * stride+ 16, src); \
vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)
LOAD_DOUBLE_LINE(0, 1);
LOAD_DOUBLE_LINE(2, 3);
LOAD_DOUBLE_LINE(4, 5);
LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE
vector unsigned char tempA = vec_mergeh(src0, zero);
vector unsigned char tempB = vec_mergel(src0, zero);
vector unsigned char tempC = vec_mergeh(src1, zero);
vector unsigned char tempD = vec_mergel(src1, zero);
vector unsigned char tempE = vec_mergeh(src2, zero);
vector unsigned char tempF = vec_mergel(src2, zero);
vector unsigned char tempG = vec_mergeh(src3, zero);
vector unsigned char tempH = vec_mergel(src3, zero);
vector unsigned char tempI = vec_mergeh(src4, zero);
vector unsigned char tempJ = vec_mergel(src4, zero);
vector unsigned char tempK = vec_mergeh(src5, zero);
vector unsigned char tempL = vec_mergel(src5, zero);
vector unsigned char tempM = vec_mergeh(src6, zero);
vector unsigned char tempN = vec_mergel(src6, zero);
vector unsigned char tempO = vec_mergeh(src7, zero);
vector unsigned char tempP = vec_mergel(src7, zero);
vector unsigned char temp0 = vec_mergeh(tempA, tempI);
vector unsigned char temp1 = vec_mergel(tempA, tempI);
vector unsigned char temp2 = vec_mergeh(tempB, tempJ);
vector unsigned char temp3 = vec_mergel(tempB, tempJ);
vector unsigned char temp4 = vec_mergeh(tempC, tempK);
vector unsigned char temp5 = vec_mergel(tempC, tempK);
vector unsigned char temp6 = vec_mergeh(tempD, tempL);
vector unsigned char temp7 = vec_mergel(tempD, tempL);
vector unsigned char temp8 = vec_mergeh(tempE, tempM);
vector unsigned char temp9 = vec_mergel(tempE, tempM);
vector unsigned char temp10 = vec_mergeh(tempF, tempN);
vector unsigned char temp11 = vec_mergel(tempF, tempN);
vector unsigned char temp12 = vec_mergeh(tempG, tempO);
vector unsigned char temp13 = vec_mergel(tempG, tempO);
vector unsigned char temp14 = vec_mergeh(tempH, tempP);
vector unsigned char temp15 = vec_mergel(tempH, tempP);
tempA = vec_mergeh(temp0, temp8);
tempB = vec_mergel(temp0, temp8);
tempC = vec_mergeh(temp1, temp9);
tempD = vec_mergel(temp1, temp9);
tempE = vec_mergeh(temp2, temp10);
tempF = vec_mergel(temp2, temp10);
tempG = vec_mergeh(temp3, temp11);
tempH = vec_mergel(temp3, temp11);
tempI = vec_mergeh(temp4, temp12);
tempJ = vec_mergel(temp4, temp12);
tempK = vec_mergeh(temp5, temp13);
tempL = vec_mergel(temp5, temp13);
tempM = vec_mergeh(temp6, temp14);
tempN = vec_mergel(temp6, temp14);
tempO = vec_mergeh(temp7, temp15);
tempP = vec_mergel(temp7, temp15);
temp0 = vec_mergeh(tempA, tempI);
temp1 = vec_mergel(tempA, tempI);
temp2 = vec_mergeh(tempB, tempJ);
temp3 = vec_mergel(tempB, tempJ);
temp4 = vec_mergeh(tempC, tempK);
temp5 = vec_mergel(tempC, tempK);
temp6 = vec_mergeh(tempD, tempL);
temp7 = vec_mergel(tempD, tempL);
temp8 = vec_mergeh(tempE, tempM);
temp9 = vec_mergel(tempE, tempM);
temp10 = vec_mergeh(tempF, tempN);
temp11 = vec_mergel(tempF, tempN);
temp12 = vec_mergeh(tempG, tempO);
temp13 = vec_mergel(tempG, tempO);
temp14 = vec_mergeh(tempH, tempP);
temp15 = vec_mergel(tempH, tempP);
vec_st(temp0, 0, dst);
vec_st(temp1, 16, dst);
vec_st(temp2, 32, dst);
vec_st(temp3, 48, dst);
vec_st(temp4, 64, dst);
vec_st(temp5, 80, dst);
vec_st(temp6, 96, dst);
vec_st(temp7, 112, dst);
vec_st(temp8, 128, dst);
vec_st(temp9, 144, dst);
vec_st(temp10, 160, dst);
vec_st(temp11, 176, dst);
vec_st(temp12, 192, dst);
vec_st(temp13, 208, dst);
vec_st(temp14, 224, dst);
vec_st(temp15, 240, dst);
}
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
const vector unsigned char zero = vec_splat_u8(0);
const vector signed char neg1 = vec_splat_s8(-1);
#define LOAD_DOUBLE_LINE(i, j) \
vector unsigned char src##i = vec_ld(i * 16, src); \
vector unsigned char src##j = vec_ld(j * 16, src)
LOAD_DOUBLE_LINE(0, 1);
LOAD_DOUBLE_LINE(2, 3);
LOAD_DOUBLE_LINE(4, 5);
LOAD_DOUBLE_LINE(6, 7);
LOAD_DOUBLE_LINE(8, 9);
LOAD_DOUBLE_LINE(10, 11);
LOAD_DOUBLE_LINE(12, 13);
LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE
vector unsigned char tempA = vec_mergeh(src0, src8);
vector unsigned char tempB;
vector unsigned char tempC = vec_mergeh(src1, src9);
vector unsigned char tempD;
vector unsigned char tempE = vec_mergeh(src2, src10);
vector unsigned char tempG = vec_mergeh(src3, src11);
vector unsigned char tempI = vec_mergeh(src4, src12);
vector unsigned char tempJ;
vector unsigned char tempK = vec_mergeh(src5, src13);
vector unsigned char tempL;
vector unsigned char tempM = vec_mergeh(src6, src14);
vector unsigned char tempO = vec_mergeh(src7, src15);
vector unsigned char temp0 = vec_mergeh(tempA, tempI);
vector unsigned char temp1 = vec_mergel(tempA, tempI);
vector unsigned char temp2;
vector unsigned char temp3;
vector unsigned char temp4 = vec_mergeh(tempC, tempK);
vector unsigned char temp5 = vec_mergel(tempC, tempK);
vector unsigned char temp6;
vector unsigned char temp7;
vector unsigned char temp8 = vec_mergeh(tempE, tempM);
vector unsigned char temp9 = vec_mergel(tempE, tempM);
vector unsigned char temp12 = vec_mergeh(tempG, tempO);
vector unsigned char temp13 = vec_mergel(tempG, tempO);
tempA = vec_mergeh(temp0, temp8);
tempB = vec_mergel(temp0, temp8);
tempC = vec_mergeh(temp1, temp9);
tempD = vec_mergel(temp1, temp9);
tempI = vec_mergeh(temp4, temp12);
tempJ = vec_mergel(temp4, temp12);
tempK = vec_mergeh(temp5, temp13);
tempL = vec_mergel(temp5, temp13);
temp0 = vec_mergeh(tempA, tempI);
temp1 = vec_mergel(tempA, tempI);
temp2 = vec_mergeh(tempB, tempJ);
temp3 = vec_mergel(tempB, tempJ);
temp4 = vec_mergeh(tempC, tempK);
temp5 = vec_mergel(tempC, tempK);
temp6 = vec_mergeh(tempD, tempL);
temp7 = vec_mergel(tempD, tempL);
#define STORE_DOUBLE_LINE(i, j) do { \
vector unsigned char dstAi = vec_ld(i * stride, dst); \
vector unsigned char dstBi = vec_ld(i * stride + 16, dst); \
vector unsigned char dstAj = vec_ld(j * stride, dst); \
vector unsigned char dstBj = vec_ld(j * stride+ 16, dst); \
vector unsigned char aligni = vec_lvsr(i * stride, dst); \
vector unsigned char alignj = vec_lvsr(j * stride, dst); \
vector unsigned char maski = \
vec_perm(zero, (vector unsigned char)neg1, aligni); \
vector unsigned char maskj = \
vec_perm(zero, (vector unsigned char)neg1, alignj); \
vector unsigned char dstRi = vec_perm(temp##i, temp##i, aligni); \
vector unsigned char dstRj = vec_perm(temp##j, temp##j, alignj); \
vector unsigned char dstAFi = vec_sel(dstAi, dstRi, maski); \
vector unsigned char dstBFi = vec_sel(dstRi, dstBi, maski); \
vector unsigned char dstAFj = vec_sel(dstAj, dstRj, maskj); \
vector unsigned char dstBFj = vec_sel(dstRj, dstBj, maskj); \
vec_st(dstAFi, i * stride, dst); \
vec_st(dstBFi, i * stride + 16, dst); \
vec_st(dstAFj, j * stride, dst); \
vec_st(dstBFj, j * stride + 16, dst); \
} while (0)
STORE_DOUBLE_LINE(0,1);
STORE_DOUBLE_LINE(2,3);
STORE_DOUBLE_LINE(4,5);
STORE_DOUBLE_LINE(6,7);
}
/*
* Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* internal API header.
*/
#ifndef POSTPROC_POSTPROCESS_INTERNAL_H
#define POSTPROC_POSTPROCESS_INTERNAL_H
#include <string.h>
#include "libavutil/avutil.h"
#include "libavutil/log.h"
#include "postprocess.h"
#define V_DEBLOCK 0x01
#define H_DEBLOCK 0x02
#define DERING 0x04
#define LEVEL_FIX 0x08 ///< Brightness & Contrast
#define LUM_V_DEBLOCK V_DEBLOCK // 1
#define LUM_H_DEBLOCK H_DEBLOCK // 2
#define CHROM_V_DEBLOCK (V_DEBLOCK<<4) // 16
#define CHROM_H_DEBLOCK (H_DEBLOCK<<4) // 32
#define LUM_DERING DERING // 4
#define CHROM_DERING (DERING<<4) // 64
#define LUM_LEVEL_FIX LEVEL_FIX // 8
#define CHROM_LEVEL_FIX (LEVEL_FIX<<4) // 128 (not implemented yet)
// Experimental vertical filters
#define V_X1_FILTER 0x0200 // 512
#define V_A_DEBLOCK 0x0400
// Experimental horizontal filters
#define H_X1_FILTER 0x2000 // 8192
#define H_A_DEBLOCK 0x4000
/// select between the full Y range (0-255) or the standard one (16-234)
#define FULL_Y_RANGE 0x8000 // 32768
//Deinterlacing Filters
#define LINEAR_IPOL_DEINT_FILTER 0x10000 // 65536
#define LINEAR_BLEND_DEINT_FILTER 0x20000 // 131072
#define CUBIC_BLEND_DEINT_FILTER 0x8000 // (not implemented yet)
#define CUBIC_IPOL_DEINT_FILTER 0x40000 // 262144
#define MEDIAN_DEINT_FILTER 0x80000 // 524288
#define FFMPEG_DEINT_FILTER 0x400000
#define LOWPASS5_DEINT_FILTER 0x800000
#define TEMP_NOISE_FILTER 0x100000
#define FORCE_QUANT 0x200000
// Use this if you want faster postprocessing code.
// It cannot differentiate between chroma & luma filters (both on or both off),
// and the -pp option on the command line then has no effect except turning
// the filters selected here on.
//#define COMPILE_TIME_MODE 0x77
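/* Clips a to 0..255; appears to assume the input is already within roughly
   -256..511, since only bit 8 is tested: negative values map to 0 and values
   above 255 map to all ones, i.e. 255 once truncated to a byte. */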
static inline int CLIP(int a){
if(a&256) return ((a)>>31)^(-1);
else return a;
}
/**
* Postprocessing filter.
*/
struct PPFilter{
const char *shortName;
const char *longName;
int chromDefault; ///< is chrominance filtering on by default if this filter is manually activated
int minLumQuality; ///< minimum quality to turn luminance filtering on
int minChromQuality; ///< minimum quality to turn chrominance filtering on
int mask; ///< Bitmask to turn this filter on
};
/**
* Postprocessing mode.
*/
typedef struct PPMode{
int lumMode; ///< activates filters for luminance
int chromMode; ///< activates filters for chrominance
int error; ///< nonzero on error
int minAllowedY; ///< for brightness correction
int maxAllowedY; ///< for brightness correction
float maxClippedThreshold; ///< amount of "black" you are willing to lose to get a brightness-corrected picture
int maxTmpNoise[3]; ///< for Temporal Noise Reducing filter (Maximal sum of abs differences)
int baseDcDiff;
int flatnessThreshold;
int forcedQuant; ///< quantizer if FORCE_QUANT is used
} PPMode;
/**
* postprocess context.
*/
typedef struct PPContext{
/**
* info on struct for av_log
*/
const AVClass *av_class;
uint8_t *tempBlocks; ///< used for the horizontal code
/**
* luma histogram.
* we need 64 bits here, otherwise we'll have a problem
* after watching a black picture for 5 hours
*/
uint64_t *yHistogram;
DECLARE_ALIGNED(8, uint64_t, packedYOffset);
DECLARE_ALIGNED(8, uint64_t, packedYScale);
/** Temporal noise reducing buffers */
uint8_t *tempBlurred[3];
int32_t *tempBlurredPast[3];
/** Temporary buffers for handling the last row(s) */
uint8_t *tempDst;
uint8_t *tempSrc;
uint8_t *deintTemp;
DECLARE_ALIGNED(8, uint64_t, pQPb);
DECLARE_ALIGNED(8, uint64_t, pQPb2);
DECLARE_ALIGNED(8, uint64_t, mmxDcOffset)[64];
DECLARE_ALIGNED(8, uint64_t, mmxDcThreshold)[64];
QP_STORE_T *stdQPTable; ///< used to fix MPEG2 style qscale
QP_STORE_T *nonBQPTable;
QP_STORE_T *forcedQPTable;
int QP;
int nonBQP;
int frameNum;
int cpuCaps;
int qpStride; ///< size of QP buffers (needed to realloc them if needed)
int stride; ///< size of some buffers (needed to realloc them if needed)
int hChromaSubSample;
int vChromaSubSample;
PPMode ppMode;
} PPContext;
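/* Copies 'lines' scanlines of 'stride' bytes each; with a negative stride
   (bottom-up layout) the buffer starts at the last line, so both pointers are
   offset to the lowest address before the copy. */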
static inline void linecpy(void *dest, const void *src, int lines, int stride) {
if (stride > 0) {
memcpy(dest, src, lines*stride);
} else {
memcpy((uint8_t*)dest+(lines-1)*stride, (const uint8_t*)src+(lines-1)*stride, -lines*stride);
}
}
#endif /* POSTPROC_POSTPROCESS_INTERNAL_H */