Commit 87f40364 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master: (21 commits)
  build: simplify commands for clean target
  swscale: split swscale.c in unscaled and generic conversion routines.
  swscale: cosmetics.
  swscale: integrate (literally) swscale_template.c in swscale.c.
  swscale: split out x86/swscale_template.c from swscale.c.
  swscale: enable hScale_altivec_real.
  swscale: split out ppc _template.c files from main swscale.c.
  swscale: remove indirections in ppc/swscale_template.c.
  swscale: split out unscaled altivec YUV converters in their own file.
  mpegvideoenc: fix multislice fate tests with threading disabled.
  mpegts: Wrap #ifdef DEBUG and av_hex_dump_log() combination in a macro.
  build: Simplify texi2html invocation through the --output option.
  Mark some variables with av_unused
  Replace avcodec_get_pix_fmt_name() by av_get_pix_fmt_name().
  svq3: Check negative mb_type to fix potential crash.
  svq3: Move svq3-specific fields to their own context.
  rawdec: initialize return value to 0.
  Remove unused get_psnr() prototype
  rawdec: don't leak option strings.
  bktr: get default framerate from video standard.
  ...
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents e4e2db9c dc6632f1
......@@ -123,7 +123,7 @@ TEXIDEP = awk '/^@include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
doc/%.html: TAG = HTML
doc/%.html: doc/%.texi $(SRC_PATH_BARE)/doc/t2h.init
$(Q)$(TEXIDEP)
$(M)cd doc && texi2html -monolithic --init-file $(SRC_PATH_BARE)/doc/t2h.init $(<:doc/%=%)
$(M)texi2html -monolithic --init-file $(SRC_PATH_BARE)/doc/t2h.init --output $@ $<
doc/%.pod: TAG = POD
doc/%.pod: doc/%.texi
......
......@@ -1208,8 +1208,8 @@ static void do_video_out(AVFormatContext *s,
av_log(NULL, AV_LOG_INFO,
"Input stream #%d.%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
ist->file_index, ist->st->index,
ost->resample_width, ost->resample_height, avcodec_get_pix_fmt_name(ost->resample_pix_fmt),
dec->width , dec->height , avcodec_get_pix_fmt_name(dec->pix_fmt));
ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt));
ost->resample_width = dec->width;
ost->resample_height = dec->height;
ost->resample_pix_fmt = dec->pix_fmt;
......
......@@ -687,11 +687,6 @@ void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx);
# define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
#endif
/* PSNR */
void get_psnr(uint8_t *orig_image[3], uint8_t *coded_image[3],
int orig_linesize[3], int coded_linesize,
AVCodecContext *avctx);
#define WRAPPER8_16(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
return name8(s, dst , src , stride, h)\
......
......@@ -38,6 +38,7 @@
* DV codec.
*/
#define ALT_BITSTREAM_READER
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "dsputil.h"
#include "get_bits.h"
......@@ -350,7 +351,7 @@ static av_cold int dvvideo_init_encoder(AVCodecContext *avctx)
{
if (!ff_dv_codec_profile(avctx)) {
av_log(avctx, AV_LOG_ERROR, "Found no DV profile for %ix%i %s video\n",
avctx->width, avctx->height, avcodec_get_pix_fmt_name(avctx->pix_fmt));
avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
return -1;
}
......
......@@ -1002,7 +1002,7 @@ static int encode_residual_ch(FlacEncodeContext *s, int ch)
static int count_frame_header(FlacEncodeContext *s)
{
uint8_t tmp;
uint8_t av_unused tmp;
int count;
/*
......
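Several hunks in this merge (flacenc, wmaprodec, mov, nsvdec, r3d, rtpdec, utils) mark variables whose value is read but intentionally ignored with av_unused, so the compiler does not warn about them. As a point of reference, a minimal, self-contained sketch of the idiom; the fallback #define below mirrors what libavutil/attributes.h provides for GCC-compatible compilers, and read_header() is purely illustrative:

/* minimal stand-in for libavutil/attributes.h */
#ifdef __GNUC__
#   define av_unused __attribute__((unused))
#else
#   define av_unused
#endif

static int read_header(const unsigned char *p)
{
    int av_unused reserved = p[0]; /* first byte is skipped; its value is deliberately unused */
    int version            = p[1];
    return version;
}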
......@@ -586,8 +586,7 @@ av_cold int MPV_common_init(MpegEncContext *s)
return -1;
}
if((s->avctx->active_thread_type & FF_THREAD_SLICE) &&
if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
return -1;
......@@ -746,7 +745,7 @@ av_cold int MPV_common_init(MpegEncContext *s)
s->context_initialized = 1;
s->thread_context[0]= s;
if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE) {
if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
threads = s->avctx->thread_count;
for(i=1; i<threads; i++){
......@@ -778,7 +777,7 @@ void MPV_common_end(MpegEncContext *s)
{
int i, j, k;
if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE) {
if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
for(i=0; i<s->avctx->thread_count; i++){
free_duplicate_context(s->thread_context[i]);
}
......
......@@ -1226,7 +1226,7 @@ int MPV_encode_picture(AVCodecContext *avctx,
{
MpegEncContext *s = avctx->priv_data;
AVFrame *pic_arg = data;
int i, stuffing_count, context_count = avctx->active_thread_type&FF_THREAD_SLICE ? avctx->thread_count : 1;
int i, stuffing_count, context_count = avctx->thread_count;
for(i=0; i<context_count; i++){
int start_y= s->thread_context[i]->start_mb_y;
......@@ -2759,7 +2759,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
{
int i;
int bits;
int context_count = (s->avctx->active_thread_type & FF_THREAD_SLICE) ? s->avctx->thread_count : 1;
int context_count = s->avctx->thread_count;
s->picture_number = picture_number;
......
......@@ -821,9 +821,10 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
if (!s->context_initialized) {
h->chroma_qp[0] = h->chroma_qp[1] = 4;
svq3->halfpel_flag = 1;
svq3->halfpel_flag = 1;
svq3->thirdpel_flag = 1;
svq3->unknown_flag = 0;
svq3->unknown_flag = 0;
/* prowl for the "SEQH" marker in the extradata */
extradata = (unsigned char *)avctx->extradata;
......
......@@ -20,6 +20,7 @@
*/
#include "libavutil/intreadwrite.h"
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "rle.h"
#include "targa.h"
......@@ -119,7 +120,7 @@ static int targa_encode_frame(AVCodecContext *avctx,
break;
default:
av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
avcodec_get_pix_fmt_name(avctx->pix_fmt));
av_get_pix_fmt_name(avctx->pix_fmt));
return AVERROR(EINVAL);
}
bpp = outbuf[16] >> 3;
......
......@@ -1012,7 +1012,7 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
if (enc->pix_fmt != PIX_FMT_NONE) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %s",
avcodec_get_pix_fmt_name(enc->pix_fmt));
av_get_pix_fmt_name(enc->pix_fmt));
}
if (enc->width) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
......
......@@ -1320,7 +1320,7 @@ static int decode_frame(WMAProDecodeCtx *s)
/** no idea what these are for, might be the number of samples
that need to be skipped at the beginning or end of a stream */
if (get_bits1(gb)) {
int skip;
int av_unused skip;
/** usually true for the first frame */
if (get_bits1(gb)) {
......
......@@ -256,10 +256,32 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
goto out;
}
#if FF_API_FORMAT_PARAMETERS
if (ap->standard) {
if (!strcasecmp(ap->standard, "pal"))
s->standard = PAL;
else if (!strcasecmp(ap->standard, "secam"))
s->standard = SECAM;
else if (!strcasecmp(ap->standard, "ntsc"))
s->standard = NTSC;
}
#endif
if ((ret = av_parse_video_size(&width, &height, s->video_size)) < 0) {
av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
goto out;
}
if (!s->framerate)
switch (s->standard) {
case PAL: s->framerate = av_strdup("pal"); break;
case NTSC: s->framerate = av_strdup("ntsc"); break;
case SECAM: s->framerate = av_strdup("25"); break;
default:
av_log(s1, AV_LOG_ERROR, "Unknown standard.\n");
ret = AVERROR(EINVAL);
goto out;
}
if ((ret = av_parse_video_rate(&fps, s->framerate)) < 0) {
av_log(s1, AV_LOG_ERROR, "Couldn't parse framerate.\n");
goto out;
......@@ -292,16 +314,6 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
st->codec->time_base.den = fps.num;
st->codec->time_base.num = fps.den;
#if FF_API_FORMAT_PARAMETERS
if (ap->standard) {
if (!strcasecmp(ap->standard, "pal"))
s->standard = PAL;
else if (!strcasecmp(ap->standard, "secam"))
s->standard = SECAM;
else if (!strcasecmp(ap->standard, "ntsc"))
s->standard = NTSC;
}
#endif
if (bktr_init(s1->filename, width, height, s->standard,
&(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0) {
......@@ -347,7 +359,7 @@ static const AVOption options[] = {
{ "PALM", "", 0, FF_OPT_TYPE_CONST, {.dbl = PALM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSCJ", "", 0, FF_OPT_TYPE_CONST, {.dbl = NTSCJ}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ NULL },
};
......
......@@ -162,7 +162,7 @@ static inline int dc1394_read_common(AVFormatContext *c, AVFormatParameters *ap,
break;
if (!fps->frame_rate || !fmt->width) {
av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", avcodec_get_pix_fmt_name(pix_fmt),
av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", av_get_pix_fmt_name(pix_fmt),
width, height, dc1394->frame_rate);
ret = AVERROR(EINVAL);
goto out;
......
......@@ -26,6 +26,12 @@
#define MAX_URL_SIZE 4096
#ifdef DEBUG
# define hex_dump_debug(class, buf, size) av_hex_dump_log(class, AV_LOG_DEBUG, buf, size)
#else
# define hex_dump_debug(class, buf, size)
#endif
typedef struct AVCodecTag {
enum CodecID id;
unsigned int tag;
......
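The mpegts.c hunks further down use this hex_dump_debug() macro to replace the repeated #ifdef DEBUG / av_hex_dump_log() blocks in pmt_cb(), pat_cb() and sdt_cb(). A minimal before/after sketch of that call-site pattern, using only identifiers taken from the diff (surrounding function bodies omitted):

/* before: every callback carried its own guard */
#ifdef DEBUG
    av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif

/* after: the DEBUG guard lives once in libavformat/internal.h */
    hex_dump_debug(ts->stream, (uint8_t *)section, section_len);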
......@@ -433,7 +433,7 @@ static int mov_read_hdlr(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
uint32_t type;
uint32_t ctype;
uint32_t av_unused ctype;
if (c->fc->nb_streams < 1) // meta before first trak
return 0;
......
......@@ -1029,10 +1029,8 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
int mp4_dec_config_descr_len = 0;
int mp4_es_id = 0;
#ifdef DEBUG
av_dlog(ts->stream, "PMT: len %i\n", section_len);
av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
hex_dump_debug(ts->stream, (uint8_t *)section, section_len);
p_end = section + section_len - 4;
p = section;
......@@ -1151,10 +1149,9 @@ static void pat_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
int sid, pmt_pid;
AVProgram *program;
#ifdef DEBUG
av_dlog(ts->stream, "PAT:\n");
av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
hex_dump_debug(ts->stream, (uint8_t *)section, section_len);
p_end = section + section_len - 4;
p = section;
if (parse_section_header(h, &p, p_end) < 0)
......@@ -1199,10 +1196,8 @@ static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
int onid, val, sid, desc_list_len, desc_tag, desc_len, service_type;
char *name, *provider_name;
#ifdef DEBUG
av_dlog(ts->stream, "SDT:\n");
av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
hex_dump_debug(ts->stream, (uint8_t *)section, section_len);
p_end = section + section_len - 4;
p = section;
......
......@@ -267,7 +267,8 @@ static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
{
NSVContext *nsv = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int file_size, size;
unsigned int av_unused file_size;
unsigned int size;
int64_t duration;
int strings_size;
int table_entries;
......@@ -546,7 +547,7 @@ static int nsv_read_chunk(AVFormatContext *s, int fill_header)
uint32_t vsize;
uint16_t asize;
uint16_t auxsize;
uint32_t auxtag;
uint32_t av_unused auxtag;
av_dlog(s, "%s(%d)\n", __FUNCTION__, fill_header);
......
......@@ -52,7 +52,8 @@ static int r3d_read_red1(AVFormatContext *s)
{
AVStream *st = av_new_stream(s, 0);
char filename[258];
int tmp, tmp2;
int tmp;
int av_unused tmp2;
if (!st)
return AVERROR(ENOMEM);
......@@ -139,7 +140,7 @@ static int r3d_read_rdvo(AVFormatContext *s, Atom *atom)
static void r3d_read_reos(AVFormatContext *s)
{
R3DContext *r3d = s->priv_data;
int tmp;
int av_unused tmp;
r3d->rdvo_offset = avio_rb32(s->pb);
avio_rb32(s->pb); // rdvs offset
......@@ -209,7 +210,8 @@ static int r3d_read_header(AVFormatContext *s, AVFormatParameters *ap)
static int r3d_read_redv(AVFormatContext *s, AVPacket *pkt, Atom *atom)
{
AVStream *st = s->streams[0];
int tmp, tmp2;
int tmp;
int av_unused tmp2;
uint64_t pos = avio_tell(s->pb);
unsigned dts;
int ret;
......@@ -263,7 +265,8 @@ static int r3d_read_redv(AVFormatContext *s, AVPacket *pkt, Atom *atom)
static int r3d_read_reda(AVFormatContext *s, AVPacket *pkt, Atom *atom)
{
AVStream *st = s->streams[1];
int tmp, tmp2, samples, size;
int av_unused tmp, tmp2;
int samples, size;
uint64_t pos = avio_tell(s->pb);
unsigned dts;
int ret;
......
......@@ -70,7 +70,7 @@ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
}
case AVMEDIA_TYPE_VIDEO: {
FFRawVideoDemuxerContext *s1 = s->priv_data;
int width = 0, height = 0, ret;
int width = 0, height = 0, ret = 0;
enum PixelFormat pix_fmt;
if(ap->time_base.num)
......@@ -97,7 +97,6 @@ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec->width = width;
st->codec->height = height;
st->codec->pix_fmt = pix_fmt;
break;
fail:
av_freep(&s1->video_size);
av_freep(&s1->pixel_format);
......
......@@ -321,7 +321,7 @@ int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
avio_flush(pb);
len = avio_close_dyn_buf(pb, &buf);
if ((len > 0) && buf) {
int result;
int av_unused result;
av_dlog(s->ic, "sending %d bytes of RR\n", len);
result= ffurl_write(s->rtp_ctx, buf, len);
av_dlog(s->ic, "result from ffurl_write: %d\n", result);
......
......@@ -2019,7 +2019,7 @@ static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
#if 0
{
int i;
AVStream *st;
AVStream av_unused *st;
for(i = 0;i < ic->nb_streams; i++) {
st = ic->streams[i];
printf("%d: start_time: %0.3f duration: %0.3f\n",
......
......@@ -5,14 +5,18 @@ FFLIBS = avutil
HEADERS = swscale.h
OBJS = options.o rgb2rgb.o swscale.o utils.o yuv2rgb.o
OBJS = options.o rgb2rgb.o swscale.o utils.o yuv2rgb.o \
swscale_unscaled.o
OBJS-$(ARCH_BFIN) += bfin/internal_bfin.o \
bfin/swscale_bfin.o \
bfin/yuv2rgb_bfin.o
OBJS-$(CONFIG_MLIB) += mlib/yuv2rgb_mlib.o
OBJS-$(HAVE_ALTIVEC) += ppc/yuv2rgb_altivec.o
OBJS-$(HAVE_ALTIVEC) += ppc/swscale_altivec.o \
ppc/yuv2rgb_altivec.o \
ppc/yuv2yuv_altivec.o
OBJS-$(HAVE_MMX) += x86/rgb2rgb.o \
x86/swscale_mmx.o \
x86/yuv2rgb_mmx.o
OBJS-$(HAVE_VIS) += sparc/yuv2rgb_vis.o
......
......@@ -21,6 +21,13 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <inttypes.h>
#include "config.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/cpu.h"
#include "yuv2rgb_altivec.h"
#define vzero vec_splat_s32(0)
static inline void
......@@ -85,12 +92,15 @@ altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW)
}
}
static inline void
yuv2yuvX_altivec_real(const int16_t *lumFilter, const int16_t **lumSrc,
static void
yuv2yuvX_altivec_real(SwsContext *c,
const int16_t *lumFilter, const int16_t **lumSrc,
int lumFilterSize, const int16_t *chrFilter,
const int16_t **chrUSrc, const int16_t **chrVSrc,
int chrFilterSize, uint8_t *dest, uint8_t *uDest,
uint8_t *vDest, int dstW, int chrDstW)
int chrFilterSize, const int16_t **alpSrc,
uint8_t *dest, uint8_t *uDest,
uint8_t *vDest, uint8_t *aDest,
int dstW, int chrDstW)
{
const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
register int i, j;
......@@ -211,10 +221,10 @@ yuv2yuvX_altivec_real(const int16_t *lumFilter, const int16_t **lumSrc,
}
}
static inline void hScale_altivec_real(int16_t *dst, int dstW,
const uint8_t *src, int srcW,
int xInc, const int16_t *filter,
const int16_t *filterPos, int filterSize)
static void hScale_altivec_real(int16_t *dst, int dstW,
const uint8_t *src, int srcW,
int xInc, const int16_t *filter,
const int16_t *filterPos, int filterSize)
{
register int i;
DECLARE_ALIGNED(16, int, tempo)[4];
......@@ -391,157 +401,20 @@ static inline void hScale_altivec_real(int16_t *dst, int dstW,
}
}
static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride_a[])
void ff_sws_init_swScale_altivec(SwsContext *c)
{
uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
// yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
const uint8_t *ysrc = src[0];
const uint8_t *usrc = src[1];
const uint8_t *vsrc = src[2];
const int width = c->srcW;
const int height = srcSliceH;
const int lumStride = srcStride[0];
const int chromStride = srcStride[1];
const int dstStride = dstStride_a[0];
const vector unsigned char yperm = vec_lvsl(0, ysrc);
const int vertLumPerChroma = 2;
register unsigned int y;
if (width&15) {
yv12toyuy2(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
return srcSliceH;
}
/* This code assumes:
1) dst is 16 bytes-aligned
2) dstStride is a multiple of 16
3) width is a multiple of 16
4) lum & chrom stride are multiples of 8
*/
for (y=0; y<height; y++) {
int i;
for (i = 0; i < width - 31; i+= 32) {
const unsigned int j = i >> 1;
vector unsigned char v_yA = vec_ld(i, ysrc);
vector unsigned char v_yB = vec_ld(i + 16, ysrc);
vector unsigned char v_yC = vec_ld(i + 32, ysrc);
vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
vector unsigned char v_uA = vec_ld(j, usrc);
vector unsigned char v_uB = vec_ld(j + 16, usrc);
vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
vector unsigned char v_vA = vec_ld(j, vsrc);
vector unsigned char v_vB = vec_ld(j + 16, vsrc);
vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
vec_st(v_yuy2_0, (i << 1), dst);
vec_st(v_yuy2_1, (i << 1) + 16, dst);
vec_st(v_yuy2_2, (i << 1) + 32, dst);
vec_st(v_yuy2_3, (i << 1) + 48, dst);
}
if (i < width) {
const unsigned int j = i >> 1;
vector unsigned char v_y1 = vec_ld(i, ysrc);
vector unsigned char v_u = vec_ld(j, usrc);
vector unsigned char v_v = vec_ld(j, vsrc);
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
vec_st(v_yuy2_0, (i << 1), dst);
vec_st(v_yuy2_1, (i << 1) + 16, dst);
if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
return;
c->hScale = hScale_altivec_real;
c->yuv2yuvX = yuv2yuvX_altivec_real;
/* The following list of supported dstFormat values should
* match what's found in the body of ff_yuv2packedX_altivec() */
if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
(c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)) {
c->yuv2packedX = ff_yuv2packedX_altivec;
}
if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
usrc += chromStride;
vsrc += chromStride;
}
ysrc += lumStride;
dst += dstStride;
}
return srcSliceH;
}
static inline int yv12touyvy_unscaled_altivec(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride_a[])
{
uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
// yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
const uint8_t *ysrc = src[0];
const uint8_t *usrc = src[1];
const uint8_t *vsrc = src[2];
const int width = c->srcW;
const int height = srcSliceH;
const int lumStride = srcStride[0];
const int chromStride = srcStride[1];
const int dstStride = dstStride_a[0];
const int vertLumPerChroma = 2;
const vector unsigned char yperm = vec_lvsl(0, ysrc);
register unsigned int y;
if (width&15) {
yv12touyvy(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
return srcSliceH;
}
/* This code assumes:
1) dst is 16 bytes-aligned
2) dstStride is a multiple of 16
3) width is a multiple of 16
4) lum & chrom stride are multiples of 8
*/
for (y=0; y<height; y++) {
int i;
for (i = 0; i < width - 31; i+= 32) {
const unsigned int j = i >> 1;
vector unsigned char v_yA = vec_ld(i, ysrc);
vector unsigned char v_yB = vec_ld(i + 16, ysrc);
vector unsigned char v_yC = vec_ld(i + 32, ysrc);
vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
vector unsigned char v_uA = vec_ld(j, usrc);
vector unsigned char v_uB = vec_ld(j + 16, usrc);
vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
vector unsigned char v_vA = vec_ld(j, vsrc);
vector unsigned char v_vB = vec_ld(j + 16, vsrc);
vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
vec_st(v_uyvy_0, (i << 1), dst);
vec_st(v_uyvy_1, (i << 1) + 16, dst);
vec_st(v_uyvy_2, (i << 1) + 32, dst);
vec_st(v_uyvy_3, (i << 1) + 48, dst);
}
if (i < width) {
const unsigned int j = i >> 1;
vector unsigned char v_y1 = vec_ld(i, ysrc);
vector unsigned char v_u = vec_ld(j, usrc);
vector unsigned char v_v = vec_ld(j, vsrc);
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
vec_st(v_uyvy_0, (i << 1), dst);
vec_st(v_uyvy_1, (i << 1) + 16, dst);
}
if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
usrc += chromStride;
vsrc += chromStride;
}
ysrc += lumStride;
dst += dstStride;
}
return srcSliceH;
}
/*
* Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#if COMPILE_TEMPLATE_ALTIVEC
#include "swscale_altivec_template.c"
#endif
#if COMPILE_TEMPLATE_ALTIVEC
static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter,
const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrUSrc,
const int16_t **chrVSrc, int chrFilterSize,
const int16_t **alpSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
uint8_t *aDest, int dstW, int chrDstW)
{
yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrUSrc, chrVSrc, chrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
}
/**
* vertical scale YV12 to RGB
*/
static inline void RENAME(yuv2packedX)(SwsContext *c, const int16_t *lumFilter,
const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrUSrc,
const int16_t **chrVSrc, int chrFilterSize,
const int16_t **alpSrc, uint8_t *dest,
int dstW, int dstY)
{
/* The following list of supported dstFormat values should
match what's found in the body of ff_yuv2packedX_altivec() */
if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
(c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB))
ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,
chrFilter, chrUSrc, chrVSrc, chrFilterSize,
dest, dstW, dstY);
else
yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
chrFilter, chrUSrc, chrVSrc, chrFilterSize,
alpSrc, dest, dstW, dstY);
}
#endif
static void RENAME(sws_init_swScale)(SwsContext *c)
{
c->yuv2yuvX = RENAME(yuv2yuvX );
c->yuv2packedX = RENAME(yuv2packedX );
}
......@@ -96,6 +96,7 @@ adjustment.
#include "libswscale/swscale_internal.h"
#include "libavutil/cpu.h"
#include "libavutil/pixdesc.h"
#include "yuv2rgb_altivec.h"
#undef PROFILE_THE_BEAST
#undef INC_SCALING
......@@ -631,7 +632,8 @@ ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter,
const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrUSrc,
const int16_t **chrVSrc, int chrFilterSize,
uint8_t *dest, int dstW, int dstY)
const int16_t **alpSrc, uint8_t *dest,
int dstW, int dstY)
{
int i,j;
vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
......
/*
* AltiVec-enhanced yuv2yuvX
*
* Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
* based on the equivalent C code in swscale.c
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef PPC_YUV2RGB_ALTIVEC_H
#define PPC_YUV2RGB_ALTIVEC_H 1
void ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter,
const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrUSrc,
const int16_t **chrVSrc, int chrFilterSize,
const int16_t **alpSrc, uint8_t *dest,
int dstW, int dstY);
#endif /* PPC_YUV2RGB_ALTIVEC_H */
/*
* AltiVec-enhanced yuv-to-yuv convertion routines.
*
* Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
* based on the equivalent C code in swscale.c
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <inttypes.h>
#include "config.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/cpu.h"
static int yv12toyuy2_unscaled_altivec(SwsContext *c, const uint8_t* src[],
int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[],
int dstStride_a[])
{
uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
// yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
const uint8_t *ysrc = src[0];
const uint8_t *usrc = src[1];
const uint8_t *vsrc = src[2];
const int width = c->srcW;
const int height = srcSliceH;
const int lumStride = srcStride[0];
const int chromStride = srcStride[1];
const int dstStride = dstStride_a[0];
const vector unsigned char yperm = vec_lvsl(0, ysrc);
const int vertLumPerChroma = 2;
register unsigned int y;
/* This code assumes:
1) dst is 16 bytes-aligned
2) dstStride is a multiple of 16
3) width is a multiple of 16
4) lum & chrom stride are multiples of 8
*/
for (y=0; y<height; y++) {
int i;
for (i = 0; i < width - 31; i+= 32) {
const unsigned int j = i >> 1;
vector unsigned char v_yA = vec_ld(i, ysrc);
vector unsigned char v_yB = vec_ld(i + 16, ysrc);
vector unsigned char v_yC = vec_ld(i + 32, ysrc);
vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
vector unsigned char v_uA = vec_ld(j, usrc);
vector unsigned char v_uB = vec_ld(j + 16, usrc);
vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
vector unsigned char v_vA = vec_ld(j, vsrc);
vector unsigned char v_vB = vec_ld(j + 16, vsrc);
vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
vec_st(v_yuy2_0, (i << 1), dst);
vec_st(v_yuy2_1, (i << 1) + 16, dst);
vec_st(v_yuy2_2, (i << 1) + 32, dst);
vec_st(v_yuy2_3, (i << 1) + 48, dst);
}
if (i < width) {
const unsigned int j = i >> 1;
vector unsigned char v_y1 = vec_ld(i, ysrc);
vector unsigned char v_u = vec_ld(j, usrc);
vector unsigned char v_v = vec_ld(j, vsrc);
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
vec_st(v_yuy2_0, (i << 1), dst);
vec_st(v_yuy2_1, (i << 1) + 16, dst);
}
if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
usrc += chromStride;
vsrc += chromStride;
}
ysrc += lumStride;
dst += dstStride;
}
return srcSliceH;
}
static int yv12touyvy_unscaled_altivec(SwsContext *c, const uint8_t* src[],
int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[],
int dstStride_a[])
{
uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
// yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
const uint8_t *ysrc = src[0];
const uint8_t *usrc = src[1];
const uint8_t *vsrc = src[2];
const int width = c->srcW;
const int height = srcSliceH;
const int lumStride = srcStride[0];
const int chromStride = srcStride[1];
const int dstStride = dstStride_a[0];
const int vertLumPerChroma = 2;
const vector unsigned char yperm = vec_lvsl(0, ysrc);
register unsigned int y;
/* This code assumes:
1) dst is 16 bytes-aligned
2) dstStride is a multiple of 16
3) width is a multiple of 16
4) lum & chrom stride are multiples of 8
*/
for (y=0; y<height; y++) {
int i;
for (i = 0; i < width - 31; i+= 32) {
const unsigned int j = i >> 1;
vector unsigned char v_yA = vec_ld(i, ysrc);
vector unsigned char v_yB = vec_ld(i + 16, ysrc);
vector unsigned char v_yC = vec_ld(i + 32, ysrc);
vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
vector unsigned char v_uA = vec_ld(j, usrc);
vector unsigned char v_uB = vec_ld(j + 16, usrc);
vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
vector unsigned char v_vA = vec_ld(j, vsrc);
vector unsigned char v_vB = vec_ld(j + 16, vsrc);
vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
vec_st(v_uyvy_0, (i << 1), dst);
vec_st(v_uyvy_1, (i << 1) + 16, dst);
vec_st(v_uyvy_2, (i << 1) + 32, dst);
vec_st(v_uyvy_3, (i << 1) + 48, dst);
}
if (i < width) {
const unsigned int j = i >> 1;
vector unsigned char v_y1 = vec_ld(i, ysrc);
vector unsigned char v_u = vec_ld(j, usrc);
vector unsigned char v_v = vec_ld(j, vsrc);
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
vec_st(v_uyvy_0, (i << 1), dst);
vec_st(v_uyvy_1, (i << 1) + 16, dst);
}
if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
usrc += chromStride;
vsrc += chromStride;
}
ysrc += lumStride;
dst += dstStride;
}
return srcSliceH;
}
void ff_swscale_get_unscaled_altivec(SwsContext *c)
{
if ((av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) && !(c->srcW & 15) &&
!(c->flags & SWS_BITEXACT) && c->srcFormat == PIX_FMT_YUV420P) {
enum PixelFormat dstFormat = c->dstFormat;
// unscaled YV12 -> packed YUV, we want speed
if (dstFormat == PIX_FMT_YUYV422)
c->swScale= yv12toyuy2_unscaled_altivec;
else if (dstFormat == PIX_FMT_UYVY422)
c->swScale= yv12touyvy_unscaled_altivec;
}
}
......@@ -35,6 +35,8 @@
#define MAX_FILTER_SIZE 256
#define DITHER1XBPP
#if HAVE_BIGENDIAN
#define ALT32_CORR (-1)
#else
......@@ -337,17 +339,15 @@ int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
void ff_yuv2rgb_init_tables_altivec(SwsContext *c, const int inv_table[4],
int brightness, int contrast, int saturation);
void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
int lastInLumBuf, int lastInChrBuf);
SwsFunc ff_yuv2rgb_init_mmx(SwsContext *c);
SwsFunc ff_yuv2rgb_init_vis(SwsContext *c);
SwsFunc ff_yuv2rgb_init_mlib(SwsContext *c);
SwsFunc ff_yuv2rgb_init_altivec(SwsContext *c);
SwsFunc ff_yuv2rgb_get_func_ptr_bfin(SwsContext *c);
void ff_bfin_get_unscaled_swscale(SwsContext *c);
void ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter,
const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrUSrc,
const int16_t **chrVSrc, int chrFilterSize,
uint8_t *dest, int dstW, int dstY);
#if FF_API_SWS_FORMAT_NAME
/**
......@@ -486,10 +486,20 @@ const char *sws_format_name(enum PixelFormat format);
|| (x)==PIX_FMT_GRAY8A \
|| (x)==PIX_FMT_YUVA420P \
)
#define isPacked(x) ( \
(x)==PIX_FMT_PAL8 \
|| (x)==PIX_FMT_YUYV422 \
|| (x)==PIX_FMT_UYVY422 \
|| (x)==PIX_FMT_Y400A \
|| isAnyRGB(x) \
)
#define usePal(x) ((av_pix_fmt_descriptors[x].flags & PIX_FMT_PAL) || (x) == PIX_FMT_GRAY8A)
extern const uint64_t ff_dither4[2];
extern const uint64_t ff_dither8[2];
extern const uint8_t dithers[8][8][8];
extern uint16_t dither_scale[15][16];
extern const AVClass sws_context_class;
......@@ -499,10 +509,15 @@ extern const AVClass sws_context_class;
*/
void ff_get_unscaled_swscale(SwsContext *c);
void ff_swscale_get_unscaled_altivec(SwsContext *c);
/**
* Returns function pointer to fastest main scaler path function depending
* on architecture and available optimizations.
*/
SwsFunc ff_getSwsFunc(SwsContext *c);
void ff_sws_init_swScale_altivec(SwsContext *c);
void ff_sws_init_swScale_mmx(SwsContext *c);
#endif /* SWSCALE_SWSCALE_INTERNAL_H */
......@@ -242,93 +242,6 @@ static inline void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
nvXXtoUV_c(dstV, dstU, src1, width);
}
static inline void bgr24ToY_c(int16_t *dst, const uint8_t *src,
int width, uint32_t *unused)
{
int i;
for (i=0; i<width; i++) {
int b= src[i*3+0];
int g= src[i*3+1];
int r= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
}
}
static inline void bgr24ToUV_c(int16_t *dstU, int16_t *dstV, const uint8_t *src1,
const uint8_t *src2, int width, uint32_t *unused)
{
int i;
for (i=0; i<width; i++) {
int b= src1[3*i + 0];
int g= src1[3*i + 1];
int r= src1[3*i + 2];
dstU[i]= (RU*r + GU*g + BU*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
dstV[i]= (RV*r + GV*g + BV*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
}
assert(src1 == src2);
}
static inline void bgr24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *src1,
const uint8_t *src2, int width, uint32_t *unused)
{
int i;
for (i=0; i<width; i++) {
int b= src1[6*i + 0] + src1[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4];
int r= src1[6*i + 2] + src1[6*i + 5];
dstU[i]= (RU*r + GU*g + BU*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
dstV[i]= (RV*r + GV*g + BV*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
}
assert(src1 == src2);
}
static inline void rgb24ToY_c(int16_t *dst, const uint8_t *src, int width,
uint32_t *unused)
{
int i;
for (i=0; i<width; i++) {
int r= src[i*3+0];
int g= src[i*3+1];
int b= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
}
}
static inline void rgb24ToUV_c(int16_t *dstU, int16_t *dstV, const uint8_t *src1,
const uint8_t *src2, int width, uint32_t *unused)
{
int i;
assert(src1==src2);
for (i=0; i<width; i++) {
int r= src1[3*i + 0];
int g= src1[3*i + 1];
int b= src1[3*i + 2];
dstU[i]= (RU*r + GU*g + BU*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
dstV[i]= (RV*r + GV*g + BV*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
}
}
static inline void rgb24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *src1,
const uint8_t *src2, int width, uint32_t *unused)
{
int i;
assert(src1==src2);
for (i=0; i<width; i++) {
int r= src1[6*i + 0] + src1[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4];
int b= src1[6*i + 2] + src1[6*i + 5];
dstU[i]= (RU*r + GU*g + BU*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
dstV[i]= (RV*r + GV*g + BV*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
}
}
// bilinear / bicubic scaling
static inline void hScale_c(int16_t *dst, int dstW, const uint8_t *src,
int srcW, int xInc,
......@@ -349,162 +262,6 @@ static inline void hScale_c(int16_t *dst, int dstW, const uint8_t *src,
}
}
static inline void hScale16_c(int16_t *dst, int dstW, const uint16_t *src, int srcW, int xInc,
const int16_t *filter, const int16_t *filterPos, long filterSize, int shift)
{
int i, j;
for (i=0; i<dstW; i++) {
int srcPos= filterPos[i];
int val=0;
for (j=0; j<filterSize; j++) {
val += ((int)src[srcPos + j])*filter[filterSize*i + j];
}
dst[i] = FFMIN(val>>shift, (1<<15)-1); // the cubic equation does overflow ...
}
}
static inline void hScale16X_c(int16_t *dst, int dstW, const uint16_t *src, int srcW, int xInc,
const int16_t *filter, const int16_t *filterPos, long filterSize, int shift)
{
int i, j;
for (i=0; i<dstW; i++) {
int srcPos= filterPos[i];
int val=0;
for (j=0; j<filterSize; j++) {
val += ((int)av_bswap16(src[srcPos + j]))*filter[filterSize*i + j];
}
dst[i] = FFMIN(val>>shift, (1<<15)-1); // the cubic equation does overflow ...
}
}
//FIXME all pal and rgb srcFormats could do this convertion as well
//FIXME all scalers more complex than bilinear could do half of this transform
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
int i;
for (i = 0; i < width; i++) {
dstU[i] = (FFMIN(dstU[i],30775)*4663 - 9289992)>>12; //-264
dstV[i] = (FFMIN(dstV[i],30775)*4663 - 9289992)>>12; //-264
}
}
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
int i;
for (i = 0; i < width; i++) {
dstU[i] = (dstU[i]*1799 + 4081085)>>11; //1469
dstV[i] = (dstV[i]*1799 + 4081085)>>11; //1469
}
}
static void lumRangeToJpeg_c(int16_t *dst, int width)
{
int i;
for (i = 0; i < width; i++)
dst[i] = (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
}
static void lumRangeFromJpeg_c(int16_t *dst, int width)
{
int i;
for (i = 0; i < width; i++)
dst[i] = (dst[i]*14071 + 33561947)>>14;
}
static inline void hyscale_fast_c(SwsContext *c, int16_t *dst, int dstWidth,
const uint8_t *src, int srcW, int xInc)
{
int i;
unsigned int xpos=0;
for (i=0;i<dstWidth;i++) {
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
xpos+=xInc;
}
for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
dst[i] = src[srcW-1]*128;
}
// *** horizontal scale Y line to temp buffer
static inline void hyscale_c(SwsContext *c, uint16_t *dst, int dstWidth,
const uint8_t *src, int srcW, int xInc,
const int16_t *hLumFilter,
const int16_t *hLumFilterPos, int hLumFilterSize,
uint8_t *formatConvBuffer,
uint32_t *pal, int isAlpha)
{
void (*toYV12)(uint8_t *, const uint8_t *, int, uint32_t *) = isAlpha ? c->alpToYV12 : c->lumToYV12;
void (*convertRange)(int16_t *, int) = isAlpha ? NULL : c->lumConvertRange;
src += isAlpha ? c->alpSrcOffset : c->lumSrcOffset;
if (toYV12) {
toYV12(formatConvBuffer, src, srcW, pal);
src= formatConvBuffer;
}
if (c->hScale16) {
int shift= isAnyRGB(c->srcFormat) || c->srcFormat==PIX_FMT_PAL8 ? 13 : av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1;
c->hScale16(dst, dstWidth, (const uint16_t*)src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize, shift);
} else if (!c->hyscale_fast) {
c->hScale(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
} else { // fast bilinear upscale / crap downscale
c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc);
}
if (convertRange)
convertRange(dst, dstWidth);
}
static inline void hcscale_fast_c(SwsContext *c, int16_t *dst1, int16_t *dst2,
int dstWidth, const uint8_t *src1,
const uint8_t *src2, int srcW, int xInc)
{
int i;
unsigned int xpos=0;
for (i=0;i<dstWidth;i++) {
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst1[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
dst2[i]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
xpos+=xInc;
}
for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
dst1[i] = src1[srcW-1]*128;
dst2[i] = src2[srcW-1]*128;
}
}
inline static void hcscale_c(SwsContext *c, uint16_t *dst1, uint16_t *dst2, int dstWidth,
const uint8_t *src1, const uint8_t *src2,
int srcW, int xInc, const int16_t *hChrFilter,
const int16_t *hChrFilterPos, int hChrFilterSize,
uint8_t *formatConvBuffer, uint32_t *pal)
{
src1 += c->chrSrcOffset;
src2 += c->chrSrcOffset;
if (c->chrToYV12) {
uint8_t *buf2 = formatConvBuffer + FFALIGN(srcW*2+78, 16);
c->chrToYV12(formatConvBuffer, buf2, src1, src2, srcW, pal);
src1= formatConvBuffer;
src2= buf2;
}
if (c->hScale16) {
int shift= isAnyRGB(c->srcFormat) || c->srcFormat==PIX_FMT_PAL8 ? 13 : av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1;
c->hScale16(dst1, dstWidth, (const uint16_t*)src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize, shift);
c->hScale16(dst2, dstWidth, (const uint16_t*)src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize, shift);
} else if (!c->hcscale_fast) {
c->hScale(dst1, dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
c->hScale(dst2, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
} else { // fast bilinear upscale / crap downscale
c->hcscale_fast(c, dst1, dst2, dstWidth, src1, src2, srcW, xInc);
}
if (c->chrConvertRange)
c->chrConvertRange(dst1, dst2, dstWidth);
}
#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...) if (DEBUG_SWSCALE_BUFFERS) av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
......
......@@ -18,9 +18,14 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef SWSCALE_X86_SWSCALE_TEMPLATE_H
#define SWSCALE_X86_SWSCALE_TEMPLATE_H
#include <inttypes.h>
#include "config.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/x86_cpu.h"
#include "libavutil/cpu.h"
#include "libavutil/pixdesc.h"
DECLARE_ASM_CONST(8, uint64_t, bF8)= 0xF8F8F8F8F8F8F8F8LL;
DECLARE_ASM_CONST(8, uint64_t, bFC)= 0xFCFCFCFCFCFCFCFCLL;
......@@ -32,12 +37,12 @@ DECLARE_ASM_CONST(8, uint64_t, bm11111000)=0xFFFFFFFFFF000000LL;
DECLARE_ASM_CONST(8, uint64_t, bm01010101)=0x00FF00FF00FF00FFLL;
const DECLARE_ALIGNED(8, uint64_t, ff_dither4)[2] = {
0x0103010301030103LL,
0x0200020002000200LL,};
0x0103010301030103LL,
0x0200020002000200LL,};
const DECLARE_ALIGNED(8, uint64_t, ff_dither8)[2] = {
0x0602060206020602LL,
0x0004000400040004LL,};
0x0602060206020602LL,
0x0004000400040004LL,};
DECLARE_ASM_CONST(8, uint64_t, b16Mask)= 0x001F001F001F001FLL;
DECLARE_ASM_CONST(8, uint64_t, g16Mask)= 0x07E007E007E007E0LL;
......@@ -76,4 +81,107 @@ DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUV)[2][4] = {
DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUVOffset)= 0x0040010000400100ULL;
#endif /* SWSCALE_X86_SWSCALE_TEMPLATE_H */
//MMX versions
#if HAVE_MMX
#undef RENAME
#define COMPILE_TEMPLATE_MMX2 0
#define RENAME(a) a ## _MMX
#include "swscale_template.c"
#endif
//MMX2 versions
#if HAVE_MMX2
#undef RENAME
#undef COMPILE_TEMPLATE_MMX2
#define COMPILE_TEMPLATE_MMX2 1
#define RENAME(a) a ## _MMX2
#include "swscale_template.c"
#endif
void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
int lastInLumBuf, int lastInChrBuf)
{
const int dstH= c->dstH;
const int flags= c->flags;
int16_t **lumPixBuf= c->lumPixBuf;
int16_t **chrUPixBuf= c->chrUPixBuf;
int16_t **alpPixBuf= c->alpPixBuf;
const int vLumBufSize= c->vLumBufSize;
const int vChrBufSize= c->vChrBufSize;
int16_t *vLumFilterPos= c->vLumFilterPos;
int16_t *vChrFilterPos= c->vChrFilterPos;
int16_t *vLumFilter= c->vLumFilter;
int16_t *vChrFilter= c->vChrFilter;
int32_t *lumMmxFilter= c->lumMmxFilter;
int32_t *chrMmxFilter= c->chrMmxFilter;
int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
const int vLumFilterSize= c->vLumFilterSize;
const int vChrFilterSize= c->vChrFilterSize;
const int chrDstY= dstY>>c->chrDstVSubSample;
const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
c->blueDither= ff_dither8[dstY&1];
if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
c->greenDither= ff_dither8[dstY&1];
else
c->greenDither= ff_dither4[dstY&1];
c->redDither= ff_dither8[(dstY+1)&1];
if (dstY < dstH - 2) {
const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
const int16_t **chrUSrcPtr= (const int16_t **) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
int i;
if (flags & SWS_ACCURATE_RND) {
int s= APCK_SIZE / 8;
for (i=0; i<vLumFilterSize; i+=2) {
*(const void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
*(const void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
lumMmxFilter[s*i+APCK_COEF/4 ]=
lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
+ (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
*(const void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ];
*(const void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
alpMmxFilter[s*i+APCK_COEF/4 ]=
alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ];
}
}
for (i=0; i<vChrFilterSize; i+=2) {
*(const void**)&chrMmxFilter[s*i ]= chrUSrcPtr[i ];
*(const void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrUSrcPtr[i+(vChrFilterSize>1)];
chrMmxFilter[s*i+APCK_COEF/4 ]=
chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
+ (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
}
} else {
for (i=0; i<vLumFilterSize; i++) {
*(const void**)&lumMmxFilter[4*i+0]= lumSrcPtr[i];
lumMmxFilter[4*i+2]=
lumMmxFilter[4*i+3]=
((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
*(const void**)&alpMmxFilter[4*i+0]= alpSrcPtr[i];
alpMmxFilter[4*i+2]=
alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
}
}
for (i=0; i<vChrFilterSize; i++) {
*(const void**)&chrMmxFilter[4*i+0]= chrUSrcPtr[i];
chrMmxFilter[4*i+2]=
chrMmxFilter[4*i+3]=
((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
}
}
}
}
void ff_sws_init_swScale_mmx(SwsContext *c)
{
int cpu_flags = av_get_cpu_flags();
if (cpu_flags & AV_CPU_FLAG_MMX)
sws_init_swScale_MMX(c);
if (cpu_flags & AV_CPU_FLAG_MMX2)
sws_init_swScale_MMX2(c);
}
......@@ -18,8 +18,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "swscale_template.h"
#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PREFETCH
......@@ -2351,86 +2349,6 @@ static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *d
}
#endif /* COMPILE_TEMPLATE_MMX2 */
#if !COMPILE_TEMPLATE_MMX2
static void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
int lastInLumBuf, int lastInChrBuf)
{
const int dstH= c->dstH;
const int flags= c->flags;
int16_t **lumPixBuf= c->lumPixBuf;
int16_t **chrUPixBuf= c->chrUPixBuf;
int16_t **alpPixBuf= c->alpPixBuf;
const int vLumBufSize= c->vLumBufSize;
const int vChrBufSize= c->vChrBufSize;
int16_t *vLumFilterPos= c->vLumFilterPos;
int16_t *vChrFilterPos= c->vChrFilterPos;
int16_t *vLumFilter= c->vLumFilter;
int16_t *vChrFilter= c->vChrFilter;
int32_t *lumMmxFilter= c->lumMmxFilter;
int32_t *chrMmxFilter= c->chrMmxFilter;
int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
const int vLumFilterSize= c->vLumFilterSize;
const int vChrFilterSize= c->vChrFilterSize;
const int chrDstY= dstY>>c->chrDstVSubSample;
const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
c->blueDither= ff_dither8[dstY&1];
if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
c->greenDither= ff_dither8[dstY&1];
else
c->greenDither= ff_dither4[dstY&1];
c->redDither= ff_dither8[(dstY+1)&1];
if (dstY < dstH - 2) {
const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
const int16_t **chrUSrcPtr= (const int16_t **) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
int i;
if (flags & SWS_ACCURATE_RND) {
int s= APCK_SIZE / 8;
for (i=0; i<vLumFilterSize; i+=2) {
*(const void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
*(const void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
lumMmxFilter[s*i+APCK_COEF/4 ]=
lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
+ (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
*(const void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ];
*(const void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
alpMmxFilter[s*i+APCK_COEF/4 ]=
alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ];
}
}
for (i=0; i<vChrFilterSize; i+=2) {
*(const void**)&chrMmxFilter[s*i ]= chrUSrcPtr[i ];
*(const void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrUSrcPtr[i+(vChrFilterSize>1)];
chrMmxFilter[s*i+APCK_COEF/4 ]=
chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
+ (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
}
} else {
for (i=0; i<vLumFilterSize; i++) {
*(const void**)&lumMmxFilter[4*i+0]= lumSrcPtr[i];
lumMmxFilter[4*i+2]=
lumMmxFilter[4*i+3]=
((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
*(const void**)&alpMmxFilter[4*i+0]= alpSrcPtr[i];
alpMmxFilter[4*i+2]=
alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
}
}
for (i=0; i<vChrFilterSize; i++) {
*(const void**)&chrMmxFilter[4*i+0]= chrUSrcPtr[i];
chrMmxFilter[4*i+2]=
chrMmxFilter[4*i+3]=
((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
}
}
}
}
#endif /* !COMPILE_TEMPLATE_MMX2 */
static void RENAME(sws_init_swScale)(SwsContext *c)
{
enum PixelFormat srcFormat = c->srcFormat;
......
......@@ -50,12 +50,12 @@ endif
clean::
$(RM) $(addprefix $(SUBDIR),*-example$(EXESUF) *-test$(EXESUF) $(CLEANFILES) $(CLEANSUFFIXES) $(LIBSUFFIXES)) \
$(addprefix $(SUBDIR), $(foreach suffix,$(CLEANSUFFIXES),$(addsuffix /$(suffix),$(DIRS)))) \
$(foreach dir,$(DIRS),$(CLEANSUFFIXES:%=$(SUBDIR)$(dir)/%)) \
$(HOSTOBJS) $(HOSTPROGS)
distclean:: clean
$(RM) $(addprefix $(SUBDIR),$(DISTCLEANSUFFIXES)) \
$(addprefix $(SUBDIR), $(foreach suffix,$(DISTCLEANSUFFIXES),$(addsuffix /$(suffix),$(DIRS))))
$(RM) $(DISTCLEANSUFFIXES:%=$(SUBDIR)%) \
$(foreach dir,$(DIRS),$(DISTCLEANSUFFIXES:%=$(SUBDIR)$(dir)/%))
install-lib$(NAME)-shared: $(SUBDIR)$(SLIBNAME)
$(Q)mkdir -p "$(SHLIBDIR)"
......