Commit 32421602, authored by Shiyou Yin, committed by Michael Niedermayer

avcodec/mips: [loongson] optimize put_hevc_pel_bi_pixels_8 with mmi.

Optimize put_hevc_pel_bi_pixels_8 with mmi in the case width=8/16/24/32/48/64.
This optimization improved HEVC decoding performance by 2% (1.77x to 1.81x, tested on a Loongson 3A3000).
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
parent 32fb83e4
......@@ -86,3 +86,4 @@ MMI-OBJS-$(CONFIG_VP8_DECODER) += mips/vp8dsp_mmi.o
MMI-OBJS-$(CONFIG_HPELDSP) += mips/hpeldsp_mmi.o
MMI-OBJS-$(CONFIG_VC1_DECODER) += mips/vc1dsp_mmi.o
MMI-OBJS-$(CONFIG_WMV2DSP) += mips/wmv2dsp_mmi.o
# Loongson MMI HEVC optimizations (bi-predictive pixel-copy paths)
MMI-OBJS-$(CONFIG_HEVC_DECODER) += mips/hevcdsp_mmi.o
......@@ -20,6 +20,26 @@
#include "libavcodec/mips/hevcdsp_mips.h"
#if HAVE_MMI
/*
 * Install the Loongson MMI implementations of the bi-predictive
 * "pixels" (no-interpolation copy) MC functions.  Only 8-bit
 * content has MMI code paths.  The first table index selects the
 * block width — per the function names: [3]=8, [5]=16, [6]=24,
 * [7]=32, [8]=48, [9]=64; unassigned indices keep the C fallback.
 */
static av_cold void hevc_dsp_init_mmi(HEVCDSPContext *c,
                                      const int bit_depth)
{
    if (bit_depth != 8)
        return;

    /* qpel and epel share the same copy kernels at [0][0] (no
     * fractional-pel filtering); epel tables stop at width 32. */
    c->put_hevc_qpel_bi[3][0][0] = ff_hevc_put_hevc_pel_bi_pixels8_8_mmi;
    c->put_hevc_epel_bi[3][0][0] = ff_hevc_put_hevc_pel_bi_pixels8_8_mmi;
    c->put_hevc_qpel_bi[5][0][0] = ff_hevc_put_hevc_pel_bi_pixels16_8_mmi;
    c->put_hevc_epel_bi[5][0][0] = ff_hevc_put_hevc_pel_bi_pixels16_8_mmi;
    c->put_hevc_qpel_bi[6][0][0] = ff_hevc_put_hevc_pel_bi_pixels24_8_mmi;
    c->put_hevc_epel_bi[6][0][0] = ff_hevc_put_hevc_pel_bi_pixels24_8_mmi;
    c->put_hevc_qpel_bi[7][0][0] = ff_hevc_put_hevc_pel_bi_pixels32_8_mmi;
    c->put_hevc_epel_bi[7][0][0] = ff_hevc_put_hevc_pel_bi_pixels32_8_mmi;
    c->put_hevc_qpel_bi[8][0][0] = ff_hevc_put_hevc_pel_bi_pixels48_8_mmi;
    c->put_hevc_qpel_bi[9][0][0] = ff_hevc_put_hevc_pel_bi_pixels64_8_mmi;
}
#endif // #if HAVE_MMI
#if HAVE_MSA
static av_cold void hevc_dsp_init_msa(HEVCDSPContext *c,
const int bit_depth)
......@@ -448,6 +468,9 @@ static av_cold void hevc_dsp_init_msa(HEVCDSPContext *c,
/*
 * Public entry point: layer the MIPS SIMD HEVC DSP implementations
 * (Loongson MMI and/or MIPS MSA, per the configure-time HAVE_*
 * flags) on top of the function-pointer table in *c.  When both are
 * built, hevc_dsp_init_msa runs second, so any pointers it assigns
 * override the MMI ones.
 * NOTE(review): the closing brace of this function lies outside the
 * visible diff hunk.
 */
void ff_hevc_dsp_init_mips(HEVCDSPContext *c, const int bit_depth)
{
#if HAVE_MMI
    hevc_dsp_init_mmi(c, bit_depth);  /* bi-pred pixel-copy kernels */
#endif // #if HAVE_MMI
#if HAVE_MSA
    hevc_dsp_init_msa(c, bit_depth);
#endif // #if HAVE_MSA
......
......@@ -479,4 +479,23 @@ void ff_hevc_addblk_32x32_msa(uint8_t *dst, int16_t *pi16Coeffs,
ptrdiff_t stride);
void ff_hevc_idct_luma_4x4_msa(int16_t *pi16Coeffs);
/* Loongson optimization */
/*
 * L_BI_MC(PEL, DIR, WIDTH, TYPE) expands to the prototype of one
 * bi-predictive MC function named
 *     ff_hevc_put_hevc_<PEL>_bi_<DIR><WIDTH>_8_<TYPE>
 *
 * dst / dst_stride : 8-bit destination plane and its byte stride
 * src / src_stride : 8-bit source plane and its byte stride
 * src2             : 16-bit intermediate prediction averaged in
 * height           : number of rows to process
 * mx, my           : fractional MV components (unused by the plain
 *                    "pixels" copy variants defined below)
 * width            : run-time block width in pixels
 */
#define L_BI_MC(PEL, DIR, WIDTH, TYPE) \
void ff_hevc_put_hevc_##PEL##_bi_##DIR##WIDTH##_8_##TYPE(uint8_t *dst, \
ptrdiff_t dst_stride, \
uint8_t *src, \
ptrdiff_t src_stride, \
int16_t *src_16bit, \
int height, \
intptr_t mx, \
intptr_t my, \
int width)
/* MMI pixel-copy variants, implemented in mips/hevcdsp_mmi.c. */
L_BI_MC(pel, pixels, 8, mmi);
L_BI_MC(pel, pixels, 16, mmi);
L_BI_MC(pel, pixels, 24, mmi);
L_BI_MC(pel, pixels, 32, mmi);
L_BI_MC(pel, pixels, 48, mmi);
L_BI_MC(pel, pixels, 64, mmi);
#undef L_BI_MC
#endif // #ifndef AVCODEC_MIPS_HEVCDSP_MIPS_H
/*
* Copyright (c) 2019 Shiyou Yin (yinshiyou-hf@loongson.cn)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/bit_depth_template.c"
#include "libavcodec/mips/hevcdsp_mips.h"
#include "libavutil/mips/mmiutils.h"
/*
 * PUT_HEVC_PEL_BI_PIXELS(w, x_step, src_step, dst_step, src2_step)
 * defines ff_hevc_put_hevc_pel_bi_pixels<w>_8_mmi(): the 8-bit,
 * no-interpolation bi-prediction path.  Each output pixel is
 *
 *     clip_u8(((src[x] << 6) + 0x40 + src2[x]) >> 7)
 *
 * i.e. the source sample is scaled up by 6 bits to the intermediate
 * precision, added to the second (16-bit) prediction src2, rounded
 * (offset 0x40) and shifted down by 7 (shift variable below).
 *
 * Macro parameters:
 *   w        — total block width in pixels (the function suffix)
 *   x_step   — inner-loop count = w / 8 (8 pixels per iteration;
 *              src advances 8 bytes, src2 16 bytes, dst 8 bytes)
 *   src_step / dst_step / src2_step
 *            — negative byte offsets (-w, -w, -2*w) that rewind the
 *              three pointers to the start of the row before the
 *              per-row strides are added
 *
 * Per row, src2 advances a fixed 0x80 bytes, i.e. the intermediate
 * buffer has a stride of 64 int16_t elements (presumably
 * MAX_PB_SIZE — TODO confirm against hevcdsp.h).
 *
 * Register roles inside the asm loop:
 *   ftmp0  — all-zero, used for byte/halfword unpacking
 *   ftmp1  = 6  — psllh shift applied to the unpacked src bytes
 *   ftmp10 = 16 — psraw shift that sign-extends the src2 halfwords
 *                 which were unpacked into the HIGH half of each
 *                 32-bit lane (punpck*hw with ftmp0 as low half)
 *   offset = 0x0040004000400040 — rounding term, broadcast from
 *                 0x40 via punpcklhw/punpcklwd
 * After the >> 7, results are packed to halfwords (packsswh),
 * negatives are zeroed via pcmpgth + and, and packushb saturates
 * the high side to 255 before the 8-byte store.
 */
#define PUT_HEVC_PEL_BI_PIXELS(w, x_step, src_step, dst_step, src2_step) \
void ff_hevc_put_hevc_pel_bi_pixels##w##_8_mmi(uint8_t *_dst, \
ptrdiff_t _dststride, \
uint8_t *_src, \
ptrdiff_t _srcstride, \
int16_t *src2, int height, \
intptr_t mx, intptr_t my, \
int width) \
{ \
int x, y; \
pixel *src = (pixel *)_src; \
ptrdiff_t srcstride = _srcstride / sizeof(pixel); \
pixel *dst = (pixel *)_dst; \
ptrdiff_t dststride = _dststride / sizeof(pixel); \
uint64_t ftmp[12]; \
uint64_t rtmp[1]; \
int shift = 7; \
\
y = height; \
x = width >> 3; \
__asm__ volatile( \
"xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
"li %[rtmp0], 0x06 \n\t" \
"dmtc1 %[rtmp0], %[ftmp1] \n\t" \
"li %[rtmp0], 0x10 \n\t" \
"dmtc1 %[rtmp0], %[ftmp10] \n\t" \
"li %[rtmp0], 0x40 \n\t" \
"dmtc1 %[rtmp0], %[offset] \n\t" \
"punpcklhw %[offset], %[offset], %[offset] \n\t" \
"punpcklwd %[offset], %[offset], %[offset] \n\t" \
\
"1: \n\t" \
"2: \n\t" \
"gsldlc1 %[ftmp5], 0x07(%[src]) \n\t" \
"gsldrc1 %[ftmp5], 0x00(%[src]) \n\t" \
"gsldlc1 %[ftmp2], 0x07(%[src2]) \n\t" \
"gsldrc1 %[ftmp2], 0x00(%[src2]) \n\t" \
"gsldlc1 %[ftmp3], 0x0f(%[src2]) \n\t" \
"gsldrc1 %[ftmp3], 0x08(%[src2]) \n\t" \
"punpcklbh %[ftmp4], %[ftmp5], %[ftmp0] \n\t" \
"punpckhbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \
"psllh %[ftmp4], %[ftmp4], %[ftmp1] \n\t" \
"psllh %[ftmp5], %[ftmp5], %[ftmp1] \n\t" \
"paddh %[ftmp4], %[ftmp4], %[offset] \n\t" \
"paddh %[ftmp5], %[ftmp5], %[offset] \n\t" \
"punpcklhw %[ftmp6], %[ftmp4], %[ftmp0] \n\t" \
"punpckhhw %[ftmp7], %[ftmp4], %[ftmp0] \n\t" \
"punpcklhw %[ftmp8], %[ftmp5], %[ftmp0] \n\t" \
"punpckhhw %[ftmp9], %[ftmp5], %[ftmp0] \n\t" \
"punpcklhw %[ftmp4], %[ftmp0], %[ftmp3] \n\t" \
"punpckhhw %[ftmp5], %[ftmp0], %[ftmp3] \n\t" \
"punpckhhw %[ftmp3], %[ftmp0], %[ftmp2] \n\t" \
"punpcklhw %[ftmp2], %[ftmp0], %[ftmp2] \n\t" \
"psraw %[ftmp2], %[ftmp2], %[ftmp10] \n\t" \
"psraw %[ftmp3], %[ftmp3], %[ftmp10] \n\t" \
"psraw %[ftmp4], %[ftmp4], %[ftmp10] \n\t" \
"psraw %[ftmp5], %[ftmp5], %[ftmp10] \n\t" \
"paddw %[ftmp2], %[ftmp2], %[ftmp6] \n\t" \
"paddw %[ftmp3], %[ftmp3], %[ftmp7] \n\t" \
"paddw %[ftmp4], %[ftmp4], %[ftmp8] \n\t" \
"paddw %[ftmp5], %[ftmp5], %[ftmp9] \n\t" \
"psraw %[ftmp2], %[ftmp2], %[shift] \n\t" \
"psraw %[ftmp3], %[ftmp3], %[shift] \n\t" \
"psraw %[ftmp4], %[ftmp4], %[shift] \n\t" \
"psraw %[ftmp5], %[ftmp5], %[shift] \n\t" \
"packsswh %[ftmp2], %[ftmp2], %[ftmp3] \n\t" \
"packsswh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
"pcmpgth %[ftmp3], %[ftmp2], %[ftmp0] \n\t" \
"pcmpgth %[ftmp5], %[ftmp4], %[ftmp0] \n\t" \
"and %[ftmp2], %[ftmp2], %[ftmp3] \n\t" \
"and %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
"packushb %[ftmp2], %[ftmp2], %[ftmp4] \n\t" \
"gssdlc1 %[ftmp2], 0x07(%[dst]) \n\t" \
"gssdrc1 %[ftmp2], 0x00(%[dst]) \n\t" \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x08 \n\t" \
PTR_ADDIU "%[dst], %[dst], 0x08 \n\t" \
PTR_ADDIU "%[src2], %[src2], 0x10 \n\t" \
"bnez %[x], 2b \n\t" \
\
PTR_ADDIU "%[src], %[src], " #src_step " \n\t" \
PTR_ADDIU "%[dst], %[dst], " #dst_step " \n\t" \
PTR_ADDIU "%[src2], %[src2], " #src2_step " \n\t" \
"li %[x], " #x_step " \n\t" \
"daddi %[y], %[y], -0x01 \n\t" \
PTR_ADDU "%[src], %[src], %[srcstride] \n\t" \
PTR_ADDU "%[dst], %[dst], %[dststride] \n\t" \
PTR_ADDIU "%[src2], %[src2], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
: [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \
[ftmp10]"=&f"(ftmp[10]), [offset]"=&f"(ftmp[11]), \
[src2]"+&r"(src2), [dst]"+&r"(dst), [src]"+&r"(src), \
[x]"+&r"(x), [y]"+&r"(y), [rtmp0]"=&r"(rtmp[0]) \
: [dststride]"r"(dststride), [shift]"f"(shift), \
[srcstride]"r"(srcstride) \
: "memory" \
); \
} \
/*                     w  x_step src_step dst_step src2_step */
PUT_HEVC_PEL_BI_PIXELS(8, 1, -8, -8, -16);
PUT_HEVC_PEL_BI_PIXELS(16, 2, -16, -16, -32);
PUT_HEVC_PEL_BI_PIXELS(24, 3, -24, -24, -48);
PUT_HEVC_PEL_BI_PIXELS(32, 4, -32, -32, -64);
PUT_HEVC_PEL_BI_PIXELS(48, 6, -48, -48, -96);
PUT_HEVC_PEL_BI_PIXELS(64, 8, -64, -64, -128);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment