Linshizhi / ffmpeg.wasm-core / Commits

Commit a0ad6c85
Authored Jul 09, 2015 by Zhou Xiaoyong (周晓勇)
Committed by Michael Niedermayer, Jul 15, 2015
avcodec: loongson optimized h264pred with mmi

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>

Parent 2c343895
Showing 4 changed files with 911 additions and 0 deletions.
libavcodec/mips/Makefile               +1   -0
libavcodec/mips/h264pred_init_mips.c   +53  -0
libavcodec/mips/h264pred_mips.h        +53  -0
libavcodec/mips/h264pred_mmi.c         +804 -0
libavcodec/mips/Makefile (view file @ a0ad6c85)
@@ -58,3 +58,4 @@ MSA-OBJS-$(CONFIG_MPEGVIDEOENC)       += mips/mpegvideoencdsp_msa.o
MSA-OBJS-$(CONFIG_ME_CMP)                 += mips/me_cmp_msa.o
MMI-OBJS-$(CONFIG_H264DSP)                += mips/h264dsp_mmi.o
MMI-OBJS-$(CONFIG_H264CHROMA)             += mips/h264chroma_mmi.o
MMI-OBJS-$(CONFIG_H264PRED)               += mips/h264pred_mmi.o
libavcodec/mips/h264pred_init_mips.c (view file @ a0ad6c85)
/*
 * Copyright (c) 2015 Shivraj Patil (Shivraj.Patil@imgtec.com)
 *                    Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
...
@@ -20,6 +21,7 @@
#include "config.h"
#include "h264dsp_mips.h"
#include "h264pred_mips.h"

#if HAVE_MSA
static av_cold void h264_pred_init_msa(H264PredContext *h, int codec_id,
...
@@ -94,6 +96,54 @@ static av_cold void h264_pred_init_msa(H264PredContext *h, int codec_id,
}
#endif  // #if HAVE_MSA

#if HAVE_MMI
static av_cold void h264_pred_init_mmi(H264PredContext *h, int codec_id,
                                       const int bit_depth,
                                       const int chroma_format_idc)
{
    if (bit_depth == 8) {
        if (chroma_format_idc == 1) {
            h->pred8x8[VERT_PRED8x8] = ff_pred8x8_vertical_8_mmi;
            h->pred8x8[HOR_PRED8x8]  = ff_pred8x8_horizontal_8_mmi;
        } else {
            h->pred8x8[VERT_PRED8x8] = ff_pred8x16_vertical_8_mmi;
            h->pred8x8[HOR_PRED8x8]  = ff_pred8x16_horizontal_8_mmi;
        }

        h->pred16x16[DC_PRED8x8]   = ff_pred16x16_dc_8_mmi;
        h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_8_mmi;
        h->pred16x16[HOR_PRED8x8]  = ff_pred16x16_horizontal_8_mmi;
        h->pred8x8l[TOP_DC_PRED]   = ff_pred8x8l_top_dc_8_mmi;
        h->pred8x8l[DC_PRED]       = ff_pred8x8l_dc_8_mmi;
        h->pred8x8l[HOR_PRED]      = ff_pred8x8l_horizontal_8_mmi;

        switch (codec_id) {
        case AV_CODEC_ID_SVQ3:
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_mmi;
            break;
        case AV_CODEC_ID_RV40:
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_rv40_8_mmi;
            break;
        case AV_CODEC_ID_VP7:
        case AV_CODEC_ID_VP8:
            break;
        default:
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_h264_8_mmi;
            break;
        }

        if (codec_id == AV_CODEC_ID_SVQ3 || codec_id == AV_CODEC_ID_H264) {
            if (chroma_format_idc == 1) {
                h->pred8x8[TOP_DC_PRED8x8] = ff_pred8x8_top_dc_8_mmi;
                h->pred8x8[DC_PRED8x8]     = ff_pred8x8_dc_8_mmi;
            }
        }
    }
}
#endif /* HAVE_MMI */

av_cold void ff_h264_pred_init_mips(H264PredContext *h, int codec_id,
                                    int bit_depth,
                                    const int chroma_format_idc)
...
@@ -101,4 +151,7 @@ av_cold void ff_h264_pred_init_mips(H264PredContext *h, int codec_id,
#if HAVE_MSA
    h264_pred_init_msa(h, codec_id, bit_depth, chroma_format_idc);
#endif  // #if HAVE_MSA
#if HAVE_MMI
    h264_pred_init_mmi(h, codec_id, bit_depth, chroma_format_idc);
#endif /* HAVE_MMI */
}
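The init code above only fills a function-pointer table; the decoder later invokes whichever predictor was selected through H264PredContext without caring which architecture provided it. A minimal self-contained sketch of that dispatch pattern (the struct, enum and function names below are simplified stand-ins for illustration, not FFmpeg's actual definitions):

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for H264PredContext: one slot per prediction mode. */
typedef struct ToyPredContext {
    void (*pred16x16[4])(uint8_t *src, ptrdiff_t stride);
} ToyPredContext;

enum { TOY_VERT_PRED8x8 = 0 };

/* Portable fallback: copy the row above the block into all 16 rows. */
static void toy_pred16x16_vertical_c(uint8_t *src, ptrdiff_t stride)
{
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++)
            src[y * stride + x] = src[x - stride];
}

static void toy_pred_init(ToyPredContext *h)
{
    h->pred16x16[TOY_VERT_PRED8x8] = toy_pred16x16_vertical_c;
    /* An arch-specific init pass (like h264_pred_init_mmi above) would
     * overwrite selected slots with optimized versions afterwards. */
}

A caller then simply does h->pred16x16[mode](dst, stride), which is why the MMI routines below must match the generic prototypes exactly.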
libavcodec/mips/h264pred_mips.h (new file, mode 100644, view file @ a0ad6c85)
/*
* Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef H264_PRED_MIPS_H
#define H264_PRED_MIPS_H
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/h264pred.h"
#include "libavcodec/bit_depth_template.c"
void ff_pred16x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_dc_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8l_top_dc_8_mmi(uint8_t *src, int has_topleft, int has_topright, ptrdiff_t stride);
void ff_pred8x8l_dc_8_mmi(uint8_t *src, int has_topleft, int has_topright, ptrdiff_t stride);
void ff_pred8x8l_horizontal_8_mmi(uint8_t *src, int has_topleft, int has_topright, ptrdiff_t stride);
void ff_pred8x8l_vertical_8_mmi(uint8_t *src, int has_topleft, int has_topright, ptrdiff_t stride);
void ff_pred4x4_dc_8_mmi(uint8_t *src, const uint8_t *topright, ptrdiff_t stride);
void ff_pred8x8_vertical_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_plane_svq3_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_plane_rv40_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_plane_h264_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_top_dc_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_dc_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred8x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride);
void ff_pred8x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride);

#endif /* H264_PRED_MIPS_H */
libavcodec/mips/h264pred_mmi.c (new file, mode 100644, view file @ a0ad6c85)
/*
* Loongson SIMD optimized h264pred
*
* Copyright (c) 2015 Loongson Technology Corporation Limited
* Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
* Zhang Shuangshuang <zhangshuangshuang@ict.ac.cn>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "h264pred_mips.h"
void ff_pred16x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dsubu $2, %0, %1 \r\n"
        "daddu $3, %0, $0 \r\n"
        "ldl $4, 7($2) \r\n"
        "ldr $4, 0($2) \r\n"
        "ldl $5, 15($2) \r\n"
        "ldr $5, 8($2) \r\n"
        "dli $6, 0x10 \r\n"
        "1: \r\n"
        "sdl $4, 7($3) \r\n"
        "sdr $4, 0($3) \r\n"
        "sdl $5, 15($3) \r\n"
        "sdr $5, 8($3) \r\n"
        "daddu $3, %1 \r\n"
        "daddiu $6, -1 \r\n"
        "bnez $6, 1b \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "$4", "$5", "$6", "memory"
    );
}
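The routine copies the 16 bytes of the row just above the block into each of the 16 rows; the ldl/ldr and sdl/sdr pairs are the MIPS unaligned 64-bit load/store idiom, and $6 counts the 16 rows. A plain-C model of the same computation for 8-bit pixels (illustrative only, not part of the commit):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void pred16x16_vertical_ref(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *top = src - stride;   /* row above the block */
    for (int y = 0; y < 16; y++)
        memcpy(src + y * stride, top, 16);
}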
void ff_pred16x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        ".set arch=loongson3a \r\n"
        "daddiu $2, %0, -1 \r\n"
        "daddu $3, %0, $0 \r\n"
        "dli $6, 0x10 \r\n"
        "dli $7, 0x0101010101010101 \r\n"
        "1: \r\n"
        "lbu $4, 0($2) \r\n"
        "dmul $5, $4, $7 \r\n"
        "sdl $5, 7($3) \r\n"
        "sdr $5, 0($3) \r\n"
        "sdl $5, 15($3) \r\n"
        "sdr $5, 8($3) \r\n"
        "daddu $2, %1 \r\n"
        "daddu $3, %1 \r\n"
        "daddiu $6, -1 \r\n"
        "bnez $6, 1b \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "$4", "$5", "$6", "memory"
    );
}
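Horizontal prediction uses a multiply-splat trick: multiplying the left-neighbour byte by 0x0101010101010101 (dmul $5, $4, $7) replicates it into all eight byte lanes of a 64-bit register, so one load and one multiply fill a whole row. The same trick in portable C (a sketch, not part of the commit):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void pred16x16_horizontal_ref(uint8_t *src, ptrdiff_t stride)
{
    for (int y = 0; y < 16; y++) {
        /* Replicate the left neighbour into all 8 byte lanes. */
        uint64_t row = src[y * stride - 1] * UINT64_C(0x0101010101010101);
        memcpy(src + y * stride,     &row, 8);
        memcpy(src + y * stride + 8, &row, 8);
    }
}

Because every lane holds the same byte, the result is endian-independent.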
void ff_pred16x16_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        ".set arch=loongson3a \r\n"
        "daddiu $2, %0, -1 \r\n"
        "dli $6, 0x10 \r\n"
        "xor $8, $8, $8 \r\n"
        "1: \r\n"
        "lbu $4, 0($2) \r\n"
        "daddu $8, $8, $4 \r\n"
        "daddu $2, $2, %1 \r\n"
        "daddiu $6, $6, -1 \r\n"
        "bnez $6, 1b \r\n"
        "dli $6, 0x10 \r\n"
        "negu $3, %1 \r\n"
        "daddu $2, %0, $3 \r\n"
        "2: \r\n"
        "lbu $4, 0($2) \r\n"
        "daddu $8, $8, $4 \r\n"
        "daddiu $2, $2, 1 \r\n"
        "daddiu $6, $6, -1 \r\n"
        "bnez $6, 2b \r\n"
        "dli $7, 0x0101010101010101 \r\n"
        "daddiu $8, $8, 0x10 \r\n"
        "dsra $8, 5 \r\n"
        "dmul $5, $8, $7 \r\n"
        "daddu $2, %0, $0 \r\n"
        "dli $6, 0x10 \r\n"
        "3: \r\n"
        "sdl $5, 7($2) \r\n"
        "sdr $5, 0($2) \r\n"
        "sdl $5, 15($2) \r\n"
        "sdr $5, 8($2) \r\n"
        "daddu $2, $2, %1 \r\n"
        "daddiu $6, $6, -1 \r\n"
        "bnez $6, 3b \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "$4", "$5", "$6", "$7", "$8", "memory"
    );
}
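The three numbered loops correspond to the three phases of DC prediction: loop 1 sums the 16 left neighbours, loop 2 sums the 16 top neighbours, and loop 3 splats the rounded average (sum + 16) >> 5 across the block, again via the 0x0101... multiply. A scalar reference assuming both borders are available (an illustrative sketch):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void pred16x16_dc_ref(uint8_t *src, ptrdiff_t stride)
{
    int sum = 0;
    for (int i = 0; i < 16; i++)
        sum += src[i * stride - 1] + src[i - stride];  /* left + top */
    uint8_t dc = (sum + 16) >> 5;          /* rounded mean of 32 samples */
    for (int y = 0; y < 16; y++)
        memset(src + y * stride, dc, 16);
}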
void ff_pred8x8l_top_dc_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    int y;
    uint32_t dc;

    __asm__ volatile (
        "ldl $8, 7(%1) \r\n"
        "ldr $8, 0(%1) \r\n"
        "ldl $9, 7(%2) \r\n"
        "ldr $9, 0(%2) \r\n"
        "ldl $10, 7(%3) \r\n"
        "ldr $10, 0(%3) \r\n"
        "dmtc1 $8, $f2 \r\n"
        "dmtc1 $9, $f4 \r\n"
        "dmtc1 $10, $f6 \r\n"
        "dmtc1 $0, $f0 \r\n"
        "punpcklbh $f8, $f2, $f0 \r\n"
        "punpckhbh $f10, $f2, $f0 \r\n"
        "punpcklbh $f12, $f4, $f0 \r\n"
        "punpckhbh $f14, $f4, $f0 \r\n"
        "punpcklbh $f16, $f6, $f0 \r\n"
        "punpckhbh $f18, $f6, $f0 \r\n"
        "bnez %4, 1f \r\n"
        "pinsrh_0 $f8, $f8, $f12 \r\n"
        "1: \r\n"
        "bnez %5, 2f \r\n"
        "pinsrh_3 $f18, $f18, $f14 \r\n"
        "2: \r\n"
        "daddiu $8, $0, 2 \r\n"
        "dmtc1 $8, $f20 \r\n"
        "pshufh $f22, $f20, $f0 \r\n"
        "pmullh $f12, $f12, $f22 \r\n"
        "pmullh $f14, $f14, $f22 \r\n"
        "paddh $f8, $f8, $f12 \r\n"
        "paddh $f10, $f10, $f14 \r\n"
        "paddh $f8, $f8, $f16 \r\n"
        "paddh $f10, $f10, $f18 \r\n"
        "paddh $f8, $f8, $f22 \r\n"
        "paddh $f10, $f10, $f22 \r\n"
        "psrah $f8, $f8, $f20 \r\n"
        "psrah $f10, $f10, $f20 \r\n"
        "packushb $f4, $f8, $f10 \r\n"
        "biadd $f2, $f4 \r\n"
        "mfc1 $9, $f2 \r\n"
        "addiu $9, $9, 4 \r\n"
        "dsrl $9, $9, 3 \r\n"
        "li $8, 0x01010101 \r\n"
        "mul %0, $9, $8 \r\n"
        : "=r"(dc)
        : "r"(src-stride-1), "r"(src-stride), "r"(src-stride+1),
          "r"(has_topleft), "r"(has_topright)
        : "$8", "$9", "$10"
    );

    for (y = 0; y < 8; y++) {
        AV_WN4PA(((uint32_t *)src) + 0, dc);
        AV_WN4PA(((uint32_t *)src) + 1, dc);
        src += stride;
    }
}
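This routine low-pass filters the samples around the top edge with a (1,2,1)/4 kernel: the three unpacked vectors hold the top row shifted left, centred, and shifted right, pmullh by 2 weights the centre, and pinsrh_0/pinsrh_3 patch the edge lanes when has_topleft/has_topright indicate a missing corner. The filtered row is then averaged into one DC value and splatted. A scalar model of the filter-and-average step (a sketch; SIMD lane details omitted):

#include <stdint.h>
#include <stddef.h>

static int top_dc_ref(const uint8_t *src, int has_topleft,
                      int has_topright, ptrdiff_t stride)
{
    const uint8_t *top = src - stride;
    int sum = 0;
    for (int x = 0; x < 8; x++) {
        int l = (x == 0 && !has_topleft)  ? top[0] : top[x - 1];
        int r = (x == 7 && !has_topright) ? top[7] : top[x + 1];
        sum += (l + 2 * top[x] + r + 2) >> 2;  /* (1,2,1)/4, rounded */
    }
    return (sum + 4) >> 3;  /* rounded mean of the 8 filtered samples */
}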
void ff_pred8x8l_dc_8_mmi(uint8_t *src, int has_topleft, int has_topright,
        ptrdiff_t stride)
{
    int y;
    uint32_t dc, dc1, dc2;

    const int l0 = ((has_topleft ? src[-1+-1*stride] : src[-1+0*stride])
                   + 2*src[-1+0*stride] + src[-1+1*stride] + 2) >> 2;
    const int l1 = (src[-1+0*stride] + 2*src[-1+1*stride] + src[-1+2*stride] + 2) >> 2;
    const int l2 = (src[-1+1*stride] + 2*src[-1+2*stride] + src[-1+3*stride] + 2) >> 2;
    const int l3 = (src[-1+2*stride] + 2*src[-1+3*stride] + src[-1+4*stride] + 2) >> 2;
    const int l4 = (src[-1+3*stride] + 2*src[-1+4*stride] + src[-1+5*stride] + 2) >> 2;
    const int l5 = (src[-1+4*stride] + 2*src[-1+5*stride] + src[-1+6*stride] + 2) >> 2;
    const int l6 = (src[-1+5*stride] + 2*src[-1+6*stride] + src[-1+7*stride] + 2) >> 2;
    const int l7 = (src[-1+6*stride] + 2*src[-1+7*stride] + src[-1+7*stride] + 2) >> 2;

    __asm__ volatile (
        "ldl $8, 7(%1) \r\n"
        "ldr $8, 0(%1) \r\n"
        "ldl $9, 7(%2) \r\n"
        "ldr $9, 0(%2) \r\n"
        "ldl $10, 7(%3) \r\n"
        "ldr $10, 0(%3) \r\n"
        "dmtc1 $8, $f2 \r\n"
        "dmtc1 $9, $f4 \r\n"
        "dmtc1 $10, $f6 \r\n"
        "dmtc1 $0, $f0 \r\n"
        "punpcklbh $f8, $f2, $f0 \r\n"
        "punpckhbh $f10, $f2, $f0 \r\n"
        "punpcklbh $f12, $f4, $f0 \r\n"
        "punpckhbh $f14, $f4, $f0 \r\n"
        "punpcklbh $f16, $f6, $f0 \r\n"
        "punpckhbh $f18, $f6, $f0 \r\n"
        "daddiu $8, $0, 3 \r\n"
        "dmtc1 $8, $f20 \r\n"
        "pshufh $f28, $f10, $f20 \r\n"
        "pshufh $f30, $f18, $f20 \r\n"
        "pinsrh_3 $f10, $f10, $f30 \r\n"
        "pinsrh_3 $f18, $f18, $f28 \r\n"
        "bnez %4, 1f \r\n"
        "pinsrh_0 $f8, $f8, $f12 \r\n"
        "1: \r\n"
        "bnez %5, 2f \r\n"
        "pshufh $f30, $f14, $f20 \r\n"
        "pinsrh_3 $f10, $f10, $f30 \r\n"
        "2: \r\n"
        "daddiu $8, $0, 2 \r\n"
        "dmtc1 $8, $f20 \r\n"
        "pshufh $f22, $f20, $f0 \r\n"
        "pmullh $f12, $f12, $f22 \r\n"
        "pmullh $f14, $f14, $f22 \r\n"
        "paddh $f8, $f8, $f12 \r\n"
        "paddh $f10, $f10, $f14 \r\n"
        "paddh $f8, $f8, $f16 \r\n"
        "paddh $f10, $f10, $f18 \r\n"
        "paddh $f8, $f8, $f22 \r\n"
        "paddh $f10, $f10, $f22 \r\n"
        "psrah $f8, $f8, $f20 \r\n"
        "psrah $f10, $f10, $f20 \r\n"
        "packushb $f4, $f8, $f10 \r\n"
        "biadd $f2, $f4 \r\n"
        "mfc1 %0, $f2 \r\n"
        : "=r"(dc2)
        : "r"(src-stride-1), "r"(src-stride), "r"(src-stride+1),
          "r"(has_topleft), "r"(has_topright)
        : "$8", "$9", "$10"
    );

    dc1 = l0+l1+l2+l3+l4+l5+l6+l7;
    dc = PIXEL_SPLAT_X4((dc1+dc2+8)>>4);

    for (y = 0; y < 8; y++) {
        AV_WN4PA(((uint32_t *)src) + 0, dc);
        AV_WN4PA(((uint32_t *)src) + 1, dc);
        src += stride;
    }
}
void ff_pred8x8l_horizontal_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    const int l0 = ((has_topleft ? src[-1+-1*stride] : src[-1+0*stride])
                   + 2*src[-1+0*stride] + src[-1+1*stride] + 2) >> 2;
    const int l1 = (src[-1+0*stride] + 2*src[-1+1*stride] + src[-1+2*stride] + 2) >> 2;
    const int l2 = (src[-1+1*stride] + 2*src[-1+2*stride] + src[-1+3*stride] + 2) >> 2;
    const int l3 = (src[-1+2*stride] + 2*src[-1+3*stride] + src[-1+4*stride] + 2) >> 2;
    const int l4 = (src[-1+3*stride] + 2*src[-1+4*stride] + src[-1+5*stride] + 2) >> 2;
    const int l5 = (src[-1+4*stride] + 2*src[-1+5*stride] + src[-1+6*stride] + 2) >> 2;
    const int l6 = (src[-1+5*stride] + 2*src[-1+6*stride] + src[-1+7*stride] + 2) >> 2;
    const int l7 = (src[-1+6*stride] + 2*src[-1+7*stride] + src[-1+7*stride] + 2) >> 2;

    AV_WN4PA(src + 0*stride,     PIXEL_SPLAT_X4(l0));
    AV_WN4PA(src + 0*stride + 4, PIXEL_SPLAT_X4(l0));
    AV_WN4PA(src + 1*stride,     PIXEL_SPLAT_X4(l1));
    AV_WN4PA(src + 1*stride + 4, PIXEL_SPLAT_X4(l1));
    AV_WN4PA(src + 2*stride,     PIXEL_SPLAT_X4(l2));
    AV_WN4PA(src + 2*stride + 4, PIXEL_SPLAT_X4(l2));
    AV_WN4PA(src + 3*stride,     PIXEL_SPLAT_X4(l3));
    AV_WN4PA(src + 3*stride + 4, PIXEL_SPLAT_X4(l3));
    AV_WN4PA(src + 4*stride,     PIXEL_SPLAT_X4(l4));
    AV_WN4PA(src + 4*stride + 4, PIXEL_SPLAT_X4(l4));
    AV_WN4PA(src + 5*stride,     PIXEL_SPLAT_X4(l5));
    AV_WN4PA(src + 5*stride + 4, PIXEL_SPLAT_X4(l5));
    AV_WN4PA(src + 6*stride,     PIXEL_SPLAT_X4(l6));
    AV_WN4PA(src + 6*stride + 4, PIXEL_SPLAT_X4(l6));
    AV_WN4PA(src + 7*stride,     PIXEL_SPLAT_X4(l7));
    AV_WN4PA(src + 7*stride + 4, PIXEL_SPLAT_X4(l7));
}
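AV_WN4PA and PIXEL_SPLAT_X4 come from bit_depth_template.c, pulled in through h264pred_mips.h. For 8-bit pixels PIXEL_SPLAT_X4(x) expands to x * 0x01010101U, the 32-bit version of the byte-replication trick used in the asm above, and AV_WN4PA is an aligned 32-bit store; each pair of writes therefore fills one 8-pixel row with the filtered left-neighbour value. Rough 8-bit equivalents (illustrative only, not FFmpeg's definitions):

#include <stdint.h>
#include <string.h>

static inline uint32_t splat_x4(uint8_t x)
{
    return x * 0x01010101U;   /* replicate one byte into 4 lanes */
}

static inline void wn4pa(void *p, uint32_t v)
{
    memcpy(p, &v, 4);         /* 32-bit store; FFmpeg's macro assumes alignment */
}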
void ff_pred8x8l_vertical_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    int y;
    uint32_t a, b;

    __asm__ volatile (
        "ldl $8, 7(%1) \r\n"
        "ldr $8, 0(%1) \r\n"
        "ldl $9, 7(%2) \r\n"
        "ldr $9, 0(%2) \r\n"
        "ldl $10, 7(%3) \r\n"
        "ldr $10, 0(%3) \r\n"
        "dmtc1 $8, $f2 \r\n"
        "dmtc1 $9, $f4 \r\n"
        "dmtc1 $10, $f6 \r\n"
        "dmtc1 $0, $f0 \r\n"
        "punpcklbh $f8, $f2, $f0 \r\n"
        "punpckhbh $f10, $f2, $f0 \r\n"
        "punpcklbh $f12, $f4, $f0 \r\n"
        "punpckhbh $f14, $f4, $f0 \r\n"
        "punpcklbh $f16, $f6, $f0 \r\n"
        "punpckhbh $f18, $f6, $f0 \r\n"
        "bnez %4, 1f \r\n"
        "pinsrh_0 $f8, $f8, $f12 \r\n"
        "1: \r\n"
        "bnez %5, 2f \r\n"
        "pinsrh_3 $f18, $f18, $f14 \r\n"
        "2: \r\n"
        "daddiu $8, $0, 2 \r\n"
        "dmtc1 $8, $f20 \r\n"
        "pshufh $f22, $f20, $f0 \r\n"
        "pmullh $f12, $f12, $f22 \r\n"
        "pmullh $f14, $f14, $f22 \r\n"
        "paddh $f8, $f8, $f12 \r\n"
        "paddh $f10, $f10, $f14 \r\n"
        "paddh $f8, $f8, $f16 \r\n"
        "paddh $f10, $f10, $f18 \r\n"
        "paddh $f8, $f8, $f22 \r\n"
        "paddh $f10, $f10, $f22 \r\n"
        "psrah $f8, $f8, $f20 \r\n"
        "psrah $f10, $f10, $f20 \r\n"
        "packushb $f4, $f8, $f10 \r\n"
        "sdc1 $f4, %0 \r\n"
        : "=m"(*src)
        : "r"(src-stride-1), "r"(src-stride), "r"(src-stride+1),
          "r"(has_topleft), "r"(has_topright)
        : "$8", "$9", "$10"
    );

    a = AV_RN4PA(((uint32_t *)src) + 0);
    b = AV_RN4PA(((uint32_t *)src) + 1);

    for (y = 1; y < 8; y++) {
        AV_WN4PA(((uint32_t *)(src + y*stride)) + 0, a);
        AV_WN4PA(((uint32_t *)(src + y*stride)) + 1, b);
    }
}
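The asm computes the same (1,2,1)/4-filtered top row as in ff_pred8x8l_top_dc_8_mmi, but stores the filtered bytes directly into row 0 (sdc1 $f4, %0); the C tail then re-reads row 0 and replicates it into rows 1..7. An equivalent scalar sketch (not part of the commit):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void pred8x8l_vertical_ref(uint8_t *src, int has_topleft,
                                  int has_topright, ptrdiff_t stride)
{
    const uint8_t *top = src - stride;
    uint8_t row[8];
    for (int x = 0; x < 8; x++) {
        int l = (x == 0 && !has_topleft)  ? top[0] : top[x - 1];
        int r = (x == 7 && !has_topright) ? top[7] : top[x + 1];
        row[x] = (l + 2 * top[x] + r + 2) >> 2;   /* filtered top row */
    }
    for (int y = 0; y < 8; y++)
        memcpy(src + y * stride, row, 8);          /* copy downwards */
}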
void ff_pred4x4_dc_8_mmi(uint8_t *src, const uint8_t *topright,
        ptrdiff_t stride)
{
    const int dc = (src[-stride] + src[1-stride] + src[2-stride]
                 + src[3-stride] + src[-1+0*stride] + src[-1+1*stride]
                 + src[-1+2*stride] + src[-1+3*stride] + 4) >> 3;

    __asm__ volatile (
        ".set arch=loongson3a \r\n"
        "dli $4, 0x01010101010101 \r\n"
        "daddu $2, %2, $0 \r\n"
        "dmul $3, $2, $4 \r\n"
        "xor $4, $4, $4 \r\n"
        "gsswx $3, 0(%0,$4) \r\n"
        "daddu $4, %1 \r\n"
        "gsswx $3, 0(%0,$4) \r\n"
        "daddu $4, %1 \r\n"
        "gsswx $3, 0(%0,$4) \r\n"
        "daddu $4, %1 \r\n"
        "gsswx $3, 0(%0,$4) \r\n"
        ::"r"(src), "r"(stride), "r"(dc)
        : "$2", "$3", "$4", "memory"
    );
}
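Here the DC value itself is computed in plain C (4 top + 4 left neighbours, (sum + 4) >> 3) and only the fill is done in asm: dc is splatted by multiplication and stored to the four rows with gsswx, Loongson's indexed 32-bit store, so only the low 32 bits of the splat product matter. A portable model of the fill step (a sketch, not part of the commit):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void pred4x4_dc_fill_ref(uint8_t *src, ptrdiff_t stride, int dc)
{
    uint32_t v = (uint32_t)dc * 0x01010101U;  /* splat dc into 4 byte lanes */
    for (int y = 0; y < 4; y++)
        memcpy(src + y * stride, &v, 4);
}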
void ff_pred8x8_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dsubu $2, %0, %1 \r\n"
        "daddu $3, %0, $0 \r\n"
        "ldl $4, 7($2) \r\n"
        "ldr $4, 0($2) \r\n"
        "dli $5, 0x8 \r\n"
        "1: \r\n"
        "sdl $4, 7($3) \r\n"
        "sdr $4, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "daddiu $5, -1 \r\n"
        "bnez $5, 1b \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "$4", "$5", "memory"
    );
}
void ff_pred8x8_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        ".set arch=loongson3a \r\n"
        "daddiu $2, %0, -1 \r\n"
        "daddu $3, %0, $0 \r\n"
        "dli $6, 0x8 \r\n"
        "dli $7, 0x0101010101010101 \r\n"
        "1: \r\n"
        "lbu $4, 0($2) \r\n"
        "dmul $5, $4, $7 \r\n"
        "sdl $5, 7($3) \r\n"
        "sdr $5, 0($3) \r\n"
        "daddu $2, %1 \r\n"
        "daddu $3, %1 \r\n"
        "daddiu $6, -1 \r\n"
        "bnez $6, 1b \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "$4", "$5", "$6", "memory"
    );
}
static void ff_pred16x16_plane_compat_8_mmi(uint8_t *src, ptrdiff_t stride,
        const int svq3, const int rv40)
{
    __asm__ volatile (
        "negu $2, %1 \r\n"
        "daddu $3, %0, $2 \r\n"
        "xor $f8, $f8, $f8 \r\n"
        "gslwlc1 $f0, 2($3) \r\n"
        "gslwrc1 $f0, -1($3) \r\n"
        "gslwlc1 $f2, 6($3) \r\n"
        "gslwrc1 $f2, 3($3) \r\n"
        "gslwlc1 $f4, 11($3) \r\n"
        "gslwrc1 $f4, 8($3) \r\n"
        "gslwlc1 $f6, 15($3) \r\n"
        "gslwrc1 $f6, 12($3) \r\n"
        "punpcklbh $f0, $f0, $f8 \r\n"
        "punpcklbh $f2, $f2, $f8 \r\n"
        "punpcklbh $f4, $f4, $f8 \r\n"
        "punpcklbh $f6, $f6, $f8 \r\n"
        "dli $4, 0xfffbfffafff9fff8 \r\n"
        "dmtc1 $4, $f20 \r\n"
        "dli $4, 0xfffffffefffdfffc \r\n"
        "dmtc1 $4, $f22 \r\n"
        "dli $4, 0x0004000300020001 \r\n"
        "dmtc1 $4, $f24 \r\n"
        "dli $4, 0x0008000700060005 \r\n"
        "dmtc1 $4, $f26 \r\n"
        "pmullh $f0, $f0, $f20 \r\n"
        "pmullh $f2, $f2, $f22 \r\n"
        "pmullh $f4, $f4, $f24 \r\n"
        "pmullh $f6, $f6, $f26 \r\n"
        "paddsh $f0, $f0, $f4 \r\n"
        "paddsh $f2, $f2, $f6 \r\n"
        "paddsh $f0, $f0, $f2 \r\n"
        "dli $4, 0xE \r\n"
        "dmtc1 $4, $f28 \r\n"
        "pshufh $f2, $f0, $f28 \r\n"
        "paddsh $f0, $f0, $f2 \r\n"
        "dli $4, 0x1 \r\n"
        "dmtc1 $4, $f30 \r\n"
        "pshufh $f2, $f0, $f30 \r\n"
        "paddsh $f10, $f0, $f2 \r\n"
        "daddiu $3, %0, -1 \r\n"
        "daddu $3, $2 \r\n"
        "lbu $4, 0($3) \r\n"
        "lbu $8, 16($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $5, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $6, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $7, 0($3) \r\n"
        "dsll $5, 16 \r\n"
        "dsll $6, 32 \r\n"
        "dsll $7, 48 \r\n"
        "or $6, $7 \r\n"
        "or $4, $5 \r\n"
        "or $4, $6 \r\n"
        "dmtc1 $4, $f0 \r\n"
        "daddu $3, %1 \r\n"
        "lbu $4, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $5, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $6, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $7, 0($3) \r\n"
        "dsll $5, 16 \r\n"
        "dsll $6, 32 \r\n"
        "dsll $7, 48 \r\n"
        "or $6, $7 \r\n"
        "or $4, $5 \r\n"
        "or $4, $6 \r\n"
        "dmtc1 $4, $f2 \r\n"
        "daddu $3, %1 \r\n"
        "daddu $3, %1 \r\n"
        "lbu $4, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $5, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $6, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $7, 0($3) \r\n"
        "dsll $5, 16 \r\n"
        "dsll $6, 32 \r\n"
        "dsll $7, 48 \r\n"
        "or $6, $7 \r\n"
        "or $4, $5 \r\n"
        "or $4, $6 \r\n"
        "dmtc1 $4, $f4 \r\n"
        "daddu $3, %1 \r\n"
        "lbu $4, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $5, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $6, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "lbu $7, 0($3) \r\n"
        "daddu $8, $7 \r\n"
        "daddiu $8, 1 \r\n"
        "dsll $8, 4 \r\n"
        "dsll $5, 16 \r\n"
        "dsll $6, 32 \r\n"
        "dsll $7, 48 \r\n"
        "or $6, $7 \r\n"
        "or $4, $5 \r\n"
        "or $4, $6 \r\n"
        "dmtc1 $4, $f6 \r\n"
        "pmullh $f0, $f0, $f20 \r\n"
        "pmullh $f2, $f2, $f22 \r\n"
        "pmullh $f4, $f4, $f24 \r\n"
        "pmullh $f6, $f6, $f26 \r\n"
        "paddsh $f0, $f0, $f4 \r\n"
        "paddsh $f2, $f2, $f6 \r\n"
        "paddsh $f0, $f0, $f2 \r\n"
        "pshufh $f2, $f0, $f28 \r\n"
        "paddsh $f0, $f0, $f2 \r\n"
        "pshufh $f2, $f0, $f30 \r\n"
        "paddsh $f12, $f0, $f2 \r\n"
        "dmfc1 $2, $f10 \r\n"
        "dsll $2, 48 \r\n"
        "dsra $2, 48 \r\n"
        "dmfc1 $3, $f12 \r\n"
        "dsll $3, 48 \r\n"
        "dsra $3, 48 \r\n"
        "beqz %2, 1f \r\n"
        "dli $4, 4 \r\n"
        "ddiv $2, $4 \r\n"
        "ddiv $3, $4 \r\n"
        "dli $4, 5 \r\n"
        "dmul $2, $4 \r\n"
        "dmul $3, $4 \r\n"
        "dli $4, 16 \r\n"
        "ddiv $2, $4 \r\n"
        "ddiv $3, $4 \r\n"
        "daddu $4, $2, $0 \r\n"
        "daddu $2, $3, $0 \r\n"
        "daddu $3, $4, $0 \r\n"
        "b 2f \r\n"
        "1: \r\n"
        "beqz %3, 1f \r\n"
        "dsra $4, $2, 2 \r\n"
        "daddu $2, $4 \r\n"
        "dsra $4, $3, 2 \r\n"
        "daddu $3, $4 \r\n"
        "dsra $2, 4 \r\n"
        "dsra $3, 4 \r\n"
        "b 2f \r\n"
        "1: \r\n"
        "dli $4, 5 \r\n"
        "dmul $2, $4 \r\n"
        "dmul $3, $4 \r\n"
        "daddiu $2, 32 \r\n"
        "daddiu $3, 32 \r\n"
        "dsra $2, 6 \r\n"
        "dsra $3, 6 \r\n"
        "2: \r\n"
        "daddu $5, $2, $3 \r\n"
        "dli $4, 7 \r\n"
        "dmul $5, $4 \r\n"
        "dsubu $8, $5 \r\n"
        "dmtc1 $0, $f8 \r\n"
        "dmtc1 $2, $f0 \r\n"
        "pshufh $f0, $f0, $f8 \r\n"
        "dmtc1 $3, $f10 \r\n"
        "pshufh $f10, $f10, $f8 \r\n"
        "dmtc1 $8, $f12 \r\n"
        "pshufh $f12, $f12, $f8 \r\n"
        "dli $4, 5 \r\n"
        "dmtc1 $4, $f14 \r\n"
        "dli $4, 0x0003000200010000 \r\n"
        "dmtc1 $4, $f2 \r\n"
        "pmullh $f2, $f2, $f0 \r\n"
        "dli $4, 0x0007000600050004 \r\n"
        "dmtc1 $4, $f4 \r\n"
        "pmullh $f4, $f4, $f0 \r\n"
        "dli $4, 0x000b000a00090008 \r\n"
        "dmtc1 $4, $f6 \r\n"
        "pmullh $f6, $f6, $f0 \r\n"
        "dli $4, 0x000f000e000d000c \r\n"
        "dmtc1 $4, $f8 \r\n"
        "pmullh $f8, $f8, $f0 \r\n"
        "daddu $3, %0, $0 \r\n"
        "dli $2, 16 \r\n"
        "1: \r\n"
        "paddsh $f16, $f2, $f12 \r\n"
        "psrah $f16, $f16, $f14 \r\n"
        "paddsh $f18, $f4, $f12 \r\n"
        "psrah $f18, $f18, $f14 \r\n"
        "packushb $f20, $f16, $f18 \r\n"
        "gssdlc1 $f20, 7($3) \r\n"
        "gssdrc1 $f20, 0($3) \r\n"
        "paddsh $f16, $f6, $f12 \r\n"
        "psrah $f16, $f16, $f14 \r\n"
        "paddsh $f18, $f8, $f12 \r\n"
        "psrah $f18, $f18, $f14 \r\n"
        "packushb $f20, $f16, $f18 \r\n"
        "gssdlc1 $f20, 15($3) \r\n"
        "gssdrc1 $f20, 8($3) \r\n"
        "paddsh $f12, $f12, $f10 \r\n"
        "daddu $3, %1 \r\n"
        "daddiu $2, -1 \r\n"
        "bnez $2, 1b \r\n"
        ::"r"(src), "r"(stride), "r"(svq3), "r"(rv40)
        : "$2", "$3", "$4", "$5", "$6", "$7", "$8", "memory"
    );
}
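The compat routine implements H.264-style plane (gradient) prediction: weighted sums H and V over the top row and left column give two gradients, which are scaled differently per codec (the beqz %2 / beqz %3 branches select the SVQ3, RV40, or plain H.264 rounding), and the block is filled with a saturated linear ramp; the $8 accumulator corresponds to the 16*(top[15] + left[15] + 1) offset. A scalar model of the plain H.264 case (svq3 = rv40 = 0), as an illustrative sketch only:

#include <stdint.h>
#include <stddef.h>

static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

static void pred16x16_plane_h264_ref(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *top = src - stride;  /* top[-1] is the top-left corner */
    int H = 0, V = 0;

    for (int i = 1; i <= 8; i++) {
        H += i * (top[7 + i] - top[7 - i]);
        V += i * (src[(7 + i) * stride - 1] - src[(7 - i) * stride - 1]);
    }

    int b = (5 * H + 32) >> 6;                    /* horizontal gradient */
    int c = (5 * V + 32) >> 6;                    /* vertical gradient   */
    int a = 16 * (top[15] + src[15 * stride - 1] + 1) - 7 * (b + c);

    for (int y = 0; y < 16; y++, a += c) {
        int v = a;
        for (int x = 0; x < 16; x++, v += b)
            src[y * stride + x] = clip_u8(v >> 5);
    }
}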
void ff_pred16x16_plane_svq3_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 1, 0);
}

void ff_pred16x16_plane_rv40_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 0, 1);
}

void ff_pred16x16_plane_h264_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 0, 0);
}
void ff_pred8x8_top_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dli $2, 2 \r\n"
        "xor $f0, $f0, $f0 \r\n"
        "xor $f2, $f2, $f2 \r\n"
        "xor $f30, $f30, $f30 \r\n"
        "negu $3, %1 \r\n"
        "daddu $3, $3, %0 \r\n"
        "gsldlc1 $f4, 7($3) \r\n"
        "gsldrc1 $f4, 0($3) \r\n"
        "punpcklbh $f0, $f4, $f30 \r\n"
        "punpckhbh $f2, $f4, $f30 \r\n"
        "biadd $f0, $f0 \r\n"
        "biadd $f2, $f2 \r\n"
        "pshufh $f0, $f0, $f30 \r\n"
        "pshufh $f2, $f2, $f30 \r\n"
        "dmtc1 $2, $f4 \r\n"
        "pshufh $f4, $f4, $f30 \r\n"
        "paddush $f0, $f0, $f4 \r\n"
        "paddush $f2, $f2, $f4 \r\n"
        "dmtc1 $2, $f4 \r\n"
        "psrlh $f0, $f0, $f4 \r\n"
        "psrlh $f2, $f2, $f4 \r\n"
        "packushb $f4, $f0, $f2 \r\n"
        "dli $2, 8 \r\n"
        "1: \r\n"
        "gssdlc1 $f4, 7(%0) \r\n"
        "gssdrc1 $f4, 0(%0) \r\n"
        "daddu %0, %0, %1 \r\n"
        "daddiu $2, $2, -1 \r\n"
        "bnez $2, 1b \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "memory"
    );
}
void ff_pred8x8_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "negu $2, %1 \r\n"
        "daddu $2, $2, %0 \r\n"
        "daddiu $5, $2, 4 \r\n"
        "lbu $6, 0($2) \r\n"
        "daddu $3, $0, $6 \r\n"
        "daddiu $2, 1 \r\n"
        "lbu $6, 0($5) \r\n"
        "daddu $4, $0, $6 \r\n"
        "daddiu $5, 1 \r\n"
        "lbu $6, 0($2) \r\n"
        "daddu $3, $3, $6 \r\n"
        "daddiu $2, 1 \r\n"
        "lbu $6, 0($5) \r\n"
        "daddu $4, $4, $6 \r\n"
        "daddiu $5, 1 \r\n"
        "lbu $6, 0($2) \r\n"
        "daddu $3, $3, $6 \r\n"
        "daddiu $2, 1 \r\n"
        "lbu $6, 0($5) \r\n"
        "daddu $4, $4, $6 \r\n"
        "daddiu $5, 1 \r\n"
        "lbu $6, 0($2) \r\n"
        "daddu $3, $3, $6 \r\n"
        "daddiu $2, 1 \r\n"
        "lbu $6, 0($5) \r\n"
        "daddu $4, $4, $6 \r\n"
        "daddiu $5, 1 \r\n"
        "dli $6, -1 \r\n"
        "daddu $6, $6, %0 \r\n"
        "lbu $5, 0($6) \r\n"
        "daddu $7, $0, $5 \r\n"
        "daddu $6, $6, %1 \r\n"
        "lbu $5, 0($6) \r\n"
        "daddu $7, $7, $5 \r\n"
        "daddu $6, $6, %1 \r\n"
        "lbu $5, 0($6) \r\n"
        "daddu $7, $7, $5 \r\n"
        "daddu $6, $6, %1 \r\n"
        "lbu $5, 0($6) \r\n"
        "daddu $7, $7, $5 \r\n"
        "daddu $6, $6, %1 \r\n"
        "lbu $5, 0($6) \r\n"
        "daddu $8, $0, $5 \r\n"
        "daddu $6, $6, %1 \r\n"
        "lbu $5, 0($6) \r\n"
        "daddu $8, $8, $5 \r\n"
        "daddu $6, $6, %1 \r\n"
        "lbu $5, 0($6) \r\n"
        "daddu $8, $8, $5 \r\n"
        "daddu $6, $6, %1 \r\n"
        "lbu $5, 0($6) \r\n"
        "daddu $8, $8, $5 \r\n"
        "daddu $3, $3, $7 \r\n"
        "daddiu $3, $3, 4 \r\n"
        "daddiu $4, $4, 2 \r\n"
        "daddiu $5, $8, 2 \r\n"
        "daddu $6, $4, $5 \r\n"
        "dsrl $3, 3 \r\n"
        "dsrl $4, 2 \r\n"
        "dsrl $5, 2 \r\n"
        "dsrl $6, 3 \r\n"
        "xor $f30, $f30, $f30 \r\n"
        "dmtc1 $3, $f0 \r\n"
        "pshufh $f0, $f0, $f30 \r\n"
        "dmtc1 $4, $f2 \r\n"
        "pshufh $f2, $f2, $f30 \r\n"
        "dmtc1 $5, $f4 \r\n"
        "pshufh $f4, $f4, $f30 \r\n"
        "dmtc1 $6, $f6 \r\n"
        "pshufh $f6, $f6, $f30 \r\n"
        "packushb $f0, $f0, $f2 \r\n"
        "packushb $f2, $f4, $f6 \r\n"
        "daddu $2, $0, %0 \r\n"
        "sdc1 $f0, 0($2) \r\n"
        "daddu $2, $2, %1 \r\n"
        "sdc1 $f0, 0($2) \r\n"
        "daddu $2, $2, %1 \r\n"
        "sdc1 $f0, 0($2) \r\n"
        "daddu $2, $2, %1 \r\n"
        "sdc1 $f0, 0($2) \r\n"
        "daddu $2, $2, %1 \r\n"
        "sdc1 $f2, 0($2) \r\n"
        "daddu $2, $2, %1 \r\n"
        "sdc1 $f2, 0($2) \r\n"
        "daddu $2, $2, %1 \r\n"
        "sdc1 $f2, 0($2) \r\n"
        "daddu $2, $2, %1 \r\n"
        "sdc1 $f2, 0($2) \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "$4", "$5", "$6", "$7", "$8", "memory"
    );
}
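Chroma 8x8 DC prediction computes four DCs, one per 4x4 quadrant: in the asm, $3/$4 accumulate the two top half-sums and $7/$8 the two left half-sums, giving (top0-3 + left0-3 + 4) >> 3 for the top-left quadrant, (top4-7 + 2) >> 2 and (left4-7 + 2) >> 2 for the edge quadrants, and (top4-7 + left4-7 + 4) >> 3 for the bottom-right; the packed words are then stored with sdc1, four rows each. A scalar model (an illustrative sketch, not part of the commit):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void pred8x8_dc_ref(uint8_t *src, ptrdiff_t stride)
{
    int t0 = 0, t1 = 0, l0 = 0, l1 = 0;
    for (int i = 0; i < 4; i++) {
        t0 += src[i - stride];            /* top, columns 0..3 */
        t1 += src[i + 4 - stride];        /* top, columns 4..7 */
        l0 += src[i * stride - 1];        /* left, rows 0..3   */
        l1 += src[(i + 4) * stride - 1];  /* left, rows 4..7   */
    }

    uint8_t dc[4] = {
        (t0 + l0 + 4) >> 3,  /* top-left quadrant                  */
        (t1 + 2)      >> 2,  /* top-right: top neighbours only     */
        (l1 + 2)      >> 2,  /* bottom-left: left neighbours only  */
        (t1 + l1 + 4) >> 3,  /* bottom-right quadrant              */
    };

    for (int y = 0; y < 8; y++) {
        memset(src + y * stride,     dc[y < 4 ? 0 : 2], 4);
        memset(src + y * stride + 4, dc[y < 4 ? 1 : 3], 4);
    }
}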
void ff_pred8x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dsubu $2, %0, %1 \r\n"
        "daddu $3, %0, $0 \r\n"
        "ldl $4, 7($2) \r\n"
        "ldr $4, 0($2) \r\n"
        "dli $5, 0x10 \r\n"
        "1: \r\n"
        "sdl $4, 7($3) \r\n"
        "sdr $4, 0($3) \r\n"
        "daddu $3, %1 \r\n"
        "daddiu $5, -1 \r\n"
        "bnez $5, 1b \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "$4", "$5", "memory"
    );
}
void ff_pred8x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        ".set arch=loongson3a \r\n"
        "daddiu $2, %0, -1 \r\n"
        "daddu $3, %0, $0 \r\n"
        "dli $6, 0x10 \r\n"
        "dli $7, 0x0101010101010101 \r\n"
        "1: \r\n"
        "lbu $4, 0($2) \r\n"
        "dmul $5, $4, $7 \r\n"
        "sdl $5, 7($3) \r\n"
        "sdr $5, 0($3) \r\n"
        "daddu $2, %1 \r\n"
        "daddu $3, %1 \r\n"
        "daddiu $6, -1 \r\n"
        "bnez $6, 1b \r\n"
        ::"r"(src), "r"(stride)
        : "$2", "$3", "$4", "$5", "$6", "memory"
    );
}