/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

#if HAVE_INLINE_ASM

#if HAVE_ARMV6

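/* Fast division by multiplication with a reciprocal from the ff_inverse[]
 * table (declared elsewhere in libavutil): for b > 2 the quotient is the
 * high word of inverse * a (SMMUL); b <= 2 falls back to a single shift. */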
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r;
    __asm__ ("cmp     %2, #2               \n\t"
             "ldr     %0, [%3, %2, lsl #2] \n\t"
             "ite     le                   \n\t"
             "lsrle   %0, %1, #1           \n\t"
             "smmulgt %0, %0, %1           \n\t"
             : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc");
    return r;
}

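/* Clip helpers using the ARMv6 saturating instructions: USAT/SSAT clamp a
 * value to a given unsigned/signed bit width in a single instruction. */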
#define av_clip_uint8 av_clip_uint8_arm
static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
{
    unsigned x;
    __asm__ ("usat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}

#define av_clip_int8 av_clip_int8_arm
static av_always_inline av_const int8_t av_clip_int8_arm(int a)
{
    int x;
    __asm__ ("ssat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}

#define av_clip_uint16 av_clip_uint16_arm
static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
{
    unsigned x;
    __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}

#define av_clip_int16 av_clip_int16_arm
static av_always_inline av_const int16_t av_clip_int16_arm(int a)
{
    int x;
    __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}

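/* Clip to [0, 2^p - 1].  USAT takes the bit position as an immediate, hence
 * the "i" constraint: p must be a compile-time constant after inlining. */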
#define av_clip_uintp2 av_clip_uintp2_arm
static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
{
    unsigned x;
    __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
    return x;
}

#else /* HAVE_ARMV6 */

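/* Pre-ARMv6 fallback: UMULL forms the full 64-bit product and the quotient
 * is its high word, i.e. (a * ff_inverse[b]) >> 32. */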
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ ("umull %1, %0, %2, %3"
             : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
    return r;
}

#endif /* HAVE_ARMV6 */

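/* Saturate a 64-bit value to the int32_t range.  ADDS yields zero iff the
 * high word is the sign extension of the low word (the value fits); on
 * overflow, 0x7fffffff is XORed with the sign of the high word to produce
 * INT32_MAX or INT32_MIN. */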
#define av_clipl_int32 av_clipl_int32_arm
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
    int x, y;
    __asm__ ("adds   %1, %R2, %Q2, lsr #31  \n\t"
             "itet   ne                     \n\t"
             "mvnne  %1, #1<<31             \n\t"
             "moveq  %0, %Q2                \n\t"
             "eorne  %0, %1,  %R2, asr #31  \n\t"
             : "=r"(x), "=&r"(y) : "r"(a) : "cc");
    return x;
}

#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */