/*
 * CPU detection code, extracted from mmx.h
 * (c)1997-99 by H. Dietz and R. Fisher
 * Converted to C and improved by Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/cpu.h"
#include "libavutil/cpu_internal.h"

#if HAVE_YASM

#define cpuid(index, eax, ebx, ecx, edx)        \
    ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

#define xgetbv(index, eax, edx)                 \
    ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* Saving/restoring ebx manually is necessary for PIC: gcc reserves ebx for
 * the GOT pointer and cannot preserve it around the asm on its own. */
#define cpuid(index, eax, ebx, ecx, edx)                        \
    __asm__ volatile (                                          \
        "mov    %%"FF_REG_b", %%"FF_REG_S" \n\t"                \
        "cpuid                       \n\t"                      \
        "xchg   %%"FF_REG_b", %%"FF_REG_S                       \
        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx)        \
        : "0" (index), "2"(0))

#define xgetbv(index, eax, edx)                                 \
    __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))

#define get_eflags(x)                           \
    __asm__ volatile ("pushfl     \n"           \
                      "pop    %0  \n"           \
                      : "=r"(x))

#define set_eflags(x)                           \
    __asm__ volatile ("push    %0 \n"           \
                      "popfl      \n"           \
                      :: "r"(x))

#endif /* HAVE_INLINE_ASM */

#if ARCH_X86_64
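/* CPUID is part of the x86-64 baseline, so no runtime check is needed here. */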

#define cpuid_test() 1

#elif HAVE_YASM

#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

static int cpuid_test(void)
{
    x86_reg a, c;

    /* Check whether CPUID is supported by attempting to toggle the ID bit
     * (bit 21, mask 0x200000) in the EFLAGS register. */
    get_eflags(a);
    set_eflags(a ^ 0x200000);
    get_eflags(c);

    return a != c;
}
#endif

/* Detect which x86 multimedia (SIMD) instruction set extensions are supported. */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

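    /* Leaf 0 returns the highest supported standard leaf in EAX and the
     * 12-byte vendor string in EBX, EDX, ECX (in that order), which is why
     * EDX lands in vendor.i[1] and ECX in vendor.i[2] below. */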
    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);

    if (max_std_level >= 1) {
        cpuid(1, eax, ebx, ecx, std_caps);
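        /* CPUID leaf 1, EAX: combine the base family/model fields (bits 11:8
         * and 7:4) with the extended family/model fields (bits 27:20 and
         * 19:16, the latter pre-shifted into the high nibble). */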
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
        if (std_caps & (1 << 15))
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26))
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1)
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x00000200)
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000)
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000)
            rval |= AV_CPU_FLAG_SSE42;
        if (ecx & 0x01000000)
            rval |= AV_CPU_FLAG_AESNI;
#if HAVE_AVX
        /* Check OSXSAVE and AVX bits */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support */
            xgetbv(0, eax, edx);
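            /* xgetbv(0) reads XCR0; bits 1 and 2 indicate that the OS saves
             * the SSE (XMM) and AVX (YMM) register state, so both must be set
             * before AVX can safely be reported. */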
            if ((eax & 0x6) == 0x6) {
                rval |= AV_CPU_FLAG_AVX;
                if (ecx & 0x00001000)
                    rval |= AV_CPU_FLAG_FMA3;
            }
        }
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }
    if (max_std_level >= 7) {
        cpuid(7, eax, ebx, ecx, edx);
#if HAVE_AVX2
        if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
            rval |= AV_CPU_FLAG_AVX2;
#endif /* HAVE_AVX2 */
        /* BMI1/2 don't need OS support */
        if (ebx & 0x00000008) {
            rval |= AV_CPU_FLAG_BMI1;
            if (ebx & 0x00000100)
                rval |= AV_CPU_FLAG_BMI2;
        }
    }

    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31))
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30))
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22))
            rval |= AV_CPU_FLAG_MMXEXT;

        if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
            /* Allow for selectively disabling SSE2 functions on AMD processors
             * with SSE2 support but not SSE4a. This includes Athlon64, some
             * Opteron, and some Sempron processors. On these CPUs, MMX, SSE,
             * or 3DNow! versions are often faster than SSE2, which is what
             * makes this special-case flag worthwhile.
             * AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this
             * case so that SSE2 is used unless explicitly disabled by checking
             * AV_CPU_FLAG_SSE2SLOW. */
            if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040))
                rval |= AV_CPU_FLAG_SSE2SLOW;

            /* Similar to the above, but for AVX functions on AMD processors.
             * This is necessary only for functions using YMM registers on
             * Bulldozer- and Jaguar-based CPUs, as they lack 256-bit execution
             * units; SSE/AVX functions using XMM registers are always faster
             * on them. AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so
             * that AVX is used unless explicitly disabled by checking
             * AV_CPU_FLAG_AVXSLOW. */
            if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
                rval |= AV_CPU_FLAG_AVXSLOW;
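            /* Usage note (illustrative): a caller that wants to avoid the slow
             * 256-bit paths would typically require
             *     (flags & AV_CPU_FLAG_AVX) && !(flags & AV_CPU_FLAG_AVXSLOW)
             * and analogously for AV_CPU_FLAG_SSE2 / AV_CPU_FLAG_SSE2SLOW. */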
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800)
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000)
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
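            /* The XOR clears the plain flag and sets the corresponding *SLOW
             * flag in one step: the guard above guarantees the plain flag is
             * set, and the *SLOW flag cannot already be set at this point. */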
        }
        /* The Atom processor has SSSE3 support, which is useful in many cases,
         * but an SSSE3 version of a function is sometimes slower than its SSE2
         * equivalent on the Atom, even though it is generally faster on other
         * processors supporting SSSE3. This flag allows for selectively
         * disabling certain SSSE3 functions on the Atom. */
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;
    }

#endif /* cpuid */

    return rval;
}