/*
 * CPU detection code, extracted from mmx.h
 * (c)1997-99 by H. Dietz and R. Fisher
 * Converted to C and improved by Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/cpu.h"
#include "libavutil/cpu_internal.h"

#if HAVE_X86ASM
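/* With external assembly enabled, CPUID and XGETBV go through the assembly
 * helpers ff_cpu_cpuid() and ff_cpu_xgetbv() rather than inline asm, so this
 * also works with compilers that lack GNU inline assembly. */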

#define cpuid(index, eax, ebx, ecx, edx)        \
    ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

#define xgetbv(index, eax, edx)                 \
    ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* Saving ebx is necessary for PIC: on 32-bit PIC builds ebx holds the GOT
 * pointer, and gcc cannot always preserve it around the asm on its own. */
#define cpuid(index, eax, ebx, ecx, edx)                        \
    __asm__ volatile (                                          \
        "mov    %%"FF_REG_b", %%"FF_REG_S" \n\t"                \
        "cpuid                       \n\t"                      \
        "xchg   %%"FF_REG_b", %%"FF_REG_S                       \
        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx)        \
        : "0" (index), "2"(0))

#define xgetbv(index, eax, edx)                                 \
    __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
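/* The .byte sequence above is the raw encoding of the XGETBV instruction,
 * which reads the extended control register selected by ECX into EDX:EAX;
 * emitting the opcode bytes directly keeps this buildable with assemblers
 * that predate the XGETBV mnemonic. */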

#define get_eflags(x)                           \
    __asm__ volatile ("pushfl     \n"           \
                      "pop    %0  \n"           \
                      : "=r"(x))

#define set_eflags(x)                           \
    __asm__ volatile ("push    %0 \n"           \
                      "popfl      \n"           \
                      :: "r"(x))

#endif /* HAVE_INLINE_ASM */

#if ARCH_X86_64
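/* CPUID is architecturally guaranteed to be available in 64-bit mode, so no
 * runtime check is needed. */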

#define cpuid_test() 1

#elif HAVE_X86ASM

#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

static int cpuid_test(void)
{
    x86_reg a, c;

    /* Check if CPUID is supported by attempting to toggle the ID bit in
     * the EFLAGS register. */
    get_eflags(a);
    set_eflags(a ^ 0x200000); /* flip bit 21, the ID flag */
    get_eflags(c);

    return a != c;
}
#endif

/* Detect which multimedia (SIMD) instruction set extensions the CPU supports. */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor;
    int xcr0_lo = 0, xcr0_hi = 0;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);
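    /* The vendor string is the concatenation of EBX, EDX and ECX from leaf 0,
     * hence the i[0], i[2], i[1] argument order above. */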

    if (max_std_level >= 1) {
        cpuid(1, eax, ebx, ecx, std_caps);
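        /* family and model below combine the base and extended fields of
         * CPUID leaf 1 EAX (the extended model forms the high nibble). */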
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
        if (std_caps & (1 << 15))
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26))
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1)
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x00000200)
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000)
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000)
            rval |= AV_CPU_FLAG_SSE42;
        if (ecx & 0x02000000)
            rval |= AV_CPU_FLAG_AESNI;
#if HAVE_AVX
        /* Check OSXSAVE and AVX bits */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support */
            xgetbv(0, xcr0_lo, xcr0_hi);
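            /* XCR0 bit 1 is SSE (XMM) state, bit 2 is AVX (YMM) state; both
             * must be enabled by the OS, hence the 0x6 mask below. */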
            if ((xcr0_lo & 0x6) == 0x6) {
                rval |= AV_CPU_FLAG_AVX;
                if (ecx & 0x00001000)
                    rval |= AV_CPU_FLAG_FMA3;
            }
        }
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }
    if (max_std_level >= 7) {
        cpuid(7, eax, ebx, ecx, edx);
#if HAVE_AVX2
        if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
            rval |= AV_CPU_FLAG_AVX2;
#if HAVE_AVX512 /* F, CD, BW, DQ, VL */
        if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
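            /* 0xd0030000 covers leaf 7 EBX bits 16, 17, 28, 30 and 31, i.e.
             * AVX512F, AVX512DQ, AVX512CD, AVX512BW and AVX512VL; the 0xe0
             * XCR0 mask checks that the OS saves opmask and ZMM state. */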
            if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000)
                rval |= AV_CPU_FLAG_AVX512;
        }
#endif /* HAVE_AVX512 */
#endif /* HAVE_AVX2 */
        /* BMI1/2 don't need OS support */
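        /* Leaf 7 EBX bit 3 is BMI1, bit 8 is BMI2. */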
        if (ebx & 0x00000008) {
            rval |= AV_CPU_FLAG_BMI1;
            if (ebx & 0x00000100)
                rval |= AV_CPU_FLAG_BMI2;
        }
    }

    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31))
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30))
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22))
            rval |= AV_CPU_FLAG_MMXEXT;

        if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
            /* Allow for selectively disabling SSE2 functions on AMD processors
             * with SSE2 support but not SSE4a. This includes Athlon64, some
             * Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are
             * faster than SSE2 often enough to warrant this special-case flag.
             * AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this
             * case so that SSE2 is used unless explicitly disabled by checking
             * AV_CPU_FLAG_SSE2SLOW. */
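            /* CPUID leaf 0x80000001 ECX bit 6 indicates SSE4a. */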
            if ((rval & AV_CPU_FLAG_SSE2) && !(ecx & 0x00000040))
                rval |= AV_CPU_FLAG_SSE2SLOW;

            /* Similar to the above but for AVX functions on AMD processors.
             * This is necessary only for functions using YMM registers on
             * Bulldozer and Jaguar based CPUs as they lack 256-bit execution
             * units. SSE/AVX functions using XMM registers are always faster
             * on them. AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so
             * that AVX is used unless explicitly disabled by checking
             * AV_CPU_FLAG_AVXSLOW. */
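            /* Family 0x15 covers the Bulldozer line (Bulldozer, Piledriver,
             * Steamroller, Excavator); family 0x16 covers Jaguar/Puma cores. */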
            if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
                rval |= AV_CPU_FLAG_AVXSLOW;
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
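        /* Leaf 0x80000001 ECX bit 11 is XOP, bit 16 is FMA4. */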
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800)
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000)
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies to AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
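            /* The XOR clears the (known to be set) fast flag and sets the
             * corresponding SLOW flag in one step. */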
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
        }
        /* The Atom processor has SSSE3 support, which is useful in many cases,
         * but the SSSE3 version of a function is sometimes slower than its
         * SSE2 equivalent on the Atom, even though it is generally faster on
         * other processors that support SSSE3. This flag allows selectively
         * disabling certain SSSE3 functions on the Atom. */
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;

        /* Conroe has a slow shuffle unit. Check the model number to ensure not
         * to include crippled low-end Penryns and Nehalems that lack SSE4. */
        if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
            family == 6 && model < 23)
            rval |= AV_CPU_FLAG_SSSE3SLOW;
    }

#endif /* cpuid */

    return rval;
}
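
/* Illustrative usage sketch (not part of this file): callers normally query
 * the public libavutil API rather than this helper directly, e.g.
 *
 *     int flags = av_get_cpu_flags();
 *     if (flags & AV_CPU_FLAG_AVX2) {
 *         // take an AVX2 code path
 *     } else if (flags & AV_CPU_FLAG_SSE2) {
 *         // fall back to SSE2
 *     }
 */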

size_t ff_get_cpu_max_align_x86(void)
{
    int flags = av_get_cpu_flags();

    if (flags & AV_CPU_FLAG_AVX512)
        return 64;
    if (flags & (AV_CPU_FLAG_AVX2      |
                 AV_CPU_FLAG_AVX       |
                 AV_CPU_FLAG_XOP       |
                 AV_CPU_FLAG_FMA4      |
                 AV_CPU_FLAG_FMA3      |
                 AV_CPU_FLAG_AVXSLOW))
        return 32;
    if (flags & (AV_CPU_FLAG_AESNI     |
                 AV_CPU_FLAG_SSE42     |
                 AV_CPU_FLAG_SSE4      |
                 AV_CPU_FLAG_SSSE3     |
                 AV_CPU_FLAG_SSE3      |
                 AV_CPU_FLAG_SSE2      |
                 AV_CPU_FLAG_SSE       |
                 AV_CPU_FLAG_ATOM      |
                 AV_CPU_FLAG_SSSE3SLOW |
                 AV_CPU_FLAG_SSE3SLOW  |
                 AV_CPU_FLAG_SSE2SLOW))
        return 16;

    return 8;
}
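
/* Illustrative sketch only: the alignment reported above matches the widest
 * vector registers the detected extensions use (64 bytes for ZMM with
 * AVX-512, 32 for YMM with AVX, 16 for XMM with SSE). In practice callers go
 * through av_cpu_max_align() and pair the result with an aligned allocator,
 * e.g. for a hypothetical buffer of "size" bytes:
 *
 *     size_t align = av_cpu_max_align();
 *     void  *buf;
 *     if (posix_memalign(&buf, align, size) != 0)
 *         buf = NULL;
 */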