/*
 * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
 *
 * some optimization ideas from aes128.c by Reimar Doeffinger
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "common.h"
#include "aes.h"
#include "aes_internal.h"
#include "intreadwrite.h"
#include "timer.h"

const int av_aes_size = sizeof(AVAES);

struct AVAES *av_aes_alloc(void)
{
    return av_mallocz(sizeof(struct AVAES));
}

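// AES round constants for the key schedule (rcon[i] = x^i in GF(2^8))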
static const uint8_t rcon[10] = {
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
};

static uint8_t     sbox[256];
static uint8_t inv_sbox[256];
#if CONFIG_SMALL
static uint32_t enc_multbl[1][256];
static uint32_t dec_multbl[1][256];
#else
static uint32_t enc_multbl[4][256];
static uint32_t dec_multbl[4][256];
#endif

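/* 32-bit rotation that moves each byte s/8 positions towards higher memory
 * addresses regardless of host endianness, so the table-based code below
 * behaves identically on big- and little-endian machines */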
#if HAVE_BIGENDIAN
#   define ROT(x, s) (((x) >> (s)) | ((x) << (32-(s))))
#else
#   define ROT(x, s) (((x) << (s)) | ((x) >> (32-(s))))
#endif

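// AddRoundKey: XOR a 128-bit round key into the state as two 64-bit words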
static inline void addkey(av_aes_block *dst, const av_aes_block *src,
                          const av_aes_block *round_key)
{
    dst->u64[0] = src->u64[0] ^ round_key->u64[0];
    dst->u64[1] = src->u64[1] ^ round_key->u64[1];
}

static inline void addkey_s(av_aes_block *dst, const uint8_t *src,
                            const av_aes_block *round_key)
{
    dst->u64[0] = AV_RN64(src)     ^ round_key->u64[0];
    dst->u64[1] = AV_RN64(src + 8) ^ round_key->u64[1];
}

static inline void addkey_d(uint8_t *dst, const av_aes_block *src,
                            const av_aes_block *round_key)
{
    AV_WN64(dst,     src->u64[0] ^ round_key->u64[0]);
    AV_WN64(dst + 8, src->u64[1] ^ round_key->u64[1]);
}

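/* combined SubBytes + ShiftRows: reads the block at s0[1], passes each byte
 * through box[] and writes the row-shifted result to s0[0]; the byte offset
 * s selects between the forward and the inverse row shift */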
static void subshift(av_aes_block s0[2], int s, const uint8_t *box)
{
    av_aes_block *s1 = (av_aes_block *) (s0[0].u8 - s);
    av_aes_block *s3 = (av_aes_block *) (s0[0].u8 + s);

    s0[0].u8[ 0] = box[s0[1].u8[ 0]];
    s0[0].u8[ 4] = box[s0[1].u8[ 4]];
    s0[0].u8[ 8] = box[s0[1].u8[ 8]];
    s0[0].u8[12] = box[s0[1].u8[12]];
    s1[0].u8[ 3] = box[s1[1].u8[ 7]];
    s1[0].u8[ 7] = box[s1[1].u8[11]];
    s1[0].u8[11] = box[s1[1].u8[15]];
    s1[0].u8[15] = box[s1[1].u8[ 3]];
    s0[0].u8[ 2] = box[s0[1].u8[10]];
    s0[0].u8[10] = box[s0[1].u8[ 2]];
    s0[0].u8[ 6] = box[s0[1].u8[14]];
    s0[0].u8[14] = box[s0[1].u8[ 6]];
    s3[0].u8[ 1] = box[s3[1].u8[13]];
    s3[0].u8[13] = box[s3[1].u8[ 9]];
    s3[0].u8[ 9] = box[s3[1].u8[ 5]];
    s3[0].u8[ 5] = box[s3[1].u8[ 1]];
}

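/* MixColumns (or InvMixColumns) of one output word via table lookups;
 * with CONFIG_SMALL a single 256-entry table plus rotations replaces the
 * four tables of the fast path */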
static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d)
{
#if CONFIG_SMALL
    return multbl[0][a] ^ ROT(multbl[0][b], 8) ^ ROT(multbl[0][c], 16) ^ ROT(multbl[0][d], 24);
#else
    return multbl[0][a] ^ multbl[1][b] ^ multbl[2][c] ^ multbl[3][d];
#endif
}

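/* apply mix_core to all four columns; the s1/s3 source column indices fold
 * ShiftRows (encryption) or its inverse (decryption) into the lookups */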
static inline void mix(av_aes_block state[2], uint32_t multbl[][256], int s1, int s3)
{
    uint8_t (*src)[4] = state[1].u8x4;
    state[0].u32[0] = mix_core(multbl, src[0][0], src[s1    ][1], src[2][2], src[s3    ][3]);
    state[0].u32[1] = mix_core(multbl, src[1][0], src[s3 - 1][1], src[3][2], src[s1 - 1][3]);
    state[0].u32[2] = mix_core(multbl, src[2][0], src[s3    ][1], src[0][2], src[s1    ][3]);
    state[0].u32[3] = mix_core(multbl, src[3][0], src[s1 - 1][1], src[1][2], src[s3 - 1][3]);
}

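/* shared round loop for both directions: callers pass s = 2 with sbox and
 * enc_multbl for encryption, s = 0 with inv_sbox and dec_multbl for
 * decryption; the final SubBytes/ShiftRows is done by subshift() */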
static inline void aes_crypt(AVAES *a, int s, const uint8_t *sbox,
                             uint32_t multbl[][256])
{
    int r;

    for (r = a->rounds - 1; r > 0; r--) {
        mix(a->state, multbl, 3 - s, 1 + s);
        addkey(&a->state[1], &a->state[0], &a->round_key[r]);
    }

    subshift(&a->state[0], s, sbox);
}

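// encrypt count 16-byte blocks; a non-NULL iv selects CBC mode, otherwise ECB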
static void aes_encrypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                        int count, uint8_t *iv, int rounds)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[rounds]);
        if (iv)
            addkey_s(&a->state[1], iv, &a->state[1]);
        aes_crypt(a, 2, sbox, enc_multbl);
        addkey_d(dst, &a->state[0], &a->round_key[0]);
        if (iv)
            memcpy(iv, dst, 16);
        src += 16;
        dst += 16;
    }
}

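/* decrypt count 16-byte blocks; in CBC mode the iv is XORed in after the
 * block cipher and then updated to the previous ciphertext block */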
static void aes_decrypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                        int count, uint8_t *iv, int rounds)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[rounds]);
        aes_crypt(a, 0, inv_sbox, dec_multbl);
        if (iv) {
            addkey_s(&a->state[0], iv, &a->state[0]);
            memcpy(iv, src, 16);
        }
        addkey_d(dst, &a->state[0], &a->round_key[0]);
        src += 16;
        dst += 16;
    }
}

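/*
 * public entry point declared in aes.h; a rough usage sketch with placeholder
 * buffers (error handling omitted):
 *
 *     AVAES *aes = av_aes_alloc();
 *     av_aes_init(aes, key, 128, 0);                // 0: encrypt, 1: decrypt
 *     av_aes_crypt(aes, dst, src, blocks, iv, 0);   // iv != NULL selects CBC
 *
 * count is the number of 16-byte blocks, not bytes.
 */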
void av_aes_crypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                  int count, uint8_t *iv, int decrypt)
{
    a->crypt(a, dst, src, count, iv, a->rounds);
}

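/* fill a multiplication table set from the GF(2^8) log/antilog tables;
 * c[] holds the four column coefficients, {2,1,1,3} for MixColumns and
 * {e,9,d,b} for InvMixColumns, applied after the given S-box */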
static void init_multbl2(uint32_t tbl[][256], const int c[4],
                         const uint8_t *log8, const uint8_t *alog8,
                         const uint8_t *sbox)
{
    int i;

    for (i = 0; i < 256; i++) {
        int x = sbox[i];
        if (x) {
            int k, l, m, n;
            x = log8[x];
            k = alog8[x + log8[c[0]]];
            l = alog8[x + log8[c[1]]];
            m = alog8[x + log8[c[2]]];
            n = alog8[x + log8[c[3]]];
            tbl[0][i] = AV_NE(MKBETAG(k, l, m, n), MKTAG(k, l, m, n));
#if !CONFIG_SMALL
            tbl[1][i] = ROT(tbl[0][i], 8);
            tbl[2][i] = ROT(tbl[0][i], 16);
            tbl[3][i] = ROT(tbl[0][i], 24);
#endif
        }
    }
}

// this is based on the reference AES code by Paulo Barreto and Vincent Rijmen
int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt)
{
    int i, j, t, rconpointer = 0;
    uint8_t tk[8][4];
    int KC = key_bits >> 5;
    int rounds = KC + 6;
    uint8_t log8[256];
    uint8_t alog8[512];

    a->crypt = decrypt ? aes_decrypt : aes_encrypt;

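    /* one-time setup: build GF(2^8) log/antilog tables, derive the S-box and
     * its inverse from them, and fill the multiplication tables; the last
     * enc_multbl entry doubles as the "already initialized" flag */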
    if (!enc_multbl[FF_ARRAY_ELEMS(enc_multbl) - 1][FF_ARRAY_ELEMS(enc_multbl[0]) - 1]) {
        j = 1;
        for (i = 0; i < 255; i++) {
            alog8[i] = alog8[i + 255] = j;
            log8[j] = i;
            j ^= j + j;
            if (j > 255)
                j ^= 0x11B;
        }
        for (i = 0; i < 256; i++) {
            j = i ? alog8[255 - log8[i]] : 0;
            j ^= (j << 1) ^ (j << 2) ^ (j << 3) ^ (j << 4);
            j = (j ^ (j >> 8) ^ 99) & 255;
            inv_sbox[j] = i;
            sbox[i]     = j;
        }
        init_multbl2(dec_multbl, (const int[4]) { 0xe, 0x9, 0xd, 0xb },
                     log8, alog8, inv_sbox);
        init_multbl2(enc_multbl, (const int[4]) { 0x2, 0x1, 0x1, 0x3 },
                     log8, alog8, sbox);
    }

    if (key_bits != 128 && key_bits != 192 && key_bits != 256)
        return AVERROR(EINVAL);

    a->rounds = rounds;

    memcpy(tk, key, KC * 4);
    memcpy(a->round_key[0].u8, key, KC * 4);

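    // expand the cipher key into the (rounds + 1) 16-byte round keys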
    for (t = KC * 4; t < (rounds + 1) * 16; t += KC * 4) {
        for (i = 0; i < 4; i++)
            tk[0][i] ^= sbox[tk[KC - 1][(i + 1) & 3]];
        tk[0][0] ^= rcon[rconpointer++];

        for (j = 1; j < KC; j++) {
            if (KC != 8 || j != KC >> 1)
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= tk[j - 1][i];
            else
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= sbox[tk[j - 1][i]];
        }

        memcpy(a->round_key[0].u8 + t, tk, KC * 4);
    }

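    /* for decryption run InvMixColumns over the intermediate round keys so the
     * same round function can be reused; for encryption only reverse the key
     * order, since aes_crypt() walks the round keys from rounds down to 0 */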
    if (decrypt) {
        for (i = 1; i < rounds; i++) {
            av_aes_block tmp[3];
            tmp[2] = a->round_key[i];
            subshift(&tmp[1], 0, sbox);
            mix(tmp, dec_multbl, 1, 3);
            a->round_key[i] = tmp[0];
        }
    } else {
        for (i = 0; i < (rounds + 1) >> 1; i++)
            FFSWAP(av_aes_block, a->round_key[i], a->round_key[rounds - i]);
    }

    return 0;
}