Commit dd025f25 authored by Reimar Döffinger

Change AES code to be strict-aliasing-safe.

Makes it give correct results with e.g. gcc 4.4.
For unknown reasons the generated asm code also changes
on e.g. gcc 4.3, making the code a bit larger but also
a bit faster.

Originally committed as revision 23896 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 25fe8630
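
For context, a minimal sketch of the hazard this commit fixes (illustration only, not part of the commit): the old code declared the round keys and state as uint8_t arrays but read and wrote them through casted uint64_t */uint32_t * pointers in addkey() and mix(). Under the C strict-aliasing rules a compiler such as gcc 4.4 may assume such differently-typed accesses never overlap and reorder them, producing wrong results. Routing all views through members of one union, as the new av_aes_block does, keeps them referring to the same object:

    #include <stdint.h>

    /* Problematic pattern: a uint8_t object accessed through a casted
     * uint64_t pointer. A strict-aliasing compiler may assume the
     * uint64_t stores cannot touch the bytes and reorder accesses. */
    static void xor_block_unsafe(uint8_t dst[16], const uint8_t src[16])
    {
        ((uint64_t *)dst)[0] ^= ((const uint64_t *)src)[0];
        ((uint64_t *)dst)[1] ^= ((const uint64_t *)src)[1];
    }

    /* Safe pattern: byte and qword views are members of one union
     * (mirroring av_aes_block), so the accesses stay defined. */
    typedef union {
        uint64_t u64[2];
        uint8_t  u8[16];
    } block;

    static void xor_block_safe(block *dst, const block *src)
    {
        dst->u64[0] ^= src->u64[0];
        dst->u64[1] ^= src->u64[1];
    }
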
@@ -23,11 +23,18 @@
 #include "common.h"
 #include "aes.h"
 
+typedef union {
+    uint64_t u64[2];
+    uint32_t u32[4];
+    uint8_t u8x4[4][4];
+    uint8_t u8[16];
+} av_aes_block;
+
 typedef struct AVAES{
     // Note: round_key[16] is accessed in the init code, but this only
     // overwrites state, which does not matter (see also r7471).
-    uint8_t round_key[15][4][4];
-    uint8_t state[2][4][4];
+    av_aes_block round_key[15];
+    av_aes_block state[2];
     int rounds;
 }AVAES;
@@ -47,18 +54,18 @@ static uint32_t enc_multbl[4][256];
 static uint32_t dec_multbl[4][256];
 #endif
 
-static inline void addkey(uint64_t dst[2], const uint64_t src[2], const uint64_t round_key[2]){
-    dst[0] = src[0] ^ round_key[0];
-    dst[1] = src[1] ^ round_key[1];
+static inline void addkey(av_aes_block *dst, const av_aes_block *src, const av_aes_block *round_key){
+    dst->u64[0] = src->u64[0] ^ round_key->u64[0];
+    dst->u64[1] = src->u64[1] ^ round_key->u64[1];
 }
 
-static void subshift(uint8_t s0[2][16], int s, const uint8_t *box){
-    uint8_t (*s1)[16]= s0[0] - s;
-    uint8_t (*s3)[16]= s0[0] + s;
-    s0[0][0]=box[s0[1][ 0]]; s0[0][ 4]=box[s0[1][ 4]]; s0[0][ 8]=box[s0[1][ 8]]; s0[0][12]=box[s0[1][12]];
-    s1[0][3]=box[s1[1][ 7]]; s1[0][ 7]=box[s1[1][11]]; s1[0][11]=box[s1[1][15]]; s1[0][15]=box[s1[1][ 3]];
-    s0[0][2]=box[s0[1][10]]; s0[0][10]=box[s0[1][ 2]]; s0[0][ 6]=box[s0[1][14]]; s0[0][14]=box[s0[1][ 6]];
-    s3[0][1]=box[s3[1][13]]; s3[0][13]=box[s3[1][ 9]]; s3[0][ 9]=box[s3[1][ 5]]; s3[0][ 5]=box[s3[1][ 1]];
+static void subshift(av_aes_block s0[2], int s, const uint8_t *box){
+    av_aes_block *s1= (av_aes_block *)(s0[0].u8 - s);
+    av_aes_block *s3= (av_aes_block *)(s0[0].u8 + s);
+    s0[0].u8[0]=box[s0[1].u8[ 0]]; s0[0].u8[ 4]=box[s0[1].u8[ 4]]; s0[0].u8[ 8]=box[s0[1].u8[ 8]]; s0[0].u8[12]=box[s0[1].u8[12]];
+    s1[0].u8[3]=box[s1[1].u8[ 7]]; s1[0].u8[ 7]=box[s1[1].u8[11]]; s1[0].u8[11]=box[s1[1].u8[15]]; s1[0].u8[15]=box[s1[1].u8[ 3]];
+    s0[0].u8[2]=box[s0[1].u8[10]]; s0[0].u8[10]=box[s0[1].u8[ 2]]; s0[0].u8[ 6]=box[s0[1].u8[14]]; s0[0].u8[14]=box[s0[1].u8[ 6]];
+    s3[0].u8[1]=box[s3[1].u8[13]]; s3[0].u8[13]=box[s3[1].u8[ 9]]; s3[0].u8[ 9]=box[s3[1].u8[ 5]]; s3[0].u8[ 5]=box[s3[1].u8[ 1]];
 }
 
 static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d){
@@ -70,11 +77,11 @@ static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d){
 #endif
 }
 
-static inline void mix(uint8_t state[2][4][4], uint32_t multbl[][256], int s1, int s3){
-    ((uint32_t *)(state))[0] = mix_core(multbl, state[1][0][0], state[1][s1 ][1], state[1][2][2], state[1][s3 ][3]);
-    ((uint32_t *)(state))[1] = mix_core(multbl, state[1][1][0], state[1][s3-1][1], state[1][3][2], state[1][s1-1][3]);
-    ((uint32_t *)(state))[2] = mix_core(multbl, state[1][2][0], state[1][s3 ][1], state[1][0][2], state[1][s1 ][3]);
-    ((uint32_t *)(state))[3] = mix_core(multbl, state[1][3][0], state[1][s1-1][1], state[1][1][2], state[1][s3-1][3]);
+static inline void mix(av_aes_block state[2], uint32_t multbl[][256], int s1, int s3){
+    state[0].u32[0] = mix_core(multbl, state[1].u8x4[0][0], state[1].u8x4[s1 ][1], state[1].u8x4[2][2], state[1].u8x4[s3 ][3]);
+    state[0].u32[1] = mix_core(multbl, state[1].u8x4[1][0], state[1].u8x4[s3-1][1], state[1].u8x4[3][2], state[1].u8x4[s1-1][3]);
+    state[0].u32[2] = mix_core(multbl, state[1].u8x4[2][0], state[1].u8x4[s3 ][1], state[1].u8x4[0][2], state[1].u8x4[s1 ][3]);
+    state[0].u32[3] = mix_core(multbl, state[1].u8x4[3][0], state[1].u8x4[s1-1][1], state[1].u8x4[1][2], state[1].u8x4[s3-1][3]);
 }
 
 static inline void crypt(AVAES *a, int s, const uint8_t *sbox, uint32_t multbl[][256]){
@@ -82,29 +89,32 @@ static inline void crypt(AVAES *a, int s, const uint8_t *sbox, uint32_t multbl[][256]){
 
     for(r=a->rounds-1; r>0; r--){
         mix(a->state, multbl, 3-s, 1+s);
-        addkey(a->state[1], a->state[0], a->round_key[r]);
+        addkey(&a->state[1], &a->state[0], &a->round_key[r]);
     }
-    subshift(a->state[0][0], s, sbox);
+    subshift(&a->state[0], s, sbox);
 }
 
-void av_aes_crypt(AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt){
+void av_aes_crypt(AVAES *a, uint8_t *dst_, const uint8_t *src_, int count, uint8_t *iv_, int decrypt){
+    av_aes_block *dst = (av_aes_block *)dst_;
+    const av_aes_block *src = (const av_aes_block *)src_;
+    av_aes_block *iv = (av_aes_block *)iv_;
     while(count--){
-        addkey(a->state[1], src, a->round_key[a->rounds]);
+        addkey(&a->state[1], src, &a->round_key[a->rounds]);
         if(decrypt) {
             crypt(a, 0, inv_sbox, dec_multbl);
             if(iv){
-                addkey(a->state[0], a->state[0], iv);
+                addkey(&a->state[0], &a->state[0], iv);
                 memcpy(iv, src, 16);
             }
-            addkey(dst, a->state[0], a->round_key[0]);
+            addkey(dst, &a->state[0], &a->round_key[0]);
         }else{
-            if(iv) addkey(a->state[1], a->state[1], iv);
+            if(iv) addkey(&a->state[1], &a->state[1], iv);
             crypt(a, 2, sbox, enc_multbl);
-            addkey(dst, a->state[0], a->round_key[0]);
+            addkey(dst, &a->state[0], &a->round_key[0]);
             if(iv) memcpy(iv, dst, 16);
         }
-        src+=16;
-        dst+=16;
+        src++;
+        dst++;
     }
 }
@@ -158,7 +168,7 @@ int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt) {
     memcpy(tk, key, KC*4);
 
     for(t= 0; t < (rounds+1)*16;) {
-        memcpy(a->round_key[0][0]+t, tk, KC*4);
+        memcpy(a->round_key[0].u8+t, tk, KC*4);
         t+= KC*4;
         for(i = 0; i < 4; i++)
@@ -175,16 +185,16 @@ int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt) {
     if(decrypt){
         for(i=1; i<rounds; i++){
-            uint8_t tmp[3][16];
-            memcpy(tmp[2], a->round_key[i][0], 16);
+            av_aes_block tmp[3];
+            memcpy(&tmp[2], &a->round_key[i], 16);
             subshift(&tmp[1], 0, sbox);
             mix(tmp, dec_multbl, 1, 3);
-            memcpy(a->round_key[i][0], tmp[0], 16);
+            memcpy(&a->round_key[i], &tmp[0], 16);
         }
     }else{
         for(i=0; i<(rounds+1)>>1; i++){
             for(j=0; j<16; j++)
-                FFSWAP(int, a->round_key[i][0][j], a->round_key[rounds-i][0][j]);
+                FFSWAP(int, a->round_key[i].u8[j], a->round_key[rounds-i].u8[j]);
         }
     }
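
For reference, a hedged usage sketch of the public API, which this commit leaves unchanged: callers still pass plain uint8_t buffers, and internally src++/dst++ now advances by one 16-byte av_aes_block where the old code did src+=16. Allocation via av_aes_size/av_malloc follows the libavutil conventions of this era; treat the include paths and helper names as assumptions, not part of the commit:

    #include "aes.h"
    #include "mem.h"  /* av_malloc()/av_free(), assumed available as in libavutil */

    /* Sketch: encrypt a single 16-byte block in ECB mode (iv = NULL). */
    static int encrypt_one_block(const uint8_t key[16],
                                 const uint8_t in[16], uint8_t out[16])
    {
        struct AVAES *aes = av_malloc(av_aes_size); /* opaque context, size exported by aes.c */
        if (!aes)
            return -1;
        av_aes_init(aes, key, 128, 0);              /* 128-bit key, 0 = encrypt */
        av_aes_crypt(aes, out, in, 1, NULL, 0);     /* count is in 16-byte blocks */
        av_free(aes);
        return 0;
    }
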