diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 798fc9a2c8d6..b3ebdc5679cb 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -234,30 +234,8 @@ static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			 unsigned int key_len)
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-	int err;
-	err = aesti_expand_key(ctx, in_key, key_len);
-	if (err)
-		return err;
-
-	/*
-	 * In order to force the compiler to emit data independent Sbox lookups
-	 * at the start of each block, xor the first round key with values at
-	 * fixed indexes in the Sbox. This will need to be repeated each time
-	 * the key is used, which will pull the entire Sbox into the D-cache
-	 * before any data dependent Sbox lookups are performed.
-	 */
-	ctx->key_enc[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
-	ctx->key_enc[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
-	ctx->key_enc[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
-	ctx->key_enc[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224];
-
-	ctx->key_dec[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
-	ctx->key_dec[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
-	ctx->key_dec[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
-	ctx->key_dec[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224];
-
-	return 0;
+	return aesti_expand_key(ctx, in_key, key_len);
 }
 
 static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
@@ -280,10 +258,16 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	 */
 	local_irq_save(flags);
 
-	st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
-	st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
-	st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
-	st0[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224];
+	/*
+	 * Force the compiler to emit data independent Sbox references,
+	 * by xoring the input with Sbox values that are known to add up
+	 * to zero. This pulls the entire Sbox into the D-cache before any
+	 * data dependent lookups are done.
+	 */
+	st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[ 64] ^ __aesti_sbox[134] ^ __aesti_sbox[195];
+	st0[1] ^= __aesti_sbox[16] ^ __aesti_sbox[ 82] ^ __aesti_sbox[158] ^ __aesti_sbox[221];
+	st0[2] ^= __aesti_sbox[32] ^ __aesti_sbox[ 96] ^ __aesti_sbox[160] ^ __aesti_sbox[234];
+	st0[3] ^= __aesti_sbox[48] ^ __aesti_sbox[112] ^ __aesti_sbox[186] ^ __aesti_sbox[241];
 
 	for (round = 0;; round += 2, rkp += 8) {
 		st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
@@ -328,10 +312,16 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	 */
 	local_irq_save(flags);
 
-	st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
-	st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
-	st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
-	st0[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224];
+	/*
+	 * Force the compiler to emit data independent Sbox references,
+	 * by xoring the input with Sbox values that are known to add up
+	 * to zero. This pulls the entire Sbox into the D-cache before any
+	 * data dependent lookups are done.
+	 */
+	st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[ 64] ^ __aesti_inv_sbox[129] ^ __aesti_inv_sbox[200];
+	st0[1] ^= __aesti_inv_sbox[16] ^ __aesti_inv_sbox[ 83] ^ __aesti_inv_sbox[150] ^ __aesti_inv_sbox[212];
+	st0[2] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[ 96] ^ __aesti_inv_sbox[160] ^ __aesti_inv_sbox[236];
+	st0[3] ^= __aesti_inv_sbox[48] ^ __aesti_inv_sbox[112] ^ __aesti_inv_sbox[187] ^ __aesti_inv_sbox[247];
 
 	for (round = 0;; round += 2, rkp += 8) {
 		st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
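
The replacement quadruples are chosen so that the four Sbox bytes xored into each state word cancel out exactly: st0[] is left unchanged, yet each quadruple reads one index from each 64-byte quarter of the 256-byte table, so the whole Sbox is pulled into the D-cache. As a quick sanity check (not part of the patch), below is a standalone userspace sketch that regenerates the AES S-box with the well-known generator-based construction, derives the inverse S-box from it, and asserts that all eight index quadruples used above xor to zero. Every name in it is local to the sketch; none are kernel symbols.

/*
 * sbox_check.c: verify the zero-sum Sbox index quadruples used in the
 * patch above. Hypothetical standalone helper, not part of the kernel.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ROTL8(x, shift) ((uint8_t)((x) << (shift)) | ((x) >> (8 - (shift))))

static uint8_t sbox[256];
static uint8_t inv_sbox[256];

static void build_sboxes(void)
{
	uint8_t p = 1, q = 1;

	/*
	 * Walk all 255 nonzero field elements: p runs through the powers
	 * of 3 in GF(2^8), q through the powers of 3^-1, so q == p^-1 on
	 * every iteration.
	 */
	do {
		p = p ^ (p << 1) ^ ((p & 0x80) ? 0x1b : 0);	/* p *= 3 */
		q ^= q << 1;					/* q /= 3 */
		q ^= q << 2;
		q ^= q << 4;
		q ^= (q & 0x80) ? 0x09 : 0;

		/* affine transformation of the multiplicative inverse */
		sbox[p] = q ^ ROTL8(q, 1) ^ ROTL8(q, 2) ^
			  ROTL8(q, 3) ^ ROTL8(q, 4) ^ 0x63;
	} while (p != 1);

	sbox[0] = 0x63;	/* 0 has no inverse; S(0) is defined as 0x63 */

	for (int i = 0; i < 256; i++)
		inv_sbox[sbox[i]] = (uint8_t)i;
}

/* one index per 64-byte quarter of the table, for each state word */
static const uint8_t enc_idx[4][4] = {
	{  0,  64, 134, 195 },
	{ 16,  82, 158, 221 },
	{ 32,  96, 160, 234 },
	{ 48, 112, 186, 241 },
};

static const uint8_t dec_idx[4][4] = {
	{  0,  64, 129, 200 },
	{ 16,  83, 150, 212 },
	{ 32,  96, 160, 236 },
	{ 48, 112, 187, 247 },
};

int main(void)
{
	build_sboxes();

	for (int w = 0; w < 4; w++) {
		uint8_t e = 0, d = 0;

		for (int j = 0; j < 4; j++) {
			e ^= sbox[enc_idx[w][j]];
			d ^= inv_sbox[dec_idx[w][j]];
		}
		/* each quadruple must xor to zero so st0[] is unchanged */
		assert(e == 0);
		assert(d == 0);
	}
	printf("all eight quadruples xor to zero\n");
	return 0;
}

Building this with any C99 compiler and running it should print the success line; a transcription error in any index in the hunks above would trip the corresponding assert.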