/*-
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <crypto/aesni/aesni.h>

MALLOC_DECLARE(M_AESNI);

void
aesni_encrypt_cbc(int rounds, const void *key_schedule, size_t len,
    const uint8_t *from, uint8_t *to, const uint8_t iv[AES_BLOCK_LEN])
{
	const uint8_t *ivp;
	size_t i;

	len /= AES_BLOCK_LEN;
	ivp = iv;
	for (i = 0; i < len; i++) {
		/* aesni_enc() XORs the IV into the block before encrypting. */
		aesni_enc(rounds - 1, key_schedule, from, to, ivp);
		/* The ciphertext just produced chains into the next block. */
		ivp = to;
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
}
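
/*
 * Usage sketch (not part of the driver): in-place CBC encryption of a
 * 32-byte buffer with the helper above.  "sched" is a placeholder for a
 * key schedule prepared by aesni_set_enckey(); len must be a multiple
 * of AES_BLOCK_LEN.
 *
 *	uint8_t iv[AES_BLOCK_LEN] = { 0 };	// caller-chosen IV
 *	uint8_t buf[2 * AES_BLOCK_LEN];		// plaintext in, ciphertext out
 *
 *	aesni_encrypt_cbc(AES128_ROUNDS, sched, sizeof(buf), buf, buf, iv);
 */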

void
aesni_encrypt_ecb(int rounds, const void *key_schedule, size_t len,
    const uint8_t from[AES_BLOCK_LEN], uint8_t to[AES_BLOCK_LEN])
{
	size_t i;

	len /= AES_BLOCK_LEN;
	for (i = 0; i < len; i++) {
		aesni_enc(rounds - 1, key_schedule, from, to, NULL);
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
}

void
aesni_decrypt_ecb(int rounds, const void *key_schedule, size_t len,
    const uint8_t from[AES_BLOCK_LEN], uint8_t to[AES_BLOCK_LEN])
{
	size_t i;

	len /= AES_BLOCK_LEN;
	for (i = 0; i < len; i++) {
		aesni_dec(rounds - 1, key_schedule, from, to, NULL);
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
}

#define	AES_XTS_BLOCKSIZE	16
#define	AES_XTS_IVSIZE		8
#define	AES_XTS_ALPHA		0x87	/* GF(2^128) generator polynomial */
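
/*
 * XTS-AES (IEEE P1619) in brief, for reference: block i of a sector is
 * processed as C_i = E_k1(P_i ^ T_i) ^ T_i, where the initial tweak is
 * T_0 = E_k2(sector number) and each subsequent tweak is T_{i+1} =
 * T_i * x in GF(2^128), i.e. a 128-bit left shift reduced with
 * AES_XTS_ALPHA (the low byte of x^128 + x^7 + x^2 + x + 1).
 */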

static void
aesni_crypt_xts_block(int rounds, const void *key_schedule, uint64_t *tweak,
    const uint64_t *from, uint64_t *to, uint64_t *block, int do_encrypt)
{
	int carry;

	block[0] = from[0] ^ tweak[0];
	block[1] = from[1] ^ tweak[1];

	if (do_encrypt)
		aesni_enc(rounds - 1, key_schedule, (uint8_t *)block, (uint8_t *)to, NULL);
	else
		aesni_dec(rounds - 1, key_schedule, (uint8_t *)block, (uint8_t *)to, NULL);

	to[0] ^= tweak[0];
	to[1] ^= tweak[1];

	/* Exponentiate tweak: multiply by x in GF(2^128). */
	carry = ((tweak[0] & 0x8000000000000000ULL) > 0);
	tweak[0] <<= 1;
	if (tweak[1] & 0x8000000000000000ULL) {
		uint8_t *twk = (uint8_t *)tweak;

		/* Bit 127 shifts out: reduce by the XTS polynomial. */
		twk[0] ^= AES_XTS_ALPHA;
	}
	tweak[1] <<= 1;
	if (carry != 0)
		tweak[1] |= 1;
}
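
/*
 * The same tweak update written byte-by-byte, as a sketch for clarity
 * only (the hypothetical xts_mult_x() is not part of this file).  The
 * tweak is a little-endian 128-bit polynomial multiplied by x and
 * reduced modulo x^128 + x^7 + x^2 + x + 1:
 *
 *	static void
 *	xts_mult_x(uint8_t t[AES_XTS_BLOCKSIZE])
 *	{
 *		int i, carry, c;
 *
 *		for (i = carry = 0; i < AES_XTS_BLOCKSIZE; i++) {
 *			c = t[i] >> 7;			// outgoing high bit
 *			t[i] = (t[i] << 1) | carry;	// shift in prior carry
 *			carry = c;
 *		}
 *		if (carry)
 *			t[0] ^= AES_XTS_ALPHA;		// reduce on overflow
 *	}
 */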

static void
aesni_crypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN], int do_encrypt)
{
	uint64_t block[AES_XTS_BLOCKSIZE / 8];
	uint8_t tweak[AES_XTS_BLOCKSIZE];
	size_t i;

	/*
	 * Prepare tweak as E_k2(IV). IV is specified as LE representation
	 * of a 64-bit block number which we allow to be passed in directly.
	 */
#if BYTE_ORDER == LITTLE_ENDIAN
	bcopy(iv, tweak, AES_XTS_IVSIZE);
	/* Last 64 bits of IV are always zero. */
	bzero(tweak + AES_XTS_IVSIZE, AES_XTS_IVSIZE);
#else
#error Only LITTLE_ENDIAN architectures are supported.
#endif
	aesni_enc(rounds - 1, tweak_schedule, tweak, tweak, NULL);

	len /= AES_XTS_BLOCKSIZE;
	for (i = 0; i < len; i++) {
		aesni_crypt_xts_block(rounds, data_schedule, (uint64_t *)tweak,
		    (const uint64_t *)from, (uint64_t *)to, block, do_encrypt);
		from += AES_XTS_BLOCKSIZE;
		to += AES_XTS_BLOCKSIZE;
	}

	/* Wipe the tweak and scratch block; they are key-dependent. */
	bzero(tweak, sizeof(tweak));
	bzero(block, sizeof(block));
}
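
/*
 * Sketch of the IV convention above (assuming, per IEEE P1619, that the
 * tweak input is the 64-bit sector number; "sector" is a placeholder):
 *
 *	uint8_t iv[AES_BLOCK_LEN];
 *
 *	le64enc(iv, sector);	// only the low 8 bytes are consumed;
 *				// aesni_crypt_xts() zeroes the upper half
 */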

static void
aesni_encrypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN])
{

	aesni_crypt_xts(rounds, data_schedule, tweak_schedule, len, from, to,
	    iv, 1);
}

static void
aesni_decrypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN])
{

	aesni_crypt_xts(rounds, data_schedule, tweak_schedule, len, from, to,
	    iv, 0);
}

static int
aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
    int keylen)
{

	switch (ses->algo) {
	case CRYPTO_AES_CBC:
		switch (keylen) {
		case 128:
			ses->rounds = AES128_ROUNDS;
			break;
		case 192:
			ses->rounds = AES192_ROUNDS;
			break;
		case 256:
			ses->rounds = AES256_ROUNDS;
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_AES_XTS:
		switch (keylen) {
		case 256:
			ses->rounds = AES128_ROUNDS;
			break;
		case 512:
			ses->rounds = AES256_ROUNDS;
			break;
		default:
			return (EINVAL);
		}
		break;
	default:
		return (EINVAL);
	}

	aesni_set_enckey(key, ses->enc_schedule, ses->rounds);
	aesni_set_deckey(ses->enc_schedule, ses->dec_schedule, ses->rounds);
	if (ses->algo == CRYPTO_AES_CBC)
		arc4rand(ses->iv, sizeof(ses->iv), 0);
	else /* if (ses->algo == CRYPTO_AES_XTS) */ {
		/* The second half of the XTS key is the tweak key. */
		aesni_set_enckey(key + keylen / 16, ses->xts_schedule,
		    ses->rounds);
	}

	return (0);
}
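
/*
 * Note on the XTS key split above: keylen is in bits, so keylen / 16 is
 * half the key in bytes.  A 256-bit XTS key, for example, is two
 * AES-128 keys: bytes 0-15 feed the data schedules and bytes 16-31
 * (key + 256 / 16) feed the tweak schedule.
 */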

int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
{
	struct thread *td;
	int error, saved_ctx;

	td = curthread;
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
		saved_ctx = 1;
	} else {
		error = 0;
		saved_ctx = 0;
	}
	if (error == 0) {
		error = aesni_cipher_setup_common(ses, encini->cri_key,
		    encini->cri_klen);
		if (saved_ctx)
			fpu_kern_leave(td, ses->fpu_ctx);
	}
	return (error);
}
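
/*
 * The fpu_kern_enter()/fpu_kern_leave() bracketing above (and below in
 * aesni_cipher_process()) is required because the AES-NI instructions
 * operate on XMM registers: unless we already run on a dedicated FPU
 * kernel thread, the current thread's FPU state must be saved and
 * restored around their use.
 */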

int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated, saved_ctx;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	td = curthread;
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
		if (error != 0)
			goto out;
		saved_ctx = 1;
	} else {
		saved_ctx = 0;
		error = 0;
	}

	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0)
			goto out;
	}

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	}
	if (saved_ctx)
		fpu_kern_leave(td, ses->fpu_ctx);
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
out:
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	return (error);
}
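
/*
 * Note on IV handling in aesni_cipher_process(): for encryption, the
 * final crypto_copydata() saves the last ciphertext block into ses->iv.
 * For CBC this chains a subsequent request on the same session from the
 * previous one, unless the caller supplies an explicit IV via
 * CRD_F_IV_EXPLICIT.
 */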