/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/bus.h>
#include <sys/uio.h>

#include <crypto/aesni/aesni.h>
#include <cryptodev_if.h>
48 TAILQ_HEAD(aesni_sessions_head, aesni_session) sessions;
52 static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri);
53 static int aesni_freesession(device_t, uint64_t tid);
54 static void aesni_freesession_locked(struct aesni_softc *sc,
55 struct aesni_session *ses);
56 static int aesni_cipher_setup(struct aesni_session *ses,
57 struct cryptoini *encini);
58 static int aesni_cipher_process(struct aesni_session *ses,
59 struct cryptodesc *enccrd, struct cryptop *crp);
61 MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");
64 aesni_identify(driver_t *drv, device_t parent)
67 /* NB: order 10 is so we get attached after h/w devices */
68 if (device_find_child(parent, "aesni", -1) == NULL &&
69 BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
70 panic("aesni: could not attach");
74 aesni_probe(device_t dev)
77 if ((cpu_feature2 & CPUID2_AESNI) == 0) {
78 device_printf(dev, "No AESNI support.\n");
82 if ((cpu_feature & CPUID_SSE2) == 0) {
83 device_printf(dev, "No SSE2 support but AESNI!?!\n");
87 device_set_desc_copy(dev, "AES-CBC,AES-XTS");
92 aesni_attach(device_t dev)
94 struct aesni_softc *sc;
96 sc = device_get_softc(dev);
97 TAILQ_INIT(&sc->sessions);
99 sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE |
102 device_printf(dev, "Could not get crypto driver id.\n");
106 rw_init(&sc->lock, "aesni_lock");
107 crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
108 crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
113 aesni_detach(device_t dev)
115 struct aesni_softc *sc;
116 struct aesni_session *ses;
118 sc = device_get_softc(dev);
120 TAILQ_FOREACH(ses, &sc->sessions, next) {
122 rw_wunlock(&sc->lock);
124 "Cannot detach, sessions still active.\n");
128 while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
129 TAILQ_REMOVE(&sc->sessions, ses, next);
130 fpu_kern_free_ctx(ses->fpu_ctx);
133 rw_wunlock(&sc->lock);
134 rw_destroy(&sc->lock);
135 crypto_unregister_all(sc->cid);
140 aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
142 struct aesni_softc *sc;
143 struct aesni_session *ses;
144 struct cryptoini *encini;
147 if (sidp == NULL || cri == NULL)
150 sc = device_get_softc(dev);
153 for (; cri != NULL; cri = cri->cri_next) {
154 switch (cri->cri_alg) {
170 * Free sessions goes first, so if first session is used, we need to
173 ses = TAILQ_FIRST(&sc->sessions);
174 if (ses == NULL || ses->used) {
175 ses = malloc(sizeof(*ses), M_AESNI, M_NOWAIT | M_ZERO);
177 rw_wunlock(&sc->lock);
180 ses->fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL |
182 if (ses->fpu_ctx == NULL) {
184 rw_wunlock(&sc->lock);
189 TAILQ_REMOVE(&sc->sessions, ses, next);
192 TAILQ_INSERT_TAIL(&sc->sessions, ses, next);
193 rw_wunlock(&sc->lock);
194 ses->algo = encini->cri_alg;
196 error = aesni_cipher_setup(ses, encini);
199 aesni_freesession_locked(sc, ses);
200 rw_wunlock(&sc->lock);
209 aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
211 struct fpu_kern_ctx *ctx;
215 TAILQ_REMOVE(&sc->sessions, ses, next);
217 bzero(ses, sizeof(*ses));
220 TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
224 aesni_freesession(device_t dev, uint64_t tid)
226 struct aesni_softc *sc;
227 struct aesni_session *ses;
230 sc = device_get_softc(dev);
231 sid = ((uint32_t)tid) & 0xffffffff;
233 TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
238 rw_wunlock(&sc->lock);
241 aesni_freesession_locked(sc, ses);
242 rw_wunlock(&sc->lock);
247 aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
249 struct aesni_softc *sc = device_get_softc(dev);
250 struct aesni_session *ses = NULL;
251 struct cryptodesc *crd, *enccrd;
261 if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
266 for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
267 switch (crd->crd_alg) {
270 if (enccrd != NULL) {
280 if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
286 TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
287 if (ses->id == (crp->crp_sid & 0xffffffff))
290 rw_runlock(&sc->lock);
296 error = aesni_cipher_process(ses, enccrd, crp);
301 crp->crp_etype = error;
307 aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
314 if (crp->crp_flags & CRYPTO_F_IMBUF)
316 else if (crp->crp_flags & CRYPTO_F_IOV) {
317 uio = (struct uio *)crp->crp_buf;
318 if (uio->uio_iovcnt != 1)
321 addr = (u_char *)iov->iov_base + enccrd->crd_skip;
323 addr = (u_char *)crp->crp_buf;
328 addr = malloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
331 crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
332 enccrd->crd_len, addr);
338 static device_method_t aesni_methods[] = {
339 DEVMETHOD(device_identify, aesni_identify),
340 DEVMETHOD(device_probe, aesni_probe),
341 DEVMETHOD(device_attach, aesni_attach),
342 DEVMETHOD(device_detach, aesni_detach),
344 DEVMETHOD(cryptodev_newsession, aesni_newsession),
345 DEVMETHOD(cryptodev_freesession, aesni_freesession),
346 DEVMETHOD(cryptodev_process, aesni_process),
351 static driver_t aesni_driver = {
354 sizeof(struct aesni_softc),
356 static devclass_t aesni_devclass;
358 DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
359 MODULE_VERSION(aesni, 1);
360 MODULE_DEPEND(aesni, crypto, 1, 1, 1);
363 aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
369 error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
373 error = aesni_cipher_setup_common(ses, encini->cri_key,
375 fpu_kern_leave(td, ses->fpu_ctx);
380 aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
385 int error, allocated;
387 buf = aesni_cipher_alloc(enccrd, crp, &allocated);
392 error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
397 if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
398 error = aesni_cipher_setup_common(ses, enccrd->crd_key,
404 if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
405 if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
406 bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
407 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
408 crypto_copyback(crp->crp_flags, crp->crp_buf,
409 enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
410 if (ses->algo == CRYPTO_AES_CBC) {
411 aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
412 enccrd->crd_len, buf, buf, ses->iv);
413 } else /* if (ses->algo == CRYPTO_AES_XTS) */ {
414 aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
415 ses->xts_schedule, enccrd->crd_len, buf, buf,
419 if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
420 bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
422 crypto_copydata(crp->crp_flags, crp->crp_buf,
423 enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
424 if (ses->algo == CRYPTO_AES_CBC) {
425 aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
426 enccrd->crd_len, buf, ses->iv);
427 } else /* if (ses->algo == CRYPTO_AES_XTS) */ {
428 aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
429 ses->xts_schedule, enccrd->crd_len, buf, buf,
434 crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
435 enccrd->crd_len, buf);
436 if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
437 crypto_copydata(crp->crp_flags, crp->crp_buf,
438 enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
439 AES_BLOCK_LEN, ses->iv);
441 fpu_kern_leave(td, ses->fpu_ctx);
444 bzero(buf, enccrd->crd_len);