/* $OpenBSD: glxsb.c,v 1.7 2007/02/12 14:31:45 tom Exp $ */

/*
 * Copyright (c) 2006 Tom Cosgrove <tom@openbsd.org>
 * Copyright (c) 2003, 2004 Theo de Raadt
 * Copyright (c) 2003 Jason Wright
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the security block on the AMD Geode LX processors
 * http://www.amd.com/files/connectivitysolutions/geode/geode_lx/33234d_lx_ds.pdf
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"
#include "glxsb.h"

#define PCI_VENDOR_AMD			0x1022	/* AMD */
#define PCI_PRODUCT_AMD_GEODE_LX_CRYPTO	0x2082	/* Geode LX Crypto */

#define SB_GLD_MSR_CAP		0x58002000	/* RO - Capabilities */
#define SB_GLD_MSR_CONFIG	0x58002001	/* RW - Master Config */
#define SB_GLD_MSR_SMI		0x58002002	/* RW - SMI */
#define SB_GLD_MSR_ERROR	0x58002003	/* RW - Error */
#define SB_GLD_MSR_PM		0x58002004	/* RW - Power Mgmt */
#define SB_GLD_MSR_DIAG		0x58002005	/* RW - Diagnostic */
#define SB_GLD_MSR_CTRL		0x58002006	/* RW - Security Block Cntrl */

/* For GLD_MSR_CTRL: */
#define SB_GMC_DIV0		0x0000		/* AES update divisor values */
#define SB_GMC_DIV1		0x0001
#define SB_GMC_DIV2		0x0002
#define SB_GMC_DIV3		0x0003
#define SB_GMC_DIV_MASK		0x0003
#define SB_GMC_SBI		0x0004		/* AES swap bits */
#define SB_GMC_SBY		0x0008		/* AES swap bytes */
#define SB_GMC_TW		0x0010		/* Time write (EEPROM) */
#define SB_GMC_T_SEL0		0x0000		/* RNG post-proc: none */
#define SB_GMC_T_SEL1		0x0100		/* RNG post-proc: LFSR */
#define SB_GMC_T_SEL2		0x0200		/* RNG post-proc: whitener */
#define SB_GMC_T_SEL3		0x0300		/* RNG LFSR+whitener */
#define SB_GMC_T_SEL_MASK	0x0300
#define SB_GMC_T_NE		0x0400		/* Noise (generator) Enable */
#define SB_GMC_T_TM		0x0800		/* RNG test mode */

/* Security Block configuration/control registers (offsets from base) */
#define SB_CTL_A		0x0000		/* RW - SB Control A */
#define SB_CTL_B		0x0004		/* RW - SB Control B */
#define SB_AES_INT		0x0008		/* RW - SB AES Interrupt */
#define SB_SOURCE_A		0x0010		/* RW - Source A */
#define SB_DEST_A		0x0014		/* RW - Destination A */
#define SB_LENGTH_A		0x0018		/* RW - Length A */
#define SB_SOURCE_B		0x0020		/* RW - Source B */
#define SB_DEST_B		0x0024		/* RW - Destination B */
#define SB_LENGTH_B		0x0028		/* RW - Length B */
#define SB_WKEY			0x0030		/* WO - Writable Key 0-3 */
#define SB_WKEY_0		0x0030		/* WO - Writable Key 0 */
#define SB_WKEY_1		0x0034		/* WO - Writable Key 1 */
#define SB_WKEY_2		0x0038		/* WO - Writable Key 2 */
#define SB_WKEY_3		0x003C		/* WO - Writable Key 3 */
#define SB_CBC_IV		0x0040		/* RW - CBC IV 0-3 */
#define SB_CBC_IV_0		0x0040		/* RW - CBC IV 0 */
#define SB_CBC_IV_1		0x0044		/* RW - CBC IV 1 */
#define SB_CBC_IV_2		0x0048		/* RW - CBC IV 2 */
#define SB_CBC_IV_3		0x004C		/* RW - CBC IV 3 */
#define SB_RANDOM_NUM		0x0050		/* RW - Random Number */
#define SB_RANDOM_NUM_STATUS	0x0054		/* RW - Random Number Status */
#define SB_EEPROM_COMM		0x0800		/* RW - EEPROM Command */
#define SB_EEPROM_ADDR		0x0804		/* RW - EEPROM Address */
#define SB_EEPROM_DATA		0x0808		/* RW - EEPROM Data */
#define SB_EEPROM_SEC_STATE	0x080C		/* RW - EEPROM Security State */

/* For SB_CTL_A and _B */
#define SB_CTL_ST		0x0001		/* Start operation (enc/dec) */
#define SB_CTL_ENC		0x0002		/* Encrypt (0 is decrypt) */
#define SB_CTL_DEC		0x0000		/* Decrypt */
#define SB_CTL_WK		0x0004		/* Use writable key (we set) */
#define SB_CTL_DC		0x0008		/* Destination coherent */
#define SB_CTL_SC		0x0010		/* Source coherent */
#define SB_CTL_CBC		0x0020		/* CBC (0 is ECB) */

/* For SB_AES_INT */
#define SB_AI_DISABLE_AES_A	0x0001		/* Disable AES A compl int */
#define SB_AI_ENABLE_AES_A	0x0000		/* Enable AES A compl int */
#define SB_AI_DISABLE_AES_B	0x0002		/* Disable AES B compl int */
#define SB_AI_ENABLE_AES_B	0x0000		/* Enable AES B compl int */
#define SB_AI_DISABLE_EEPROM	0x0004		/* Disable EEPROM op comp int */
#define SB_AI_ENABLE_EEPROM	0x0000		/* Enable EEPROM op compl int */
#define SB_AI_AES_A_COMPLETE	0x10000		/* AES A operation complete */
#define SB_AI_AES_B_COMPLETE	0x20000		/* AES B operation complete */
#define SB_AI_EEPROM_COMPLETE	0x40000		/* EEPROM operation complete */

#define SB_AI_CLEAR_INTR \
	(SB_AI_DISABLE_AES_A | SB_AI_DISABLE_AES_B |\
	SB_AI_DISABLE_EEPROM | SB_AI_AES_A_COMPLETE |\
	SB_AI_AES_B_COMPLETE | SB_AI_EEPROM_COMPLETE)
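
/*
 * Note (describing how this driver uses the mask above): writing
 * SB_AI_CLEAR_INTR to SB_AES_INT masks the AES A/B and EEPROM completion
 * interrupts and writes the completion bits, presumably clearing any
 * pending status.  glxsb_attach() uses it to disable interrupts; the
 * driver polls SB_CTL_A for completion instead (see glxsb_aes()).
 */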

#define SB_RNS_TRNG_VALID	0x0001		/* in SB_RANDOM_NUM_STATUS */

#define SB_MEM_SIZE		0x0810		/* Size of memory block */

#define SB_AES_ALIGN		0x0010		/* Source and dest buffers */
						/* must be 16-byte aligned */
#define SB_AES_BLOCK_SIZE	0x0010

/*
 * The Geode LX security block AES acceleration doesn't perform scatter-
 * gather: it just takes source and destination addresses.  Therefore the
 * plain- and ciphertexts need to be contiguous.  To this end, we allocate
 * a buffer for both, and accept the overhead of copying in and out.  If
 * the number of bytes in one operation is bigger than allowed for by the
 * buffer (buffer is twice the size of the max length, as it has both input
 * and output) then we have to perform multiple encryptions/decryptions.
 */

#define GLXSB_MAX_AES_LEN	16384
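
/*
 * Worked example (illustrative only, not from the data sheet): a
 * 40000-byte request is processed as chunks of 16384, 16384 and 7232
 * bytes.  Each chunk is copied into the first half of the
 * 2 * GLXSB_MAX_AES_LEN byte DMA buffer and the result is copied back
 * out of the second half.
 */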

MALLOC_DEFINE(M_GLXSB, "glxsb_data", "Glxsb Data");

struct glxsb_dma_map {
	bus_dmamap_t		dma_map;	/* DMA map */
	bus_dma_segment_t	dma_seg;	/* segments */
	int			dma_nsegs;	/* #segments */
	int			dma_size;	/* size */
	caddr_t			dma_vaddr;	/* virtual address */
	bus_addr_t		dma_paddr;	/* physical address */
};

struct glxsb_taskop {
	struct glxsb_session	*to_ses;	/* crypto session */
	struct cryptop		*to_crp;	/* cryptop to perform */
	struct cryptodesc	*to_enccrd;	/* enccrd to perform */
	struct cryptodesc	*to_maccrd;	/* maccrd to perform */
};

struct glxsb_softc {
	device_t		sc_dev;		/* device backpointer */
	struct resource		*sc_sr;		/* resource */
	int			sc_rid;		/* resource rid */
	struct callout		sc_rngco;	/* RNG callout */
	int			sc_rnghz;	/* RNG callout ticks */
	bus_dma_tag_t		sc_dmat;	/* DMA tag */
	struct glxsb_dma_map	sc_dma;		/* DMA map */
	int32_t			sc_cid;		/* crypto tag */
	uint32_t		sc_sid;		/* session id */
	TAILQ_HEAD(ses_head, glxsb_session)
				sc_sessions;	/* crypto sessions */
	struct rwlock		sc_sessions_lock;/* sessions lock */
	struct mtx		sc_task_mtx;	/* task mutex */
	struct taskqueue	*sc_tq;		/* task queue */
	struct task		sc_cryptotask;	/* task */
	struct glxsb_taskop	sc_to;		/* task's crypto operation */
	int			sc_task_count;	/* tasks count */
};
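
/*
 * Locking notes (summarising the usage below): sc_sessions_lock protects
 * the session list, while sc_task_mtx protects sc_to and sc_task_count,
 * i.e. the single crypto operation in flight on the task queue.
 */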

static int  glxsb_probe(device_t);
static int  glxsb_attach(device_t);
static int  glxsb_detach(device_t);

static void glxsb_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int  glxsb_dma_alloc(struct glxsb_softc *);
static void glxsb_dma_pre_op(struct glxsb_softc *, struct glxsb_dma_map *);
static void glxsb_dma_post_op(struct glxsb_softc *, struct glxsb_dma_map *);
static void glxsb_dma_free(struct glxsb_softc *, struct glxsb_dma_map *);

static void glxsb_rnd(void *);
static int  glxsb_crypto_setup(struct glxsb_softc *);
static int  glxsb_crypto_newsession(device_t, uint32_t *, struct cryptoini *);
static int  glxsb_crypto_freesession(device_t, uint64_t);
static int  glxsb_aes(struct glxsb_softc *, uint32_t, uint32_t,
	uint32_t, void *, int, void *);

static int  glxsb_crypto_encdec(struct cryptop *, struct cryptodesc *,
	struct glxsb_session *, struct glxsb_softc *);

static void glxsb_crypto_task(void *, int);
static int  glxsb_crypto_process(device_t, struct cryptop *, int);

static device_method_t glxsb_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		glxsb_probe),
	DEVMETHOD(device_attach,	glxsb_attach),
	DEVMETHOD(device_detach,	glxsb_detach),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,		glxsb_crypto_newsession),
	DEVMETHOD(cryptodev_freesession,	glxsb_crypto_freesession),
	DEVMETHOD(cryptodev_process,		glxsb_crypto_process),

	{0, 0}
};

static driver_t glxsb_driver = {
	"glxsb",
	glxsb_methods,
	sizeof(struct glxsb_softc)
};

static devclass_t glxsb_devclass;

DRIVER_MODULE(glxsb, pci, glxsb_driver, glxsb_devclass, 0, 0);
MODULE_VERSION(glxsb, 1);
MODULE_DEPEND(glxsb, crypto, 1, 1, 1);

static int
glxsb_probe(device_t dev)
{

	if (pci_get_vendor(dev) == PCI_VENDOR_AMD &&
	    pci_get_device(dev) == PCI_PRODUCT_AMD_GEODE_LX_CRYPTO) {
		device_set_desc(dev,
		    "AMD Geode LX Security Block (AES-128-CBC, RNG)");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
glxsb_attach(device_t dev)
{
	struct glxsb_softc *sc = device_get_softc(dev);
	uint64_t msr;

	sc->sc_dev = dev;
	msr = rdmsr(SB_GLD_MSR_CAP);

	if ((msr & 0xFFFF00) != 0x130400) {
		device_printf(dev, "unknown ID 0x%x\n",
		    (int)((msr & 0xFFFF00) >> 16));
		return (ENXIO);
	}

	pci_enable_busmaster(dev);

	/* Map in the security block configuration/control registers */
	sc->sc_rid = PCIR_BAR(0);
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		return (ENXIO);
	}

	/*
	 * Configure the Security Block.
	 *
	 * We want to enable the noise generator (T_NE), and enable the
	 * linear feedback shift register and whitener post-processing
	 * (T_SEL = 3).  Also ensure that test mode (deterministic values)
	 * is disabled.
	 */
	msr = rdmsr(SB_GLD_MSR_CTRL);
	msr &= ~(SB_GMC_T_TM | SB_GMC_T_SEL_MASK);
	msr |= SB_GMC_T_NE | SB_GMC_T_SEL3;

	msr |= SB_GMC_SBI | SB_GMC_SBY;		/* for AES, if necessary */

	wrmsr(SB_GLD_MSR_CTRL, msr);
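
	/*
	 * Note (derived from the register definitions above): the RNG
	 * control field now holds SB_GMC_T_NE | SB_GMC_T_SEL3 (0x0700),
	 * i.e. noise source on, LFSR plus whitener post-processing, and
	 * test mode off.
	 */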

	/* Disable interrupts */
	bus_write_4(sc->sc_sr, SB_AES_INT, SB_AI_CLEAR_INTR);

	/* Allocate a contiguous DMA-able buffer to work in */
	if (glxsb_dma_alloc(sc) != 0)
		goto fail0;

	/* Initialize our task queue */
	sc->sc_tq = taskqueue_create("glxsb_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(dev, "cannot create task queue\n");
		goto fail0;
	}
	if (taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev)) != 0) {
		device_printf(dev, "cannot start task queue\n");
		goto fail1;
	}
	TASK_INIT(&sc->sc_cryptotask, 0, glxsb_crypto_task, sc);

	/* Initialize crypto */
	if (glxsb_crypto_setup(sc) != 0)
		goto fail1;

	/* Install a periodic collector for the "true" (AMD's word) RNG */
	if (hz > 100)
		sc->sc_rnghz = hz / 100;
	else
		sc->sc_rnghz = 1;
	callout_init(&sc->sc_rngco, CALLOUT_MPSAFE);
	glxsb_rnd(sc);

	return (0);

fail1:
	taskqueue_free(sc->sc_tq);
fail0:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_sr);
	return (ENXIO);
}

static int
glxsb_detach(device_t dev)
{
	struct glxsb_softc *sc = device_get_softc(dev);
	struct glxsb_session *ses;

	rw_wlock(&sc->sc_sessions_lock);
	TAILQ_FOREACH(ses, &sc->sc_sessions, ses_next) {
		if (ses->ses_used) {
			rw_wunlock(&sc->sc_sessions_lock);
			device_printf(dev,
			    "cannot detach, sessions still active.\n");
			return (EBUSY);
		}
	}
	while ((ses = TAILQ_FIRST(&sc->sc_sessions)) != NULL) {
		TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
		free(ses, M_GLXSB);
	}
	rw_wunlock(&sc->sc_sessions_lock);
	crypto_unregister_all(sc->sc_cid);
	callout_drain(&sc->sc_rngco);
	taskqueue_drain(sc->sc_tq, &sc->sc_cryptotask);
	bus_generic_detach(dev);
	glxsb_dma_free(sc, &sc->sc_dma);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_sr);
	taskqueue_free(sc->sc_tq);
	rw_destroy(&sc->sc_sessions_lock);
	mtx_destroy(&sc->sc_task_mtx);

	return (0);
}

/*
 * callback for bus_dmamap_load()
 */
static void
glxsb_dmamap_cb(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{

	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = seg[0].ds_addr;
}

static int
glxsb_dma_alloc(struct glxsb_softc *sc)
{
	struct glxsb_dma_map *dma = &sc->sc_dma;
	int rc;

	dma->dma_nsegs = 1;
	dma->dma_size = GLXSB_MAX_AES_LEN * 2;

	/* Setup DMA descriptor area */
	rc = bus_dma_tag_create(NULL,		/* parent */
	    SB_AES_ALIGN, 0,			/* alignments, bounds */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    dma->dma_size,			/* maxsize */
	    dma->dma_nsegs,			/* nsegments */
	    dma->dma_size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->sc_dmat);
	if (rc != 0) {
		device_printf(sc->sc_dev,
		    "cannot allocate DMA tag (%d)\n", rc);
		return (rc);
	}

	rc = bus_dmamem_alloc(sc->sc_dmat, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (rc != 0) {
		device_printf(sc->sc_dev,
		    "cannot allocate DMA memory of %d bytes (%d)\n",
		    dma->dma_size, rc);
		goto fail0;
	}

	rc = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    dma->dma_size, glxsb_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
	if (rc != 0) {
		device_printf(sc->sc_dev,
		    "cannot load DMA memory for %d bytes (%d)\n",
		    dma->dma_size, rc);
		goto fail1;
	}

	return (0);

fail1:
	bus_dmamem_free(sc->sc_dmat, dma->dma_vaddr, dma->dma_map);
fail0:
	bus_dma_tag_destroy(sc->sc_dmat);
	return (rc);
}

static void
glxsb_dma_pre_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

	bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
glxsb_dma_post_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

	bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

static void
glxsb_dma_free(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_free(sc->sc_dmat, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(sc->sc_dmat);
}

static void
glxsb_rnd(void *v)
{
	struct glxsb_softc *sc = v;
	uint32_t status, value;

	status = bus_read_4(sc->sc_sr, SB_RANDOM_NUM_STATUS);
	if (status & SB_RNS_TRNG_VALID) {
		value = bus_read_4(sc->sc_sr, SB_RANDOM_NUM);
		/* feed with one uint32 */
		random_harvest(&value, 4, 32, 0, RANDOM_PURE);
	}

	callout_reset(&sc->sc_rngco, sc->sc_rnghz, glxsb_rnd, sc);
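
	/*
	 * With sc_rnghz set to hz / 100 in glxsb_attach() this callout runs
	 * roughly 100 times a second and harvests at most 4 bytes per run
	 * (only when the TRNG valid bit is set), i.e. on the order of a few
	 * hundred bytes of raw input per second.
	 */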
}

static int
glxsb_crypto_setup(struct glxsb_softc *sc)
{

	sc->sc_cid = crypto_get_driverid(sc->sc_dev, CRYPTOCAP_F_HARDWARE);

	if (sc->sc_cid < 0) {
		device_printf(sc->sc_dev, "cannot get crypto driver id\n");
		return (ENOMEM);
	}

	TAILQ_INIT(&sc->sc_sessions);
	rw_init(&sc->sc_sessions_lock, "glxsb_sessions_lock");
	mtx_init(&sc->sc_task_mtx, "glxsb_crypto_mtx", NULL, MTX_DEF);

	if (crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0) != 0)
		goto crypto_fail;
	if (crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0) != 0)
		goto crypto_fail;
	if (crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0) != 0)
		goto crypto_fail;
	if (crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0) != 0)
		goto crypto_fail;
	if (crypto_register(sc->sc_cid, CRYPTO_RIPEMD160_HMAC, 0, 0) != 0)
		goto crypto_fail;
	if (crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0) != 0)
		goto crypto_fail;
	if (crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0) != 0)
		goto crypto_fail;
	if (crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0) != 0)
		goto crypto_fail;

	return (0);

crypto_fail:
	device_printf(sc->sc_dev, "cannot register crypto\n");
	crypto_unregister_all(sc->sc_cid);
	rw_destroy(&sc->sc_sessions_lock);
	mtx_destroy(&sc->sc_task_mtx);
	return (ENOMEM);
}

static int
glxsb_crypto_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct glxsb_softc *sc = device_get_softc(dev);
	struct glxsb_session *ses = NULL;
	struct cryptoini *encini, *macini;
	int error;

	if (sc == NULL || sidp == NULL || cri == NULL)
		return (EINVAL);

	encini = macini = NULL;
	for (; cri != NULL; cri = cri->cri_next) {
		switch(cri->cri_alg) {
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			if (macini != NULL)
				return (EINVAL);
			macini = cri;
			break;
		case CRYPTO_AES_CBC:
			if (encini != NULL)
				return (EINVAL);
			encini = cri;
			break;
		default:
			return (EINVAL);
		}
	}

	/*
	 * We only support HMAC algorithms to be able to work with
	 * ipsec(4), so if we are asked only for authentication without
	 * encryption, don't pretend we can accelerate it.
	 */
	if (encini == NULL)
		return (EINVAL);

	/*
	 * Look for a free session.
	 *
	 * Free sessions go first, so if the first session is in use, we
	 * need to allocate a new one.
	 */
	rw_wlock(&sc->sc_sessions_lock);
	ses = TAILQ_FIRST(&sc->sc_sessions);
	if (ses == NULL || ses->ses_used) {
		ses = malloc(sizeof(*ses), M_GLXSB, M_NOWAIT | M_ZERO);
		if (ses == NULL) {
			rw_wunlock(&sc->sc_sessions_lock);
			return (ENOMEM);
		}
		ses->ses_id = sc->sc_sid++;
	} else {
		TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
	}
	ses->ses_used = 1;
	TAILQ_INSERT_TAIL(&sc->sc_sessions, ses, ses_next);
	rw_wunlock(&sc->sc_sessions_lock);

	if (encini->cri_alg == CRYPTO_AES_CBC) {
		if (encini->cri_klen != 128) {
			glxsb_crypto_freesession(sc->sc_dev, ses->ses_id);
			return (EINVAL);
		}

		arc4rand(ses->ses_iv, sizeof(ses->ses_iv), 0);
		ses->ses_klen = encini->cri_klen;

		/* Copy the key (Geode LX wants the primary key only) */
		bcopy(encini->cri_key, ses->ses_key, sizeof(ses->ses_key));
	}

	if (macini != NULL) {
		error = glxsb_hash_setup(ses, macini);
		if (error != 0) {
			glxsb_crypto_freesession(sc->sc_dev, ses->ses_id);
			return (error);
		}
	}

	*sidp = ses->ses_id;
	return (0);
}

static int
glxsb_crypto_freesession(device_t dev, uint64_t tid)
{
	struct glxsb_softc *sc = device_get_softc(dev);
	struct glxsb_session *ses = NULL;
	uint32_t sid = ((uint32_t)tid) & 0xffffffff;

	if (sc == NULL)
		return (EINVAL);

	rw_wlock(&sc->sc_sessions_lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, ses_head, ses_next) {
		if (ses->ses_id == sid)
			break;
	}
	if (ses == NULL) {
		rw_wunlock(&sc->sc_sessions_lock);
		return (EINVAL);
	}
	TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
	glxsb_hash_free(ses);
	bzero(ses, sizeof(*ses));
	ses->ses_used = 0;
	ses->ses_id = sid;
	TAILQ_INSERT_HEAD(&sc->sc_sessions, ses, ses_next);
	rw_wunlock(&sc->sc_sessions_lock);

	return (0);
}

static int
glxsb_aes(struct glxsb_softc *sc, uint32_t control, uint32_t psrc,
    uint32_t pdst, void *key, int len, void *iv)
{
	uint32_t status;
	int i;

	if (len & 0xF) {
		device_printf(sc->sc_dev,
		    "len must be a multiple of 16 (not %d)\n", len);
		return (EINVAL);
	}

	/* Set the source address */
	bus_write_4(sc->sc_sr, SB_SOURCE_A, psrc);

	/* Set the destination address */
	bus_write_4(sc->sc_sr, SB_DEST_A, pdst);

	/* Set the data length */
	bus_write_4(sc->sc_sr, SB_LENGTH_A, len);

	/* Set the IV */
	if (iv != NULL) {
		bus_write_region_4(sc->sc_sr, SB_CBC_IV, iv, 4);
		control |= SB_CTL_CBC;
	}

	/* Set the key */
	bus_write_region_4(sc->sc_sr, SB_WKEY, key, 4);

	/* Ask the security block to do it */
	bus_write_4(sc->sc_sr, SB_CTL_A,
	    control | SB_CTL_WK | SB_CTL_DC | SB_CTL_SC | SB_CTL_ST);
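
	/*
	 * Per the register definitions above: SB_CTL_WK selects the writable
	 * key just loaded, SB_CTL_DC and SB_CTL_SC mark the destination and
	 * source buffers as cache-coherent, and SB_CTL_ST starts the
	 * operation.
	 */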

	/*
	 * Now wait until it is done.
	 *
	 * We do a busy wait.  Obviously the number of iterations of
	 * the loop required to perform the AES operation depends upon
	 * the number of bytes to process.
	 *
	 * On a 500 MHz Geode LX we see
	 *
	 *	length (bytes)	typical max iterations
	 *
	 * Since we have a maximum size of operation defined in
	 * GLXSB_MAX_AES_LEN, we use this constant to decide how long
	 * to wait.  Allow an order of magnitude longer than it should
	 * really take, just in case.
	 */

	for (i = 0; i < GLXSB_MAX_AES_LEN * 10; i++) {
		status = bus_read_4(sc->sc_sr, SB_CTL_A);
		if ((status & SB_CTL_ST) == 0)		/* Done */
			return (0);
	}

	device_printf(sc->sc_dev, "operation failed to complete\n");
	return (EIO);
}

static int
glxsb_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
    struct glxsb_session *ses, struct glxsb_softc *sc)
{
	char *op_src, *op_dst;
	uint32_t op_psrc, op_pdst;
	uint8_t op_iv[SB_AES_BLOCK_SIZE], *piv;
	int error;
	int len, tlen, xlen;
	int offset;
	uint32_t control;

	if (crd == NULL || (crd->crd_len % SB_AES_BLOCK_SIZE) != 0)
		return (EINVAL);

	/* How much of our buffer will we need to use? */
	xlen = crd->crd_len > GLXSB_MAX_AES_LEN ?
	    GLXSB_MAX_AES_LEN : crd->crd_len;

	/*
	 * XXX Check if we can have input == output on Geode LX.
	 * XXX In the meantime, use two separate (adjacent) buffers.
	 */
	op_src = sc->sc_dma.dma_vaddr;
	op_dst = (char *)sc->sc_dma.dma_vaddr + xlen;

	op_psrc = sc->sc_dma.dma_paddr;
	op_pdst = sc->sc_dma.dma_paddr + xlen;
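
	/*
	 * The input chunk occupies the first xlen bytes of the DMA buffer
	 * and the output chunk the following xlen bytes; the corresponding
	 * physical addresses are what get handed to the hardware.
	 */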

	if (crd->crd_flags & CRD_F_ENCRYPT) {
		control = SB_CTL_ENC;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
		else
			bcopy(ses->ses_iv, op_iv, sizeof(op_iv));

		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, sizeof(op_iv), op_iv);
		}
	} else {
		control = SB_CTL_DEC;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, sizeof(op_iv), op_iv);
	}

	offset = 0;
	tlen = crd->crd_len;
	piv = op_iv;

	/* Process the data in GLXSB_MAX_AES_LEN chunks */
	while (tlen > 0) {
		len = (tlen > GLXSB_MAX_AES_LEN) ? GLXSB_MAX_AES_LEN : tlen;
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    crd->crd_skip + offset, len, op_src);

		glxsb_dma_pre_op(sc, &sc->sc_dma);

		error = glxsb_aes(sc, control, op_psrc, op_pdst, ses->ses_key,
		    len, op_iv);

		glxsb_dma_post_op(sc, &sc->sc_dma);
		if (error != 0)
			return (error);

		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    crd->crd_skip + offset, len, op_dst);

		offset += len;
		tlen -= len;

		if (tlen <= 0) {	/* Ideally, just == 0 */
			/* Finished - put the IV in session IV */
			piv = ses->ses_iv;
		}

		/*
		 * Copy out last block for use as next iteration/session IV.
		 *
		 * piv is set to op_iv[] before the loop starts, but is
		 * set to ses->ses_iv if we're going to exit the loop this
		 * time.
		 */
		if (crd->crd_flags & CRD_F_ENCRYPT)
			bcopy(op_dst + len - sizeof(op_iv), piv, sizeof(op_iv));
		else {
			/* Decryption, only needed if there is another iteration */
			if (tlen > 0)
				bcopy(op_src + len - sizeof(op_iv), piv,
				    sizeof(op_iv));
		}
	}

	/* All AES processing has now been done. */
	bzero(sc->sc_dma.dma_vaddr, xlen * 2);
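
	/*
	 * Note: the bzero() above clears both the source and destination
	 * halves of the DMA buffer, so no plaintext or ciphertext copies of
	 * the request linger once the operation completes.
	 */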

	return (error);
}

static void
glxsb_crypto_task(void *arg, int pending)
{
	struct glxsb_softc *sc = arg;
	struct glxsb_session *ses;
	struct cryptop *crp;
	struct cryptodesc *enccrd, *maccrd;
	int error;

	maccrd = sc->sc_to.to_maccrd;
	enccrd = sc->sc_to.to_enccrd;
	crp = sc->sc_to.to_crp;
	ses = sc->sc_to.to_ses;

	/* Perform data authentication if requested before encryption */
	if (maccrd != NULL && maccrd->crd_next == enccrd) {
		error = glxsb_hash_process(ses, maccrd, crp);
		if (error != 0)
			goto out;
	}

	error = glxsb_crypto_encdec(crp, enccrd, ses, sc);
	if (error != 0)
		goto out;

	/* Perform data authentication if requested after encryption */
	if (maccrd != NULL && enccrd->crd_next == maccrd) {
		error = glxsb_hash_process(ses, maccrd, crp);
		if (error != 0)
			goto out;
	}

out:
	mtx_lock(&sc->sc_task_mtx);
	sc->sc_task_count--;
	mtx_unlock(&sc->sc_task_mtx);

	crp->crp_etype = error;
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
	crypto_done(crp);
}

static int
glxsb_crypto_process(device_t dev, struct cryptop *crp, int hint)
{
	struct glxsb_softc *sc = device_get_softc(dev);
	struct glxsb_session *ses;
	struct cryptodesc *crd, *enccrd, *maccrd;
	uint32_t sid;
	int error = 0;

	enccrd = maccrd = NULL;

	if (crp == NULL || crp->crp_buf == NULL ||
	    crp->crp_callback == NULL || crp->crp_desc == NULL) {
		error = EINVAL;
		goto fail;
	}

	for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			if (maccrd != NULL) {
				error = EINVAL;
				goto fail;
			}
			maccrd = crd;
			break;
		case CRYPTO_AES_CBC:
			if (enccrd != NULL) {
				error = EINVAL;
				goto fail;
			}
			enccrd = crd;
			break;
		default:
			error = EINVAL;
			goto fail;
		}
	}

	if (enccrd == NULL || enccrd->crd_len % AES_BLOCK_LEN != 0) {
		error = EINVAL;
		goto fail;
	}

	sid = crp->crp_sid & 0xffffffff;
	rw_rlock(&sc->sc_sessions_lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, ses_head, ses_next) {
		if (ses->ses_id == sid)
			break;
	}
	rw_runlock(&sc->sc_sessions_lock);
	if (ses == NULL || !ses->ses_used) {
		error = EINVAL;
		goto fail;
	}

	mtx_lock(&sc->sc_task_mtx);
	if (sc->sc_task_count != 0) {
		mtx_unlock(&sc->sc_task_mtx);
		return (ERESTART);
	}
	sc->sc_task_count++;

	sc->sc_to.to_maccrd = maccrd;
	sc->sc_to.to_enccrd = enccrd;
	sc->sc_to.to_crp = crp;
	sc->sc_to.to_ses = ses;
	mtx_unlock(&sc->sc_task_mtx);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_cryptotask);
	return (0);

fail:
	crp->crp_etype = error;
	crypto_done(crp);
	return (error);
}