/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <dev/pci/pcivar.h>

#include <dev/random/randomdev.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"

MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");

/*
 * Need a global softc available for garbage random_source API, which lacks any
 * context pointer.  It's also handy for debugging.
 */
struct ccp_softc *g_ccp_softc;

bool g_debug_print = false;
SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0,
    "Set to enable debugging log messages");

static struct pciid {
	uint32_t devid;
	const char *desc;
} ccp_ids[] = {
	{ 0x14561022, "AMD CCP-5a" },
	{ 0x14681022, "AMD CCP-5b" },
};

static struct random_source random_ccp = {
	.rs_ident = "AMD CCP TRNG",
	.rs_source = RANDOM_PURE_CCP,
	.rs_read = random_ccp_read,
};

/*
 * ccp_populate_sglist() generates a scatter/gather list that covers the entire
 * crypto operation buffer.
 */
static int
ccp_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		error = sglist_append_mbuf(sg, crp->crp_mbuf);
		break;
	case CRYPTO_BUF_UIO:
		error = sglist_append_uio(sg, crp->crp_uio);
		break;
	case CRYPTO_BUF_CONTIG:
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * Handle a GCM request with an empty payload by performing the
 * operation in software.
 */
static void
ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp)
{
	struct aes_gmac_ctx gmac_ctx;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int i, len;

	/*
	 * This assumes a 12-byte IV from the crp.  See longer comment
	 * above in ccp_gcm() for more details.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
		crp->crp_etype = EINVAL;
		goto out;
	}
	memcpy(iv, crp->crp_iv, 12);
	*(uint32_t *)&iv[12] = htobe32(1);

	/* Initialize the MAC. */
	AES_GMAC_Init(&gmac_ctx);
	AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
	AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));

	/* MAC the AAD, one block at a time, zero-padding the final block. */
	for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
		len = imin(crp->crp_aad_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_aad_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	}

	/* Length block: AAD length in bits; the payload length is zero. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8);
	AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	AES_GMAC_Final(digest, &gmac_ctx);

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
		    digest);
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
		    digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
			crp->crp_etype = 0;
		else
			crp->crp_etype = EBADMSG;
	}
out:
	crypto_done(crp);
}
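
/* Match the PCI device against the table of supported CCP device IDs. */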
static int
ccp_probe(device_t dev)
{
	struct pciid *ip;
	uint32_t id;

	id = pci_get_devid(dev);
	for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
		if (id == ip->devid) {
			device_set_desc(dev, ip->desc);
			return (0);
		}
	}
	return (ENXIO);
}
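
/*
 * Set up each command queue: its lock and the preallocated scatter/gather
 * lists used to describe crypto request buffers.
 */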
static void
ccp_initialize_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];
		qp->cq_softc = sc;
		qp->cq_qindex = i;
		mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
		/* XXX - arbitrarily chosen sizes */
		qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
		/* Two more SGEs than sg_crp to accommodate ipad. */
		qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
		qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
	}
}
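
/* Tear down the per-queue state created by ccp_initialize_queues(). */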
static void
ccp_free_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];
		mtx_destroy(&qp->cq_lock);
		sglist_free(qp->cq_sg_crp);
		sglist_free(qp->cq_sg_ulptx);
		sglist_free(qp->cq_sg_dst);
	}
}
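
/*
 * Register with the opencrypto framework, bring up the hardware, and, on the
 * first device with TRNG capability, register the CCP random source.
 */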
static int
ccp_attach(device_t dev)
{
	struct ccp_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->cid = crypto_get_driverid(dev, sizeof(struct ccp_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}

	error = ccp_hw_attach(dev);
	if (error != 0)
		return (error);

	mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);
	ccp_initialize_queues(sc);

	if (g_ccp_softc == NULL) {
		g_ccp_softc = sc;
		if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
			random_source_register(&random_ccp);
	}

	return (0);
}
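
/*
 * Mark the softc as detaching, then unwind attach: unregister crypto and the
 * random source, detach the hardware, and free the queue resources.
 */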
static int
ccp_detach(device_t dev)
{
	struct ccp_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
		random_source_deregister(&random_ccp);

	ccp_hw_detach(dev);
	ccp_free_queues(sc);

	if (g_ccp_softc == sc)
		g_ccp_softc = NULL;

	mtx_destroy(&sc->lock);
	return (0);
}

static void
ccp_init_hmac_digest(struct ccp_session *s, const char *key, int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		explicit_bzero(&auth_ctx, sizeof(auth_ctx));
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}
}
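
/*
 * Sanity-check an AES key length.  XTS keys are twice the AES key size (256
 * or 512 bits total); the other modes accept 128-, 192-, and 256-bit keys.
 */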
static int
ccp_aes_check_keylen(int alg, int klen)
{

	switch (klen * 8) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
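
/*
 * Record the AES key and the matching CCP key-size selector in the session's
 * block cipher state.
 */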
static void
ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
{
	unsigned kbits;

	if (alg == CRYPTO_AES_XTS)
		kbits = (klen / 2) * 8;
	else
		kbits = klen * 8;

	switch (kbits) {
	case 128:
		s->blkcipher.cipher_type = CCP_AES_TYPE_128;
		break;
	case 192:
		s->blkcipher.cipher_type = CCP_AES_TYPE_192;
		break;
	case 256:
		s->blkcipher.cipher_type = CCP_AES_TYPE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}

static bool
ccp_auth_supported(struct ccp_softc *sc,
    const struct crypto_session_params *csp)
{

	if ((sc->hw_features & VERSION_CAP_SHA) == 0)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		if (csp->csp_auth_key == NULL)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
ccp_cipher_supported(struct ccp_softc *sc,
    const struct crypto_session_params *csp)
{

	if ((sc->hw_features & VERSION_CAP_AES) == 0)
		return (false);
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_ICM:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}
	return (ccp_aes_check_keylen(csp->csp_cipher_alg,
	    csp->csp_cipher_klen));
}
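
/*
 * Advertise hardware support for sessions this engine can handle; opencrypto
 * uses the answer to choose a driver.
 */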
static int
ccp_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct ccp_softc *sc;

	if (csp->csp_flags != 0)
		return (EINVAL);
	sc = device_get_softc(dev);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!ccp_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!ccp_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (EINVAL);
			if (csp->csp_auth_mlen < 0 ||
			    csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
				return (EINVAL);
			if ((sc->hw_features & VERSION_CAP_AES) == 0)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!ccp_auth_supported(sc, csp) ||
		    !ccp_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (CRYPTODEV_PROBE_HARDWARE);
}
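
/*
 * Translate opencrypto session parameters into CCP session state: auth and
 * cipher modes, HMAC pads, AES key, and the hardware queue to use.
 */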
static int
ccp_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct ccp_softc *sc;
	struct ccp_session *s;
	struct auth_hash *auth_hash;
	enum ccp_aes_mode cipher_mode;
	unsigned auth_mode;
	unsigned q;

	/* XXX reconcile auth_mode with use by ccp_sha */
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		auth_hash = &auth_hash_hmac_sha1;
		auth_mode = SHA1;
		break;
	case CRYPTO_SHA2_256_HMAC:
		auth_hash = &auth_hash_hmac_sha2_256;
		auth_mode = SHA2_256;
		break;
	case CRYPTO_SHA2_384_HMAC:
		auth_hash = &auth_hash_hmac_sha2_384;
		auth_mode = SHA2_384;
		break;
	case CRYPTO_SHA2_512_HMAC:
		auth_hash = &auth_hash_hmac_sha2_512;
		auth_mode = SHA2_512;
		break;
	default:
		auth_hash = NULL;
		auth_mode = 0;
		break;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		cipher_mode = CCP_AES_MODE_CBC;
		break;
	case CRYPTO_AES_ICM:
		cipher_mode = CCP_AES_MODE_CTR;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		cipher_mode = CCP_AES_MODE_GCTR;
		break;
	case CRYPTO_AES_XTS:
		cipher_mode = CCP_AES_MODE_XTS;
		break;
	default:
		cipher_mode = CCP_AES_MODE_ECB;
		break;
	}

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}

	s = crypto_get_driver_session(cses);

	/* Just grab the first usable queue for now. */
	for (q = 0; q < nitems(sc->queues); q++)
		if ((sc->valid_queues & (1 << q)) != 0)
			break;
	if (q == nitems(sc->queues)) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	s->queue = q;

	switch (csp->csp_mode) {
	case CSP_MODE_AEAD:
		s->mode = GCM;
		break;
	case CSP_MODE_ETA:
		s->mode = AUTHENC;
		break;
	case CSP_MODE_DIGEST:
		s->mode = HMAC;
		break;
	case CSP_MODE_CIPHER:
		s->mode = BLKCIPHER;
		break;
	}

	if (s->mode == GCM) {
		if (csp->csp_auth_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = csp->csp_auth_mlen;
	} else if (auth_hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		if (csp->csp_auth_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = csp->csp_auth_mlen;
		ccp_init_hmac_digest(s, csp->csp_auth_key, csp->csp_auth_klen);
	}
	if (cipher_mode != CCP_AES_MODE_ECB) {
		s->blkcipher.cipher_mode = cipher_mode;
		if (csp->csp_cipher_key != NULL)
			ccp_aes_setkey(s, csp->csp_cipher_alg,
			    csp->csp_cipher_key, csp->csp_cipher_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	return (0);
}
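
/* Release a session, warning if requests are still outstanding. */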
static void
ccp_freesession(device_t dev, crypto_session_t cses)
{
	struct ccp_session *s;

	s = crypto_get_driver_session(cses);

	if (s->pending != 0)
		device_printf(dev,
		    "session %p freed with %d pending requests\n", s,
		    s->pending);
	s->active = false;
}
static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
	const struct crypto_session_params *csp;
	struct ccp_softc *sc;
	struct ccp_queue *qp;
	struct ccp_session *s;
	int error;

	csp = crypto_get_params(crp->crp_session);
	s = crypto_get_driver_session(crp->crp_session);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	qp = &sc->queues[s->queue];
	mtx_unlock(&sc->lock);
	error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT);
	if (error != 0)
		goto out_nolock;

	error = ccp_populate_sglist(qp->cq_sg_crp, crp);
	if (error != 0)
		goto out;

	if (crp->crp_auth_key != NULL) {
		KASSERT(s->hmac.auth_hash != NULL, ("auth key without HMAC"));
		ccp_init_hmac_digest(s, crp->crp_auth_key, csp->csp_auth_klen);
	}
	if (crp->crp_cipher_key != NULL)
		ccp_aes_setkey(s, csp->csp_cipher_alg, crp->crp_cipher_key,
		    csp->csp_cipher_klen);

	switch (s->mode) {
	case HMAC:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_hmac(qp, s, crp);
		break;
	case BLKCIPHER:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_blkcipher(qp, s, crp);
		break;
	case AUTHENC:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_authenc(qp, s, crp);
		break;
	case GCM:
		if (crp->crp_payload_length == 0) {
			mtx_unlock(&qp->cq_lock);
			ccp_gcm_soft(s, crp);
			return (0);
		}
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_gcm(qp, s, crp);
		break;
	}

	if (error == 0)
		s->pending++;

out:
	if (error != 0) {
		/*
		 * Squash EAGAIN so callers don't uselessly and
		 * expensively retry if the ring was full: the request is
		 * completed below with the error in crp_etype instead of
		 * being returned to the framework for requeueing.
		 */
		ccp_queue_abort(qp);
	} else
		ccp_queue_release(qp);

out_nolock:
	if (error != 0) {
		DPRINTF(dev, "%s: early error:%d\n", __func__, error);
		crp->crp_etype = error;
		crypto_done(crp);
	}
	return (0);
}

static device_method_t ccp_methods[] = {
	DEVMETHOD(device_probe,		ccp_probe),
	DEVMETHOD(device_attach,	ccp_attach),
	DEVMETHOD(device_detach,	ccp_detach),

	DEVMETHOD(cryptodev_probesession, ccp_probesession),
	DEVMETHOD(cryptodev_newsession,	ccp_newsession),
	DEVMETHOD(cryptodev_freesession, ccp_freesession),
	DEVMETHOD(cryptodev_process,	ccp_process),

	DEVMETHOD_END
};

static driver_t ccp_driver = {
	"ccp",
	ccp_methods,
	sizeof(struct ccp_softc)
};

static devclass_t ccp_devclass;
DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL);
MODULE_VERSION(ccp, 1);
MODULE_DEPEND(ccp, crypto, 1, 1, 1);
MODULE_DEPEND(ccp, random_device, 1, 1, 1);
#if 0	/* There are enough known issues that we shouldn't load automatically */
MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids,
    nitems(ccp_ids));
#endif
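
/*
 * Reserve space for "n" descriptors on the queue's ring, sleeping for room
 * only if M_WAITOK was passed.  Called with the queue lock held.
 */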
static int
ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
{
	struct ccp_softc *sc;

	mtx_assert(&qp->cq_lock, MA_OWNED);
	sc = qp->cq_softc;

	if (n < 1 || n >= (1 << sc->ring_size_order))
		return (EINVAL);

	while (true) {
		if (ccp_queue_get_ring_space(qp) >= n)
			return (0);
		if ((mflags & M_WAITOK) == 0)
			return (EAGAIN);
		qp->cq_waiting = true;
		msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
	}
}
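
/*
 * Lock the queue and reserve ring space for a new transaction.  The lock is
 * held on successful return and dropped on failure.
 */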
int
ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
{
	int error;

	mtx_lock(&qp->cq_lock);
	qp->cq_acq_tail = qp->cq_tail;
	error = ccp_queue_reserve_space(qp, n, mflags);
	if (error != 0)
		mtx_unlock(&qp->cq_lock);
	return (error);
}
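
/*
 * Hand any newly written descriptors to the hardware by updating the queue
 * tail, then drop the queue lock.
 */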
void
ccp_queue_release(struct ccp_queue *qp)
{

	mtx_assert(&qp->cq_lock, MA_OWNED);
	if (qp->cq_tail != qp->cq_acq_tail) {
		wmb();
		ccp_queue_write_tail(qp);
	}
	mtx_unlock(&qp->cq_lock);
}
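
/*
 * Discard the descriptors staged for the current transaction, roll the tail
 * back to the reserve point, and drop the queue lock.
 */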
void
ccp_queue_abort(struct ccp_queue *qp)
{
	unsigned i;

	mtx_assert(&qp->cq_lock, MA_OWNED);

	/* Wipe out any descriptors associated with this aborted txn. */
	for (i = qp->cq_acq_tail; i != qp->cq_tail;
	    i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) {
		memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i]));
	}
	qp->cq_tail = qp->cq_acq_tail;

	mtx_unlock(&qp->cq_lock);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
static void
db_show_ccp_sc(struct ccp_softc *sc)
{

	db_printf("ccp softc at %p\n", sc);
	db_printf(" cid: %d\n", (int)sc->cid);

	db_printf(" lock: ");
	db_show_lock(&sc->lock);

	db_printf(" detaching: %d\n", (int)sc->detaching);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);

	db_printf(" hw_version: %d\n", (int)sc->hw_version);
	db_printf(" hw_features: %b\n", (int)sc->hw_features,
	    "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
	    "\11SHA\0103DES\07AES");

	db_printf(" hw status:\n");
	db_ccp_show_hw(sc);
}

static void
db_show_ccp_qp(struct ccp_queue *qp)
{

	db_printf(" lock: ");
	db_show_lock(&qp->cq_lock);

	db_printf(" cq_qindex: %u\n", qp->cq_qindex);
	db_printf(" cq_softc: %p\n", qp->cq_softc);

	db_printf(" head: %u\n", qp->cq_head);
	db_printf(" tail: %u\n", qp->cq_tail);
	db_printf(" acq_tail: %u\n", qp->cq_acq_tail);
	db_printf(" desc_ring: %p\n", qp->desc_ring);
	db_printf(" completions_ring: %p\n", qp->completions_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)qp->desc_ring_bus_addr);

	db_printf(" hw status:\n");
	db_ccp_show_queue_hw(qp);
}

DB_SHOW_COMMAND(ccp, db_show_ccp)
{
	struct ccp_softc *sc;
	unsigned unit, qindex;

	if (!have_addr)
		goto usage;
	unit = (unsigned)addr;

	sc = devclass_get_softc(ccp_devclass, unit);
	if (sc == NULL) {
		db_printf("No such device ccp%u\n", unit);
		goto usage;
	}

	if (count == -1) {
		db_show_ccp_sc(sc);
		return;
	}

	qindex = (unsigned)count;
	if (qindex >= nitems(sc->queues)) {
		db_printf("No such queue %u\n", qindex);
		goto usage;
	}
	db_show_ccp_qp(&sc->queues[qindex]);
	return;

usage:
	db_printf("usage: show ccp <unit>[,<qindex>]\n");
	return;
}
#endif /* DDB */