/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+
 * | Hash state                    |  ----- For hash-only requests
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for HMAC/hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 *
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC request with
 * +-------------------------------+        'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */
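
/*
 * Illustrative sketch (not part of the driver): how a completion
 * handler might decode the big-endian status word described above.
 * The bit positions come from the comment; the helper name and the
 * EBADMSG mapping are assumptions for this example.  CHK_PAD_ERR_BIT
 * used later in this file wraps the same test.
 */
#if 0
static int
ccr_decode_fw6_status(const struct cpl_fw6_pld *cpl)
{
	uint32_t status;

	/* The status word is the last 4 bytes of data[0]. */
	status = be64toh(cpl->data[0]) & 0xffffffff;
	if (status & 1)		/* bit 0: "MAC" error */
		return (EBADMSG);
	if (status & 2)		/* bit 1: "PAD" error */
		return (EBADMSG);
	return (0);
}
#endif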

/*
 * The crypto engine supports a maximum AAD size of 511 bytes.
 */
#define	MAX_AAD_LEN		511

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
 * entries.  While the CPL includes a 16-bit length field, the T6 can
 * sometimes hang if an error occurs while processing a request with a
 * single DSGL entry larger than 2k.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		2048

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Longer requests either result in hung
 * requests or incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");

struct ccr_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
	char opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_ccm_mac {
	int hash_len;
};

struct ccr_session_blkcipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_session {
	enum { HASH, HMAC, BLKCIPHER, AUTHENC, GCM, CCM } mode;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
		struct ccr_session_ccm_mac ccm_mac;
	};
	struct ccr_session_blkcipher blkcipher;
};

struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	struct sge_wrq *txq;
	struct sge_rxq *rxq;
	int tx_channel_id;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_crp' contains an sglist describing the entire buffer
	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
	 * the data the engine should DMA as input via ULPTX_SGL.
	 * 'sg_dsgl' is used to describe the destination that cipher
	 * text and a tag should be written to.
	 */
	struct sglist *sg_crp;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;

	/*
	 * Pre-allocate a dummy output buffer for the IV and AAD for
	 * AEAD requests.
	 */
	struct sglist *sg_iv_aad;

	uint64_t stats_blkcipher_encrypt;
	uint64_t stats_blkcipher_decrypt;
	uint64_t stats_hash;
	uint64_t stats_hmac;
	uint64_t stats_authenc_encrypt;
	uint64_t stats_authenc_decrypt;
	uint64_t stats_gcm_encrypt;
	uint64_t stats_gcm_decrypt;
	uint64_t stats_ccm_encrypt;
	uint64_t stats_ccm_decrypt;
	uint64_t stats_wr_nomem;
	uint64_t stats_inflight;
	uint64_t stats_mac_error;
	uint64_t stats_pad_error;
	uint64_t stats_bad_session;
	uint64_t stats_sglist_error;
	uint64_t stats_process_error;
	uint64_t stats_sw_fallback;
};

/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the crd_skip bytes at the start of the data as well as
 * any AAD or IV.  For authenticated encryption requests it should
 * include the destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffer described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers the entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */

static int
ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
	else
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
	return (error);
}
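
/*
 * Usage sketch (assumed caller, for illustration only): the
 * pre-allocated 'sg_crp' list is reused for each request, so a
 * failure here maps to the sglist_error statistic rather than a new
 * allocation.
 */
#if 0
	error = ccr_populate_sglist(sc->sg_crp, crp);
	if (error) {
		sc->stats_sglist_error++;
		return (error);
	}
#endif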

/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}
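
/*
 * Worked example (illustration): a single 5000-byte segment counted
 * against DSGL_SGE_MAXLEN (2048) contributes howmany(5000, 2048) == 3
 * DSGL entries of 2048, 2048, and 904 bytes, matching the splitting
 * performed later by ccr_write_phys_dsgl().
 */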

/* These functions deal with PHYS_DSGL for the reply buffer. */

static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}
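
/*
 * Worked example (illustration): struct phys_sge_pairs packs 8
 * address/length pairs, so for nsegs = 10:
 *
 *   len  = (10 / 8) * sizeof(struct phys_sge_pairs)     one full block
 *        + 8 * sizeof(uint16_t)                         partial: lengths
 *        + roundup2(10 % 8, 2) * sizeof(uint64_t)       partial: addresses
 *
 * The remainder is rounded up to an even count of 8-byte addresses.
 */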

static void
ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = sc->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}

/* These functions deal with the ULPTX_SGL for input payload. */

static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}
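
/*
 * Worked example (illustration): for nsegs = 5 the first segment
 * lives in struct ulptx_sgl itself and the remaining 4 are packed as
 * address/length pairs.  Assuming the 16-byte fixed header:
 *
 *   n = sizeof(struct ulptx_sgl) + 8 * ((3 * 4) / 2 + (4 & 1))
 *     = 16 + 48 = 64, already a multiple of 16.
 */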

static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = sc->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}

static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}
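
/*
 * Illustration of the immediate-data heuristic: a payload is only
 * inlined when it fits under the immediate-data limit
 * (CRYPTO_MAX_IMM_TX_PKT_LEN) and the padded headers plus payload
 * still fit in a single work request (SGE_MAX_WR_LEN).  A small
 * request, say transhdr_len = 96 and input_len = 128, rounds to
 * 96 + 128 = 224 bytes of WR space and is sent inline, while a 32k
 * payload always goes out via a ULPTX_SGL.
 */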

static void
ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
    u_int wr_len, u_int imm_len, u_int sgl_len, u_int hash_size,
    struct cryptop *crp)
{
	u_int cctx_size, idata_len;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = 0;
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(sc->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
	idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
	if (imm_len % 16 != 0)
		idata_len -= 16 - imm_len % 16;
	crwr->sc_imm.len = htobe32(idata_len);
}
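
/*
 * Worked example (illustration) of the immediate-data length above:
 * the ULP_TX_SC_IMM sub-command covers everything from sec_cpl up to
 * but not including the SGL, minus any padding added after the
 * immediate payload.  For imm_len = 20, the payload is padded to 32
 * bytes in wr_len, and idata_len backs out the 16 - (20 % 16) = 12
 * pad bytes so the engine sees exactly 20 bytes of immediate data.
 */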

static int
ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	struct cryptodesc *crd;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int hmac_ctrl, imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len, use_opad;

	crd = crp->crp_desc;

	/* Reject requests with too large of an input buffer. */
	if (crd->crd_len > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	if (s->mode == HMAC) {
		use_opad = 1;
		hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
	} else {
		use_opad = 0;
		hmac_ctrl = SCMD_HMAC_CTRL_NOP;
	}

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size;
	if (use_opad)
		kctx_len += iopad_size;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	if (crd->crd_len == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
		imm_len = crd->crd_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    hash_size_in_response, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
	    crd->crd_len);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len);
	if (use_opad)
		memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad,
		    s->hmac.partial_digest_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
	if (crd->crd_len == 0) {
		dst[0] = 0x80;
		*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
		    htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
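
/*
 * Sketch (assumed session-setup code, for illustration only): the
 * ipad/opad partial digests stored in the session are the hash states
 * after one block of key XOR 0x36 / key XOR 0x5c, the standard HMAC
 * precomputation.  The helper name is illustrative; the engine wants
 * the raw chaining state, not a finalized digest.
 */
#if 0
static void
ccr_init_hmac_digest_example(struct auth_hash *axf, union authctx *ctx,
    const char *key, int klen, char *ipad, char *opad)
{
	u_int i;

	/* Assumes klen <= axf->blocksize; real code hashes longer keys. */
	for (i = 0; i < axf->blocksize; i++) {
		ipad[i] = (i < klen ? key[i] : 0) ^ HMAC_IPAD_VAL;
		opad[i] = (i < klen ? key[i] : 0) ^ HMAC_OPAD_VAL;
	}
	axf->Init(ctx);
	axf->Update(ctx, ipad, axf->blocksize);
	/* Extract the partial digest from 'ctx' here; ditto for opad. */
}
#endif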

static int
ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	struct cryptodesc *crd;

	crd = crp->crp_desc;
	if (error == 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
	}

	return (error);
}

static int
ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct cryptodesc *crd;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	crd = crp->crp_desc;

	if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
		return (EINVAL);
	if (crd->crd_alg == CRYPTO_AES_CBC &&
	    (crd->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with too large of an input buffer. */
	if (crd->crd_len > MAX_REQUEST_SIZE)
		return (EFBIG);

	if (crd->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
	    crd->crd_len);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	if (ccr_use_imm_data(transhdr_len, crd->crd_len +
	    s->blkcipher.iv_len)) {
		imm_len = crd->crd_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
	}

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crd->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, s->blkcipher.iv_len);
	dst += s->blkcipher.iv_len;
	if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static u_int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (SCMD_HMAC_CTRL_DIV2);
	return (SCMD_HMAC_CTRL_NO_TRUNC);
}
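
/*
 * Example mapping (illustration) for SHA-256 (hashsize = 32):
 * authsize 10 -> TRUNC_RFC4366, 12 -> IPSEC_96BIT, 16 -> DIV2,
 * 32 -> NO_TRUNC.
 */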

static int
ccr_authenc(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size;
	u_int aad_start, aad_len, aad_stop;
	u_int auth_start, auth_stop, auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
		return (EINVAL);
	if (crde->crd_alg == CRYPTO_AES_CBC &&
	    (crde->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/*
	 * Compute the length of the AAD (data covered by the
	 * authentication descriptor but not the encryption
	 * descriptor).  To simplify the logic, AAD is only permitted
	 * before the cipher/plain text, not after.  This is true of
	 * all currently-generated requests.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EINVAL);
	if (crda->crd_skip < crde->crd_skip) {
		if (crda->crd_skip + crda->crd_len > crde->crd_skip)
			aad_len = (crde->crd_skip - crda->crd_skip);
		else
			aad_len = crda->crd_len;
	} else
		aad_len = 0;
	if (aad_len + s->blkcipher.iv_len > MAX_AAD_LEN)
		return (EINVAL);

	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;
	if (crde->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (s->blkcipher.iv_len + aad_len + crde->crd_len +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (s->blkcipher.iv_len + aad_len + crde->crd_len >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
	    s->blkcipher.iv_len + aad_len);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
	    crde->crd_len);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crda->crd_inject, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = aad_len + crde->crd_len;

	/*
	 * The firmware hangs if sent a request which is a
	 * bit smaller than MAX_REQUEST_SIZE.  In particular, the
	 * firmware appears to require 512 - 16 bytes of spare room
	 * along with the size of the hash even if the hash isn't
	 * included in the input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (aad_len != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_skip, aad_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crde->crd_skip, crde->crd_len);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_inject, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/*
	 * Any auth-only data before the cipher region is marked as AAD.
	 * Auth-data that overlaps with the cipher region is placed in
	 * the auth section.
	 */
	if (aad_len != 0) {
		aad_start = s->blkcipher.iv_len + 1;
		aad_stop = aad_start + aad_len - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = s->blkcipher.iv_len + aad_len + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (aad_len == crda->crd_len) {
		auth_start = 0;
		auth_stop = 0;
	} else {
		if (aad_len != 0)
			auth_start = cipher_start;
		else
			auth_start = s->blkcipher.iv_len + crda->crd_skip -
			    crde->crd_skip + 1;
		auth_stop = (crde->crd_skip + crde->crd_len) -
		    (crda->crd_skip + crda->crd_len) + cipher_stop;
	}
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	}

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crde->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crde->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crde->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len);
	memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, s->blkcipher.iv_len);
	dst += s->blkcipher.iv_len;
	if (imm_len != 0) {
		if (aad_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, aad_len, dst);
			dst += aad_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{
	struct cryptodesc *crd;

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * For a decryption request, the hardware may do a verification
	 * of the HMAC which will fail if the existing HMAC isn't in the
	 * buffer.  If that happens, clear the error and copy the HMAC
	 * from the CPL reply into the buffer.
	 *
	 * For encryption requests, crd should be the cipher request
	 * which will have CRD_F_ENCRYPT set.  For decryption
	 * requests, crp_desc will be the HMAC request which should
	 * not have this flag set.
	 */
	crd = crp->crp_desc;
	if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
	    !(crd->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
		error = 0;
	}
	return (error);
}

static int
ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle GCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crde->crd_len == 0)
		return (EMSGSIZE);

	/*
	 * AAD is only permitted before the cipher/plain text, not
	 * after.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EMSGSIZE);

	if (crda->crd_len + AES_BLOCK_LEN > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->gmac.hash_len;
	if (crde->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The IV handling for GCM in OCF is a bit more complicated in
	 * that IPSec provides a full 16-byte IV (including the
	 * counter), whereas the /dev/crypto interface sometimes
	 * provides a full 16-byte IV (if no IV is provided in the
	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
	 *
	 * When provided a 12-byte IV, assume the IV is really 16 bytes
	 * with a counter in the last 4 bytes initialized to 1.
	 *
	 * While iv_len is checked below, the value is currently
	 * always set to 12 when creating a GCM session in this driver
	 * due to limitations in OCF (there is no way to know what the
	 * IV length of a given request will be).  This means that the
	 * driver always assumes a 12-byte IV for now.
	 */
	if (s->blkcipher.iv_len == 12)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crda->crd_len + crde->crd_len +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crda->crd_len + crde->crd_len > MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    crda->crd_len);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
	    crde->crd_len);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crda->crd_inject, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the Galois hash key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crda->crd_len + crde->crd_len;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crda->crd_len != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_skip, crda->crd_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crde->crd_skip, crde->crd_len);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_inject, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	if (crda->crd_len != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crda->crd_len - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crda->crd_len + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 *
	 * If the input IV is 12 bytes, append an explicit 4-byte
	 * counter of 1.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	}
	if (s->blkcipher.iv_len == 12)
		*(uint32_t *)&iv[12] = htobe32(1);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway (as the encrypt crd ends
	 * at the end of the input).  However, for decrypt the cipher
	 * ends before the tag in the AUTHENC case (and authstop is
	 * set to stop before the tag), but for GCM the cipher still
	 * runs to the end of the buffer.  Not sure if this is
	 * intentional or a firmware quirk, but it is required for
	 * working tag validation with GCM decryption.
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crda->crd_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, crda->crd_len, dst);
			dst += crda->crd_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the GMAC hash.
	 */
	return (error);
}
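
/*
 * Sketch (illustration) of the 12-byte IV handling described in
 * ccr_gcm() above: a 96-bit nonce is widened to the 16-byte J0 block
 * from the GCM spec by appending a big-endian counter initialized to
 * 1.  'nonce' here is a placeholder variable, not driver state.
 */
#if 0
	char j0[AES_BLOCK_LEN];

	memcpy(j0, nonce, 12);			/* 96-bit IV */
	*(uint32_t *)&j0[12] = htobe32(1);	/* counter = 1 */
#endif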

/*
 * Handle a GCM request that is not supported by the crypto engine by
 * performing the operation in software.  Derived from swcr_authenc().
 */
static void
ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	struct auth_hash *axf;
	struct enc_xform *exf;
	union authctx *auth_ctx;
	uint8_t *kschedule;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int error, i, len;

	auth_ctx = NULL;
	kschedule = NULL;

	/* Initialize the MAC. */
	switch (s->blkcipher.key_len) {
	case 16:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 24:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 32:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
	if (auth_ctx == NULL) {
		error = ENOMEM;
		goto out;
	}
	axf->Init(auth_ctx);
	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);

	/* Initialize the cipher. */
	exf = &enc_xform_aes_nist_gcm;
	error = exf->setkey(&kschedule, s->blkcipher.enckey,
	    s->blkcipher.key_len);
	if (error)
		goto out;

	/*
	 * This assumes a 12-byte IV from the crp.  See longer comment
	 * above in ccr_gcm() for more details.
	 */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			arc4rand(iv, 12, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	}
	*(uint32_t *)&iv[12] = htobe32(1);

	axf->Reinit(auth_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
		len = imin(crda->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		axf->Update(auth_ctx, block, sizeof(block));
	}

	exf->reinit(kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crde->crd_len; i += sizeof(block)) {
		len = imin(crde->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(kschedule, block);
			axf->Update(auth_ctx, block, len);
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_skip + i, len, block);
		} else
			axf->Update(auth_ctx, block, len);
	}

	/* Length block. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
	((uint32_t *)block)[3] = htobe32(crde->crd_len * 8);
	axf->Update(auth_ctx, block, sizeof(block));

	/* Finalize MAC. */
	axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		error = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			for (i = 0; i < crde->crd_len; i += sizeof(block)) {
				len = imin(crde->crd_len - i, sizeof(block));
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    crde->crd_skip + i, len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block);
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    crde->crd_skip + i, len, block);
			}
		} else
			error = EBADMSG;
	}

out:
	if (kschedule != NULL)
		exf->zerokey(&kschedule);
	if (auth_ctx != NULL) {
		memset(auth_ctx, 0, axf->ctxsize);
		free(auth_ctx, M_CCR);
	}
	crp->crp_etype = error;
	crypto_done(crp);
}

static void
generate_ccm_b0(struct cryptodesc *crda, struct cryptodesc *crde,
    u_int hash_size_in_response, const char *iv, char *b0)
{
	u_int i, payload_len;

	/* NB: L is already set in the first byte of the IV. */
	memcpy(b0, iv, CCM_B0_SIZE);

	/* Set length of hash in bits 3 - 5. */
	b0[0] |= (((hash_size_in_response - 2) / 2) << 3);

	/* Store the payload length as a big-endian value. */
	payload_len = crde->crd_len;
	for (i = 0; i < iv[0]; i++) {
		b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
		payload_len >>= 8;
	}

	/*
	 * If there is AAD in the request, set bit 6 in the flags
	 * field and store the AAD length as a big-endian value at the
	 * start of block 1.  This only assumes a 16-bit AAD length
	 * since T6 doesn't support large AAD sizes.
	 */
	if (crda->crd_len != 0) {
		b0[0] |= (1 << 6);
		*(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crda->crd_len);
	}
}
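
/*
 * Worked example (illustration): for a 12-byte nonce, ccr_ccm() below
 * sets iv[0] = (15 - 12) - 1 = 2 (i.e. a 3-byte length field).  With a
 * 16-byte tag and AAD present, generate_ccm_b0() then produces
 *
 *   b0[0] = 0x02 | (((16 - 2) / 2) << 3) | (1 << 6) = 0x7a
 *
 * followed by the nonce and the payload length in the last 3 bytes.
 */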

static int
ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct ulptx_idata *idata;
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int aad_len, b0_len, hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle CCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crde->crd_len == 0)
		return (EMSGSIZE);

	/*
	 * AAD is only permitted before the cipher/plain text, not
	 * after.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EMSGSIZE);

	/*
	 * CCM always includes block 0 in the AAD before AAD from the
	 * request.
	 */
	b0_len = CCM_B0_SIZE;
	if (crda->crd_len != 0)
		b0_len += CCM_AAD_FIELD_SIZE;
	aad_len = b0_len + crda->crd_len;

	/*
	 * Always assume a 12 byte input IV for now since that is what
	 * OCF always generates.  The full IV in the work request is
	 * 16 bytes.
	 */
	iv_len = AES_BLOCK_LEN;

	if (iv_len + aad_len > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->ccm_mac.hash_len;
	if (crde->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + aad_len + crde->crd_len + hash_size_in_response >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + aad_len + crde->crd_len > MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    aad_len);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
	    crde->crd_len);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crda->crd_inject, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of two copies of
	 * the AES key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, AAD (including block
	 * 0), and then the cipher/plain text.  For decryption
	 * requests the hash is appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = aad_len + crde->crd_len;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		/* Block 0 is passed as immediate data. */
		imm_len = b0_len;

		sglist_reset(sc->sg_ulptx);
		if (crda->crd_len != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_skip, crda->crd_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crde->crd_skip, crde->crd_len);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_inject, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	aad_start = iv_len + 1;
	aad_stop = aad_start + aad_len - 1;
	cipher_start = aad_stop + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the nonce from the request or generate a random one if
	 * none is provided.  Use the nonce to generate the full IV
	 * with the counter set to 0.
	 */
	memset(iv, 0, iv_len);
	iv[0] = (15 - AES_CCM_IV_LEN) - 1;
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv + 1, crde->crd_iv, AES_CCM_IV_LEN);
		else
			arc4rand(iv + 1, AES_CCM_IV_LEN, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, AES_CCM_IV_LEN, iv + 1);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv + 1, crde->crd_iv, AES_CCM_IV_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, AES_CCM_IV_LEN, iv + 1);
	}

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  See comments above
	 * in ccr_gcm().
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16),
	    s->blkcipher.enckey, s->blkcipher.key_len);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	generate_ccm_b0(crda, crde, hash_size_in_response, iv, dst);
	if (sgl_nsegs == 0) {
		dst += b0_len;
		if (crda->crd_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, crda->crd_len, dst);
			dst += crda->crd_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else {
		dst += CCM_B0_SIZE;
		if (b0_len > CCM_B0_SIZE) {
			/*
			 * If there is AAD, insert padding including a
			 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL
			 * is 16-byte aligned.
			 */
			KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE,
			    ("b0_len mismatch"));
			memset(dst + CCM_AAD_FIELD_SIZE, 0,
			    8 - CCM_AAD_FIELD_SIZE);
			idata = (void *)(dst + 8);
			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
			idata->len = htobe32(0);
			dst = (void *)(idata + 1);
		}
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
	}

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the CBC MAC
	 * hash.
	 */
	return (error);
}
1915 * Handle a CCM request that is not supported by the crypto engine by
1916 * performing the operation in software. Derived from swcr_authenc().
1919 ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp,
1920 struct cryptodesc *crda, struct cryptodesc *crde)
1922 struct auth_hash *axf;
1923 struct enc_xform *exf;
1924 union authctx *auth_ctx;
1926 char block[CCM_CBC_BLOCK_LEN];
1927 char digest[AES_CBC_MAC_HASH_LEN];
1928 char iv[AES_CCM_IV_LEN];
1934 /* Initialize the MAC. */
1935 switch (s->blkcipher.key_len) {
1937 axf = &auth_hash_ccm_cbc_mac_128;
1940 axf = &auth_hash_ccm_cbc_mac_192;
1943 axf = &auth_hash_ccm_cbc_mac_256;
1949 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
1950 if (auth_ctx == NULL) {
1954 axf->Init(auth_ctx);
1955 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
1957 /* Initialize the cipher. */
1958 exf = &enc_xform_ccm;
1959 error = exf->setkey(&kschedule, s->blkcipher.enckey,
1960 s->blkcipher.key_len);
1964 if (crde->crd_flags & CRD_F_ENCRYPT) {
1965 if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1966 memcpy(iv, crde->crd_iv, AES_CCM_IV_LEN);
1968 arc4rand(iv, AES_CCM_IV_LEN, 0);
1969 if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1970 crypto_copyback(crp->crp_flags, crp->crp_buf,
1971 crde->crd_inject, AES_CCM_IV_LEN, iv);
1973 if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1974 memcpy(iv, crde->crd_iv, AES_CCM_IV_LEN);
1976 crypto_copydata(crp->crp_flags, crp->crp_buf,
1977 crde->crd_inject, AES_CCM_IV_LEN, iv);
1980 auth_ctx->aes_cbc_mac_ctx.authDataLength = crda->crd_len;
1981 auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crde->crd_len;
1982 axf->Reinit(auth_ctx, iv, sizeof(iv));
1985 for (i = 0; i < crda->crd_len; i += sizeof(block)) {
1986 len = imin(crda->crd_len - i, sizeof(block));
1987 crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
1989 bzero(block + len, sizeof(block) - len);
1990 axf->Update(auth_ctx, block, sizeof(block));
1993 exf->reinit(kschedule, iv);
1995 /* Do encryption/decryption with MAC */
1996 for (i = 0; i < crde->crd_len; i += sizeof(block)) {
1997 len = imin(crde->crd_len - i, sizeof(block));
1998 crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
2000 bzero(block + len, sizeof(block) - len);
2001 if (crde->crd_flags & CRD_F_ENCRYPT) {
2002 axf->Update(auth_ctx, block, len);
2003 exf->encrypt(kschedule, block);
2004 crypto_copyback(crp->crp_flags, crp->crp_buf,
2005 crde->crd_skip + i, len, block);
2007 exf->decrypt(kschedule, block);
2008 axf->Update(auth_ctx, block, len);
2013 axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		error = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];	/* same size as CBC-MAC digest */

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			exf->reinit(kschedule, iv);
			for (i = 0; i < crde->crd_len; i += sizeof(block)) {
				len = imin(crde->crd_len - i, sizeof(block));
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    crde->crd_skip + i, len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block);
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    crde->crd_skip + i, len, block);
			}
		} else
			error = EBADMSG;
	}

out:
	if (kschedule != NULL)
		exf->zerokey(&kschedule);
	if (auth_ctx != NULL) {
		memset(auth_ctx, 0, axf->ctxsize);
		free(auth_ctx, M_CCR);
	}
	crp->crp_etype = error;
	crypto_done(crp);
}
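
/*
 * Note: this software fallback is invoked from ccr_process(), which
 * falls back to it when ccr_ccm() returns EMSGSIZE, i.e. when the
 * request is too large to fit in a single work request.
 */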

static void
ccr_identify(driver_t *driver, device_t parent)
{
	struct adapter *sc;

	sc = device_get_softc(parent);
	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
	    device_find_child(parent, "ccr", -1) == NULL)
		device_add_child(parent, "ccr", -1);
}

static int
ccr_probe(device_t dev)
{

	device_set_desc(dev, "Chelsio Crypto Accelerator");
	return (BUS_PROBE_DEFAULT);
}

static void
ccr_sysctls(struct ccr_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);

	/*
	 * dev.ccr.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * dev.ccr.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
	    &sc->stats_hash, 0, "Hash requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
	    &sc->stats_hmac, 0, "HMAC requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_encrypt, 0,
	    "Cipher encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_decrypt, 0,
	    "Cipher decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
	    &sc->stats_authenc_encrypt, 0,
	    "Combined AES+HMAC encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
	    &sc->stats_authenc_decrypt, 0,
	    "Combined AES+HMAC decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD,
	    &sc->stats_ccm_encrypt, 0, "AES-CCM encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD,
	    &sc->stats_ccm_decrypt, 0, "AES-CCM decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
	    &sc->stats_inflight, 0, "Requests currently pending");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
	    &sc->stats_mac_error, 0, "MAC errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
	    &sc->stats_pad_error, 0, "Padding errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
	    &sc->stats_sglist_error, 0,
	    "Requests for which DMA mapping failed");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
	    &sc->stats_process_error, 0, "Requests failed during queueing");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
	    &sc->stats_sw_fallback, 0,
	    "Requests processed by falling back to software");
}

static int
ccr_attach(device_t dev)
{
	struct ccr_softc *sc;
	int32_t cid;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->adapter = device_get_softc(device_get_parent(dev));
	sc->txq = &sc->adapter->sge.ctrlq[0];
	sc->rxq = &sc->adapter->sge.rxq[0];
	cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
	    CRYPTOCAP_F_HARDWARE);
	if (cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}
	sc->cid = cid;
	sc->adapter->ccr_softc = sc;

	sc->tx_channel_id = 0;

	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
	ccr_sysctls(sc);

	crypto_register(cid, CRYPTO_SHA1, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_224, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_256, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_384, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_512, 0, 0);
	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_224_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
	crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
	crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
	crypto_register(cid, CRYPTO_AES_CCM_16, 0, 0);
	crypto_register(cid, CRYPTO_AES_CCM_CBC_MAC, 0, 0);
	return (0);
}

static int
ccr_detach(device_t dev)
{
	struct ccr_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);

	mtx_destroy(&sc->lock);
	sglist_free(sc->sg_iv_aad);
	free(sc->iv_aad_buf, M_CCR);
	sglist_free(sc->sg_dsgl);
	sglist_free(sc->sg_ulptx);
	sglist_free(sc->sg_crp);
	sc->adapter->ccr_softc = NULL;
	return (0);
}

static void
ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
{
	uint32_t *u32;
	uint64_t *u64;
	u_int i;

	u32 = (uint32_t *)dst;
	u64 = (uint64_t *)dst;
	switch (cri_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		for (i = 0; i < SHA1_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha224ctx.state[i]);
		break;
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
		break;
	}
}
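
/*
 * The byte swaps above assume the hardware consumes the running hash
 * state in big-endian form, which matches the word ordering the SHA
 * algorithms themselves use when serializing a digest.
 */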

static void
ccr_init_hash_digest(struct ccr_session *s, int cri_alg)
{
	union authctx auth_ctx;
	struct auth_hash *axf;

	axf = s->hmac.auth_hash;
	axf->Init(&auth_ctx);
	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);
}

static void
ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
    int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	klen /= 8;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}

	/*
	 * Hash the raw ipad and opad and store the partial results in
	 * ipad and opad.
	 */
	axf->Init(&auth_ctx);
	axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize);
	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);

	axf->Init(&auth_ctx);
	axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
	ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx);
}
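
/*
 * This is the standard HMAC precomputation trick: since
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)),
 *
 * the compression-function states after absorbing the single blocks
 * (K ^ ipad) and (K ^ opad) depend only on the key.  They are computed
 * once per session here so the hardware only has to hash the
 * per-request payload.
 */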

/*
 * Borrowed from AES_GMAC_Setkey().
 */
static void
ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen)
{
	static char zeroes[GMAC_BLOCK_LEN];
	uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
	int rounds;

	rounds = rijndaelKeySetupEnc(keysched, key, klen);
	rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
}
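
/*
 * The GHASH hash subkey computed above is H = E_K(0^128), the
 * all-zeroes block encrypted under the AES key, as specified for GCM
 * in NIST SP 800-38D.
 */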

static int
ccr_aes_check_keylen(int alg, int klen)
{

	switch (klen) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
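
/*
 * Key lengths here are in bits (cri_klen).  An XTS key concatenates
 * two AES keys, so 256 bits is valid both as plain AES-256 and as
 * AES-128-XTS, while 512 bits is only valid for AES-256-XTS and 128
 * or 192 bits only for plain AES.
 */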

static void
ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
{
	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
	unsigned int opad_present;

	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;
	switch (kbits) {
	case 128:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		break;
	case 192:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		break;
	case 256:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
	switch (alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_XTS:
		t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
		break;
	}

	kctx_len = roundup2(s->blkcipher.key_len, 16);
	switch (s->mode) {
	case AUTHENC:
		mk_size = s->hmac.mk_size;
		opad_present = 1;
		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
		kctx_len += iopad_size * 2;
		break;
	case GCM:
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		opad_present = 0;
		kctx_len += GMAC_BLOCK_LEN;
		break;
	case CCM:
		switch (kbits) {
		case 128:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
			break;
		case 192:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
			break;
		case 256:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
			break;
		default:
			panic("should not get here");
		}
		opad_present = 0;
		kctx_len *= 2;
		break;
	default:
		mk_size = CHCR_KEYCTX_NO_KEY;
		opad_present = 0;
		break;
	}
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}
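
/*
 * Worked example (a sketch, assuming sizeof(struct _key_ctx) is one
 * 16-byte flit): for an AES-128 + HMAC-SHA-256 AUTHENC session,
 *
 *	kctx_len = roundup2(16, 16) + 2 * roundup2(32, 16) = 80
 *	kctx_flits = (16 + 80) / 16 = 6
 *
 * so the key context header advertises six 16-byte flits of context.
 */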

static int
ccr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
	unsigned int partial_digest_len;
	int error;
	bool gcm_hash, hmac;

	if (cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	hmac = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	auth_mode = SCMD_AUTH_MODE_NOP;
	cipher_mode = SCMD_CIPH_MODE_NOP;
	iv_len = 0;
	mk_size = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1:
		case CRYPTO_SHA2_224:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_384:
		case CRYPTO_SHA2_512:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1:
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = SCMD_AUTH_MODE_SHA1;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_224:
			case CRYPTO_SHA2_224_HMAC:
				auth_hash = &auth_hash_hmac_sha2_224;
				auth_mode = SCMD_AUTH_MODE_SHA224;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_256:
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = SCMD_AUTH_MODE_SHA256;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384:
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = SCMD_AUTH_MODE_SHA512_384;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512:
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = SCMD_AUTH_MODE_SHA512_512;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
				auth_mode = SCMD_AUTH_MODE_GHASH;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
				break;
			case CRYPTO_AES_CCM_CBC_MAC:
				auth_mode = SCMD_AUTH_MODE_CBCMAC;
				break;
			}
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
			case CRYPTO_SHA2_224_HMAC:
			case CRYPTO_SHA2_256_HMAC:
			case CRYPTO_SHA2_384_HMAC:
			case CRYPTO_SHA2_512_HMAC:
				hmac = true;
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CCM_16:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = SCMD_CIPH_MODE_AES_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = SCMD_CIPH_MODE_AES_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = SCMD_CIPH_MODE_AES_GCM;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = SCMD_CIPH_MODE_AES_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_CCM_16:
				cipher_mode = SCMD_CIPH_MODE_AES_CCM;
				iv_len = AES_CCM_IV_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccr_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	if (gcm_hash != (cipher_mode == SCMD_CIPH_MODE_AES_GCM))
		return (EINVAL);
	if ((auth_mode == SCMD_AUTH_MODE_CBCMAC) !=
	    (cipher_mode == SCMD_CIPH_MODE_AES_CCM))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL) {
		if (hmac || gcm_hash || auth_mode == SCMD_AUTH_MODE_CBCMAC) {
			if (hash->cri_key == NULL)
				return (EINVAL);
		} else {
			if (hash->cri_key != NULL)
				return (EINVAL);
		}
	}

	sc = device_get_softc(dev);

	/*
	 * XXX: Don't create a session if the queues aren't
	 * initialized.  This is racy as the rxq can be destroyed by
	 * the associated VI detaching.  Eventually ccr should use
	 * dedicated queues.
	 */
	if (sc->rxq->iq.adapter == NULL || sc->txq->adapter == NULL)
		return (ENXIO);

	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}

	s = crypto_get_driver_session(cses);

	if (gcm_hash)
		s->mode = GCM;
	else if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
		s->mode = CCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL) {
		if (hmac)
			s->mode = HMAC;
		else
			s->mode = HASH;
	} else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
		ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
	} else if (auth_mode == SCMD_AUTH_MODE_CBCMAC) {
		if (hash->cri_mlen == 0)
			s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
		else
			s->ccm_mac.hash_len = hash->cri_mlen;
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.mk_size = mk_size;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		if (hmac)
			ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
			    hash->cri_klen);
		else
			ccr_init_hash_digest(s, hash->cri_alg);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);
	return (0);
}
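
/*
 * Usage sketch (hypothetical consumer code, not part of this driver):
 * an OCF client creates an AES-GCM session by chaining the cipher and
 * GMAC cryptoini entries before calling crypto_newsession(), e.g.:
 *
 *	struct cryptoini cria, crie;
 *	crypto_session_t cses;
 *
 *	memset(&crie, 0, sizeof(crie));
 *	memset(&cria, 0, sizeof(cria));
 *	crie.cri_alg = CRYPTO_AES_NIST_GCM_16;
 *	crie.cri_klen = 128;
 *	crie.cri_key = key;
 *	crie.cri_next = &cria;
 *	cria.cri_alg = CRYPTO_AES_128_NIST_GMAC;
 *	cria.cri_klen = 128;
 *	cria.cri_key = key;
 *	error = crypto_newsession(&cses, &crie, CRYPTOCAP_F_HARDWARE);
 */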

static void
ccr_freesession(device_t dev, crypto_session_t cses)
{
	struct ccr_softc *sc;
	struct ccr_session *s;

	sc = device_get_softc(dev);
	s = crypto_get_driver_session(cses);
	mtx_lock(&sc->lock);
	if (s->pending != 0)
		device_printf(dev,
		    "session %p freed with %d pending requests\n", s,
		    s->pending);
	s->active = false;
	mtx_unlock(&sc->lock);
}

static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct cryptodesc *crd, *crda, *crde;
	int error;

	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	s = crypto_get_driver_session(crp->crp_session);
	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	error = ccr_populate_sglist(sc->sg_crp, crp);
	if (error) {
		sc->stats_sglist_error++;
		goto out;
	}

	switch (s->mode) {
	case HASH:
		error = ccr_hash(sc, s, crp);
		if (error == 0)
			sc->stats_hash++;
		break;
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccr_hash(sc, s, crp);
		if (error == 0)
			sc->stats_hmac++;
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccr_blkcipher(sc, s, crp);
		if (error == 0) {
			if (crd->crd_flags & CRD_F_ENCRYPT)
				sc->stats_blkcipher_encrypt++;
			else
				sc->stats_blkcipher_decrypt++;
		}
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			break;
		default:
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			break;
		}
		if (error)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccr_authenc(sc, s, crp, crda, crde);
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_authenc_encrypt++;
			else
				sc->stats_authenc_decrypt++;
		}
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
		} else {
			crda = crd;
			crde = crd->crd_next;
		}
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		if (crde->crd_len == 0) {
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccr_gcm(sc, s, crp, crda, crde);
		if (error == EMSGSIZE) {
			sc->stats_sw_fallback++;
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_gcm_encrypt++;
			else
				sc->stats_gcm_decrypt++;
		}
		break;
	case CCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_CCM_16) {
			crde = crd;
			crda = crd->crd_next;
		} else {
			crda = crd;
			crde = crd->crd_next;
		}
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccr_ccm(sc, s, crp, crda, crde);
		if (error == EMSGSIZE) {
			sc->stats_sw_fallback++;
			mtx_unlock(&sc->lock);
			ccr_ccm_soft(s, crp, crda, crde);
			return (0);
		}
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_ccm_encrypt++;
			else
				sc->stats_ccm_decrypt++;
		}
		break;
	}

	if (error == 0) {
		s->pending++;
		sc->stats_inflight++;
	} else
		sc->stats_process_error++;

out:
	mtx_unlock(&sc->lock);

	if (error) {
		crp->crp_etype = error;
		crypto_done(crp);
	}

	return (0);
}

static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct ccr_softc *sc = iq->adapter->ccr_softc;
	struct ccr_session *s;
	const struct cpl_fw6_pld *cpl;
	struct cryptop *crp;
	uint64_t status;
	int error;

	if (m != NULL)
		cpl = mtod(m, const void *);
	else
		cpl = (const void *)(rss + 1);

	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
	s = crypto_get_driver_session(crp->crp_session);
	status = be64toh(cpl->data[0]);
	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
		error = EBADMSG;
	else
		error = 0;

	mtx_lock(&sc->lock);
	s->pending--;
	sc->stats_inflight--;

	switch (s->mode) {
	case HASH:
	case HMAC:
		error = ccr_hash_done(sc, s, crp, cpl, error);
		break;
	case BLKCIPHER:
		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
		break;
	case AUTHENC:
		error = ccr_authenc_done(sc, s, crp, cpl, error);
		break;
	case GCM:
		error = ccr_gcm_done(sc, s, crp, cpl, error);
		break;
	case CCM:
		error = ccr_ccm_done(sc, s, crp, cpl, error);
		break;
	}

	if (error == EBADMSG) {
		if (CHK_MAC_ERR_BIT(status))
			sc->stats_mac_error++;
		if (CHK_PAD_ERR_BIT(status))
			sc->stats_pad_error++;
	}
	mtx_unlock(&sc->lock);
	crp->crp_etype = error;
	crypto_done(crp);
	m_freem(m);
	return (0);
}
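
/*
 * Note: cpl->data[1] carries the cryptop pointer that was embedded in
 * the work request as a completion cookie when the request was
 * constructed, which is how each firmware reply is matched back to its
 * originating request above.
 */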

static int
ccr_modevent(module_t mod, int cmd, void *arg)
{

	switch (cmd) {
	case MOD_LOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
		return (0);
	case MOD_UNLOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static device_method_t ccr_methods[] = {
	DEVMETHOD(device_identify,	ccr_identify),
	DEVMETHOD(device_probe,		ccr_probe),
	DEVMETHOD(device_attach,	ccr_attach),
	DEVMETHOD(device_detach,	ccr_detach),

	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
	DEVMETHOD(cryptodev_freesession, ccr_freesession),
	DEVMETHOD(cryptodev_process,	ccr_process),

	DEVMETHOD_END
};

static driver_t ccr_driver = {
	"ccr",
	ccr_methods,
	sizeof(struct ccr_softc)
};

static devclass_t ccr_devclass;

DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);