/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"
/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 */
/*
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC request with
 * +-------------------------------+        'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */
/*
 * The crypto engine supports a maximum AAD size of 511 bytes.
 */
#define	MAX_AAD_LEN		511

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
 * entries.  While the CPL includes a 16-bit length field, the T6 can
 * sometimes hang if an error occurs while processing a request with a
 * single DSGL entry larger than 2k.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		2048

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Longer requests either result in hung
 * requests or incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
struct ccr_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
	char opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_blkcipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_session {
	bool active;
	int pending;
	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
	};
	struct ccr_session_blkcipher blkcipher;
};

struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	uint32_t cid;
	int tx_channel_id;
	struct mtx lock;
	bool detaching;
	struct sge_wrq *txq;
	struct sge_rxq *rxq;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_crp' contains an sglist describing the entire buffer
	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
	 * the data the engine should DMA as input via ULPTX_SGL.
	 * 'sg_dsgl' is used to describe the destination that cipher
	 * text and a tag should be written to.
	 */
	struct sglist *sg_crp;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;

	/*
	 * Pre-allocate a dummy output buffer for the IV and AAD for
	 * AEAD requests.
	 */
	char *iv_aad_buf;
	struct sglist *sg_iv_aad;

	/* Statistics. */
	uint64_t stats_blkcipher_encrypt;
	uint64_t stats_blkcipher_decrypt;
	uint64_t stats_hmac;
	uint64_t stats_authenc_encrypt;
	uint64_t stats_authenc_decrypt;
	uint64_t stats_gcm_encrypt;
	uint64_t stats_gcm_decrypt;
	uint64_t stats_wr_nomem;
	uint64_t stats_inflight;
	uint64_t stats_mac_error;
	uint64_t stats_pad_error;
	uint64_t stats_bad_session;
	uint64_t stats_sglist_error;
	uint64_t stats_process_error;
	uint64_t stats_sw_fallback;
};
/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the crd_skip bytes at the start of the data as well as
 * any AAD or IV.  For authenticated encryption requests it should
 * also include the destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffer described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers the entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */
static int
ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
	else
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
	return (error);
}
/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}
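/*
 * For example, with maxsegsize == DSGL_SGE_MAXLEN (2048), a single
 * 5000-byte sglist segment counts as howmany(5000, 2048) == 3 entries.
 */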
/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}
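/*
 * Each 'struct phys_sge_pairs' packs 8 entries (8 16-bit lengths and
 * 8 64-bit addresses).  A partial trailing group still consumes all 8
 * length slots but only an even number of address slots, hence the
 * roundup2() above.  For example, nsegs == 10 is one full group plus
 * 8 lengths and 2 address slots for the 2 leftover entries.
 */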
static void
ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = sc->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}
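/*
 * Note that the do/while loop above splits segments larger than
 * DSGL_SGE_MAXLEN exactly the way ccr_count_sgl() counts them, so the
 * number of entries written always matches the NOOFSGENTR value in
 * the CPL header; the MPASS() asserts this invariant.
 */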
/* These functions deal with the ULPTX_SGL for input payload. */
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}
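/*
 * The first segment is stored in the ulptx_sgl header itself
 * (len0/addr0); each remaining pair of segments shares one
 * 'ulptx_sge_pair' (two 32-bit lengths plus two 64-bit addresses,
 * 24 bytes).  For example, nsegs == 3 needs sizeof(struct ulptx_sgl)
 * plus 24 bytes, rounded up to a 16-byte multiple.
 */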
static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = sc->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}
static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}
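/*
 * Immediate data is copied into the work request itself, which avoids
 * a separate DMA read of the payload at the cost of space in the
 * request; it is only possible when the headers plus the whole input
 * still fit within a single work request buffer.
 */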
static void
ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
    u_int wr_len, u_int imm_len, u_int sgl_len, u_int hash_size,
    struct cryptop *crp)
{
	u_int cctx_size;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = 0;
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1));
	crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) -
	    sizeof(struct ulptx_idata));
}
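/*
 * In the work request built above, the ULP_TX_SC_IMM sub-command
 * describes the rest of the request as immediate data; its MORE bit
 * is set only when there is no immediate payload, i.e. when the input
 * instead follows as a ULPTX_SGL.
 */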
static int
ccr_hmac(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	struct cryptodesc *crd;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len;

	crd = crp->crp_desc;

	/* Reject requests with too large of an input buffer. */
	if (crd->crd_len > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size * 2;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	if (crd->crd_len == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
		imm_len = crd->crd_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    hash_size_in_response, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
	    crd->crd_len);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len);
	memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad,
	    s->hmac.partial_digest_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
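	/*
	 * An empty payload is fed to the engine as one block that
	 * looks like SHA padding: a 0x80 marker byte with the block
	 * bit-length stored big-endian in the last 8 bytes (the rest
	 * of the block was already zeroed by the memset above).
	 */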
	if (crd->crd_len == 0) {
		dst[0] = 0x80;
		*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
		    htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
static int
ccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	struct cryptodesc *crd;

	crd = crp->crp_desc;
	if (error == 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
	}

	return (error);
}
static int
ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct cryptodesc *crd;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	crd = crp->crp_desc;

	if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
		return (EINVAL);
	if (crd->crd_alg == CRYPTO_AES_CBC &&
	    (crd->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with too large of an input buffer. */
	if (crd->crd_len > MAX_REQUEST_SIZE)
		return (EFBIG);

	if (crd->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
	    crd->crd_len);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	if (ccr_use_imm_data(transhdr_len, crd->crd_len +
	    s->blkcipher.iv_len)) {
		imm_len = crd->crd_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
	}

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crd->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, s->blkcipher.iv_len);
	dst += s->blkcipher.iv_len;
	if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}
/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (CHCR_SCMD_HMAC_CTRL_DIV2);
	return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC);
}
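/*
 * As the CHCR constant names suggest, the 10- and 12-byte cases
 * correspond to the common truncated digests: TLS truncated HMAC
 * (RFC 4366) and IPsec's 96-bit authenticator, respectively.
 */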
static int
ccr_authenc(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size;
	u_int aad_start, aad_len, aad_stop;
	u_int auth_start, auth_stop, auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
		return (EINVAL);
	if (crde->crd_alg == CRYPTO_AES_CBC &&
	    (crde->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/*
	 * Compute the length of the AAD (data covered by the
	 * authentication descriptor but not the encryption
	 * descriptor).  To simplify the logic, AAD is only permitted
	 * before the cipher/plain text, not after.  This is true of
	 * all currently-generated requests.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EINVAL);
	if (crda->crd_skip < crde->crd_skip) {
		if (crda->crd_skip + crda->crd_len > crde->crd_skip)
			aad_len = (crde->crd_skip - crda->crd_skip);
		else
			aad_len = crda->crd_len;
	} else
		aad_len = 0;
	if (aad_len + s->blkcipher.iv_len > MAX_AAD_LEN)
		return (EINVAL);
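	/*
	 * Example: with crda covering [crda_skip, crda_skip +
	 * crda_len) and crde starting at a larger crde_skip, the
	 * auth-only bytes in front of the cipher region, i.e.
	 * crde_skip - crda_skip of them, are treated as AAD.
	 */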
	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;
	if (crde->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (s->blkcipher.iv_len + aad_len + crde->crd_len +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (s->blkcipher.iv_len + aad_len + crde->crd_len >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
	    s->blkcipher.iv_len + aad_len);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
	    crde->crd_len);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crda->crd_inject, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = aad_len + crde->crd_len;

	/*
	 * The firmware hangs if sent a request which is only slightly
	 * smaller than MAX_REQUEST_SIZE.  In particular, the
	 * firmware appears to require 512 - 16 bytes of spare room
	 * along with the size of the hash even if the hash isn't
	 * included in the input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (aad_len != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_skip, aad_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crde->crd_skip, crde->crd_len);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_inject, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/*
	 * Any auth-only data before the cipher region is marked as AAD.
	 * Auth-data that overlaps with the cipher region is placed in
	 * the auth section.
	 */
	if (aad_len != 0) {
		aad_start = s->blkcipher.iv_len + 1;
		aad_stop = aad_start + aad_len - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = s->blkcipher.iv_len + aad_len + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (aad_len == crda->crd_len) {
		auth_start = 0;
		auth_stop = 0;
	} else {
		if (aad_len != 0)
			auth_start = cipher_start;
		else
			auth_start = s->blkcipher.iv_len + crda->crd_skip -
			    crde->crd_skip + 1;
		auth_stop = (crde->crd_skip + crde->crd_len) -
		    (crda->crd_skip + crda->crd_len) + cipher_stop;
	}
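	/*
	 * All of the start/stop values above are 1-based byte offsets
	 * into the input payload (which begins with the IV), with 0
	 * meaning a region is unused; cipher_stop and auth_stop count
	 * back from the end of the payload.
	 */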
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	}

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crde->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crde->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crde->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len);
	memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, s->blkcipher.iv_len);
	dst += s->blkcipher.iv_len;
	if (imm_len != 0) {
		if (aad_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, aad_len, dst);
			dst += aad_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
static int
ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{
	struct cryptodesc *crd;

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * For a decryption request, the hardware may do a verification
	 * of the HMAC which will fail if the existing HMAC isn't in the
	 * buffer.  If that happens, clear the error and copy the HMAC
	 * from the CPL reply into the buffer.
	 *
	 * For encryption requests, crd should be the cipher request
	 * which will have CRD_F_ENCRYPT set.  For decryption
	 * requests, crp_desc will be the HMAC request which should
	 * not have this flag set.
	 */
	crd = crp->crp_desc;
	if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
	    !(crd->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
		error = 0;
	}
	return (error);
}
static int
ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle GCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crde->crd_len == 0)
		return (EMSGSIZE);

	/*
	 * AAD is only permitted before the cipher/plain text, not
	 * after.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EMSGSIZE);

	if (crda->crd_len + AES_BLOCK_LEN > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->gmac.hash_len;
	if (crde->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The IV handling for GCM in OCF is a bit more complicated in
	 * that IPSec provides a full 16-byte IV (including the
	 * counter), whereas the /dev/crypto interface sometimes
	 * provides a full 16-byte IV (if no IV is provided in the
	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
	 *
	 * When provided a 12-byte IV, assume the IV is really 16 bytes
	 * with a counter in the last 4 bytes initialized to 1.
	 *
	 * While iv_len is checked below, the value is currently
	 * always set to 12 when creating a GCM session in this driver
	 * due to limitations in OCF (there is no way to know what the
	 * IV length of a given request will be).  This means that the
	 * driver always assumes a 12-byte IV for now.
	 */
	if (s->blkcipher.iv_len == 12)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crda->crd_len + crde->crd_len +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crda->crd_len + crde->crd_len > MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    crda->crd_len);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
	    crde->crd_len);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crda->crd_inject, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the Galois hash key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crda->crd_len + crde->crd_len;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crda->crd_len != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_skip, crda->crd_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crde->crd_skip, crde->crd_len);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_inject, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	if (crda->crd_len != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crda->crd_len - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crda->crd_len + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 *
	 * If the input IV is 12 bytes, append an explicit 4-byte
	 * counter of 1.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	}
	if (s->blkcipher.iv_len == 12)
		*(uint32_t *)&iv[12] = htobe32(1);
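	/*
	 * GCM defines the initial counter value for a 12-byte nonce
	 * to be 1, so the full 16-byte IV handed to the engine is the
	 * nonce with a big-endian 1 appended.
	 */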
	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway (as the encrypt crd ends
	 * at the end of the input).  However, for decrypt the cipher
	 * ends before the tag in the AUTHENC case (and authstop is
	 * set to stop before the tag), but for GCM the cipher still
	 * runs to the end of the buffer.  Not sure if this is
	 * intentional or a firmware quirk, but it is required for
	 * working tag validation with GCM decryption.
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crda->crd_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, crda->crd_len, dst);
			dst += crda->crd_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the GMAC hash.
	 */
	return (error);
}
/*
 * Handle a GCM request that is not supported by the crypto engine by
 * performing the operation in software.  Derived from swcr_authenc().
 */
static void
ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	struct auth_hash *axf;
	struct enc_xform *exf;
	union authctx *auth_ctx;
	uint8_t *kschedule;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int error, i, len;

	auth_ctx = NULL;
	kschedule = NULL;

	/* Initialize the MAC. */
	switch (s->blkcipher.key_len) {
	case 16:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 24:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 32:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
	if (auth_ctx == NULL) {
		error = ENOMEM;
		goto out;
	}
	axf->Init(auth_ctx);
	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);

	/* Initialize the cipher. */
	exf = &enc_xform_aes_nist_gcm;
	error = exf->setkey(&kschedule, s->blkcipher.enckey,
	    s->blkcipher.key_len);
	if (error)
		goto out;

	/*
	 * This assumes a 12-byte IV from the crp.  See longer comment
	 * above in ccr_gcm() for more details.
	 */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			arc4rand(iv, 12, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	}
	*(uint32_t *)&iv[12] = htobe32(1);

	axf->Reinit(auth_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
		len = imin(crda->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		axf->Update(auth_ctx, block, sizeof(block));
	}

	exf->reinit(kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crde->crd_len; i += sizeof(block)) {
		len = imin(crde->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(kschedule, block);
			axf->Update(auth_ctx, block, len);
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_skip + i, len, block);
		} else {
			axf->Update(auth_ctx, block, len);
		}
	}

	/* Length block. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
	((uint32_t *)block)[3] = htobe32(crde->crd_len * 8);
	axf->Update(auth_ctx, block, sizeof(block));
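	/*
	 * The final GHASH block packs the AAD length and the
	 * ciphertext length, in bits, as two 64-bit big-endian
	 * values; writing 32-bit words 1 and 3 fills in the low
	 * halves, which suffices since both lengths here are bounded
	 * well below 2^32 bits.
	 */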
	/* Finalize MAC. */
	axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		error = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			for (i = 0; i < crde->crd_len; i += sizeof(block)) {
				len = imin(crde->crd_len - i, sizeof(block));
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    crde->crd_skip + i, len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block);
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    crde->crd_skip + i, len, block);
			}
		} else
			error = EBADMSG;
	}

	exf->zerokey(&kschedule);
out:
	if (auth_ctx != NULL) {
		memset(auth_ctx, 0, axf->ctxsize);
		free(auth_ctx, M_CCR);
	}
	crp->crp_etype = error;
	crypto_done(crp);
}
static void
ccr_identify(driver_t *driver, device_t parent)
{
	struct adapter *sc;

	sc = device_get_softc(parent);
	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
	    device_find_child(parent, "ccr", -1) == NULL)
		device_add_child(parent, "ccr", -1);
}

static int
ccr_probe(device_t dev)
{

	device_set_desc(dev, "Chelsio Crypto Accelerator");
	return (BUS_PROBE_DEFAULT);
}
static void
ccr_sysctls(struct ccr_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);

	/*
	 * dev.ccr.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * dev.ccr.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
	    &sc->stats_hmac, 0, "HMAC requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_encrypt, 0,
	    "Cipher encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_decrypt, 0,
	    "Cipher decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
	    &sc->stats_authenc_encrypt, 0,
	    "Combined AES+HMAC encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
	    &sc->stats_authenc_decrypt, 0,
	    "Combined AES+HMAC decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
	    &sc->stats_inflight, 0, "Requests currently pending");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
	    &sc->stats_mac_error, 0, "MAC errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
	    &sc->stats_pad_error, 0, "Padding errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
	    &sc->stats_sglist_error, 0,
	    "Requests for which DMA mapping failed");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
	    &sc->stats_process_error, 0, "Requests failed during queueing");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
	    &sc->stats_sw_fallback, 0,
	    "Requests processed by falling back to software");
}
static int
ccr_attach(device_t dev)
{
	struct ccr_softc *sc;
	int32_t cid;

	/*
	 * TODO: Crypto requests will panic if the parent device isn't
	 * initialized so that the queues are up and running.  Need to
	 * figure out how to handle that correctly, maybe just reject
	 * requests if the adapter isn't fully initialized?
	 */
	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->adapter = device_get_softc(device_get_parent(dev));
	sc->txq = &sc->adapter->sge.ctrlq[0];
	sc->rxq = &sc->adapter->sge.rxq[0];
	cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
	    CRYPTOCAP_F_HARDWARE);
	if (cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}
	sc->cid = cid;
	sc->adapter->ccr_softc = sc;

	sc->tx_channel_id = 0;

	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
	ccr_sysctls(sc);

	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
	crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
	crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
	return (0);
}
static int
ccr_detach(device_t dev)
{
	struct ccr_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);

	mtx_destroy(&sc->lock);
	sglist_free(sc->sg_iv_aad);
	free(sc->iv_aad_buf, M_CCR);
	sglist_free(sc->sg_dsgl);
	sglist_free(sc->sg_ulptx);
	sglist_free(sc->sg_crp);
	sc->adapter->ccr_softc = NULL;
	return (0);
}
static void
ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
{
	uint32_t *u32;
	uint64_t *u64;
	u_int i;

	u32 = (uint32_t *)dst;
	u64 = (uint64_t *)dst;
	switch (cri_alg) {
	case CRYPTO_SHA1_HMAC:
		for (i = 0; i < SHA1_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_256_HMAC:
		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
		break;
	}
}
static void
ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
    int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	klen /= 8;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}

	/*
	 * Hash the raw ipad and opad and store the partial result in
	 * ipad and opad.
	 */
	axf->Init(&auth_ctx);
	axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize);
	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);

	axf->Init(&auth_ctx);
	axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
	ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx);
}
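/*
 * Storing only these partial hashes (the compression-function state
 * after one block of ipad or opad) lets the engine resume the inner
 * and outer hashes and finish an HMAC without ever seeing the raw key.
 */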
/*
 * Borrowed from AES_GMAC_Setkey().
 */
static void
ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen)
{
	static char zeroes[GMAC_BLOCK_LEN];
	uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
	int rounds;

	rounds = rijndaelKeySetupEnc(keysched, key, klen);
	rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
}
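/*
 * The GHASH subkey H is defined as the encryption of the all-zeroes
 * block under the AES key, which is exactly what the rijndaelEncrypt()
 * call above computes into s->gmac.ghash_h.
 */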
static int
ccr_aes_check_keylen(int alg, int klen)
{

	switch (klen) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
static void
ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
{
	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
	unsigned int opad_present;

	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;
	switch (kbits) {
	case 128:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		break;
	case 192:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		break;
	case 256:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
	switch (alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_XTS:
		t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
		break;
	}

	kctx_len = roundup2(s->blkcipher.key_len, 16);
	switch (s->mode) {
	case AUTHENC:
		mk_size = s->hmac.mk_size;
		opad_present = 1;
		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
		kctx_len += iopad_size * 2;
		break;
	case GCM:
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		opad_present = 0;
		kctx_len += GMAC_BLOCK_LEN;
		break;
	default:
		mk_size = CHCR_KEYCTX_NO_KEY;
		opad_present = 0;
		break;
	}
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}
static int
ccr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
	unsigned int partial_digest_len;
	int error;
	bool gcm_hash;

	if (cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP;
	iv_len = 0;
	mk_size = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccr_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}

	s = crypto_get_driver_session(cses);

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
		ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.mk_size = mk_size;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);
	return (0);
}
static void
ccr_freesession(device_t dev, crypto_session_t cses)
{
	struct ccr_softc *sc;
	struct ccr_session *s;

	sc = device_get_softc(dev);
	s = crypto_get_driver_session(cses);
	mtx_lock(&sc->lock);
	if (s->pending != 0)
		device_printf(dev,
		    "session %p freed with %d pending requests\n", s,
		    s->pending);
	s->active = false;
	mtx_unlock(&sc->lock);
}
static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct cryptodesc *crd, *crda, *crde;
	int error;

	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	s = crypto_get_driver_session(crp->crp_session);
	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	error = ccr_populate_sglist(sc->sg_crp, crp);
	if (error) {
		sc->stats_sglist_error++;
		goto out;
	}

	switch (s->mode) {
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccr_hmac(sc, s, crp);
		if (error == 0)
			sc->stats_hmac++;
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccr_blkcipher(sc, s, crp);
		if (error == 0) {
			if (crd->crd_flags & CRD_F_ENCRYPT)
				sc->stats_blkcipher_encrypt++;
			else
				sc->stats_blkcipher_decrypt++;
		}
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			break;
		default:
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			break;
		}
		if (error)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccr_authenc(sc, s, crp, crda, crde);
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_authenc_encrypt++;
			else
				sc->stats_authenc_decrypt++;
		}
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
		} else {
			crda = crd;
			crde = crd->crd_next;
		}
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		if (crde->crd_len == 0) {
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccr_gcm(sc, s, crp, crda, crde);
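		/*
		 * ccr_gcm() returns EMSGSIZE for requests the engine
		 * cannot handle (e.g. empty payloads or oversized
		 * AAD); that return value is used below as the
		 * trigger to fall back to the software path.
		 */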
		if (error == EMSGSIZE) {
			sc->stats_sw_fallback++;
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_gcm_encrypt++;
			else
				sc->stats_gcm_decrypt++;
		}
		break;
	}

	if (error == 0) {
		s->pending++;
		sc->stats_inflight++;
	} else
		sc->stats_process_error++;

out:
	mtx_unlock(&sc->lock);

	if (error) {
		crp->crp_etype = error;
		crypto_done(crp);
	}

	return (0);
}
static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct ccr_softc *sc = iq->adapter->ccr_softc;
	struct ccr_session *s;
	const struct cpl_fw6_pld *cpl;
	struct cryptop *crp;
	uint64_t status;
	int error;

	if (m != NULL)
		cpl = mtod(m, const void *);
	else
		cpl = (const void *)(rss + 1);

	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
	s = crypto_get_driver_session(crp->crp_session);
	status = be64toh(cpl->data[0]);
	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
		error = EBADMSG;
	else
		error = 0;

	mtx_lock(&sc->lock);
	s->pending--;
	sc->stats_inflight--;

	switch (s->mode) {
	case HMAC:
		error = ccr_hmac_done(sc, s, crp, cpl, error);
		break;
	case BLKCIPHER:
		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
		break;
	case AUTHENC:
		error = ccr_authenc_done(sc, s, crp, cpl, error);
		break;
	case GCM:
		error = ccr_gcm_done(sc, s, crp, cpl, error);
		break;
	}

	if (error == EBADMSG) {
		if (CHK_MAC_ERR_BIT(status))
			sc->stats_mac_error++;
		if (CHK_PAD_ERR_BIT(status))
			sc->stats_pad_error++;
	}
	mtx_unlock(&sc->lock);
	crp->crp_etype = error;
	crypto_done(crp);
	m_freem(m);
	return (0);
}
static int
ccr_modevent(module_t mod, int cmd, void *arg)
{

	switch (cmd) {
	case MOD_LOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
		return (0);
	case MOD_UNLOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}
static device_method_t ccr_methods[] = {
	DEVMETHOD(device_identify,	ccr_identify),
	DEVMETHOD(device_probe,		ccr_probe),
	DEVMETHOD(device_attach,	ccr_attach),
	DEVMETHOD(device_detach,	ccr_detach),

	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
	DEVMETHOD(cryptodev_freesession, ccr_freesession),
	DEVMETHOD(cryptodev_process,	ccr_process),

	DEVMETHOD_END
};

static driver_t ccr_driver = {
	"ccr",
	ccr_methods,
	sizeof(struct ccr_softc)
};

static devclass_t ccr_devclass;

DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);