/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"
/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 *
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC request with
 * +-------------------------------+        'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */
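/*
 * A minimal illustrative sketch (not part of the driver) of turning
 * that status word into an errno, assuming the bit layout described
 * above: after be64toh(), the status occupies the low 32 bits of
 * data[0], with bit 0 flagging a "MAC" error and bit 1 a "PAD" error.
 * The helper name is hypothetical; the driver itself uses the
 * CHK_MAC_ERR_BIT()/CHK_PAD_ERR_BIT() macros in do_cpl6_fw_pld() below.
 */
static inline int
ccr_example_status_to_errno(uint64_t data0)
{
	uint32_t status;

	status = (uint32_t)data0;	/* status is in the last 4 bytes */
	if (status & ((1U << 0) | (1U << 1)))
		return (EBADMSG);	/* MAC or PAD check failed */
	return (0);
}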
/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32
 * SG entries.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		65535

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Longer requests either result in hung
 * requests or incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
struct ccr_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
	char opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_blkcipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_session {
	bool active;
	int pending;
	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
	};
	struct ccr_session_blkcipher blkcipher;
};
struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	uint32_t cid;
	int tx_channel_id;
	struct ccr_session *sessions;
	int nsessions;
	struct mtx lock;
	bool detaching;
	struct sge_wrq *txq;
	struct sge_rxq *rxq;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_crp' contains an sglist describing the entire buffer
	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
	 * the data the engine should DMA as input via ULPTX_SGL.
	 * 'sg_dsgl' is used to describe the destination that cipher
	 * text and a tag should be written to.
	 */
	struct sglist *sg_crp;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;

	/* Statistics. */
	uint64_t stats_blkcipher_encrypt;
	uint64_t stats_blkcipher_decrypt;
	uint64_t stats_hmac;
	uint64_t stats_authenc_encrypt;
	uint64_t stats_authenc_decrypt;
	uint64_t stats_gcm_encrypt;
	uint64_t stats_gcm_decrypt;
	uint64_t stats_wr_nomem;
	uint64_t stats_inflight;
	uint64_t stats_mac_error;
	uint64_t stats_pad_error;
	uint64_t stats_bad_session;
	uint64_t stats_sglist_error;
	uint64_t stats_process_error;
};
/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the crd_skip bytes at the start of the data as well as
 * any AAD or IV.  For authenticated encryption requests it should
 * also cover the destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffer described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers the entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */
static int
ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
	else
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
	return (error);
}
/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}
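/*
 * For example (illustrative): with maxsegsize = DSGL_SGE_MAXLEN (65535),
 * a single 100000-byte segment counts as howmany(100000, 65535) = 2
 * DSGL entries, matching the split performed by ccr_write_phys_dsgl()
 * below.
 */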
/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}
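/*
 * Worked example (assuming struct phys_sge_pairs packs 8 16-bit lengths
 * and 8 64-bit addresses, i.e. 80 bytes): nsegs = 10 yields one full
 * 80-byte group plus 16 bytes of lengths and roundup2(2, 2) * 8 = 16
 * bytes of addresses for the two-entry remainder, 112 bytes total.
 */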
static void
ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = sc->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}
/* These functions deal with the ULPTX_SGL for input payload. */
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}
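/*
 * Worked example (assuming sizeof(struct ulptx_sgl) == 16): for
 * nsegs = 3, the first segment lives in the ulptx_sgl header itself,
 * leaving 2 segments packed as 8 * ((3 * 2) / 2 + 0) = 24 bytes, so
 * n = 40 and the padded length is roundup2(40, 16) = 48 bytes.
 */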
static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = sc->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}
static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}
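/*
 * Both limits must hold: the payload itself has to fit in an immediate
 * ULP_TX_SC_IMM (CRYPTO_MAX_IMM_TX_PKT_LEN), and the padded transport
 * header plus padded payload must still fit in a single work request
 * (SGE_MAX_WR_LEN).  A payload can pass the first test yet fail the
 * second when the transport header (which includes the key context)
 * is large.
 */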
static void
ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
    u_int wr_len, uint32_t sid, u_int imm_len, u_int sgl_len, u_int hash_size,
    u_int iv_loc, struct cryptop *crp)
{
	u_int cctx_size;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = htobe32(sid);
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(iv_loc) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1));
	crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) -
	    sgl_len);
}
static int
ccr_hmac(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	struct cryptodesc *crd;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len;

	crd = crp->crp_desc;

	/* Reject requests with too large of an input buffer. */
	if (crd->crd_len > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size * 2;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	if (crd->crd_len == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
		imm_len = crd->crd_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
	    hash_size_in_response, IV_NOP, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
	    crd->crd_len);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len);
	memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad,
	    s->hmac.partial_digest_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
	if (crd->crd_len == 0) {
		dst[0] = 0x80;
		*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
		    htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
static int
ccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	struct cryptodesc *crd;

	crd = crp->crp_desc;
	if (error == 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
	}

	return (error);
}
static int
ccr_blkcipher(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct cryptodesc *crd;
	char *dst;
	u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	crd = crp->crp_desc;

	if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
		return (EINVAL);
	if (crd->crd_alg == CRYPTO_AES_CBC &&
	    (crd->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with too large of an input buffer. */
	if (crd->crd_len > MAX_REQUEST_SIZE)
		return (EFBIG);

	iv_loc = IV_NOP;
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		op_type = CHCR_ENCRYPT_OP;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		iv_loc = IV_IMMEDIATE;
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		op_type = CHCR_DECRYPT_OP;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
			iv_loc = IV_IMMEDIATE;
		} else
			iv_loc = IV_DSGL;
	}

	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
	    crd->crd_len);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	if (ccr_use_imm_data(transhdr_len, crd->crd_len +
	    s->blkcipher.iv_len)) {
		imm_len = crd->crd_len;
		if (iv_loc == IV_DSGL) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
			iv_loc = IV_IMMEDIATE;
		}
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (iv_loc == IV_DSGL) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crd->crd_inject, s->blkcipher.iv_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (iv_loc == IV_IMMEDIATE)
		wr_len += s->blkcipher.iv_len;
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 0,
	    iv_loc, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crd->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	if (iv_loc == IV_IMMEDIATE) {
		memcpy(dst, iv, s->blkcipher.iv_len);
		dst += s->blkcipher.iv_len;
	}
	if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}
/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (CHCR_SCMD_HMAC_CTRL_DIV2);
	return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC);
}
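/*
 * For example, with SHA-256 (hashsize 32): authsize 10 selects
 * TRUNC_RFC4366, 12 selects IPSEC_96BIT, 16 (= 32 / 2) selects DIV2,
 * and 32 selects NO_TRUNC.
 */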
static int
ccr_authenc(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size;
	u_int aad_start, aad_len, aad_stop;
	u_int auth_start, auth_stop, auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
		return (EINVAL);
	if (crde->crd_alg == CRYPTO_AES_CBC &&
	    (crde->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/*
	 * AAD is only permitted before the cipher/plain text, not
	 * after.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EINVAL);

	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;

	/*
	 * The IV is always stored at the start of the buffer even
	 * though it may be duplicated in the payload.  The crypto
	 * engine doesn't work properly if the IV offset points inside
	 * of the AAD region, so a second copy is always required.
	 */
	iv_loc = IV_IMMEDIATE;
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		op_type = CHCR_ENCRYPT_OP;
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		op_type = CHCR_DECRYPT_OP;
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	}

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (crde->crd_len > MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
	    crde->crd_len);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crda->crd_inject, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 */
	if (crda->crd_skip < crde->crd_skip) {
		if (crda->crd_skip + crda->crd_len > crde->crd_skip)
			aad_len = (crde->crd_skip - crda->crd_skip);
		else
			aad_len = crda->crd_len;
	} else
		aad_len = 0;
	input_len = aad_len + crde->crd_len;

	/*
	 * The firmware hangs if sent a request which is a
	 * bit smaller than MAX_REQUEST_SIZE.  In particular, the
	 * firmware appears to require 512 - 16 bytes of spare room
	 * along with the size of the hash even if the hash isn't
	 * included in the input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (aad_len != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_skip, aad_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crde->crd_skip, crde->crd_len);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_inject, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/*
	 * Any auth-only data before the cipher region is marked as AAD.
	 * Auth-data that overlaps with the cipher region is placed in
	 * the auth section.
	 */
	if (aad_len != 0) {
		aad_start = s->blkcipher.iv_len + 1;
		aad_stop = aad_start + aad_len - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = s->blkcipher.iv_len + aad_len + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (aad_len == crda->crd_len) {
		auth_start = 0;
		auth_stop = 0;
	} else {
		if (aad_len != 0)
			auth_start = cipher_start;
		else
			auth_start = s->blkcipher.iv_len + crda->crd_skip -
			    crde->crd_skip + 1;
		auth_stop = (crde->crd_skip + crde->crd_len) -
		    (crda->crd_skip + crda->crd_len) + cipher_stop;
	}
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (iv_loc == IV_IMMEDIATE)
		wr_len += s->blkcipher.iv_len;
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, iv_loc,
	    crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crde->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crde->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crde->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len);
	memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	if (iv_loc == IV_IMMEDIATE) {
		memcpy(dst, iv, s->blkcipher.iv_len);
		dst += s->blkcipher.iv_len;
	}
	if (imm_len != 0) {
		if (aad_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, aad_len, dst);
			dst += aad_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
static int
ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{
	struct cryptodesc *crd;

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * For a decryption request, the hardware may do a verification
	 * of the HMAC which will fail if the existing HMAC isn't in the
	 * buffer.  If that happens, clear the error and copy the HMAC
	 * from the CPL reply into the buffer.
	 *
	 * For encryption requests, crd should be the cipher request
	 * which will have CRD_F_ENCRYPT set.  For decryption
	 * requests, crp_desc will be the HMAC request which should
	 * not have this flag set.
	 */
	crd = crp->crp_desc;
	if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
	    !(crd->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
		error = 0;
	}
	return (error);
}
static int
ccr_gcm(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, iv_loc, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * AAD is only permitted before the cipher/plain text, not
	 * after.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EINVAL);

	hash_size_in_response = s->gmac.hash_len;

	/*
	 * The IV is always stored at the start of the buffer even
	 * though it may be duplicated in the payload.  The crypto
	 * engine doesn't work properly if the IV offset points inside
	 * of the AAD region, so a second copy is always required.
	 *
	 * The IV for GCM is further complicated in that IPSec
	 * provides a full 16-byte IV (including the counter), whereas
	 * the /dev/crypto interface sometimes provides a full 16-byte
	 * IV (if no IV is provided in the ioctl) and sometimes a
	 * 12-byte IV (if the IV was explicit).  For now the driver
	 * always assumes a 12-byte IV and initializes the low 4 byte
	 * word with a counter of 1.
	 */
	iv_loc = IV_IMMEDIATE;
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		op_type = CHCR_ENCRYPT_OP;
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		op_type = CHCR_DECRYPT_OP;
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	}

	/*
	 * If the input IV is 12 bytes, append an explicit counter of
	 * 1.
	 */
	if (s->blkcipher.iv_len == 12) {
		*(uint32_t *)&iv[12] = htobe32(1);
		iv_len = AES_BLOCK_LEN;
	} else
		iv_len = s->blkcipher.iv_len;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (crde->crd_len > MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
	    crde->crd_len);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crda->crd_inject, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the Galois hash key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 */
	input_len = crda->crd_len + crde->crd_len;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crda->crd_len != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_skip, crda->crd_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crde->crd_skip, crde->crd_len);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_inject, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	if (crda->crd_len != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crda->crd_len - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crda->crd_len + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (iv_loc == IV_IMMEDIATE)
		wr_len += iv_len;
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
	    0, iv_loc, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway (as the encrypt crd ends
	 * at the end of the input).  However, for decrypt the cipher
	 * ends before the tag in the AUTHENC case (and authstop is
	 * set to stop before the tag), but for GCM the cipher still
	 * runs to the end of the buffer.  Not sure if this is
	 * intentional or a firmware quirk, but it is required for
	 * working tag validation with GCM decryption.
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	if (iv_loc == IV_IMMEDIATE) {
		memcpy(dst, iv, iv_len);
		dst += iv_len;
	}
	if (imm_len != 0) {
		if (crda->crd_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, crda->crd_len, dst);
			dst += crda->crd_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the GMAC hash.
	 */
	return (error);
}
/*
 * Handle a GCM request with an empty payload by performing the
 * operation in software.  Derived from swcr_authenc().
 */
static void
ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	struct aes_gmac_ctx gmac_ctx;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int i, len;

	/*
	 * This assumes a 12-byte IV from the crp.  See longer comment
	 * above in ccr_gcm() for more details.
	 */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			arc4rand(iv, 12, 0);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	}
	*(uint32_t *)&iv[12] = htobe32(1);

	/* Initialize the MAC. */
	AES_GMAC_Init(&gmac_ctx);
	AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
	AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
		len = imin(crda->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	}

	/* Length block. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
	AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	AES_GMAC_Final(digest, &gmac_ctx);

	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		crp->crp_etype = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
			crp->crp_etype = 0;
		else
			crp->crp_etype = EBADMSG;
	}
	crypto_done(crp);
}
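/*
 * With an empty payload, GCM reduces to GMAC over the AAD: the final
 * GHASH length block is [len(AAD) in bits]_64 || [len(ciphertext) in
 * bits]_64 with len(ciphertext) = 0, which is why only the second
 * 32-bit word of 'block' is filled in above.
 */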
static void
ccr_identify(driver_t *driver, device_t parent)
{
	struct adapter *sc;

	sc = device_get_softc(parent);
	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
	    device_find_child(parent, "ccr", -1) == NULL)
		device_add_child(parent, "ccr", -1);
}

static int
ccr_probe(device_t dev)
{

	device_set_desc(dev, "Chelsio Crypto Accelerator");
	return (BUS_PROBE_DEFAULT);
}
static void
ccr_sysctls(struct ccr_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);

	/*
	 * dev.ccr.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * dev.ccr.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
	    &sc->stats_hmac, 0, "HMAC requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_encrypt, 0,
	    "Cipher encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_decrypt, 0,
	    "Cipher decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
	    &sc->stats_authenc_encrypt, 0,
	    "Combined AES+HMAC encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
	    &sc->stats_authenc_decrypt, 0,
	    "Combined AES+HMAC decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
	    &sc->stats_inflight, 0, "Requests currently pending");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
	    &sc->stats_mac_error, 0, "MAC errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
	    &sc->stats_pad_error, 0, "Padding errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
	    &sc->stats_sglist_error, 0,
	    "Requests for which DMA mapping failed");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
	    &sc->stats_process_error, 0, "Requests failed during queueing");
}
static int
ccr_attach(device_t dev)
{
	struct ccr_softc *sc;
	int32_t cid;

	/*
	 * TODO: Crypto requests will panic if the parent device isn't
	 * initialized so that the queues are up and running.  Need to
	 * figure out how to handle that correctly, maybe just reject
	 * requests if the adapter isn't fully initialized?
	 */
	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->adapter = device_get_softc(device_get_parent(dev));
	sc->txq = &sc->adapter->sge.ctrlq[0];
	sc->rxq = &sc->adapter->sge.rxq[0];
	cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}
	sc->cid = cid;
	sc->adapter->ccr_softc = sc;

	sc->tx_channel_id = 0;

	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
	ccr_sysctls(sc);

	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
	crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
	crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
	return (0);
}
static int
ccr_detach(device_t dev)
{
	struct ccr_softc *sc;
	int i;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	for (i = 0; i < sc->nsessions; i++) {
		if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
			mtx_unlock(&sc->lock);
			return (EBUSY);
		}
	}
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	free(sc->sessions, M_CCR);
	mtx_destroy(&sc->lock);
	sglist_free(sc->sg_dsgl);
	sglist_free(sc->sg_ulptx);
	sglist_free(sc->sg_crp);
	sc->adapter->ccr_softc = NULL;
	return (0);
}
static void
ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
{
	uint32_t *u32;
	uint64_t *u64;
	u_int i;

	u32 = (uint32_t *)dst;
	u64 = (uint64_t *)dst;
	switch (cri_alg) {
	case CRYPTO_SHA1_HMAC:
		for (i = 0; i < SHA1_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_256_HMAC:
		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
		break;
	}
}
static void
ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
    int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	klen /= 8;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}

	/*
	 * Hash the raw ipad and opad and store the partial result in
	 * the same buffer.
	 */
	axf->Init(&auth_ctx);
	axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize);
	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);

	axf->Init(&auth_ctx);
	axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
	ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx);
}
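/*
 * In other words, since HMAC(K, m) = H((K ^ OPAD) || H((K ^ IPAD) || m)),
 * the stored 'ipad' and 'opad' are the hash states after absorbing one
 * block of K ^ IPAD and K ^ OPAD respectively; the engine resumes from
 * these partial states rather than rehashing the key on every request.
 */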
/*
 * Borrowed from AES_GMAC_Setkey().
 */
static void
ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen)
{
	static char zeroes[GMAC_BLOCK_LEN];
	uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
	int rounds;

	rounds = rijndaelKeySetupEnc(keysched, key, klen);
	rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
}
static int
ccr_aes_check_keylen(int alg, int klen)
{

	switch (klen) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
/*
 * Borrowed from cesa_prep_aes_key().  We should perhaps have a public
 * function to generate this instead.
 *
 * NB: The crypto engine wants the words in the decryption key in reverse
 * order.
 */
static void
ccr_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
{
	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
	uint32_t *dkey;
	int i;

	rijndaelKeySetupEnc(ek, enc_key, kbits);
	dkey = dec_key;
	dkey += (kbits / 8) / 4;

	switch (kbits) {
	case 128:
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 10 + i]);
		break;
	case 192:
		for (i = 0; i < 2; i++)
			*--dkey = htobe32(ek[4 * 11 + 2 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 12 + i]);
		break;
	case 256:
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 13 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 14 + i]);
		break;
	}
	MPASS(dkey == dec_key);
}
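/*
 * For example (illustrative): with kbits = 128, 'dkey' starts 4 words
 * past 'dec_key', so the loop writes the four words of the final round
 * key ek[40..43] back-to-front into dec_key[3..0].  The engine can
 * then consume the round keys in reverse for its equivalent-inverse
 * cipher.
 */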
static void
ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
{
	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
	unsigned int opad_present;

	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;
	switch (kbits) {
	case 128:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		break;
	case 192:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		break;
	case 256:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
	switch (alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_XTS:
		ccr_aes_getdeckey(s->blkcipher.deckey, key, kbits);
		break;
	}

	kctx_len = roundup2(s->blkcipher.key_len, 16);
	switch (s->mode) {
	case AUTHENC:
		mk_size = s->hmac.mk_size;
		opad_present = 1;
		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
		kctx_len += iopad_size * 2;
		break;
	case GCM:
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		opad_present = 0;
		kctx_len += GMAC_BLOCK_LEN;
		break;
	default:
		mk_size = CHCR_KEYCTX_NO_KEY;
		opad_present = 0;
		break;
	}
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}
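/*
 * Worked example (assuming sizeof(struct _key_ctx) == 16): an AUTHENC
 * session using AES-128-CBC with HMAC-SHA1 has key_len 16 and
 * iopad_size roundup2(20, 16) = 32, so kctx_len = 16 + 2 * 32 = 80 and
 * kctx_flits = (16 + 80) / 16 = 6.
 */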
static int
ccr_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
	unsigned int partial_digest_len;
	int error, i, sess;
	bool gcm_hash;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP;
	iv_len = 0;
	mk_size = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccr_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	sess = -1;
	for (i = 0; i < sc->nsessions; i++) {
		if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
			sess = i;
			break;
		}
	}
	if (sess == -1) {
		s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCR,
		    M_NOWAIT | M_ZERO);
		if (s == NULL) {
			mtx_unlock(&sc->lock);
			return (ENOMEM);
		}
		if (sc->sessions != NULL)
			memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
		sess = sc->nsessions;
		free(sc->sessions, M_CCR);
		sc->sessions = s;
		sc->nsessions++;
	}

	s = &sc->sessions[sess];

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
		ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.mk_size = mk_size;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	*sidp = sess;
	return (0);
}
static int
ccr_freesession(device_t dev, uint64_t tid)
{
	struct ccr_softc *sc;
	uint32_t sid;
	int error;

	sc = device_get_softc(dev);
	sid = CRYPTO_SESID2LID(tid);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active)
		error = EINVAL;
	else {
		if (sc->sessions[sid].pending != 0)
			device_printf(dev,
			    "session %d freed with %d pending requests\n", sid,
			    sc->sessions[sid].pending);
		sc->sessions[sid].active = false;
		error = 0;
	}
	mtx_unlock(&sc->lock);
	return (error);
}
static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct cryptodesc *crd, *crda, *crde;
	uint32_t sid;
	int error;

	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active) {
		sc->stats_bad_session++;
		error = EINVAL;
		goto out;
	}

	error = ccr_populate_sglist(sc->sg_crp, crp);
	if (error) {
		sc->stats_sglist_error++;
		goto out;
	}

	s = &sc->sessions[sid];
	switch (s->mode) {
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccr_hmac(sc, sid, s, crp);
		if (error == 0)
			sc->stats_hmac++;
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccr_blkcipher(sc, sid, s, crp);
		if (error == 0) {
			if (crd->crd_flags & CRD_F_ENCRYPT)
				sc->stats_blkcipher_encrypt++;
			else
				sc->stats_blkcipher_decrypt++;
		}
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			break;
		default:
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			break;
		}
		if (error)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccr_authenc(sc, sid, s, crp, crda, crde);
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_authenc_encrypt++;
			else
				sc->stats_authenc_decrypt++;
		}
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
		} else {
			crda = crd;
			crde = crd->crd_next;
		}
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		if (crde->crd_len == 0) {
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccr_gcm(sc, sid, s, crp, crda, crde);
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_gcm_encrypt++;
			else
				sc->stats_gcm_decrypt++;
		}
		break;
	}

	if (error == 0) {
		sc->sessions[sid].pending++;
		sc->stats_inflight++;
	} else
		sc->stats_process_error++;

out:
	mtx_unlock(&sc->lock);

	if (error) {
		crp->crp_etype = error;
		crypto_done(crp);
	}

	return (0);
}
static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct ccr_softc *sc = iq->adapter->ccr_softc;
	struct ccr_session *s;
	const struct cpl_fw6_pld *cpl;
	struct cryptop *crp;
	uint32_t sid, status;
	int error;

	if (m != NULL)
		cpl = mtod(m, const void *);
	else
		cpl = (const void *)(rss + 1);

	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	status = be64toh(cpl->data[0]);
	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
		error = EBADMSG;
	else
		error = 0;

	mtx_lock(&sc->lock);
	MPASS(sid < sc->nsessions);
	s = &sc->sessions[sid];
	s->pending--;
	sc->stats_inflight--;

	switch (s->mode) {
	case HMAC:
		error = ccr_hmac_done(sc, s, crp, cpl, error);
		break;
	case BLKCIPHER:
		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
		break;
	case AUTHENC:
		error = ccr_authenc_done(sc, s, crp, cpl, error);
		break;
	case GCM:
		error = ccr_gcm_done(sc, s, crp, cpl, error);
		break;
	}

	if (error == EBADMSG) {
		if (CHK_MAC_ERR_BIT(status))
			sc->stats_mac_error++;
		if (CHK_PAD_ERR_BIT(status))
			sc->stats_pad_error++;
	}
	mtx_unlock(&sc->lock);
	crp->crp_etype = error;
	crypto_done(crp);
	m_freem(m);
	return (0);
}
static int
ccr_modevent(module_t mod, int cmd, void *arg)
{

	switch (cmd) {
	case MOD_LOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
		return (0);
	case MOD_UNLOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}
static device_method_t ccr_methods[] = {
	DEVMETHOD(device_identify,	ccr_identify),
	DEVMETHOD(device_probe,		ccr_probe),
	DEVMETHOD(device_attach,	ccr_attach),
	DEVMETHOD(device_detach,	ccr_detach),

	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
	DEVMETHOD(cryptodev_freesession, ccr_freesession),
	DEVMETHOD(cryptodev_process,	ccr_process),

	DEVMETHOD_END
};

static driver_t ccr_driver = {
	"ccr",
	ccr_methods,
	sizeof(struct ccr_softc)
};

static devclass_t ccr_devclass;

DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);