/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+
 * | Hash state                    |  ----- For hash-only requests
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for HMAC/hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 */

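/*
 * Worked example of the padding rule above (illustrative only): for
 * HMAC-SHA-1 the partial digest is 20 bytes, so the IPAD and OPAD
 * hashes each occupy roundup2(20, 16) == 32 bytes and the key context
 * carries 64 bytes of pads, matching the iopad_size computation in
 * ccr_hash() below.
 */
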
/*
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC request with
 * +-------------------------------+        'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */

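/*
 * A minimal sketch of decoding that status word (illustrative
 * assumption of this note, not part of the driver; the completion
 * handler that consumes these replies is not shown in this excerpt):
 */
static inline uint32_t
ccr_example_reply_status(const struct cpl_fw6_pld *cpl)
{

	/* The big-endian status word occupies the low 4 bytes of data[0]. */
	return (be64toh(cpl->data[0]) & 0xffffffff);
}
/* (status & 1) != 0 reports a MAC error; (status & 2) != 0 a PAD error. */
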
/*
 * The crypto engine supports a maximum AAD size of 511 bytes.
 */
#define	MAX_AAD_LEN		511

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
 * entries.  While the CPL includes a 16-bit length field, the T6 can
 * sometimes hang if an error occurs while processing a request with a
 * single DSGL entry larger than 2k.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		2048

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Longer requests either result in hung
 * requests or incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");

struct ccr_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_ccm_mac {
	int hash_len;
};

struct ccr_session_blkcipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_port {
	struct sge_wrq *txq;
	struct sge_rxq *rxq;
	int tx_channel_id;
	u_int active_sessions;
};

struct ccr_session {
	enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode;
	struct ccr_port *port;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
		struct ccr_session_ccm_mac ccm_mac;
	};
	struct ccr_session_blkcipher blkcipher;
};

struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	uint32_t cid;
	struct mtx lock;
	bool detaching;
	struct ccr_port ports[MAX_NPORTS];
	u_int port_mask;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_crp' contains an sglist describing the entire buffer
	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
	 * the data the engine should DMA as input via ULPTX_SGL.
	 * 'sg_dsgl' is used to describe the destination that cipher
	 * text and a tag should be written to.
	 */
	struct sglist *sg_crp;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;

	/*
	 * Pre-allocate a dummy output buffer for the IV and AAD for
	 * AEAD requests.
	 */
	char *iv_aad_buf;
	struct sglist *sg_iv_aad;

	/* Statistics. */
	uint64_t stats_blkcipher_encrypt;
	uint64_t stats_blkcipher_decrypt;
	uint64_t stats_hash;
	uint64_t stats_hmac;
	uint64_t stats_eta_encrypt;
	uint64_t stats_eta_decrypt;
	uint64_t stats_gcm_encrypt;
	uint64_t stats_gcm_decrypt;
	uint64_t stats_ccm_encrypt;
	uint64_t stats_ccm_decrypt;
	uint64_t stats_wr_nomem;
	uint64_t stats_inflight;
	uint64_t stats_mac_error;
	uint64_t stats_pad_error;
	uint64_t stats_bad_session;
	uint64_t stats_sglist_error;
	uint64_t stats_process_error;
	uint64_t stats_sw_fallback;
};

/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the skip bytes at the start of the data as well as any AAD
 * or IV.  For authenticated encryption requests it should include the
 * destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffer described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers the entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */

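/*
 * For example (illustrative): an encrypt ETA request walks sg_crp once
 * to build sg_ulptx over the AAD and plaintext (the input the engine
 * reads) and again to build sg_dsgl over the ciphertext and tag
 * locations (the output the engine writes).
 */
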
static int
ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		error = sglist_append_mbuf(sg, crp->crp_mbuf);
		break;
	case CRYPTO_BUF_UIO:
		error = sglist_append_uio(sg, crp->crp_uio);
		break;
	case CRYPTO_BUF_CONTIG:
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */

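/*
 * For example, with DSGL_SGE_MAXLEN (2048) a single 5000-byte segment
 * counts as howmany(5000, 2048) == 3 entries.
 */
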
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}

/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}

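/*
 * Worked example for ccr_phys_dsgl_len() above (assuming the usual
 * phys_sge_pairs layout of eight 16-bit lengths and eight 64-bit
 * addresses, 80 bytes): nsegs = 10 yields one full pair structure
 * (80 bytes) plus a partial one sized as 8 lengths (16 bytes) and
 * roundup2(2, 2) == 2 addresses (16 bytes), 112 bytes in total.
 */
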
static void
ccr_write_phys_dsgl(struct ccr_softc *sc, struct ccr_session *s, void *dst,
    int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = sc->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}

/* These functions deal with the ULPTX_SGL for input payload. */
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}

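/*
 * Worked example for ccr_ulptx_sgl_len() above (assuming the 16-byte
 * ulptx_sgl header): for nsegs = 3, two segments follow the header,
 * so n = 16 + 8 * ((3 * 2) / 2 + (2 & 1)) = 40, rounded up to 48.
 */
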
static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = sc->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}

static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}

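/*
 * Note that both tests above must pass: even when the payload fits
 * within the immediate-data limit, the padded transfer header plus
 * padded immediate data must still fit in one work request
 * (SGE_MAX_WR_LEN).
 */
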
static void
ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
    struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len,
    u_int sgl_len, u_int hash_size, struct cryptop *crp)
{
	u_int cctx_size, idata_len;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = 0;
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->tx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
	    V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(s->port->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
	idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
	if (imm_len % 16 != 0)
		idata_len -= 16 - imm_len % 16;
	crwr->sc_imm.len = htobe32(idata_len);
}

static int
ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int hmac_ctrl, imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len, use_opad;

	/* Reject requests with too large of an input buffer. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	if (s->mode == HMAC) {
		use_opad = 1;
		hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
	} else {
		use_opad = 0;
		hmac_ctrl = SCMD_HMAC_CTRL_NOP;
	}

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size;
	if (use_opad)
		kctx_len += iopad_size;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	if (crp->crp_payload_length == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    hash_size_in_response, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
	    axf->blocksize : crp->crp_payload_length);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
	    V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
	if (crp->crp_payload_length == 0) {
		dst[0] = 0x80;
		*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
		    htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	uint8_t hash[HASH_MAX_LEN];

	if (error)
		return (error);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
		    hash);
		if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
			return (EBADMSG);
	} else
		crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
		    (cpl + 1));
	return (0);
}

static int
ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len, iv_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with too large of an input buffer. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
	    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.
	 */
	if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
		arc4rand(iv, s->blkcipher.iv_len, 0);
		crypto_copyback(crp, crp->crp_iv_start, s->blkcipher.iv_len,
		    iv);
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
		memcpy(iv, crp->crp_iv, s->blkcipher.iv_len);
	else
		crypto_copydata(crp, crp->crp_iv_start, s->blkcipher.iv_len,
		    iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static u_int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (SCMD_HMAC_CTRL_DIV2);
	return (SCMD_HMAC_CTRL_NO_TRUNC);
}

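/*
 * For example, an ETA request using HMAC-SHA-384 (48-byte digest)
 * truncated to 24 bytes selects SCMD_HMAC_CTRL_DIV2, while a 12-byte
 * digest selects SCMD_HMAC_CTRL_IPSEC_96BIT.
 */
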
static int
ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size, iv_len;
	u_int aad_start, aad_stop;
	u_int auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
		return (EMSGSIZE);

	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
	    iv_len + crp->crp_aad_length);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
	    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;

	/*
	 * The firmware hangs if sent a request which is a
	 * bit smaller than MAX_REQUEST_SIZE.  In particular, the
	 * firmware appears to require 512 - 16 bytes of spare room
	 * along with the size of the hash even if the hash isn't
	 * included in the input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;

	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crp->crp_aad_start, crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/*
	 * Any auth-only data before the cipher region is marked as AAD.
	 * Auth-data that overlaps with the cipher region is placed in
	 * the auth section.
	 */
	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.
	 */
	if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
		arc4rand(iv, s->blkcipher.iv_len, 0);
		crypto_copyback(crp, crp->crp_iv_start, s->blkcipher.iv_len,
		    iv);
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
		memcpy(iv, crp->crp_iv, s->blkcipher.iv_len);
	else
		crypto_copydata(crp, crp->crp_iv_start, s->blkcipher.iv_len,
		    iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->hmac.pads, iopad_size * 2);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			crypto_copydata(crp, crp->crp_aad_start,
			    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

static int
ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle GCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->gmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The IV handling for GCM in OCF is a bit more complicated in
	 * that IPSec provides a full 16-byte IV (including the
	 * counter), whereas the /dev/crypto interface sometimes
	 * provides a full 16-byte IV (if no IV is provided in the
	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
	 *
	 * When provided a 12-byte IV, assume the IV is really 16 bytes
	 * with a counter in the last 4 bytes initialized to 1.
	 *
	 * While iv_len is checked below, the value is currently
	 * always set to 12 when creating a GCM session in this driver
	 * due to limitations in OCF (there is no way to know what the
	 * IV length of a given request will be).  This means that the
	 * driver always assumes a 12-byte IV for now.
	 */
	if (s->blkcipher.iv_len == 12)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	/*
	 * GCM requests should always provide an explicit IV.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    crp->crp_aad_length);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
	    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the Galois hash key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crp->crp_aad_start, crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	memcpy(iv, crp->crp_iv, s->blkcipher.iv_len);
	if (s->blkcipher.iv_len == 12)
		*(uint32_t *)&iv[12] = htobe32(1);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway.  However, for decrypt
	 * the cipher ends before the tag in the ETA case (and
	 * authstop is set to stop before the tag), but for GCM the
	 * cipher still runs to the end of the buffer.  Not sure if
	 * this is intentional or a firmware quirk, but it is required
	 * for working tag validation with GCM decryption.
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			crypto_copydata(crp, crp->crp_aad_start,
			    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the GMAC hash.
	 */
	return (error);
}

/*
 * Handle a GCM request that is not supported by the crypto engine by
 * performing the operation in software.  Derived from swcr_authenc().
 */
static void
ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp)
{
	struct auth_hash *axf;
	struct enc_xform *exf;
	union authctx *auth_ctx;
	uint8_t *kschedule;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int error, i, len;

	auth_ctx = NULL;
	kschedule = NULL;

	/* Initialize the MAC. */
	switch (s->blkcipher.key_len) {
	case 16:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 24:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 32:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
	if (auth_ctx == NULL) {
		error = ENOMEM;
		goto out;
	}
	axf->Init(auth_ctx);
	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);

	/* Initialize the cipher. */
	exf = &enc_xform_aes_nist_gcm;
	error = exf->setkey(&kschedule, s->blkcipher.enckey,
	    s->blkcipher.key_len);
	if (error)
		goto out;

	/*
	 * This assumes a 12-byte IV from the crp.  See longer comment
	 * above in ccr_gcm() for more details.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
		error = EINVAL;
		goto out;
	}
	memcpy(iv, crp->crp_iv, 12);
	*(uint32_t *)&iv[12] = htobe32(1);

	axf->Reinit(auth_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
		len = imin(crp->crp_aad_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_aad_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		axf->Update(auth_ctx, block, sizeof(block));
	}

	exf->reinit(kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) {
		len = imin(crp->crp_payload_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_payload_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(kschedule, block);
			axf->Update(auth_ctx, block, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    block);
		} else {
			axf->Update(auth_ctx, block, len);
		}
	}

	/* Length block. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8);
	((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8);
	axf->Update(auth_ctx, block, sizeof(block));

	/* Finalize MAC. */
	axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
		    digest);
		error = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
		    digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			for (i = 0; i < crp->crp_payload_length;
			    i += sizeof(block)) {
				len = imin(crp->crp_payload_length - i,
				    sizeof(block));
				crypto_copydata(crp, crp->crp_payload_start + i,
				    len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block);
				crypto_copyback(crp, crp->crp_payload_start + i,
				    len, block);
			}
		} else
			error = EBADMSG;
	}

	exf->zerokey(&kschedule);
out:
	if (auth_ctx != NULL) {
		memset(auth_ctx, 0, axf->ctxsize);
		free(auth_ctx, M_CCR);
	}
	crp->crp_etype = error;
	crypto_done(crp);
}

static void
generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response,
    const char *iv, char *b0)
{
	u_int i, payload_len;

	/* NB: L is already set in the first byte of the IV. */
	memcpy(b0, iv, CCM_B0_SIZE);

	/* Set length of hash in bits 3 - 5. */
	b0[0] |= (((hash_size_in_response - 2) / 2) << 3);

	/* Store the payload length as a big-endian value. */
	payload_len = crp->crp_payload_length;
	for (i = 0; i < iv[0]; i++) {
		b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
		payload_len >>= 8;
	}

	/*
	 * If there is AAD in the request, set bit 6 in the flags
	 * field and store the AAD length as a big-endian value at the
	 * start of block 1.  This only assumes a 16-bit AAD length
	 * since T6 doesn't support large AAD sizes.
	 */
	if (crp->crp_aad_length != 0) {
		b0[0] |= (1 << 6);
		*(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
	}
}

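/*
 * Worked example for generate_ccm_b0() above: a 12-byte nonce gives
 * L' = 2 in iv[0] (see ccr_ccm() below), so with AAD present and a
 * 16-byte tag the flags byte becomes
 * 0x40 | (((16 - 2) / 2) << 3) | 2 == 0x7a.
 */
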
static int
ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct ulptx_idata *idata;
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int aad_len, b0_len, hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle CCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	/*
	 * CCM always includes block 0 in the AAD before AAD from the
	 * request.
	 */
	b0_len = CCM_B0_SIZE;
	if (crp->crp_aad_length != 0)
		b0_len += CCM_AAD_FIELD_SIZE;
	aad_len = b0_len + crp->crp_aad_length;

	/*
	 * CCM requests should always provide an explicit IV (really
	 * the nonce).
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * Always assume a 12 byte input nonce for now since that is
	 * what OCF always generates.  The full IV in the work request
	 * is 16 bytes.
	 */
	iv_len = AES_BLOCK_LEN;

	if (iv_len + aad_len > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->ccm_mac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + aad_len + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + aad_len + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    aad_len);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
	    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of two copies of
	 * the AES key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) * 2;
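	/*
	 * For example (illustrative): AES-128-CCM stores two copies of
	 * the 16-byte key, giving kctx_len = roundup2(16, 16) * 2 = 32.
	 */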
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, AAD (including block
	 * 0), and then the cipher/plain text.  For decryption
	 * requests the hash is appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = aad_len + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		/* Block 0 is passed as immediate data. */
		imm_len = b0_len;

		sglist_reset(sc->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crp->crp_aad_start, crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	aad_start = iv_len + 1;
	aad_stop = aad_start + aad_len - 1;
	cipher_start = aad_stop + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the nonce from the request.  Use the nonce to generate
	 * the full IV with the counter set to 0.
	 */
	memset(iv, 0, iv_len);
	iv[0] = (15 - AES_CCM_IV_LEN) - 1;
	memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  See comments above
	 * in ccr_gcm().
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16),
	    s->blkcipher.enckey, s->blkcipher.key_len);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	generate_ccm_b0(crp, hash_size_in_response, iv, dst);
	if (sgl_nsegs == 0) {
		dst += b0_len;
		if (crp->crp_aad_length != 0) {
			crypto_copydata(crp, crp->crp_aad_start,
			    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else {
		dst += CCM_B0_SIZE;
		if (b0_len > CCM_B0_SIZE) {
			/*
			 * If there is AAD, insert padding including a
			 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL
			 * is 16-byte aligned.
			 */
			KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE,
			    ("b0_len mismatch"));
			memset(dst + CCM_AAD_FIELD_SIZE, 0,
			    8 - CCM_AAD_FIELD_SIZE);
			idata = (void *)(dst + 8);
			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
			idata->len = htobe32(0);
			dst = (void *)(idata + 1);
		}
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
	}

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the CBC MAC
	 * hash.
	 */
	return (error);
}

/*
 * Handle a CCM request that is not supported by the crypto engine by
 * performing the operation in software.  Derived from swcr_authenc().
 */
static void
ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp)
{
	struct auth_hash *axf;
	struct enc_xform *exf;
	union authctx *auth_ctx;
	uint8_t *kschedule;
	char block[CCM_CBC_BLOCK_LEN];
	char digest[AES_CBC_MAC_HASH_LEN];
	char iv[AES_CCM_IV_LEN];
	int error, i, len;

	auth_ctx = NULL;
	kschedule = NULL;

	/* Initialize the MAC. */
	switch (s->blkcipher.key_len) {
	case 16:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 24:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 32:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
	if (auth_ctx == NULL) {
		error = ENOMEM;
		goto out;
	}
	axf->Init(auth_ctx);
	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);

	/* Initialize the cipher. */
	exf = &enc_xform_ccm;
	error = exf->setkey(&kschedule, s->blkcipher.enckey,
	    s->blkcipher.key_len);
	if (error)
		goto out;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
		error = EINVAL;
		goto out;
	}
	memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN);

	auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;
	axf->Reinit(auth_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
		len = imin(crp->crp_aad_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_aad_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		axf->Update(auth_ctx, block, sizeof(block));
	}

	exf->reinit(kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) {
		len = imin(crp->crp_payload_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_payload_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(auth_ctx, block, len);
			exf->encrypt(kschedule, block);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    block);
		} else {
			exf->decrypt(kschedule, block);
			axf->Update(auth_ctx, block, len);
		}
	}

	/* Finalize MAC. */
	axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
		    digest);
		error = 0;
	} else {
		char digest2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
		    digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			exf->reinit(kschedule, iv);
			for (i = 0; i < crp->crp_payload_length;
			    i += sizeof(block)) {
				len = imin(crp->crp_payload_length - i,
				    sizeof(block));
				crypto_copydata(crp, crp->crp_payload_start + i,
				    len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block);
				crypto_copyback(crp, crp->crp_payload_start + i,
				    len, block);
			}
		} else
			error = EBADMSG;
	}

	exf->zerokey(&kschedule);
out:
	if (auth_ctx != NULL) {
		memset(auth_ctx, 0, axf->ctxsize);
		free(auth_ctx, M_CCR);
	}
	crp->crp_etype = error;
	crypto_done(crp);
}

static void
ccr_identify(driver_t *driver, device_t parent)
{
	struct adapter *sc;

	sc = device_get_softc(parent);
	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
	    device_find_child(parent, "ccr", -1) == NULL)
		device_add_child(parent, "ccr", -1);
}

static int
ccr_probe(device_t dev)
{

	device_set_desc(dev, "Chelsio Crypto Accelerator");
	return (BUS_PROBE_DEFAULT);
}

static void
ccr_sysctls(struct ccr_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid, *port_oid;
	struct sysctl_oid_list *children;
	char buf[16];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);

	/*
	 * dev.ccr.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW,
	    &sc->port_mask, 0, "Mask of enabled ports");

	/*
	 * dev.ccr.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
	    &sc->stats_hash, 0, "Hash requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
	    &sc->stats_hmac, 0, "HMAC requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_encrypt, 0,
	    "Cipher encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_decrypt, 0,
	    "Cipher decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_encrypt", CTLFLAG_RD,
	    &sc->stats_eta_encrypt, 0,
	    "Combined AES+HMAC encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_decrypt", CTLFLAG_RD,
	    &sc->stats_eta_decrypt, 0,
	    "Combined AES+HMAC decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD,
	    &sc->stats_ccm_encrypt, 0, "AES-CCM encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD,
	    &sc->stats_ccm_decrypt, 0, "AES-CCM decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
	    &sc->stats_inflight, 0, "Requests currently pending");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
	    &sc->stats_mac_error, 0, "MAC errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
	    &sc->stats_pad_error, 0, "Padding errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
	    &sc->stats_sglist_error, 0,
	    "Requests for which DMA mapping failed");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
	    &sc->stats_process_error, 0, "Requests failed during queueing");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
	    &sc->stats_sw_fallback, 0,
	    "Requests processed by falling back to software");

	/*
	 * dev.ccr.X.stats.port
	 */
	port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics");

	for (i = 0; i < nitems(sc->ports); i++) {
		if (sc->ports[i].rxq == NULL)
			continue;

		/*
		 * dev.ccr.X.stats.port.Y
		 */
		snprintf(buf, sizeof(buf), "%d", i);
		oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO,
		    buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf);
		children = SYSCTL_CHILDREN(oid);

		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions",
		    CTLFLAG_RD, &sc->ports[i].active_sessions, 0,
		    "Count of active sessions");
	}
}

static void
ccr_init_port(struct ccr_softc *sc, int port)
{

	sc->ports[port].txq = &sc->adapter->sge.ctrlq[port];
	sc->ports[port].rxq =
	    &sc->adapter->sge.rxq[sc->adapter->port[port]->vi->first_rxq];
	sc->ports[port].tx_channel_id = port;
	/* Ports are numbered 0 .. MAX_NPORTS - 1, so MAX_NPORTS bits. */
	_Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS,
	    "Too many ports to fit in port_mask");
	sc->port_mask |= 1u << port;
}

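/*
 * Attach-time allocations reused for every request: sg_crp maps the
 * crypto request buffer, sg_ulptx and sg_dsgl are scratch lists used
 * while constructing the ULPTX SGL and RX PHYS_DSGL portions of a
 * work request, and iv_aad_buf/sg_iv_aad cover immediate IV and AAD
 * bytes copied out of the request.
 */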
static int
ccr_attach(device_t dev)
{
	struct ccr_softc *sc;
	int32_t cid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->adapter = device_get_softc(device_get_parent(dev));
	for_each_port(sc->adapter, i) {
		ccr_init_port(sc, i);
	}
	cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
	    CRYPTOCAP_F_HARDWARE);
	if (cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}
	sc->cid = cid;
	sc->adapter->ccr_softc = sc;

	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
	ccr_sysctls(sc);

	return (0);
}

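/*
 * Tear-down order: flag the softc as detaching under the lock so that
 * ccr_newsession() starts failing, unregister from the crypto
 * framework, and only then release the lock, sglists, and buffers.
 */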
static int
ccr_detach(device_t dev)
{
	struct ccr_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);

	mtx_destroy(&sc->lock);
	sglist_free(sc->sg_iv_aad);
	free(sc->iv_aad_buf, M_CCR);
	sglist_free(sc->sg_dsgl);
	sglist_free(sc->sg_ulptx);
	sglist_free(sc->sg_crp);
	sc->adapter->ccr_softc = NULL;
	return (0);
}

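/*
 * For plain hash sessions there is no key to fold in, so the partial
 * hash loaded into the key context is just the algorithm's standard
 * initial state.
 */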
static void
ccr_init_hash_digest(struct ccr_session *s)
{
	union authctx auth_ctx;
	struct auth_hash *axf;

	axf = s->hmac.auth_hash;
	axf->Init(&auth_ctx);
	t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads);
}

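/*
 * Validate AES key lengths.  An AES-XTS key carries two AES keys back
 * to back, so only the doubled lengths (256 and 512 bits) are
 * acceptable for XTS, and the 512-bit case is valid for nothing else.
 */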
static int
ccr_aes_check_keylen(int alg, int klen)
{

	switch (klen * 8) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static void
ccr_aes_setkey(struct ccr_session *s, const void *key, int klen)
{
	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
	unsigned int opad_present;

	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		kbits = (klen / 2) * 8;
	else
		kbits = klen * 8;
	switch (kbits) {
	case 128:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		break;
	case 192:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		break;
	case 256:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
	case SCMD_CIPH_MODE_AES_XTS:
		t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
		break;
	}

	kctx_len = roundup2(s->blkcipher.key_len, 16);
	switch (s->mode) {
	case ETA:
		mk_size = s->hmac.mk_size;
		opad_present = 1;
		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
		kctx_len += iopad_size * 2;
		break;
	case GCM:
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		opad_present = 0;
		kctx_len += GMAC_BLOCK_LEN;
		break;
	case CCM:
		switch (kbits) {
		case 128:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
			break;
		case 192:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
			break;
		case 256:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
			break;
		default:
			panic("should not get here");
		}
		opad_present = 0;
		kctx_len *= 2;
		break;
	default:
		mk_size = CHCR_KEYCTX_NO_KEY;
		opad_present = 0;
		break;
	}
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode ==
	    SCMD_CIPH_MODE_AES_XTS) |
	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}

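/* Digests the unit supports, with or without HMAC. */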
static bool
ccr_auth_supported(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		return (true);
	default:
		return (false);
	}
}

static bool
ccr_cipher_supported(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_ICM:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}
	return (ccr_aes_check_keylen(csp->csp_cipher_alg,
	    csp->csp_cipher_klen));
}

static int
ccr_cipher_mode(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		return (SCMD_CIPH_MODE_AES_CBC);
	case CRYPTO_AES_ICM:
		return (SCMD_CIPH_MODE_AES_CTR);
	case CRYPTO_AES_NIST_GCM_16:
		return (SCMD_CIPH_MODE_AES_GCM);
	case CRYPTO_AES_XTS:
		return (SCMD_CIPH_MODE_AES_XTS);
	case CRYPTO_AES_CCM_16:
		return (SCMD_CIPH_MODE_AES_CCM);
	default:
		return (SCMD_CIPH_MODE_NOP);
	}
}

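/*
 * cryptodev_probesession: a cheap feasibility check made before a
 * session is created.  A negative probe value such as
 * CRYPTODEV_PROBE_HARDWARE bids for the session; a positive errno
 * (EINVAL here) declines it.
 */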
static int
ccr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	unsigned int cipher_mode;

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!ccr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!ccr_cipher_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (EINVAL);
			if (csp->csp_auth_mlen < 0 ||
			    csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
				return (EINVAL);
			break;
		case CRYPTO_AES_CCM_16:
			if (csp->csp_ivlen != AES_CCM_IV_LEN)
				return (EINVAL);
			if (csp->csp_auth_mlen < 0 ||
			    csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	if (csp->csp_cipher_klen != 0) {
		cipher_mode = ccr_cipher_mode(csp);
		if (cipher_mode == SCMD_CIPH_MODE_NOP)
			return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

/*
 * Select an available port with the lowest number of active sessions.
 */
static struct ccr_port *
ccr_choose_port(struct ccr_softc *sc)
{
	struct ccr_port *best, *p;
	int i;

	mtx_assert(&sc->lock, MA_OWNED);
	best = NULL;
	for (i = 0; i < nitems(sc->ports); i++) {
		p = &sc->ports[i];

		/* Ignore non-existent ports. */
		if (p->rxq == NULL)
			continue;

		/*
		 * XXX: Ignore ports whose queues aren't initialized.
		 * This is racy as the rxq can be destroyed by the
		 * associated VI detaching.  Eventually ccr should use
		 * dedicated independent queues.
		 */
		if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL)
			continue;

		if ((sc->port_mask & (1u << i)) == 0)
			continue;

		if (best == NULL ||
		    p->active_sessions < best->active_sessions)
			best = p;
	}
	return (best);
}

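/*
 * cryptodev_newsession: validate the algorithm combination, bind the
 * session to the least-loaded port, and precompute the key material
 * that is reused across requests (partial HMAC hashes, AES decryption
 * keys, and the GMAC hash subkey H).
 */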
static int
ccr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct auth_hash *auth_hash;
	unsigned int auth_mode, cipher_mode, mk_size;
	unsigned int partial_digest_len;

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		auth_hash = &auth_hash_hmac_sha1;
		auth_mode = SCMD_AUTH_MODE_SHA1;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		partial_digest_len = SHA1_HASH_LEN;
		break;
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		auth_hash = &auth_hash_hmac_sha2_224;
		auth_mode = SCMD_AUTH_MODE_SHA224;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		partial_digest_len = SHA2_256_HASH_LEN;
		break;
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		auth_hash = &auth_hash_hmac_sha2_256;
		auth_mode = SCMD_AUTH_MODE_SHA256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		partial_digest_len = SHA2_256_HASH_LEN;
		break;
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		auth_hash = &auth_hash_hmac_sha2_384;
		auth_mode = SCMD_AUTH_MODE_SHA512_384;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		partial_digest_len = SHA2_512_HASH_LEN;
		break;
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		auth_hash = &auth_hash_hmac_sha2_512;
		auth_mode = SCMD_AUTH_MODE_SHA512_512;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		partial_digest_len = SHA2_512_HASH_LEN;
		break;
	default:
		auth_hash = NULL;
		auth_mode = SCMD_AUTH_MODE_NOP;
		mk_size = 0;
		partial_digest_len = 0;
		break;
	}

	cipher_mode = ccr_cipher_mode(csp);

#ifdef INVARIANTS
	switch (csp->csp_mode) {
	case CSP_MODE_CIPHER:
		if (cipher_mode == SCMD_CIPH_MODE_NOP ||
		    cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
		    cipher_mode == SCMD_CIPH_MODE_AES_CCM)
			panic("invalid cipher algo");
		break;
	case CSP_MODE_DIGEST:
		if (auth_mode == SCMD_AUTH_MODE_NOP)
			panic("invalid auth algo");
		break;
	case CSP_MODE_AEAD:
		if (cipher_mode != SCMD_CIPH_MODE_AES_GCM &&
		    cipher_mode != SCMD_CIPH_MODE_AES_CCM)
			panic("invalid aead cipher algo");
		if (auth_mode != SCMD_AUTH_MODE_NOP)
			panic("invalid aead auth algo");
		break;
	case CSP_MODE_ETA:
		if (cipher_mode == SCMD_CIPH_MODE_NOP ||
		    cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
		    cipher_mode == SCMD_CIPH_MODE_AES_CCM)
			panic("invalid cipher algo");
		if (auth_mode == SCMD_AUTH_MODE_NOP)
			panic("invalid auth algo");
		break;
	default:
		panic("invalid csp mode");
	}
#endif

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}

	s = crypto_get_driver_session(cses);
	s->port = ccr_choose_port(sc);
	if (s->port == NULL) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}

	switch (csp->csp_mode) {
	case CSP_MODE_AEAD:
		if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
			s->mode = CCM;
		else
			s->mode = GCM;
		break;
	case CSP_MODE_ETA:
		s->mode = ETA;
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_auth_klen != 0)
			s->mode = HMAC;
		else
			s->mode = HASH;
		break;
	case CSP_MODE_CIPHER:
		s->mode = BLKCIPHER;
		break;
	}

	if (s->mode == GCM) {
		if (csp->csp_auth_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = csp->csp_auth_mlen;
		t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen,
		    s->gmac.ghash_h);
	} else if (s->mode == CCM) {
		if (csp->csp_auth_mlen == 0)
			s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
		else
			s->ccm_mac.hash_len = csp->csp_auth_mlen;
	} else if (auth_mode != SCMD_AUTH_MODE_NOP) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.mk_size = mk_size;
		s->hmac.partial_digest_len = partial_digest_len;
		if (csp->csp_auth_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = csp->csp_auth_mlen;
		if (csp->csp_auth_key != NULL)
			t4_init_hmac_digest(auth_hash, partial_digest_len,
			    csp->csp_auth_key, csp->csp_auth_klen,
			    s->hmac.pads);
		else
			ccr_init_hash_digest(s);
	}
	if (cipher_mode != SCMD_CIPH_MODE_NOP) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = csp->csp_ivlen;
		if (csp->csp_cipher_key != NULL)
			ccr_aes_setkey(s, csp->csp_cipher_key,
			    csp->csp_cipher_klen);
	}

	s->port->active_sessions++;
	mtx_unlock(&sc->lock);
	return (0);
}

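/* Sessions should be idle when freed; complain if requests remain. */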
static void
ccr_freesession(device_t dev, crypto_session_t cses)
{
	struct ccr_softc *sc;
	struct ccr_session *s;

	sc = device_get_softc(dev);
	s = crypto_get_driver_session(cses);
	mtx_lock(&sc->lock);
	if (s->pending != 0)
		device_printf(dev,
		    "session %p freed with %d pending requests\n", s,
		    s->pending);
	s->port->active_sessions--;
	mtx_unlock(&sc->lock);
}

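/*
 * cryptodev_process: build and submit one request.  Per-request keys
 * supplied in the cryptop override the session keys, so key material
 * is regenerated here first.  Requests the hardware cannot express
 * (zero-length GCM payloads, or builders returning EMSGSIZE) are
 * completed in software instead.
 */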
static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
	const struct crypto_session_params *csp;
	struct ccr_softc *sc;
	struct ccr_session *s;
	int error;

	csp = crypto_get_params(crp->crp_session);
	s = crypto_get_driver_session(crp->crp_session);
	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	error = ccr_populate_sglist(sc->sg_crp, crp);
	if (error) {
		sc->stats_sglist_error++;
		goto out;
	}

	switch (s->mode) {
	case HASH:
		error = ccr_hash(sc, s, crp);
		if (error == 0)
			sc->stats_hash++;
		break;
	case HMAC:
		if (crp->crp_auth_key != NULL)
			t4_init_hmac_digest(s->hmac.auth_hash,
			    s->hmac.partial_digest_len, crp->crp_auth_key,
			    csp->csp_auth_klen, s->hmac.pads);
		error = ccr_hash(sc, s, crp);
		if (error == 0)
			sc->stats_hmac++;
		break;
	case BLKCIPHER:
		if (crp->crp_cipher_key != NULL)
			ccr_aes_setkey(s, crp->crp_cipher_key,
			    csp->csp_cipher_klen);
		error = ccr_blkcipher(sc, s, crp);
		if (error == 0) {
			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
				sc->stats_blkcipher_encrypt++;
			else
				sc->stats_blkcipher_decrypt++;
		}
		break;
	case ETA:
		if (crp->crp_auth_key != NULL)
			t4_init_hmac_digest(s->hmac.auth_hash,
			    s->hmac.partial_digest_len, crp->crp_auth_key,
			    csp->csp_auth_klen, s->hmac.pads);
		if (crp->crp_cipher_key != NULL)
			ccr_aes_setkey(s, crp->crp_cipher_key,
			    csp->csp_cipher_klen);
		error = ccr_eta(sc, s, crp);
		if (error == 0) {
			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
				sc->stats_eta_encrypt++;
			else
				sc->stats_eta_decrypt++;
		}
		break;
	case GCM:
		if (crp->crp_cipher_key != NULL) {
			t4_init_gmac_hash(crp->crp_cipher_key,
			    csp->csp_cipher_klen, s->gmac.ghash_h);
			ccr_aes_setkey(s, crp->crp_cipher_key,
			    csp->csp_cipher_klen);
		}
		if (crp->crp_payload_length == 0) {
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp);
			return (0);
		}
		error = ccr_gcm(sc, s, crp);
		if (error == EMSGSIZE) {
			sc->stats_sw_fallback++;
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp);
			return (0);
		}
		if (error == 0) {
			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
				sc->stats_gcm_encrypt++;
			else
				sc->stats_gcm_decrypt++;
		}
		break;
	case CCM:
		if (crp->crp_cipher_key != NULL) {
			ccr_aes_setkey(s, crp->crp_cipher_key,
			    csp->csp_cipher_klen);
		}
		error = ccr_ccm(sc, s, crp);
		if (error == EMSGSIZE) {
			sc->stats_sw_fallback++;
			mtx_unlock(&sc->lock);
			ccr_ccm_soft(s, crp);
			return (0);
		}
		if (error == 0) {
			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
				sc->stats_ccm_encrypt++;
			else
				sc->stats_ccm_decrypt++;
		}
		break;
	}

	if (error == 0) {
		s->pending++;
		sc->stats_inflight++;
	} else
		sc->stats_process_error++;

out:
	mtx_unlock(&sc->lock);

	if (error) {
		crp->crp_etype = error;
		crypto_done(crp);
	}

	return (0);
}

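/*
 * Completion handler for CPL_FW6_PLD messages.  data[1] returns the
 * cryptop pointer that was stashed in the work request as a cookie;
 * data[0] carries the status word holding the MAC and pad error bits.
 */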
static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct ccr_softc *sc = iq->adapter->ccr_softc;
	struct ccr_session *s;
	const struct cpl_fw6_pld *cpl;
	struct cryptop *crp;
	uint64_t status;
	int error;

	if (m != NULL)
		cpl = mtod(m, const void *);
	else
		cpl = (const void *)(rss + 1);

	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
	s = crypto_get_driver_session(crp->crp_session);
	status = be64toh(cpl->data[0]);
	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
		error = EBADMSG;
	else
		error = 0;

	mtx_lock(&sc->lock);
	s->pending--;
	sc->stats_inflight--;

	switch (s->mode) {
	case HASH:
	case HMAC:
		error = ccr_hash_done(sc, s, crp, cpl, error);
		break;
	case BLKCIPHER:
		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
		break;
	case ETA:
		error = ccr_eta_done(sc, s, crp, cpl, error);
		break;
	case GCM:
		error = ccr_gcm_done(sc, s, crp, cpl, error);
		break;
	case CCM:
		error = ccr_ccm_done(sc, s, crp, cpl, error);
		break;
	}

	if (error == EBADMSG) {
		if (CHK_MAC_ERR_BIT(status))
			sc->stats_mac_error++;
		if (CHK_PAD_ERR_BIT(status))
			sc->stats_pad_error++;
	}
	mtx_unlock(&sc->lock);
	crp->crp_etype = error;
	crypto_done(crp);
	m_freem(m);
	return (0);
}

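/*
 * The CPL_FW6_PLD handler is shared by all adapters, so it is
 * registered once at module load rather than per device instance.
 */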
static int
ccr_modevent(module_t mod, int cmd, void *arg)
{

	switch (cmd) {
	case MOD_LOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
		return (0);
	case MOD_UNLOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static device_method_t ccr_methods[] = {
	DEVMETHOD(device_identify,	ccr_identify),
	DEVMETHOD(device_probe,		ccr_probe),
	DEVMETHOD(device_attach,	ccr_attach),
	DEVMETHOD(device_detach,	ccr_detach),

	DEVMETHOD(cryptodev_probesession, ccr_probesession),
	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
	DEVMETHOD(cryptodev_freesession, ccr_freesession),
	DEVMETHOD(cryptodev_process,	ccr_process),

	DEVMETHOD_END
};

static driver_t ccr_driver = {
	"ccr",
	ccr_methods,
	sizeof(struct ccr_softc)
};

static devclass_t ccr_devclass;

DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);