2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Netflix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/counter.h>
35 #include <sys/endian.h>
36 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/sysctl.h>
44 #include <opencrypto/cryptodev.h>
/*
 * Fields of the per-session OCF state (struct ocf_session; its opening
 * declaration and remaining fields are not visible in this excerpt).
 * mac_sid is a second OCF session used for the separate HMAC digest in
 * the CBC path (see ktls_ocf_tls_cbc_encrypt / ktls_ocf_try).
 */
48 crypto_session_t mac_sid;
53 /* Only used for TLS 1.0 with the implicit IV. */
58 char iv[AES_BLOCK_LEN];
/*
 * Tracks one in-flight crypto request so ktls_ocf_callback() can find
 * the owning session; allocated on the dispatcher's stack.
 */
61 struct ocf_operation {
62 struct ocf_session *os;
/* Allocator tag for session state and sysctl tree for OCF KTLS stats. */
66 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");
68 SYSCTL_DECL(_kern_ipc_tls);
69 SYSCTL_DECL(_kern_ipc_tls_stats);
71 static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
72 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
73 "Kernel TLS offload via OCF stats");
/*
 * Per-operation statistics counters, exported under
 * kern.ipc.tls.stats.ocf and allocated in ktls_ocf_modevent().
 */
75 static counter_u64_t ocf_tls10_cbc_crypts;
76 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_crypts,
77 CTLFLAG_RD, &ocf_tls10_cbc_crypts,
78 "Total number of OCF TLS 1.0 CBC encryption operations");
80 static counter_u64_t ocf_tls11_cbc_crypts;
81 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_crypts,
82 CTLFLAG_RD, &ocf_tls11_cbc_crypts,
83 "Total number of OCF TLS 1.1/1.2 CBC encryption operations");
85 static counter_u64_t ocf_tls12_gcm_crypts;
86 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_crypts,
87 CTLFLAG_RD, &ocf_tls12_gcm_crypts,
88 "Total number of OCF TLS 1.2 GCM encryption operations");
90 static counter_u64_t ocf_tls13_gcm_crypts;
91 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_crypts,
92 CTLFLAG_RD, &ocf_tls13_gcm_crypts,
93 "Total number of OCF TLS 1.3 GCM encryption operations");
95 static counter_u64_t ocf_inplace;
96 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
97 CTLFLAG_RD, &ocf_inplace,
98 "Total number of OCF in-place operations");
100 static counter_u64_t ocf_separate_output;
101 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
102 CTLFLAG_RD, &ocf_separate_output,
103 "Total number of OCF operations with a separate output buffer");
105 static counter_u64_t ocf_retries;
106 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
108 "Number of OCF encryption operation retries");
/*
 * OCF completion callback.  Takes the owning session's lock around the
 * completion hand-off so it synchronizes with the waiter sleeping in
 * ktls_ocf_dispatch().
 * NOTE(review): this excerpt is missing interior lines (e.g. setting a
 * completion flag and the wakeup of the waiter) — confirm against the
 * full file.
 */
111 ktls_ocf_callback(struct cryptop *crp)
113 struct ocf_operation *oo;
115 oo = crp->crp_opaque;
116 mtx_lock(&oo->os->lock);
118 mtx_unlock(&oo->os->lock);
/*
 * Run one crypto request synchronously: attach a stack-allocated
 * ocf_operation as the request's opaque state, dispatch it to OCF,
 * sleep on os->lock until ktls_ocf_callback() signals completion, and
 * retry the whole request when the driver reports EAGAIN (clearing
 * CRYPTO_F_DONE before re-dispatch and bumping the retry counter).
 */
124 ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
126 struct ocf_operation oo;
132 crp->crp_opaque = &oo;
133 crp->crp_callback = ktls_ocf_callback;
135 error = crypto_dispatch(crp);
/* Sleep until the callback runs; os->lock is the sleep interlock. */
141 mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
142 mtx_unlock(&os->lock);
144 if (crp->crp_etype != EAGAIN) {
145 error = crp->crp_etype;
/* EAGAIN: reset the completed flag and resubmit the same request. */
150 crp->crp_flags &= ~CRYPTO_F_DONE;
152 counter_u64_add(ocf_retries, 1);
/*
 * Encrypt one TLS 1.0-1.2 AES-CBC record in two OCF passes:
 *   1. compute the HMAC over the MAC pseudo-header (tls_mac_data) plus
 *      the payload, writing the digest into the caller's trailer;
 *   2. append CBC padding to the trailer and AES-CBC encrypt
 *      payload + MAC + padding.
 * TLS 1.0 (os->implicit_iv) chains the IV from the previous record in
 * os->iv and therefore requires records to be encrypted in sequence
 * order; TLS 1.1+ reads the explicit IV from just past the record
 * header (hdr + 1).
 * NOTE(review): interior lines are missing from this excerpt; comments
 * describe only what the visible code shows.
 */
158 ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls,
159 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
160 struct iovec *outiov, int iovcnt, uint64_t seqno,
161 uint8_t record_type __unused)
163 struct uio uio, out_uio;
164 struct tls_mac_data ad;
166 struct ocf_session *os;
167 struct iovec iov[iovcnt + 2];
168 struct iovec out_iov[iovcnt + 1];
170 uint16_t tls_comp_len;
/* TLS 1.0: enforce single in-flight record and strict seqno order. */
177 if (os->implicit_iv) {
179 KASSERT(!os->in_progress,
180 ("concurrent implicit IV encryptions"));
181 if (os->next_seqno != seqno) {
182 printf("KTLS CBC: TLS records out of order. "
183 "Expected %ju, got %ju\n",
184 (uintmax_t)os->next_seqno, (uintmax_t)seqno);
185 mtx_unlock(&os->lock);
188 os->in_progress = true;
189 mtx_unlock(&os->lock);
194 * Compute the payload length.
196 * XXX: This could be easily computed O(1) from the mbuf
197 * fields, but we don't have those accessible here. Can
198 * at least compute inplace as well while we are here.
202 for (i = 0; i < iovcnt; i++) {
203 tls_comp_len += iniov[i].iov_len;
204 if (iniov[i].iov_base != outiov[i].iov_base)
208 /* Initialize the AAD. */
209 ad.seq = htobe64(seqno);
210 ad.type = hdr->tls_type;
211 ad.tls_vmajor = hdr->tls_vmajor;
212 ad.tls_vminor = hdr->tls_vminor;
213 ad.tls_length = htons(tls_comp_len);
215 /* First, compute the MAC. */
/* iov[0] = pseudo-header, iov[1..iovcnt] = payload, last = digest out. */
216 iov[0].iov_base = &ad;
217 iov[0].iov_len = sizeof(ad);
218 memcpy(&iov[1], iniov, sizeof(*iniov) * iovcnt);
219 iov[iovcnt + 1].iov_base = trailer;
220 iov[iovcnt + 1].iov_len = os->mac_len;
222 uio.uio_iovcnt = iovcnt + 2;
224 uio.uio_segflg = UIO_SYSSPACE;
225 uio.uio_td = curthread;
226 uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len;
/* Digest request runs on the separate MAC session. */
228 crypto_initreq(&crp, os->mac_sid);
229 crp.crp_payload_start = 0;
230 crp.crp_payload_length = sizeof(ad) + tls_comp_len;
231 crp.crp_digest_start = crp.crp_payload_length;
232 crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST;
233 crp.crp_flags = CRYPTO_F_CBIMM;
234 crypto_use_uio(&crp, &uio);
235 error = ktls_ocf_dispatch(os, &crp);
237 crypto_destroyreq(&crp);
/* On MAC failure with implicit IV, drop the in-progress claim. */
240 if (os->implicit_iv) {
242 os->in_progress = false;
243 mtx_unlock(&os->lock);
249 /* Second, add the padding. */
250 pad = (unsigned)(AES_BLOCK_LEN - (tls_comp_len + os->mac_len + 1)) %
/* Each of the pad+1 trailing bytes holds the pad length (TLS CBC). */
252 for (i = 0; i < pad + 1; i++)
253 trailer[os->mac_len + i] = pad;
255 /* Finally, encrypt the record. */
258 * Don't recopy the input iovec, instead just adjust the
259 * trailer length and skip over the AAD vector in the uio.
261 iov[iovcnt + 1].iov_len += pad + 1;
262 uio.uio_iov = iov + 1;
263 uio.uio_iovcnt = iovcnt + 1;
264 uio.uio_resid = tls_comp_len + iov[iovcnt + 1].iov_len;
265 KASSERT(uio.uio_resid % AES_BLOCK_LEN == 0,
266 ("invalid encryption size"));
268 crypto_initreq(&crp, os->sid);
269 crp.crp_payload_start = 0;
270 crp.crp_payload_length = uio.uio_resid;
271 crp.crp_op = CRYPTO_OP_ENCRYPT;
272 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
/* TLS 1.0: chained IV from session; TLS 1.1+: explicit IV after hdr. */
274 memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
276 memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
277 crypto_use_uio(&crp, &uio);
/* Separate output path: mirror the input iovecs plus the trailer. */
279 memcpy(out_iov, outiov, sizeof(*iniov) * iovcnt);
280 out_iov[iovcnt] = iov[iovcnt + 1];
281 out_uio.uio_iov = out_iov;
282 out_uio.uio_iovcnt = iovcnt + 1;
283 out_uio.uio_offset = 0;
284 out_uio.uio_segflg = UIO_SYSSPACE;
285 out_uio.uio_td = curthread;
286 out_uio.uio_resid = uio.uio_resid;
287 crypto_use_output_uio(&crp, &out_uio);
291 counter_u64_add(ocf_tls10_cbc_crypts, 1);
293 counter_u64_add(ocf_tls11_cbc_crypts, 1);
295 counter_u64_add(ocf_inplace, 1);
297 counter_u64_add(ocf_separate_output, 1);
298 error = ktls_ocf_dispatch(os, &crp);
300 crypto_destroyreq(&crp);
/* TLS 1.0: save last ciphertext block as next record's IV. */
302 if (os->implicit_iv) {
303 KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
304 ("trailer too short to read IV"));
305 memcpy(os->iv, trailer + os->mac_len + pad + 1 - AES_BLOCK_LEN,
309 os->next_seqno = seqno + 1;
310 os->in_progress = false;
311 mtx_unlock(&os->lock);
/*
 * Encrypt one TLS 1.2 AES-GCM record in a single AEAD request.  The
 * 12-byte nonce is the 4-byte implicit salt from the session parameters
 * followed by the 8-byte explicit nonce read from just past the record
 * header; the AAD is the TLS pseudo-header (tls_aead_data).  The GMAC
 * tag is appended via an extra iovec pointing at the caller's trailer.
 * NOTE(review): interior lines (e.g. the tag_uio selection between the
 * in-place and separate-output cases) are missing from this excerpt.
 */
318 ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
319 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
320 struct iovec *outiov, int iovcnt, uint64_t seqno,
321 uint8_t record_type __unused)
323 struct uio uio, out_uio, *tag_uio;
324 struct tls_aead_data ad;
326 struct ocf_session *os;
327 struct iovec iov[iovcnt + 1];
329 uint16_t tls_comp_len;
335 uio.uio_iovcnt = iovcnt;
337 uio.uio_segflg = UIO_SYSSPACE;
338 uio.uio_td = curthread;
340 out_uio.uio_iov = outiov;
341 out_uio.uio_iovcnt = iovcnt;
342 out_uio.uio_offset = 0;
343 out_uio.uio_segflg = UIO_SYSSPACE;
344 out_uio.uio_td = curthread;
346 crypto_initreq(&crp, os->sid);
/* Nonce = implicit salt || explicit per-record nonce (after hdr). */
349 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
350 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
/* Plaintext length = wire length minus explicit nonce and tag. */
353 tls_comp_len = ntohs(hdr->tls_length) -
354 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
355 ad.seq = htobe64(seqno);
356 ad.type = hdr->tls_type;
357 ad.tls_vmajor = hdr->tls_vmajor;
358 ad.tls_vminor = hdr->tls_vminor;
359 ad.tls_length = htons(tls_comp_len);
361 crp.crp_aad_length = sizeof(ad);
363 /* Compute payload length and determine if encryption is in place. */
365 crp.crp_payload_start = 0;
366 for (i = 0; i < iovcnt; i++) {
367 if (iniov[i].iov_base != outiov[i].iov_base)
369 crp.crp_payload_length += iniov[i].iov_len;
371 uio.uio_resid = crp.crp_payload_length;
372 out_uio.uio_resid = crp.crp_payload_length;
379 /* Duplicate iovec and append vector for tag. */
380 memcpy(iov, tag_uio->uio_iov, iovcnt * sizeof(struct iovec));
381 iov[iovcnt].iov_base = trailer;
382 iov[iovcnt].iov_len = AES_GMAC_HASH_LEN;
383 tag_uio->uio_iov = iov;
384 tag_uio->uio_iovcnt++;
385 crp.crp_digest_start = tag_uio->uio_resid;
386 tag_uio->uio_resid += AES_GMAC_HASH_LEN;
388 crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
389 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
390 crypto_use_uio(&crp, &uio);
392 crypto_use_output_uio(&crp, &out_uio);
394 counter_u64_add(ocf_tls12_gcm_crypts, 1);
396 counter_u64_add(ocf_inplace, 1);
398 counter_u64_add(ocf_separate_output, 1);
399 error = ktls_ocf_dispatch(os, &crp);
401 crypto_destroyreq(&crp);
/*
 * Decrypt one received TLS 1.2 AES-GCM record in place in the mbuf
 * chain, verifying the GMAC tag that follows the payload.  Nonce and
 * AAD construction mirror ktls_ocf_tls12_gcm_encrypt().  On return,
 * *trailer_len reports the tag size so the caller can trim it.
 */
406 ktls_ocf_tls12_gcm_decrypt(struct ktls_session *tls,
407 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
410 struct tls_aead_data ad;
412 struct ocf_session *os;
413 struct ocf_operation oo;
415 uint16_t tls_comp_len;
422 crypto_initreq(&crp, os->sid);
/* Nonce = implicit salt || explicit per-record nonce (after hdr). */
425 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
426 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
429 tls_comp_len = ntohs(hdr->tls_length) -
430 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
431 ad.seq = htobe64(seqno);
432 ad.type = hdr->tls_type;
433 ad.tls_vmajor = hdr->tls_vmajor;
434 ad.tls_vminor = hdr->tls_vminor;
435 ad.tls_length = htons(tls_comp_len);
437 crp.crp_aad_length = sizeof(ad);
/* Ciphertext starts after the TLS header within the mbuf chain. */
439 crp.crp_payload_start = tls->params.tls_hlen;
440 crp.crp_payload_length = tls_comp_len;
441 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
443 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
444 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
445 crypto_use_mbuf(&crp, m);
447 counter_u64_add(ocf_tls12_gcm_crypts, 1);
448 error = ktls_ocf_dispatch(os, &crp);
450 crypto_destroyreq(&crp);
451 *trailer_len = AES_GMAC_HASH_LEN;
/*
 * Encrypt one TLS 1.3 AES-GCM record.  The per-record nonce is the
 * session IV with the big-endian sequence number XORed into its tail
 * (TLS 1.3 style).  The real record type is carried as the first byte
 * of the trailer (the inner plaintext's content-type byte) and is
 * encrypted along with the payload; the GMAC tag follows it.
 * NOTE(review): interior lines are missing from this excerpt; comments
 * describe only what the visible code shows.
 */
456 ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
457 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
458 struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t record_type)
460 struct uio uio, out_uio;
461 struct tls_aead_data_13 ad;
464 struct ocf_session *os;
465 struct iovec iov[iovcnt + 1], out_iov[iovcnt + 1];
471 crypto_initreq(&crp, os->sid);
473 /* Setup the nonce. */
474 memcpy(nonce, tls->params.iv, tls->params.iv_len);
475 *(uint64_t *)(nonce + 4) ^= htobe64(seqno);
/* AAD is the outer record header verbatim (tls_aead_data_13). */
478 ad.type = hdr->tls_type;
479 ad.tls_vmajor = hdr->tls_vmajor;
480 ad.tls_vminor = hdr->tls_vminor;
481 ad.tls_length = hdr->tls_length;
483 crp.crp_aad_length = sizeof(ad);
485 /* Compute payload length and determine if encryption is in place. */
487 crp.crp_payload_start = 0;
488 for (i = 0; i < iovcnt; i++) {
489 if (iniov[i].iov_base != outiov[i].iov_base)
491 crp.crp_payload_length += iniov[i].iov_len;
494 /* Store the record type as the first byte of the trailer. */
495 trailer[0] = record_type;
496 crp.crp_payload_length++;
497 crp.crp_digest_start = crp.crp_payload_length;
500 * Duplicate the input iov to append the trailer. Always
501 * include the full trailer as input to get the record_type
502 * even if only the first byte is used.
504 memcpy(iov, iniov, iovcnt * sizeof(*iov));
505 iov[iovcnt].iov_base = trailer;
506 iov[iovcnt].iov_len = AES_GMAC_HASH_LEN + 1;
508 uio.uio_iovcnt = iovcnt + 1;
510 uio.uio_resid = crp.crp_payload_length + AES_GMAC_HASH_LEN;
511 uio.uio_segflg = UIO_SYSSPACE;
512 uio.uio_td = curthread;
513 crypto_use_uio(&crp, &uio);
516 /* Duplicate the output iov to append the trailer. */
517 memcpy(out_iov, outiov, iovcnt * sizeof(*out_iov));
518 out_iov[iovcnt] = iov[iovcnt];
520 out_uio.uio_iov = out_iov;
521 out_uio.uio_iovcnt = iovcnt + 1;
522 out_uio.uio_offset = 0;
523 out_uio.uio_resid = crp.crp_payload_length +
525 out_uio.uio_segflg = UIO_SYSSPACE;
526 out_uio.uio_td = curthread;
527 crypto_use_output_uio(&crp, &out_uio);
530 crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
531 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
533 memcpy(crp.crp_iv, nonce, sizeof(nonce));
535 counter_u64_add(ocf_tls13_gcm_crypts, 1);
537 counter_u64_add(ocf_inplace, 1);
539 counter_u64_add(ocf_separate_output, 1);
540 error = ktls_ocf_dispatch(os, &crp);
542 crypto_destroyreq(&crp);
/*
 * Session destructor installed as tls->free in ktls_ocf_try(): release
 * the OCF cipher session, destroy the lock, and zero-and-free the
 * state (zfree scrubs key material before returning it to the heap).
 * NOTE(review): the line fetching os from tls and any mac_sid teardown
 * are missing from this excerpt.
 */
547 ktls_ocf_free(struct ktls_session *tls)
549 struct ocf_session *os;
552 crypto_freesession(os->sid);
553 mtx_destroy(&os->lock);
554 zfree(os, M_KTLS_OCF);
/*
 * Probe/attach entry point: validate the requested cipher suite and
 * TLS version for the given direction, create the OCF session(s)
 * (an AEAD session for GCM; a cipher session plus a separate digest
 * session for CBC+HMAC), allocate per-session state, and install the
 * appropriate encrypt/decrypt hooks and destructor on the ktls
 * session.  Returns EPROTONOSUPPORT for unsupported combinations.
 * NOTE(review): interior lines (case labels, break statements, error
 * checks) are missing from this excerpt.
 */
558 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
560 struct crypto_session_params csp, mac_csp;
561 struct ocf_session *os;
564 memset(&csp, 0, sizeof(csp));
565 memset(&mac_csp, 0, sizeof(mac_csp));
566 mac_csp.csp_mode = CSP_MODE_NONE;
569 switch (tls->params.cipher_algorithm) {
570 case CRYPTO_AES_NIST_GCM_16:
571 switch (tls->params.cipher_key_len) {
579 /* Only TLS 1.2 and 1.3 are supported. */
580 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
581 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
582 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
583 return (EPROTONOSUPPORT);
585 /* TLS 1.3 is not yet supported for receive. */
586 if (direction == KTLS_RX &&
587 tls->params.tls_vminor == TLS_MINOR_VER_THREE)
588 return (EPROTONOSUPPORT);
590 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
591 csp.csp_mode = CSP_MODE_AEAD;
592 csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
593 csp.csp_cipher_key = tls->params.cipher_key;
594 csp.csp_cipher_klen = tls->params.cipher_key_len;
595 csp.csp_ivlen = AES_GCM_IV_LEN;
598 switch (tls->params.cipher_key_len) {
/* CBC path: map the HMAC algorithm to its digest length. */
606 switch (tls->params.auth_algorithm) {
607 case CRYPTO_SHA1_HMAC:
608 mac_len = SHA1_HASH_LEN;
610 case CRYPTO_SHA2_256_HMAC:
611 mac_len = SHA2_256_HASH_LEN;
613 case CRYPTO_SHA2_384_HMAC:
614 mac_len = SHA2_384_HASH_LEN;
620 /* Only TLS 1.0-1.2 are supported. */
621 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
622 tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
623 tls->params.tls_vminor > TLS_MINOR_VER_TWO)
624 return (EPROTONOSUPPORT);
626 /* AES-CBC is not supported for receive. */
627 if (direction == KTLS_RX)
628 return (EPROTONOSUPPORT);
630 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
631 csp.csp_mode = CSP_MODE_CIPHER;
632 csp.csp_cipher_alg = CRYPTO_AES_CBC;
633 csp.csp_cipher_key = tls->params.cipher_key;
634 csp.csp_cipher_klen = tls->params.cipher_key_len;
635 csp.csp_ivlen = AES_BLOCK_LEN;
/* Second session parameters for the HMAC digest. */
637 mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
638 mac_csp.csp_mode = CSP_MODE_DIGEST;
639 mac_csp.csp_auth_alg = tls->params.auth_algorithm;
640 mac_csp.csp_auth_key = tls->params.auth_key;
641 mac_csp.csp_auth_klen = tls->params.auth_key_len;
644 return (EPROTONOSUPPORT);
647 os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
651 error = crypto_newsession(&os->sid, &csp,
652 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
654 free(os, M_KTLS_OCF);
658 if (mac_csp.csp_mode != CSP_MODE_NONE) {
659 error = crypto_newsession(&os->mac_sid, &mac_csp,
660 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
/* Unwind the cipher session if the MAC session fails. */
662 crypto_freesession(os->sid);
663 free(os, M_KTLS_OCF);
666 os->mac_len = mac_len;
669 mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
/* Install per-version hooks; TLS 1.3 uses its own GCM encrypt path. */
671 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
672 if (direction == KTLS_TX) {
673 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
674 tls->sw_encrypt = ktls_ocf_tls13_gcm_encrypt;
676 tls->sw_encrypt = ktls_ocf_tls12_gcm_encrypt;
678 tls->sw_decrypt = ktls_ocf_tls12_gcm_decrypt;
681 tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt;
682 if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
683 os->implicit_iv = true;
684 memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
687 tls->free = ktls_ocf_free;
/*
 * Backend descriptor registered with the KTLS framework in
 * ktls_ocf_modevent() (remaining initializers not visible here).
 */
691 struct ktls_crypto_backend ocf_backend = {
694 .api_version = KTLS_API_VERSION,
/*
 * Module event handler: on load, allocate the statistics counters and
 * register the OCF backend with KTLS; on unload, deregister the
 * backend and, if that succeeded, free the counters.
 */
699 ktls_ocf_modevent(module_t mod, int what, void *arg)
705 ocf_tls10_cbc_crypts = counter_u64_alloc(M_WAITOK);
706 ocf_tls11_cbc_crypts = counter_u64_alloc(M_WAITOK);
707 ocf_tls12_gcm_crypts = counter_u64_alloc(M_WAITOK);
708 ocf_tls13_gcm_crypts = counter_u64_alloc(M_WAITOK);
709 ocf_inplace = counter_u64_alloc(M_WAITOK);
710 ocf_separate_output = counter_u64_alloc(M_WAITOK);
711 ocf_retries = counter_u64_alloc(M_WAITOK);
712 return (ktls_crypto_backend_register(&ocf_backend));
714 error = ktls_crypto_backend_deregister(&ocf_backend);
717 counter_u64_free(ocf_tls10_cbc_crypts);
718 counter_u64_free(ocf_tls11_cbc_crypts);
719 counter_u64_free(ocf_tls12_gcm_crypts);
720 counter_u64_free(ocf_tls13_gcm_crypts);
721 counter_u64_free(ocf_inplace);
722 counter_u64_free(ocf_separate_output);
723 counter_u64_free(ocf_retries);
/* Module glue: register at SI_SUB_PROTO_END so KTLS is initialized first. */
730 static moduledata_t ktls_ocf_moduledata = {
736 DECLARE_MODULE(ktls_ocf, ktls_ocf_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);